Dataset columns (type and value-length range):
hip_filename     string   5 to 84
hip_content      string   79 to 9.69M
cuda_filename    string   4 to 83
cuda_content     string   19 to 9.69M
4daa6f211fa6e1441cf990e8f2c5495afc106fba.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //========================================================================================================================================================================================================200 // DEFINE/INCLUDE //========================================================================================================================================================================================================200 //======================================================================================================================================================150 // MAIN FUNCTION HEADER //======================================================================================================================================================150 #include "./../lavaMD.h" // (in the main program folder) needed to recognized input parameters //======================================================================================================================================================150 // UTILITIES //======================================================================================================================================================150 #include "./../util/timer/timer.h" // (in library path specified to compiler) needed by timer #include "cudacommon.h" //======================================================================================================================================================150 // KERNEL_GPU_CUDA_WRAPPER FUNCTION HEADER //======================================================================================================================================================150 #include "./kernel_gpu_cuda_wrapper.h" // (in the current directory) //======================================================================================================================================================150 // KERNEL //======================================================================================================================================================150 #include "./kernel_gpu_cuda.cu" // (in the current directory) GPU kernel, cannot include with header file because of complications with passing of constant memory variables //========================================================================================================================================================================================================200 // KERNEL_GPU_CUDA_WRAPPER FUNCTION //========================================================================================================================================================================================================200 void kernel_gpu_cuda_wrapper(par_str par_cpu, dim_str dim_cpu, box_str* box_cpu, FOUR_VECTOR* rv_cpu, fp* qv_cpu, FOUR_VECTOR* fv_cpu, ResultDatabase &resultDB) { float kernelTime = 0.0f; float transferTime = 0.0f; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float elapsedTime; //======================================================================================================================================================150 // CPU VARIABLES //======================================================================================================================================================150 //======================================================================================================================================================150 // 
GPU SETUP //======================================================================================================================================================150 //====================================================================================================100 // INITIAL DRIVER OVERHEAD //====================================================================================================100 hipDeviceSynchronize(); //====================================================================================================100 // VARIABLES //====================================================================================================100 box_str* d_box_gpu; FOUR_VECTOR* d_rv_gpu; fp* d_qv_gpu; FOUR_VECTOR* d_fv_gpu; dim3 threads; dim3 blocks; //====================================================================================================100 // EXECUTION PARAMETERS //====================================================================================================100 blocks.x = dim_cpu.number_boxes; blocks.y = 1; threads.x = NUMBER_THREADS; // define the number of threads in the block threads.y = 1; //======================================================================================================================================================150 // GPU MEMORY (MALLOC) //======================================================================================================================================================150 //====================================================================================================100 // GPU MEMORY (MALLOC) COPY IN //====================================================================================================100 //==================================================50 // boxes //==================================================50 CUDA_SAFE_CALL(hipMalloc( (void **)&d_box_gpu, dim_cpu.box_mem)); //==================================================50 // rv //==================================================50 CUDA_SAFE_CALL(hipMalloc( (void **)&d_rv_gpu, dim_cpu.space_mem)); //==================================================50 // qv //==================================================50 CUDA_SAFE_CALL(hipMalloc( (void **)&d_qv_gpu, dim_cpu.space_mem2)); //====================================================================================================100 // GPU MEMORY (MALLOC) COPY //====================================================================================================100 //==================================================50 // fv //==================================================50 CUDA_SAFE_CALL(hipMalloc( (void **)&d_fv_gpu, dim_cpu.space_mem)); //======================================================================================================================================================150 // GPU MEMORY COPY //======================================================================================================================================================150 //====================================================================================================100 // GPU MEMORY (MALLOC) COPY IN //====================================================================================================100 //==================================================50 // boxes //==================================================50 hipEventRecord(start, 0); hipMemcpy( d_box_gpu, box_cpu, dim_cpu.box_mem, hipMemcpyHostToDevice); //==================================================50 // rv 
//==================================================50 hipMemcpy( d_rv_gpu, rv_cpu, dim_cpu.space_mem, hipMemcpyHostToDevice); //==================================================50 // qv //==================================================50 hipMemcpy( d_qv_gpu, qv_cpu, dim_cpu.space_mem2, hipMemcpyHostToDevice); //====================================================================================================100 // GPU MEMORY (MALLOC) COPY //====================================================================================================100 //==================================================50 // fv //==================================================50 hipMemcpy( d_fv_gpu, fv_cpu, dim_cpu.space_mem, hipMemcpyHostToDevice); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); transferTime += elapsedTime * 1.e-3; //======================================================================================================================================================150 // KERNEL //======================================================================================================================================================150 // launch kernel - all boxes hipEventRecord(start, 0); hipLaunchKernelGGL(( kernel_gpu_cuda), dim3(blocks), dim3(threads), 0, 0, par_cpu, dim_cpu, d_box_gpu, d_rv_gpu, d_qv_gpu, d_fv_gpu); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); kernelTime += elapsedTime * 1.e-3; CHECK_CUDA_ERROR(); hipDeviceSynchronize(); //======================================================================================================================================================150 // GPU MEMORY COPY (CONTD.)kernel //======================================================================================================================================================150 hipEventRecord(start, 0); hipMemcpy( fv_cpu, d_fv_gpu, dim_cpu.space_mem, hipMemcpyDeviceToHost); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); transferTime += elapsedTime * 1.e-3; char atts[1024]; sprintf(atts, "boxes1d:%d", dim_cpu.boxes1d_arg); resultDB.AddResult("lavamd_kernel_time", atts, "sec", kernelTime); resultDB.AddResult("lavamd_transfer_time", atts, "sec", transferTime); resultDB.AddResult("lavamd_parity", atts, "N", transferTime / kernelTime); //======================================================================================================================================================150 // GPU MEMORY DEALLOCATION //======================================================================================================================================================150 hipFree(d_rv_gpu); hipFree(d_qv_gpu); hipFree(d_fv_gpu); hipFree(d_box_gpu); }
4daa6f211fa6e1441cf990e8f2c5495afc106fba.cu
//========================================================================================================================================================================================================200 // DEFINE/INCLUDE //========================================================================================================================================================================================================200 //======================================================================================================================================================150 // MAIN FUNCTION HEADER //======================================================================================================================================================150 #include "./../lavaMD.h" // (in the main program folder) needed to recognized input parameters //======================================================================================================================================================150 // UTILITIES //======================================================================================================================================================150 #include "./../util/timer/timer.h" // (in library path specified to compiler) needed by timer #include "cudacommon.h" //======================================================================================================================================================150 // KERNEL_GPU_CUDA_WRAPPER FUNCTION HEADER //======================================================================================================================================================150 #include "./kernel_gpu_cuda_wrapper.h" // (in the current directory) //======================================================================================================================================================150 // KERNEL //======================================================================================================================================================150 #include "./kernel_gpu_cuda.cu" // (in the current directory) GPU kernel, cannot include with header file because of complications with passing of constant memory variables //========================================================================================================================================================================================================200 // KERNEL_GPU_CUDA_WRAPPER FUNCTION //========================================================================================================================================================================================================200 void kernel_gpu_cuda_wrapper(par_str par_cpu, dim_str dim_cpu, box_str* box_cpu, FOUR_VECTOR* rv_cpu, fp* qv_cpu, FOUR_VECTOR* fv_cpu, ResultDatabase &resultDB) { float kernelTime = 0.0f; float transferTime = 0.0f; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float elapsedTime; //======================================================================================================================================================150 // CPU VARIABLES //======================================================================================================================================================150 //======================================================================================================================================================150 // GPU SETUP 
//======================================================================================================================================================150 //====================================================================================================100 // INITIAL DRIVER OVERHEAD //====================================================================================================100 cudaThreadSynchronize(); //====================================================================================================100 // VARIABLES //====================================================================================================100 box_str* d_box_gpu; FOUR_VECTOR* d_rv_gpu; fp* d_qv_gpu; FOUR_VECTOR* d_fv_gpu; dim3 threads; dim3 blocks; //====================================================================================================100 // EXECUTION PARAMETERS //====================================================================================================100 blocks.x = dim_cpu.number_boxes; blocks.y = 1; threads.x = NUMBER_THREADS; // define the number of threads in the block threads.y = 1; //======================================================================================================================================================150 // GPU MEMORY (MALLOC) //======================================================================================================================================================150 //====================================================================================================100 // GPU MEMORY (MALLOC) COPY IN //====================================================================================================100 //==================================================50 // boxes //==================================================50 CUDA_SAFE_CALL(cudaMalloc( (void **)&d_box_gpu, dim_cpu.box_mem)); //==================================================50 // rv //==================================================50 CUDA_SAFE_CALL(cudaMalloc( (void **)&d_rv_gpu, dim_cpu.space_mem)); //==================================================50 // qv //==================================================50 CUDA_SAFE_CALL(cudaMalloc( (void **)&d_qv_gpu, dim_cpu.space_mem2)); //====================================================================================================100 // GPU MEMORY (MALLOC) COPY //====================================================================================================100 //==================================================50 // fv //==================================================50 CUDA_SAFE_CALL(cudaMalloc( (void **)&d_fv_gpu, dim_cpu.space_mem)); //======================================================================================================================================================150 // GPU MEMORY COPY //======================================================================================================================================================150 //====================================================================================================100 // GPU MEMORY (MALLOC) COPY IN //====================================================================================================100 //==================================================50 // boxes //==================================================50 cudaEventRecord(start, 0); cudaMemcpy( d_box_gpu, box_cpu, dim_cpu.box_mem, cudaMemcpyHostToDevice); //==================================================50 // rv 
//==================================================50 cudaMemcpy( d_rv_gpu, rv_cpu, dim_cpu.space_mem, cudaMemcpyHostToDevice); //==================================================50 // qv //==================================================50 cudaMemcpy( d_qv_gpu, qv_cpu, dim_cpu.space_mem2, cudaMemcpyHostToDevice); //====================================================================================================100 // GPU MEMORY (MALLOC) COPY //====================================================================================================100 //==================================================50 // fv //==================================================50 cudaMemcpy( d_fv_gpu, fv_cpu, dim_cpu.space_mem, cudaMemcpyHostToDevice); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); transferTime += elapsedTime * 1.e-3; //======================================================================================================================================================150 // KERNEL //======================================================================================================================================================150 // launch kernel - all boxes cudaEventRecord(start, 0); kernel_gpu_cuda<<<blocks, threads>>>( par_cpu, dim_cpu, d_box_gpu, d_rv_gpu, d_qv_gpu, d_fv_gpu); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); kernelTime += elapsedTime * 1.e-3; CHECK_CUDA_ERROR(); cudaThreadSynchronize(); //======================================================================================================================================================150 // GPU MEMORY COPY (CONTD.)kernel //======================================================================================================================================================150 cudaEventRecord(start, 0); cudaMemcpy( fv_cpu, d_fv_gpu, dim_cpu.space_mem, cudaMemcpyDeviceToHost); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); transferTime += elapsedTime * 1.e-3; char atts[1024]; sprintf(atts, "boxes1d:%d", dim_cpu.boxes1d_arg); resultDB.AddResult("lavamd_kernel_time", atts, "sec", kernelTime); resultDB.AddResult("lavamd_transfer_time", atts, "sec", transferTime); resultDB.AddResult("lavamd_parity", atts, "N", transferTime / kernelTime); //======================================================================================================================================================150 // GPU MEMORY DEALLOCATION //======================================================================================================================================================150 cudaFree(d_rv_gpu); cudaFree(d_qv_gpu); cudaFree(d_fv_gpu); cudaFree(d_box_gpu); }
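Aside from the one-for-one cuda*/hip* API renames (including hipDeviceSynchronize replacing the deprecated cudaThreadSynchronize), the main structural difference between the two files in this pair is the kernel-launch syntax. A minimal, self-contained sketch of that translation follows; the scale kernel and the 256-thread block size are hypothetical stand-ins, not taken from the dataset.

#include <hip/hip_runtime.h>

__global__ void scale(float* x, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;   // one element per thread
    if (i < n) x[i] *= a;
}

void launch_scale(float* d_x, float a, int n) {
    dim3 threads(256);
    dim3 blocks((n + threads.x - 1) / threads.x);    // ceiling division
    // CUDA form (as in the .cu file):  scale<<<blocks, threads>>>(d_x, a, n);
    // HIP form emitted by hipify (as in the .hip file); the extra 0, 0 arguments
    // are the dynamic shared-memory size and the stream:
    hipLaunchKernelGGL(scale, blocks, threads, 0, 0, d_x, a, n);
}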
5491f6beabf6ebc8f11fe894996d2ddf57e50878.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include "rocblas.h" #include "common.h" void PrintMatrix(char name[], int rows, int cols, const float* m){ printf("%s\n", name); for(int row = 0; row < rows; ++row){ for(int col = 0; col < cols; ++col){ printf("%f ", m[row * cols + col]); } printf("\n"); } } void copyElements(float* out, float* entry, unsigned long long eRows, unsigned long long eCols, unsigned long long oRows, unsigned long long oCols, unsigned long long x, unsigned long long y, unsigned long long ofA, unsigned long long ofB, float beta){ unsigned long long counterRows = eRows; unsigned long long counterCols = eCols; if(ofA){ counterRows = ofA; } if(ofB){ counterCols = ofB; } for(unsigned long long i = 0; i < counterRows; ++i){ for(unsigned long long j = 0; j < counterCols; ++j){ unsigned long long index = x*eRows*oCols + (i*oCols) + (y*eCols + j); out[index] = entry[i*eCols + j] + beta * out[index]; } } } float * doMultiply2Matrices( int a1Rows, int a1Cols, float * A1, int a2Rows, int a2Cols, float * A2, float* C, float alpha) { float beta = 0; hipblasHandle_t handle; hipblasCreate (&handle) ; hipblasSgemm(handle,HIPBLAS_OP_N, HIPBLAS_OP_N, a2Cols, a1Rows, a1Cols, &alpha, A2, a2Cols, A1, a1Cols, &beta, C, a2Cols ); hipblasDestroy ( handle ) ; return C ; } float * doMultiply2MatricesStreaming( int a1Rows, int a1Cols, float * A1, int a2Rows, int a2Cols, float * A2, float* C, hipStream_t cudaStream, hipblasHandle_t handle, float alpha) { //float alpha = 1.0; float beta = 0.0; hipblasSetStream(handle, cudaStream) ; hipblasStatus_t stat = hipblasSgemm(handle,HIPBLAS_OP_N, HIPBLAS_OP_N, a2Cols, a1Rows, a1Cols, &alpha, A2, a2Cols, A1, a1Cols, &beta, C, a2Cols ); printf("cublas status = %d\n", stat); return C ; }
5491f6beabf6ebc8f11fe894996d2ddf57e50878.cu
#include <stdio.h> #include <cuda_runtime.h> #include "cublas_v2.h" #include "common.h" void PrintMatrix(char name[], int rows, int cols, const float* m){ printf("%s\n", name); for(int row = 0; row < rows; ++row){ for(int col = 0; col < cols; ++col){ printf("%f ", m[row * cols + col]); } printf("\n"); } } void copyElements(float* out, float* entry, unsigned long long eRows, unsigned long long eCols, unsigned long long oRows, unsigned long long oCols, unsigned long long x, unsigned long long y, unsigned long long ofA, unsigned long long ofB, float beta){ unsigned long long counterRows = eRows; unsigned long long counterCols = eCols; if(ofA){ counterRows = ofA; } if(ofB){ counterCols = ofB; } for(unsigned long long i = 0; i < counterRows; ++i){ for(unsigned long long j = 0; j < counterCols; ++j){ unsigned long long index = x*eRows*oCols + (i*oCols) + (y*eCols + j); out[index] = entry[i*eCols + j] + beta * out[index]; } } } float * doMultiply2Matrices( int a1Rows, int a1Cols, float * A1, int a2Rows, int a2Cols, float * A2, float* C, float alpha) { float beta = 0; cublasHandle_t handle; cublasCreate (&handle) ; cublasSgemm(handle,CUBLAS_OP_N, CUBLAS_OP_N, a2Cols, a1Rows, a1Cols, &alpha, A2, a2Cols, A1, a1Cols, &beta, C, a2Cols ); cublasDestroy ( handle ) ; return C ; } float * doMultiply2MatricesStreaming( int a1Rows, int a1Cols, float * A1, int a2Rows, int a2Cols, float * A2, float* C, cudaStream_t cudaStream, cublasHandle_t handle, float alpha) { //float alpha = 1.0; float beta = 0.0; cublasSetStream(handle, cudaStream) ; cublasStatus_t stat = cublasSgemm(handle,CUBLAS_OP_N, CUBLAS_OP_N, a2Cols, a1Rows, a1Cols, &alpha, A2, a2Cols, A1, a1Cols, &beta, C, a2Cols ); printf("cublas status = %d\n", stat); return C ; }
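In this pair hipify maps the cublas_v2.h calls to their hipBLAS equivalents one for one (cublasCreate to hipblasCreate, CUBLAS_OP_N to HIPBLAS_OP_N, and so on). Below is a minimal sketch of the same Sgemm call pattern with the status returned to the caller instead of only printed; the hipblas.h include, the sgemm_rowmajor name, and the return-status design are illustrative additions, not part of the dataset.

#include <hipblas.h>

// Computes row-major C = A * B (rowsA x colsA times colsA x colsB) by handing the
// operands to column-major Sgemm in swapped order, the same trick used by
// doMultiply2Matrices above.
hipblasStatus_t sgemm_rowmajor(int rowsA, int colsA, int colsB,
                               const float* d_A, const float* d_B, float* d_C) {
    float alpha = 1.0f, beta = 0.0f;
    hipblasHandle_t handle;
    hipblasCreate(&handle);
    hipblasStatus_t stat = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
                                        colsB, rowsA, colsA,
                                        &alpha, d_B, colsB,
                                        d_A, colsA,
                                        &beta, d_C, colsB);
    hipblasDestroy(handle);
    return stat;   // HIPBLAS_STATUS_SUCCESS on success
}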
25ace489bbaf0d5dc3df704d94569d691eeddb29.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 2 * of the programming guide with some additions like error checking. */ #include <stdio.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <hip/hip_runtime.h> /** * CUDA Kernel Device code * * Computes the vector addition of A and B into C. The 3 vectors have the same * number of elements numElements. */ __global__ void vectorAdd(const float *A, const float *B, float *C, int numElements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElements) { C[i] = A[i] + B[i]; } } /** * Host main routine */ int vectorAdd(void) { // Error code to check return values for CUDA calls hipError_t err = hipSuccess; // Print the vector length to be used, and compute its size int numElements = 50000; size_t size = numElements * sizeof(float); printf("[Vector addition of %d elements]\n", numElements); // Allocate the host input vector A float *h_A = (float *)malloc(size); // Allocate the host input vector B float *h_B = (float *)malloc(size); // Allocate the host output vector C float *h_C = (float *)malloc(size); // Verify that allocations succeeded if (h_A == NULL || h_B == NULL || h_C == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } // Initialize the host input vectors for (int i = 0; i < numElements; ++i) { h_A[i] = rand()/(float)RAND_MAX; h_B[i] = rand()/(float)RAND_MAX; } // Allocate the device input vector A float *d_A = NULL; err = hipMalloc((void **)&d_A, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device input vector B float *d_B = NULL; err = hipMalloc((void **)&d_B, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device output vector C float *d_C = NULL; err = hipMalloc((void **)&d_C, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the host input vectors A and B in host memory to the device input vectors in // device memory printf("Copy input data from the host memory to the CUDA device\n"); err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Launch the Vector Add CUDA Kernel int threadsPerBlock = 256; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); hipLaunchKernelGGL(( vectorAdd), 
dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, numElements); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the device result vector in device memory to the host result vector // in host memory. printf("Copy output data from the CUDA device to the host memory\n"); err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Verify that the result vector is correct for (int i = 0; i < numElements; ++i) { if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5) { fprintf(stderr, "Result verification failed at element %d!\n", i); exit(EXIT_FAILURE); } } printf("Test PASSED\n"); // Free device global memory err = hipFree(d_A); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_B); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_C); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Free host memory free(h_A); free(h_B); free(h_C); // Reset the device and exit // hipDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling hipDeviceReset causes all profile data to be // flushed before the application exits err = hipDeviceReset(); if (err != hipSuccess) { fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } printf("Done\n"); return 0; }
25ace489bbaf0d5dc3df704d94569d691eeddb29.cu
/** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 2 * of the programming guide with some additions like error checking. */ #include <stdio.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <cuda_runtime.h> /** * CUDA Kernel Device code * * Computes the vector addition of A and B into C. The 3 vectors have the same * number of elements numElements. */ __global__ void vectorAdd(const float *A, const float *B, float *C, int numElements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElements) { C[i] = A[i] + B[i]; } } /** * Host main routine */ int vectorAdd(void) { // Error code to check return values for CUDA calls cudaError_t err = cudaSuccess; // Print the vector length to be used, and compute its size int numElements = 50000; size_t size = numElements * sizeof(float); printf("[Vector addition of %d elements]\n", numElements); // Allocate the host input vector A float *h_A = (float *)malloc(size); // Allocate the host input vector B float *h_B = (float *)malloc(size); // Allocate the host output vector C float *h_C = (float *)malloc(size); // Verify that allocations succeeded if (h_A == NULL || h_B == NULL || h_C == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } // Initialize the host input vectors for (int i = 0; i < numElements; ++i) { h_A[i] = rand()/(float)RAND_MAX; h_B[i] = rand()/(float)RAND_MAX; } // Allocate the device input vector A float *d_A = NULL; err = cudaMalloc((void **)&d_A, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device input vector B float *d_B = NULL; err = cudaMalloc((void **)&d_B, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device output vector C float *d_C = NULL; err = cudaMalloc((void **)&d_C, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the host input vectors A and B in host memory to the device input vectors in // device memory printf("Copy input data from the host memory to the CUDA device\n"); err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Launch the Vector Add CUDA Kernel int threadsPerBlock = 256; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements); err = 
cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the device result vector in device memory to the host result vector // in host memory. printf("Copy output data from the CUDA device to the host memory\n"); err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Verify that the result vector is correct for (int i = 0; i < numElements; ++i) { if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5) { fprintf(stderr, "Result verification failed at element %d!\n", i); exit(EXIT_FAILURE); } } printf("Test PASSED\n"); // Free device global memory err = cudaFree(d_A); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_B); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_C); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Free host memory free(h_A); free(h_B); free(h_C); // Reset the device and exit // cudaDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling cudaDeviceReset causes all profile data to be // flushed before the application exits err = cudaDeviceReset(); if (err != cudaSuccess) { fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } printf("Done\n"); return 0; }
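The vectorAdd pair above repeats the same check-print-exit block after every runtime call. A small helper macro that factors that pattern out is sketched below; the name HIP_CHECK is an illustrative choice, not something defined by the HIP runtime or by this sample.

#include <cstdio>
#include <cstdlib>
#include <hip/hip_runtime.h>

#define HIP_CHECK(call)                                                     \
    do {                                                                    \
        hipError_t err_ = (call);                                           \
        if (err_ != hipSuccess) {                                           \
            fprintf(stderr, "HIP error '%s' at %s:%d\n",                    \
                    hipGetErrorString(err_), __FILE__, __LINE__);           \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

// Usage, mirroring the calls in the sample:
//   HIP_CHECK(hipMalloc((void**)&d_A, size));
//   HIP_CHECK(hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice));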
df757899b25bae54260ec4c52e54ceedf9796f91.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void gpu_tfm_linear_arb(float* real_result,float* imag_result,const int n,const int combs, const float* real_exp,const float* img_exp,const int* transmit,const int* receive,const float* lookup_time,const float* time, const int tot_pix, const int grid_x, const int grid_y, const int grid_z, const float* lookup_amp){ // get pixel's coordinates int pix = blockIdx.x*blockDim.x+threadIdx.x; if (pix<tot_pix){ //local variable float tot_real = 0, tot_imag = 0; float dt = time[1]-time[0]; for(int ii = 0; ii < combs; ii++){ float real = 0; float imag = 0; int tx = transmit[ii]-1; int rx = receive[ii]-1; int t_ind = (tx*grid_x*grid_y*grid_z)+pix; int r_ind = (rx*grid_x*grid_y*grid_z)+pix; float time_val = lookup_time[t_ind] + lookup_time[r_ind]; float amp_corr = lookup_amp[t_ind]*lookup_amp[r_ind]; float time_diff = time_val-time[0]; if(time_diff<0){ } else if(time_val > time[n-1]){ } else { int time_0 = floorf((time_val-time[0])/dt); int set_val = ii*(n)+time_0; float real_y1 = real_exp[set_val]; float imag_y1 = img_exp[set_val]; float real_y2 = real_exp[set_val+1]; float imag_y2 = img_exp[set_val+1]; float real_dy = real_y2-real_y1; float imag_dy = imag_y2-imag_y1; real = real_y1+real_dy*(time_val-time[time_0])/dt; real = real*amp_corr; imag = imag_y1+imag_dy*(time_val-time[time_0])/dt; imag = imag*amp_corr; } // sum each val tot_real += real; tot_imag += imag; } // store the final value for the pixel real_result[pix] = tot_real; imag_result[pix] = tot_imag; } }
df757899b25bae54260ec4c52e54ceedf9796f91.cu
__global__ void gpu_tfm_linear_arb(float* real_result,float* imag_result,const int n,const int combs, const float* real_exp,const float* img_exp,const int* transmit,const int* receive,const float* lookup_time,const float* time, const int tot_pix, const int grid_x, const int grid_y, const int grid_z, const float* lookup_amp){ // get pixel's coordinates int pix = blockIdx.x*blockDim.x+threadIdx.x; if (pix<tot_pix){ //local variable float tot_real = 0, tot_imag = 0; float dt = time[1]-time[0]; for(int ii = 0; ii < combs; ii++){ float real = 0; float imag = 0; int tx = transmit[ii]-1; int rx = receive[ii]-1; int t_ind = (tx*grid_x*grid_y*grid_z)+pix; int r_ind = (rx*grid_x*grid_y*grid_z)+pix; float time_val = lookup_time[t_ind] + lookup_time[r_ind]; float amp_corr = lookup_amp[t_ind]*lookup_amp[r_ind]; float time_diff = time_val-time[0]; if(time_diff<0){ } else if(time_val > time[n-1]){ } else { int time_0 = floorf((time_val-time[0])/dt); int set_val = ii*(n)+time_0; float real_y1 = real_exp[set_val]; float imag_y1 = img_exp[set_val]; float real_y2 = real_exp[set_val+1]; float imag_y2 = img_exp[set_val+1]; float real_dy = real_y2-real_y1; float imag_dy = imag_y2-imag_y1; real = real_y1+real_dy*(time_val-time[time_0])/dt; real = real*amp_corr; imag = imag_y1+imag_dy*(time_val-time[time_0])/dt; imag = imag*amp_corr; } // sum each val tot_real += real; tot_imag += imag; } // store the final value for the pixel real_result[pix] = tot_real; imag_result[pix] = tot_imag; } }
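This kernel-only pair is identical apart from the hipify banner and the hip/hip_runtime.h include, since the device code uses only portable constructs. The host-side launch is not part of this row; the sketch below shows how the kernel would typically be driven with one thread per pixel. The wrapper itself and the 256-thread block size are assumptions, not taken from the dataset.

// Assumes gpu_tfm_linear_arb (defined above) is in scope and that all pointers are
// device allocations already populated by the caller.
void launch_tfm(float* d_real, float* d_imag, int n, int combs,
                const float* d_real_exp, const float* d_img_exp,
                const int* d_transmit, const int* d_receive,
                const float* d_lookup_time, const float* d_time,
                int tot_pix, int grid_x, int grid_y, int grid_z,
                const float* d_lookup_amp) {
    const int threads = 256;
    const int blocks = (tot_pix + threads - 1) / threads;   // one thread per pixel
    hipLaunchKernelGGL(gpu_tfm_linear_arb, dim3(blocks), dim3(threads), 0, 0,
                       d_real, d_imag, n, combs, d_real_exp, d_img_exp,
                       d_transmit, d_receive, d_lookup_time, d_time,
                       tot_pix, grid_x, grid_y, grid_z, d_lookup_amp);
}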
9bafaf08a1f8ab2a5a7cfa812cbc26c6a540c2e1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <cstdlib> #include <ctime> #include <iostream> #include <vector> template<typename ErrorType> void check(ErrorType err, const char* const func, const char* const file, const int line) { if (err != hipSuccess) { std::cerr << "CUDA error at: " << file << ":" << line << std::endl; std::cerr << hipGetErrorString(err) << " " << func << std::endl; exit(1); } } #define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__) __device__ void ReduceCTA(volatile int* smem, int CTA_SIZE) { const int tid = threadIdx.x; if (CTA_SIZE >= 512) { if (tid < 256) { smem[tid] += smem[tid + 256]; } __syncthreads(); } if (CTA_SIZE >= 256) { if (tid < 128) { smem[tid] += smem[tid + 128]; } __syncthreads(); } if (CTA_SIZE >= 128) { if (tid < 64) { smem[tid] += smem[tid + 64]; } __syncthreads(); } if (tid < 32) { // warp synchronous instruction, so declare smem volatile to // avoid compiler mem access optimization volatile int* smemp = smem; if (CTA_SIZE >= 64) smemp[tid] += smemp[tid + 32]; if (CTA_SIZE >= 32) smemp[tid] += smemp[tid + 16]; if (CTA_SIZE >= 16) smemp[tid] += smemp[tid + 8]; if (CTA_SIZE >= 8) smemp[tid] += smemp[tid + 4]; if (CTA_SIZE >= 4) smemp[tid] += smemp[tid + 2]; if (CTA_SIZE >= 2) smemp[tid] += smemp[tid + 1]; } } template <typename T> struct Accum; template <> struct Accum<int> { __device__ __host__ inline static int apply(int a) { return a; } }; template <> struct Accum<int2> { __device__ __host__ inline static int apply(int2 a) { return a.x + a.y; } }; template <> struct Accum<int4> { __device__ __host__ inline static int apply(int4 a) { return a.x + a.y + a.z + a.w; } }; template<typename T> __global__ void ReduceThreadblocksKernel(const T* const array, int* const totals, int N, int B) { const T* datatile = array + B * blockDim.x * blockIdx.x; const int tid = threadIdx.x; int accum = 0; int tile = 0; T element = {0}; while (tile++ < B) { element = datatile[tid]; accum += Accum<T>::apply(element); datatile += blockDim.x; } extern __shared__ int shared[]; shared[tid] = accum; __syncthreads(); ReduceCTA(shared, blockDim.x); if (0 == tid) { accum = shared[0]; totals[blockIdx.x] = accum; } } __global__ void ReduceTotalsKernel(int* const totals) { const int tid = threadIdx.x; extern __shared__ int smem[]; smem[tid] = totals[tid]; __syncthreads(); ReduceCTA(smem, blockDim.x); if (0 == tid) totals[0] = smem[0]; } /* * Minimum number of CTAs to fill GPU GTX560Ti (8 SM cores) */ const int CTAs = 64; // Map number of elements per load to type template<int ELEMENTS_PER_LOAD> struct LoadTraits; template<> struct LoadTraits<1> { typedef int Type; }; template<> struct LoadTraits<2> { typedef int2 Type; }; template<> struct LoadTraits<4> { typedef int4 Type; }; template <typename ElementType, int ELEMENTS_PER_LOAD> void ParallelReduce(const ElementType* const array, ElementType* const totals, int N) { /* * Parallel REDUCE * Assume that size is multiply of C*T (N = k *(C*T) for some k > 0) */ const int C = CTAs; // CTAs number const int T = 256; // Tile size const int E = ELEMENTS_PER_LOAD; // load 1, 2 or 4 consecutive 4-byte words per thread const int B = (N/(T*C))/E; // Tiles per CTA const dim3 gridDim(C); const dim3 blockDim(T); //printf("C: %d\n", C); //printf("T: %d\n", T); //printf("E: %d\n", E); //printf("B: %d\n", B); //printf("GridDim: (%d %d %d)\n", gridDim.x, gridDim.y, gridDim.z); //printf("BlockDim: (%d %d %d)\n", blockDim.x, blockDim.y, blockDim.z); typedef typename 
LoadTraits<ELEMENTS_PER_LOAD>::Type LoadType; hipLaunchKernelGGL(( ReduceThreadblocksKernel<LoadType>) , dim3(gridDim), dim3(blockDim), T * sizeof(ElementType), 0, (const LoadType*)array, totals, N, B); //checkCudaErrors(hipDeviceSynchronize()); hipLaunchKernelGGL(( ReduceTotalsKernel), dim3(1), dim3(C), C * sizeof(ElementType), 0, totals); //checkCudaErrors(hipDeviceSynchronize()); } template<typename ElementType> void CreateSample(std::vector<ElementType>& array) { std::srand(time(0)); for (int i(0); i < array.size(); ++i) array[i] = 1; //static_cast<ElementType>(std::rand() % 100); } template<typename ElementType> ElementType SequentialReduce(const std::vector<ElementType>& src) { ElementType sum = 0; for (int i(0); i < src.size(); ++i) sum += src[i]; return sum; } template<typename ElementType> void CompareResults(ElementType hostresult, ElementType deviceresult) { bool incorrect = (hostresult != deviceresult); if (incorrect) { printf("Incorrect %d != %d\n", hostresult, deviceresult); } else { printf("Perfectly correct!\nGPU sum reduction: %d\n", deviceresult); } } float PeakBandwidth(int devID) { hipError_t error; hipDeviceProp_t deviceProp; error = hipGetDevice(&devID); if (error != hipSuccess) { printf("hipGetDevice returned error code %d, line(%d)\n", error, __LINE__); } error = hipGetDeviceProperties(&deviceProp, devID); if (deviceProp.computeMode == hipComputeModeProhibited) { fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, " "no threads can use ::hipSetDevice().\n"); exit(1); } if (error != hipSuccess) { printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); } else { printf("\nGPUDevice %d: %s\nCompute cap: %d.%d\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); } const int clockRate = deviceProp.memoryClockRate; // [KHz] const int memWidth = deviceProp.memoryBusWidth; // [bits] return 2.0 * clockRate * (memWidth/8.0) / 1.0e6; // [GB/s]; } int main(int argc, char** argv) { int tilesPerCTA = 200; int elementsPerThread = 1; if (argc > 2) { tilesPerCTA = atoi(argv[1]); elementsPerThread = atoi(argv[2]); } int devID = 0; const float peakBandwidth = PeakBandwidth(devID); /* const int ARRAY_SIZE = CTAs * 256 * 10000; $ ./Reduce 10000 2 GPUDevice 0: GeForce GTX 560 Ti Compute cap: 2.1 Problem size: 163840000 CTAs number: 64 Computation time: 5.800864 [ms] Peak bandwidth: 128.256 [GB/s] Effective bandwidth: 112.976 [GB/s] 88.087 % of the peak! Perfectly correct! 
GPU sum reduction: 163840000 const int ARRAY_SIZE = CTAs * 256 * 400; // 82% of peak const int ARRAY_SIZE = CTAs * 256 * 800; // 84% of peak */ const int ARRAY_SIZE = CTAs * 256 * tilesPerCTA; typedef int Element; std::vector<Element> h_array(ARRAY_SIZE); std::vector<Element> h_totals(CTAs); CreateSample(h_array); Element seqresult = SequentialReduce(h_array); printf("Problem size: %d\n", ARRAY_SIZE); printf("CTAs number: %d\n", CTAs); Element* d_array; Element* d_totals; checkCudaErrors(hipMalloc((void**) &d_array, sizeof(Element) * ARRAY_SIZE)); checkCudaErrors(hipMalloc((void**) &d_totals, sizeof(Element) * CTAs)); checkCudaErrors(hipMemcpy(d_array, h_array.data(), sizeof(Element) * ARRAY_SIZE, hipMemcpyHostToDevice)); hipEvent_t start, stop; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); checkCudaErrors(hipEventRecord(start, 0)); switch (elementsPerThread) { default: printf("ERROR: Chose int/int2/int4\n"); break; case 1: ParallelReduce<Element, 1>(d_array, d_totals, ARRAY_SIZE); break; case 2: ParallelReduce<Element, 2>(d_array, d_totals, ARRAY_SIZE); break; case 4: ParallelReduce<Element, 4>(d_array, d_totals, ARRAY_SIZE); break; } checkCudaErrors(hipEventRecord(stop, 0)); checkCudaErrors(hipEventSynchronize(stop)); float totalTimeMsec = 0.0f; checkCudaErrors(hipEventElapsedTime(&totalTimeMsec, start, stop)); const size_t loadedBytes = ARRAY_SIZE * sizeof(Element) + CTAs * sizeof(Element); const size_t storedBytes = 2 * CTAs * sizeof(Element); const float effectiveBandwidth = (loadedBytes + storedBytes)/totalTimeMsec/1.0e6; printf("Computation time: %f [ms]\n", totalTimeMsec); printf("Peak bandwidth: %.3f [GB/s]\n", peakBandwidth); printf("Effective bandwidth: %.3f [GB/s] %.3f %% of the peak!\n", effectiveBandwidth, (effectiveBandwidth / peakBandwidth) * 100); checkCudaErrors(hipEventDestroy(start)); checkCudaErrors(hipEventDestroy(stop)); checkCudaErrors(hipMemcpy(h_totals.data(), d_totals, CTAs * sizeof(Element), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_array)); checkCudaErrors(hipFree(d_totals)); checkCudaErrors(hipDeviceReset()); CompareResults(seqresult, h_totals.front()); }
9bafaf08a1f8ab2a5a7cfa812cbc26c6a540c2e1.cu
#include <cstdio> #include <cstdlib> #include <ctime> #include <iostream> #include <vector> template<typename ErrorType> void check(ErrorType err, const char* const func, const char* const file, const int line) { if (err != cudaSuccess) { std::cerr << "CUDA error at: " << file << ":" << line << std::endl; std::cerr << cudaGetErrorString(err) << " " << func << std::endl; exit(1); } } #define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__) __device__ void ReduceCTA(volatile int* smem, int CTA_SIZE) { const int tid = threadIdx.x; if (CTA_SIZE >= 512) { if (tid < 256) { smem[tid] += smem[tid + 256]; } __syncthreads(); } if (CTA_SIZE >= 256) { if (tid < 128) { smem[tid] += smem[tid + 128]; } __syncthreads(); } if (CTA_SIZE >= 128) { if (tid < 64) { smem[tid] += smem[tid + 64]; } __syncthreads(); } if (tid < 32) { // warp synchronous instruction, so declare smem volatile to // avoid compiler mem access optimization volatile int* smemp = smem; if (CTA_SIZE >= 64) smemp[tid] += smemp[tid + 32]; if (CTA_SIZE >= 32) smemp[tid] += smemp[tid + 16]; if (CTA_SIZE >= 16) smemp[tid] += smemp[tid + 8]; if (CTA_SIZE >= 8) smemp[tid] += smemp[tid + 4]; if (CTA_SIZE >= 4) smemp[tid] += smemp[tid + 2]; if (CTA_SIZE >= 2) smemp[tid] += smemp[tid + 1]; } } template <typename T> struct Accum; template <> struct Accum<int> { __device__ __host__ inline static int apply(int a) { return a; } }; template <> struct Accum<int2> { __device__ __host__ inline static int apply(int2 a) { return a.x + a.y; } }; template <> struct Accum<int4> { __device__ __host__ inline static int apply(int4 a) { return a.x + a.y + a.z + a.w; } }; template<typename T> __global__ void ReduceThreadblocksKernel(const T* const array, int* const totals, int N, int B) { const T* datatile = array + B * blockDim.x * blockIdx.x; const int tid = threadIdx.x; int accum = 0; int tile = 0; T element = {0}; while (tile++ < B) { element = datatile[tid]; accum += Accum<T>::apply(element); datatile += blockDim.x; } extern __shared__ int shared[]; shared[tid] = accum; __syncthreads(); ReduceCTA(shared, blockDim.x); if (0 == tid) { accum = shared[0]; totals[blockIdx.x] = accum; } } __global__ void ReduceTotalsKernel(int* const totals) { const int tid = threadIdx.x; extern __shared__ int smem[]; smem[tid] = totals[tid]; __syncthreads(); ReduceCTA(smem, blockDim.x); if (0 == tid) totals[0] = smem[0]; } /* * Minimum number of CTAs to fill GPU GTX560Ti (8 SM cores) */ const int CTAs = 64; // Map number of elements per load to type template<int ELEMENTS_PER_LOAD> struct LoadTraits; template<> struct LoadTraits<1> { typedef int Type; }; template<> struct LoadTraits<2> { typedef int2 Type; }; template<> struct LoadTraits<4> { typedef int4 Type; }; template <typename ElementType, int ELEMENTS_PER_LOAD> void ParallelReduce(const ElementType* const array, ElementType* const totals, int N) { /* * Parallel REDUCE * Assume that size is multiply of C*T (N = k *(C*T) for some k > 0) */ const int C = CTAs; // CTAs number const int T = 256; // Tile size const int E = ELEMENTS_PER_LOAD; // load 1, 2 or 4 consecutive 4-byte words per thread const int B = (N/(T*C))/E; // Tiles per CTA const dim3 gridDim(C); const dim3 blockDim(T); //printf("C: %d\n", C); //printf("T: %d\n", T); //printf("E: %d\n", E); //printf("B: %d\n", B); //printf("GridDim: (%d %d %d)\n", gridDim.x, gridDim.y, gridDim.z); //printf("BlockDim: (%d %d %d)\n", blockDim.x, blockDim.y, blockDim.z); typedef typename LoadTraits<ELEMENTS_PER_LOAD>::Type LoadType; ReduceThreadblocksKernel<LoadType> 
<<<gridDim, blockDim, T * sizeof(ElementType)>>>((const LoadType*)array, totals, N, B); //checkCudaErrors(cudaDeviceSynchronize()); ReduceTotalsKernel<<<1, C, C * sizeof(ElementType)>>>(totals); //checkCudaErrors(cudaDeviceSynchronize()); } template<typename ElementType> void CreateSample(std::vector<ElementType>& array) { std::srand(time(0)); for (int i(0); i < array.size(); ++i) array[i] = 1; //static_cast<ElementType>(std::rand() % 100); } template<typename ElementType> ElementType SequentialReduce(const std::vector<ElementType>& src) { ElementType sum = 0; for (int i(0); i < src.size(); ++i) sum += src[i]; return sum; } template<typename ElementType> void CompareResults(ElementType hostresult, ElementType deviceresult) { bool incorrect = (hostresult != deviceresult); if (incorrect) { printf("Incorrect %d != %d\n", hostresult, deviceresult); } else { printf("Perfectly correct!\nGPU sum reduction: %d\n", deviceresult); } } float PeakBandwidth(int devID) { cudaError_t error; cudaDeviceProp deviceProp; error = cudaGetDevice(&devID); if (error != cudaSuccess) { printf("cudaGetDevice returned error code %d, line(%d)\n", error, __LINE__); } error = cudaGetDeviceProperties(&deviceProp, devID); if (deviceProp.computeMode == cudaComputeModeProhibited) { fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, " "no threads can use ::cudaSetDevice().\n"); exit(1); } if (error != cudaSuccess) { printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); } else { printf("\nGPUDevice %d: %s\nCompute cap: %d.%d\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); } const int clockRate = deviceProp.memoryClockRate; // [KHz] const int memWidth = deviceProp.memoryBusWidth; // [bits] return 2.0 * clockRate * (memWidth/8.0) / 1.0e6; // [GB/s]; } int main(int argc, char** argv) { int tilesPerCTA = 200; int elementsPerThread = 1; if (argc > 2) { tilesPerCTA = atoi(argv[1]); elementsPerThread = atoi(argv[2]); } int devID = 0; const float peakBandwidth = PeakBandwidth(devID); /* const int ARRAY_SIZE = CTAs * 256 * 10000; $ ./Reduce 10000 2 GPUDevice 0: GeForce GTX 560 Ti Compute cap: 2.1 Problem size: 163840000 CTAs number: 64 Computation time: 5.800864 [ms] Peak bandwidth: 128.256 [GB/s] Effective bandwidth: 112.976 [GB/s] 88.087 % of the peak! Perfectly correct! 
GPU sum reduction: 163840000 const int ARRAY_SIZE = CTAs * 256 * 400; // 82% of peak const int ARRAY_SIZE = CTAs * 256 * 800; // 84% of peak */ const int ARRAY_SIZE = CTAs * 256 * tilesPerCTA; typedef int Element; std::vector<Element> h_array(ARRAY_SIZE); std::vector<Element> h_totals(CTAs); CreateSample(h_array); Element seqresult = SequentialReduce(h_array); printf("Problem size: %d\n", ARRAY_SIZE); printf("CTAs number: %d\n", CTAs); Element* d_array; Element* d_totals; checkCudaErrors(cudaMalloc((void**) &d_array, sizeof(Element) * ARRAY_SIZE)); checkCudaErrors(cudaMalloc((void**) &d_totals, sizeof(Element) * CTAs)); checkCudaErrors(cudaMemcpy(d_array, h_array.data(), sizeof(Element) * ARRAY_SIZE, cudaMemcpyHostToDevice)); cudaEvent_t start, stop; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); checkCudaErrors(cudaEventRecord(start, 0)); switch (elementsPerThread) { default: printf("ERROR: Chose int/int2/int4\n"); break; case 1: ParallelReduce<Element, 1>(d_array, d_totals, ARRAY_SIZE); break; case 2: ParallelReduce<Element, 2>(d_array, d_totals, ARRAY_SIZE); break; case 4: ParallelReduce<Element, 4>(d_array, d_totals, ARRAY_SIZE); break; } checkCudaErrors(cudaEventRecord(stop, 0)); checkCudaErrors(cudaEventSynchronize(stop)); float totalTimeMsec = 0.0f; checkCudaErrors(cudaEventElapsedTime(&totalTimeMsec, start, stop)); const size_t loadedBytes = ARRAY_SIZE * sizeof(Element) + CTAs * sizeof(Element); const size_t storedBytes = 2 * CTAs * sizeof(Element); const float effectiveBandwidth = (loadedBytes + storedBytes)/totalTimeMsec/1.0e6; printf("Computation time: %f [ms]\n", totalTimeMsec); printf("Peak bandwidth: %.3f [GB/s]\n", peakBandwidth); printf("Effective bandwidth: %.3f [GB/s] %.3f %% of the peak!\n", effectiveBandwidth, (effectiveBandwidth / peakBandwidth) * 100); checkCudaErrors(cudaEventDestroy(start)); checkCudaErrors(cudaEventDestroy(stop)); checkCudaErrors(cudaMemcpy(h_totals.data(), d_totals, CTAs * sizeof(Element), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_array)); checkCudaErrors(cudaFree(d_totals)); checkCudaErrors(cudaDeviceReset()); CompareResults(seqresult, h_totals.front()); }
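Besides the launch syntax, this pair shows where the dynamic shared-memory size goes: the third triple-chevron parameter in the .cu file becomes the third hipLaunchKernelGGL argument in the .hip file. A small sketch of that mapping for the totals pass; it assumes ReduceTotalsKernel above is in scope, and int matches the Element typedef used in main.

void launch_totals_reduction(int* d_totals, int num_ctas) {
    // CUDA form in the .cu file:
    //   ReduceTotalsKernel<<<1, num_ctas, num_ctas * sizeof(int)>>>(d_totals);
    // HIP form in the .hip file; arguments 3 and 4 carry the dynamic shared-memory
    // size in bytes and the stream:
    hipLaunchKernelGGL(ReduceTotalsKernel, dim3(1), dim3(num_ctas),
                       num_ctas * sizeof(int), 0, d_totals);
}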
1d30b5a1a85735e30b6eee726973b3e9e868e22a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // nvcc -std=c++11 myMatMul.cu -o myMatMul #include <vector> using namespace std; __global__ void mulMat(float* a, float* b, float* c, int n) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; int tid = row*n + col; if(tid < n*n) { for(int k = 0; k < n; ++k) { c[tid] += a[row*n + k]*b[col + n*k]; //c[tid] += a[row*n + k]*b[n*col + k]; } } } int main() { const int n = 1<<13; auto vv = vector<vector<float>>(n, vector<float>(n, sqrt(10))); vector<float> v1; for(int i = 0; i < n; ++i) v1.insert(v1.end(), vv[i].begin(), vv[i].end()); vector<float> c = v1; float* da; float* db; float* dc; size_t bytes = n*n*sizeof(float); hipMalloc(&da, bytes); hipMalloc(&db, bytes); hipMalloc(&dc, bytes); int BlockSize = 32; int GridSize = (n + BlockSize - 1)/BlockSize; cout << "BlockSize_1d:" << BlockSize <<" GridSize_1d:" << GridSize << endl; dim3 BS(BlockSize, BlockSize); dim3 GS(GridSize, GridSize); hipMemcpy(da, v1.data(), bytes, hipMemcpyHostToDevice); hipMemcpy(db, v1.data(), bytes, hipMemcpyHostToDevice); hipLaunchKernelGGL(( mulMat), dim3(GS), dim3(BS), 0, 0, da, da, dc, n); hipDeviceSynchronize(); hipMemcpy(c.data(), dc, bytes, hipMemcpyDeviceToHost); hipFree(da); hipFree(db); hipFree(dc); cout << c[0] << endl; cout << c[n*n/2] << endl; }
1d30b5a1a85735e30b6eee726973b3e9e868e22a.cu
// nvcc -std=c++11 myMatMul.cu -o myMatMul #include <vector> using namespace std; __global__ void mulMat(float* a, float* b, float* c, int n) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; int tid = row*n + col; if(tid < n*n) { for(int k = 0; k < n; ++k) { c[tid] += a[row*n + k]*b[col + n*k]; //c[tid] += a[row*n + k]*b[n*col + k]; } } } int main() { const int n = 1<<13; auto vv = vector<vector<float>>(n, vector<float>(n, sqrt(10))); vector<float> v1; for(int i = 0; i < n; ++i) v1.insert(v1.end(), vv[i].begin(), vv[i].end()); vector<float> c = v1; float* da; float* db; float* dc; size_t bytes = n*n*sizeof(float); cudaMalloc(&da, bytes); cudaMalloc(&db, bytes); cudaMalloc(&dc, bytes); int BlockSize = 32; int GridSize = (n + BlockSize - 1)/BlockSize; cout << "BlockSize_1d:" << BlockSize <<" GridSize_1d:" << GridSize << endl; dim3 BS(BlockSize, BlockSize); dim3 GS(GridSize, GridSize); cudaMemcpy(da, v1.data(), bytes, cudaMemcpyHostToDevice); cudaMemcpy(db, v1.data(), bytes, cudaMemcpyHostToDevice); mulMat<<<GS, BS>>>(da, da, dc, n); cudaDeviceSynchronize(); cudaMemcpy(c.data(), dc, bytes, cudaMemcpyDeviceToHost); cudaFree(da); cudaFree(db); cudaFree(dc); cout << c[0] << endl; cout << c[n*n/2] << endl; }
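Both files in this pair size the grid with the usual ceiling division (n + BlockSize - 1) / BlockSize before building the 2-D dim3. A tiny helper that names that idiom; div_up is an illustrative addition, not part of the files.

static inline unsigned int div_up(unsigned int n, unsigned int block) {
    return (n + block - 1) / block;   // smallest count with count * block >= n
}

// e.g.  dim3 BS(32, 32);  dim3 GS(div_up(n, 32), div_up(n, 32));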
db13b0a6d941d51f54a5b04dadef3d500c9b2295.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" # include "multistep.h" void computePoints_AdamsBashford2(double tmin, double tmax, int N1, double Data_TMin, double Data_TMax, int Num_launch) { ////////////////////////////////////////////////////////////////////////////////////// // 2nd order Adams-Bashford Method // For Given Initial Value Problem: // (d/dt)y = f(y,t) // y(n) = constant (For our application) // equation is y(n+2) = y(n+1) + ( 3/2 * h * f( t(n+1), y(n+1) ) ) - ( 1/2 * h * f( t(n), y(n) ) ) // y(n) is given. // First y(n+1) is obtained from the Euler method. // Simple Euler Method used is y(n+1) = y(n) + ( h * f( t(n), y(n) ) ) ////////////////////////////////////////////////////////////////////////////////////// double h; // Integration time from tmin to tmax. int n_loops; float elapsedTime; // Total number of loops for integrating particles over a single time step. n_loops = ( fmod((tmax - tmin) , Int_TimeStep) == 0 ) ? fabs((tmax - tmin)/Int_TimeStep) : (fabs((tmax - tmin)/Int_TimeStep) + 1); // Calculation of Block size for single chunk of integration kernel ... if ( N1 % nthreads == 0 ) { nblocks = (int)(N1/nthreads); } else { nblocks = (int)((N1/nthreads) + 1); } double t1, t2, tloc[2], tcurrent; int releases, offset; // Time counters which use GPU to time our entire program.... hipEvent_t start_int,stop_int; hipEventCreate( &start_int ); hipEventCreate( &stop_int ); hipEventRecord( start_int, 0 ); for (releases = 0; releases < Num_launch; releases++) { printf("Integrating Release %d from %f to %f \n", releases, tmin, tmax); offset = releases * N1; // Integrate all points for this release for (int a = 0; a < n_loops; a++) { t1 = fmax (( tmin + a*Int_TimeStep ), Tracer.Start_time[offset]); t2 = t1 + Int_TimeStep; if (t2 > tmax) { t2 = tmax; h = t2 - t1; } else { h = t2 - t1; } tcurrent = t1; tloc[0] = (tcurrent - Data_TMin) / (Data_TMax - Data_TMin); tcurrent = t1 + (h/2); tloc[1] = (tcurrent - Data_TMin) / (Data_TMax - Data_TMin); // Memcpy of constant memory err = hipMemcpyToSymbol( tlocation, &tloc, 2*sizeof(double), 0, hipMemcpyHostToDevice); if(err != hipSuccess) { fprintf(stderr, "Memory copy of constant variable (tloc) from Host to Device failed\n"); printf("CUDA Error: %s \n\n", hipGetErrorString(err)); fprintf(stderr, " \n\n"); exit(1); } // Check which point needs to be integrated ... cudaPrintfInit(); hipLaunchKernelGGL(( check_int), dim3(nblocks), dim3(nthreads), 0, 0, Tracer_dev, integrate, t1, t2, N1, offset); cudaPrintfDisplay(stdout, true); err = hipDeviceSynchronize(); // Synchronize function that will wait till all threads are done computing .... if(err != hipSuccess) { fprintf(stderr, "Error in check_int. \n"); fprintf(stderr,"CUDA Error: %s \n\n", hipGetErrorString(err)); exit(1); } if (Dimensions == 3) { // 3-D case. if (Data_MeshType == UNSTRUCTURED) { //AdamsBashford_3DUnstructured_optimized(N1, h); // This function is faster than the optimized version hipLaunchKernelGGL(( AdamsBashford_3D_Unstructured), dim3(nblocks), dim3(nthreads), 0, 0, Tracer_dev.x, Tracer_dev.y, Tracer_dev.z, velocity_dev, MeshElementArray_device, Tracer_dev.ElementIndex, r, s, t, MeshNodeArray_double_device, N1, integrate, h, offset ); } else { // Cartesian Mesh hipLaunchKernelGGL(( AdamsBashford_2Order_3D_Cartesian), dim3(nblocks), dim3(nthreads), 0, 0, Tracer_dev.x, Tracer_dev.y, Tracer_dev.z, velocity_dev, N1, integrate, Tracer_dev.LeftDomain , h, offset ); } } else { // 2-D case. 
if (Data_MeshType == UNSTRUCTURED) { hipLaunchKernelGGL(( AdamsBashford_2D_Unstructured), dim3(nblocks), dim3(nthreads), 0, 0, Tracer_dev.x, Tracer_dev.y, velocity_dev, MeshElementArray_device, Tracer_dev.ElementIndex, r, s, MeshNodeArray_double_device, N1, integrate, h, offset ); } else { // Cartesian Mesh hipLaunchKernelGGL(( AdamsBashford_2Order_2D_Cartesian), dim3(nblocks), dim3(nthreads), 0, 0, Tracer_dev.x, Tracer_dev.y, velocity_dev, N1, integrate, Tracer_dev.LeftDomain , h, offset ); } } err = hipDeviceSynchronize(); // Synchronize function that will wait till all threads are done computing .... if(err != hipSuccess) { fprintf(stderr, "Error in AdamsBashford kernel. \n"); fprintf(stderr,"CUDA Error: %s \n\n", hipGetErrorString(err)); exit(1); } } printf("Release %d integrated \n",releases); } hipEventRecord( stop_int, 0 ); hipEventSynchronize( stop_int ); hipEventElapsedTime( &elapsedTime, start_int, stop_int ); printf( "Time for integration: %3.2f ms\n", elapsedTime ); hipEventDestroy( stop_int ) ; hipEventDestroy( start_int ) ; } /* void AdamsBashford_3DUnstructured_optimized(int N1, double h) { // Do local search ..... LocalSearch3D<<<nblocks, nthreads>>>(Tracer_dev.x, Tracer_dev.y, Tracer_dev.z, MeshElementArray_device, MeshNodeArray_double_device, Tracer_dev.ElementIndex, N1, 0, r, s, t, integrate); err = hipDeviceSynchronize(); // Synchronize function that will wait till all threads are done computing .... if(err != hipSuccess) { fprintf(stderr, "Error in LocalSearch3D. \n"); fprintf(stderr,"CUDA Error: %s \n\n" , hipGetErrorString(err)); exit(1); } // Get Velocity .. Adams_Integrate3D<<<nblocks, nthreads>>>(Tracer_dev.x, Tracer_dev.y, Tracer_dev.z, Tracer_dev.ElementIndex, xn0, xn1, xn2, velocity_dev, MeshElementArray_device, r, s, t, N1, integrate, h, 0, 0); err = hipDeviceSynchronize(); // Synchronize function that will wait till all threads are done computing .... if(err != hipSuccess) { fprintf(stderr, "Error in Euler3D. \n"); fprintf(stderr,"CUDA Error: %s \n\n" , hipGetErrorString(err)); exit(1); } // Do local search ..... LocalSearch3D<<<nblocks, nthreads>>>(Tracer_dev.x, Tracer_dev.y, Tracer_dev.z, MeshElementArray_device, MeshNodeArray_double_device, Tracer_dev.ElementIndex, N1, 0, r, s, t, integrate); err = hipDeviceSynchronize(); // Synchronize function that will wait till all threads are done computing .... if(err != hipSuccess) { fprintf(stderr, "Error in LocalSearch3D. \n"); fprintf(stderr,"CUDA Error: %s \n\n" , hipGetErrorString(err)); exit(1); } // Get Velocity .. Adams_Integrate3D<<<nblocks, nthreads>>>(Tracer_dev.x, Tracer_dev.y, Tracer_dev.z, Tracer_dev.ElementIndex, xn0, xn1, xn2, velocity_dev, MeshElementArray_device, r, s, t, N1, integrate, h, 1, 0); } __global__ void Adams_Integrate3D(double *x, double *y, double *z, int *eid, double *xn0, double *xn1, double *xn2, VelData_double vel, Element MeshElementArray_device, double *r, double *s, double *t, int ss, int *integrate, double h, int option, int offset ) { // Option = 0 => Euler method int tid; // Get thread index tid=(blockIdx.x*blockDim.x)+threadIdx.x; if( tid >= ss) { // Redundant thread ... 
return; } else { tid = tid + offset; if (integrate[tid] == 1) { double k[3]; if (option == 0 ) { // Get velocity GetVel_unstruct3D( tlocation[0], k, vel, MeshElementArray_device, eid[tid], r[tid], s[tid], t[tid] ); // This is Euler method with step of h = h/2 // x1 = x1 + (h/2)*k[0]; // y1 = y1 + (h/2)*k[1]; // z1 = z1 + (h/2)*k[2]; x[tid] = x[tid] + (0.5*h)*k[0]; y[tid] = y[tid] + (0.5*h)*k[1]; z[tid] = z[tid] + (0.5*h)*k[2]; xn0[tid] = k[0]; xn1[tid] = k[1]; xn2[tid] = k[2]; } else if (option == 1) { // Get velocity GetVel_unstruct3D( tlocation[1], k, vel, MeshElementArray_device, eid[tid], r[tid], s[tid], t[tid] ); // Corrector Adambashford 2nd order equation. // x1 = x1 + ( (3/2) * (h/2) * k[0] ) - ( (1/2) * (h/2) * xn0[tid] ); // y1 = y1 + ( (3/2) * (h/2) * k[1] ) - ( (1/2) * (h/2) * xn1[tid] ); // z1 = z1 + ( (3/2) * (h/2) * k[2] ) - ( (1/2) * (h/2) * xn2[tid] ); x[tid] = x[tid] + ( (1.5) * (0.5*h) * k[0] ) - ( (0.5) * (0.5*h) * xn0[tid] ); y[tid] = y[tid] + ( (1.5) * (0.5*h) * k[1] ) - ( (0.5) * (0.5*h) * xn1[tid] ); z[tid] = z[tid] + ( (1.5) * (0.5*h) * k[2] ) - ( (0.5) * (0.5*h) * xn2[tid] ); } return; } } } */ __global__ void AdamsBashford_3D_Unstructured(double *x, double *y, double *z, VelData_double vel, Element MeshElementArray_device, int *eid, double *r, double *s, double *t, Node_double MeshNodeArray_double_device, int ss, int *integrate, double h, int offset ) { int tid; // Get thread index tid=(blockIdx.x*blockDim.x)+threadIdx.x; if( tid >= ss) { // Redundant thread ... return; } else { tid = tid + offset; if (integrate[tid] == 1) { double k1[3], k2[3]; double x1,y1,z1; int ID = eid[tid]; x1 = x[tid]; y1 = y[tid]; z1 = z[tid]; // Do Local Search ID = get_local_search_3D(x1, y1, z1, MeshElementArray_device, MeshNodeArray_double_device, ID, r[tid], s[tid], t[tid]); // Get velocity GetVel_unstruct3D( tlocation[0], k1, vel, MeshElementArray_device, ID, r[tid], s[tid], t[tid] ); // This is Euler method with step of h = h/2 x1 = x1 + (h*0.5)*k1[0]; y1 = y1 + (h*0.5)*k1[1]; z1 = z1 + (h*0.5)*k1[2]; // Do Local Search ID = get_local_search_3D(x1, y1, z1, MeshElementArray_device, MeshNodeArray_double_device, ID, r[tid], s[tid], t[tid]); // Get velocity GetVel_unstruct3D( tlocation[1], k2, vel, MeshElementArray_device, ID, r[tid], s[tid], t[tid] ); // Corrector Adambashford 2nd order equation. x1 = x1 + ( (1.5) * (h*0.5) * k2[0] ) - ( (0.5) * (h*0.5) * k1[0] ); y1 = y1 + ( (1.5) * (h*0.5) * k2[1] ) - ( (0.5) * (h*0.5) * k1[1] ); z1 = z1 + ( (1.5) * (h*0.5) * k2[2] ) - ( (0.5) * (h*0.5) * k1[2] ); x[tid] = x1; y[tid] = y1; z[tid] = z1; eid[tid] = ID; } return; } } __global__ void AdamsBashford_2D_Unstructured(double *x, double *y, VelData_double vel, Element MeshElementArray_device, int *eid, double *r, double *s, Node_double MeshNodeArray_double_device, int ss, int *integrate, double h, int offset ) { int tid; // Get thread index tid=(blockIdx.x*blockDim.x)+threadIdx.x; if( tid >= ss) { // Redundant thread ... 
return; } else { tid = tid + offset; if (integrate[tid] == 1) { double k1[2], k2[2]; double x1,y1; int ID = eid[tid]; x1 = x[tid]; y1 = y[tid]; // Do Local Search ID = get_local_search_2D(x1, y1,MeshElementArray_device, MeshNodeArray_double_device, ID, r[tid], s[tid]); // Get velocity GetVel_unstruct2D( tlocation[0], k1, vel, MeshElementArray_device, ID, r[tid], s[tid] ); // This is Euler method with step of h = h/2 x1 = x1 + (h/2)*k1[0]; y1 = y1 + (h/2)*k1[1]; // Do Local Search ID = get_local_search_2D(x1, y1, MeshElementArray_device, MeshNodeArray_double_device, ID, r[tid], s[tid]); // Get velocity GetVel_unstruct2D( tlocation[1], k2, vel, MeshElementArray_device, ID, r[tid], s[tid] ); // Corrector Adambashford 2nd order equation. x1 = x1 + ( (3/2) * (h/2) * k2[0] ) - ( (1/2) * (h/2) * k1[0] ); y1 = y1 + ( (3/2) * (h/2) * k2[1] ) - ( (1/2) * (h/2) * k1[1] ); x[tid] = x1; y[tid] = y1; eid[tid] = ID; } return; } } __global__ void AdamsBashford_2Order_2D_Cartesian( double *x, double *y, VelData_double vel, int ss, int *integrate, int *ld , double h, int offset ) { //extern __shared__ cache[2*POINTS_BLOCKSIZE_MAIN]; // Each thread will use their own cache element twice. Order is thread 0 -> cache[0] and cache[1] // tloc will be in constant memory (Add this feature if doing multiple segments) // intervals represents how many timesteps will be evaluated by a single kernel.(Add this feature if doing multiple segments) // Since the first step involved is Euler, we will carryout integration over half a time step for first case. Then we will discard it completely. This is done for the first step run only. int tid; // Get thread index tid=(blockIdx.x*blockDim.x)+threadIdx.x; if( tid >= ss) { // Redundant thread ... return; } else { tid = tid + offset; if (integrate[tid] == 1) { double k1[2], k2[2]; double x1,y1; x1 = x[tid]; y1 = y[tid]; // Get velocity for x and y position at time t1 GetVel_cartesian2D(x1, y1, tlocation[0], k1, vel, ld[tid] ); //cache[2*threadIdx.x] = k[0]; //cache[2*threadIdx.x + 1] = k[1]; // This is Euler method with step of h = h/2 x1 = x1 + (h/2)*k1[0]; y1 = y1 + (h/2)*k1[1]; // Test if point is outside domain. ld[tid] = TestDomain(x1, y1); // Get velocity at new x and y positions GetVel_cartesian2D( x1, y1, tlocation[1], k2, vel, ld[tid] ); // Corrector Adambashford 2nd order equation. x1 = x1 + ( (3/2) * (h/2) * k2[0] ) - ( (1/2) * (h/2) * k1[0] ); y1 = y1 + ( (3/2) * (h/2) * k2[1] ) - ( (1/2) * (h/2) * k1[1] ); // Test if point is outside domain. ld[tid] = TestDomain(x1, y1); x[tid] = x1; y[tid] = y1; } } } __global__ void AdamsBashford_2Order_3D_Cartesian( double *x, double *y, double *z, VelData_double vel, int ss, int *integrate, int *ld , double h, int offset) { // Each thread will use their own cache element twice. Order is thread 0 -> cache[0] and cache[1] // tloc will be in constant memory (Add this feature if doing multiple segments) // intervals represents how many timesteps will be evaluated by a single kernel.(Add this feature if doing multiple segments) // Since the first step involved is Euler, we will carryout integration over half a time step for first case. Then we will discard it completely. This is done for the first step run only. int tid; // Get thread index tid=(blockIdx.x*blockDim.x)+threadIdx.x; if( tid >= ss) { // Redundant thread ... 
return; } else { tid = tid + offset; if (integrate[tid] == 1) { double k1[3], k2[3]; double x1,y1,z1; x1 = x[tid]; y1 = y[tid]; z1 = z[tid]; // Get velocity for x, y and z position at time t1 GetVel_cartesian3D(x1, y1, z1, tlocation[0], k1, vel, ld[tid] ); // This is Euler method with step of h = h/2 x1 = x1 + (h/2)*k1[0]; y1 = y1 + (h/2)*k1[1]; z1 = z1 + (h/2)*k1[2]; // Test if point is outside domain. ld[tid] = TestDomain3D(x1, y1, z1); // Get velocity at new x, y and z positions GetVel_cartesian3D( x1, y1, z1, tlocation[1], k2, vel, ld[tid] ); // Corrector Adambashford 2nd order equation. x1 = x1 + ( (3/2) * (h/2) * k2[0] ) - ( (1/2) * (h/2) * k1[0] ); y1 = y1 + ( (3/2) * (h/2) * k2[1] ) - ( (1/2) * (h/2) * k1[1] ); z1 = z1 + ( (3/2) * (h/2) * k2[2] ) - ( (1/2) * (h/2) * k1[2] ); // Test if point is outside domain. ld[tid] = TestDomain3D(x1, y1, z1); x[tid] = x1; y[tid] = y1; z[tid] = z1; } return; } } __device__ int TestDomain(double x, double y) { // This function uses constant memory. // Test if point is outside domain. if(x < (XX_Data[0] - TINY) || x > (XX_Data[1] + TINY)) { return(1); } else if(y < (YY_Data[0] - TINY) || y > (YY_Data[1] + TINY)) { return(1); } else { return(0); } } __device__ int TestDomain3D(double x, double y, double z) { // This function uses constant memory. // Test if point is outside domain. if(x < (XX_Data[0] - TINY) || x > (XX_Data[1] + TINY)) { return(1); } else if(y < (YY_Data[0] - TINY) || y > (YY_Data[1] + TINY)) { return(1); } else if (z < (ZZ_Data[0] - TINY) || z > (ZZ_Data[1] + TINY)) { return(1); } else { return(0); } }
db13b0a6d941d51f54a5b04dadef3d500c9b2295.cu
# include "multistep.h" void computePoints_AdamsBashford2(double tmin, double tmax, int N1, double Data_TMin, double Data_TMax, int Num_launch) { ////////////////////////////////////////////////////////////////////////////////////// // 2nd order Adams-Bashford Method // For Given Initial Value Problem: // (d/dt)y = f(y,t) // y(n) = constant (For our application) // equation is y(n+2) = y(n+1) + ( 3/2 * h * f( t(n+1), y(n+1) ) ) - ( 1/2 * h * f( t(n), y(n) ) ) // y(n) is given. // First y(n+1) is obtained from the Euler method. // Simple Euler Method used is y(n+1) = y(n) + ( h * f( t(n), y(n) ) ) ////////////////////////////////////////////////////////////////////////////////////// double h; // Integration time from tmin to tmax. int n_loops; float elapsedTime; // Total number of loops for integrating particles over a single time step. n_loops = ( fmod((tmax - tmin) , Int_TimeStep) == 0 ) ? fabs((tmax - tmin)/Int_TimeStep) : (fabs((tmax - tmin)/Int_TimeStep) + 1); // Calculation of Block size for single chunk of integration kernel ... if ( N1 % nthreads == 0 ) { nblocks = (int)(N1/nthreads); } else { nblocks = (int)((N1/nthreads) + 1); } double t1, t2, tloc[2], tcurrent; int releases, offset; // Time counters which use GPU to time our entire program.... cudaEvent_t start_int,stop_int; cudaEventCreate( &start_int ); cudaEventCreate( &stop_int ); cudaEventRecord( start_int, 0 ); for (releases = 0; releases < Num_launch; releases++) { printf("Integrating Release %d from %f to %f \n", releases, tmin, tmax); offset = releases * N1; // Integrate all points for this release for (int a = 0; a < n_loops; a++) { t1 = fmax (( tmin + a*Int_TimeStep ), Tracer.Start_time[offset]); t2 = t1 + Int_TimeStep; if (t2 > tmax) { t2 = tmax; h = t2 - t1; } else { h = t2 - t1; } tcurrent = t1; tloc[0] = (tcurrent - Data_TMin) / (Data_TMax - Data_TMin); tcurrent = t1 + (h/2); tloc[1] = (tcurrent - Data_TMin) / (Data_TMax - Data_TMin); // Memcpy of constant memory err = cudaMemcpyToSymbol( tlocation, &tloc, 2*sizeof(double), 0, cudaMemcpyHostToDevice); if(err != cudaSuccess) { fprintf(stderr, "Memory copy of constant variable (tloc) from Host to Device failed\n"); printf("CUDA Error: %s \n\n", cudaGetErrorString(err)); fprintf(stderr, " \n\n"); exit(1); } // Check which point needs to be integrated ... cudaPrintfInit(); check_int<<<nblocks, nthreads>>>(Tracer_dev, integrate, t1, t2, N1, offset); cudaPrintfDisplay(stdout, true); err = cudaThreadSynchronize(); // Synchronize function that will wait till all threads are done computing .... if(err != cudaSuccess) { fprintf(stderr, "Error in check_int. \n"); fprintf(stderr,"CUDA Error: %s \n\n", cudaGetErrorString(err)); exit(1); } if (Dimensions == 3) { // 3-D case. if (Data_MeshType == UNSTRUCTURED) { //AdamsBashford_3DUnstructured_optimized(N1, h); // This function is faster than the optimized version AdamsBashford_3D_Unstructured<<<nblocks, nthreads>>>(Tracer_dev.x, Tracer_dev.y, Tracer_dev.z, velocity_dev, MeshElementArray_device, Tracer_dev.ElementIndex, r, s, t, MeshNodeArray_double_device, N1, integrate, h, offset ); } else { // Cartesian Mesh AdamsBashford_2Order_3D_Cartesian<<<nblocks, nthreads>>>( Tracer_dev.x, Tracer_dev.y, Tracer_dev.z, velocity_dev, N1, integrate, Tracer_dev.LeftDomain , h, offset ); } } else { // 2-D case. 
if (Data_MeshType == UNSTRUCTURED) { AdamsBashford_2D_Unstructured<<<nblocks, nthreads>>>(Tracer_dev.x, Tracer_dev.y, velocity_dev, MeshElementArray_device, Tracer_dev.ElementIndex, r, s, MeshNodeArray_double_device, N1, integrate, h, offset ); } else { // Cartesian Mesh AdamsBashford_2Order_2D_Cartesian<<<nblocks, nthreads>>>( Tracer_dev.x, Tracer_dev.y, velocity_dev, N1, integrate, Tracer_dev.LeftDomain , h, offset ); } } err = cudaThreadSynchronize(); // Synchronize function that will wait till all threads are done computing .... if(err != cudaSuccess) { fprintf(stderr, "Error in AdamsBashford kernel. \n"); fprintf(stderr,"CUDA Error: %s \n\n", cudaGetErrorString(err)); exit(1); } } printf("Release %d integrated \n",releases); } cudaEventRecord( stop_int, 0 ); cudaEventSynchronize( stop_int ); cudaEventElapsedTime( &elapsedTime, start_int, stop_int ); printf( "Time for integration: %3.2f ms\n", elapsedTime ); cudaEventDestroy( stop_int ) ; cudaEventDestroy( start_int ) ; } /* void AdamsBashford_3DUnstructured_optimized(int N1, double h) { // Do local search ..... LocalSearch3D<<<nblocks, nthreads>>>(Tracer_dev.x, Tracer_dev.y, Tracer_dev.z, MeshElementArray_device, MeshNodeArray_double_device, Tracer_dev.ElementIndex, N1, 0, r, s, t, integrate); err = cudaThreadSynchronize(); // Synchronize function that will wait till all threads are done computing .... if(err != cudaSuccess) { fprintf(stderr, "Error in LocalSearch3D. \n"); fprintf(stderr,"CUDA Error: %s \n\n" , cudaGetErrorString(err)); exit(1); } // Get Velocity .. Adams_Integrate3D<<<nblocks, nthreads>>>(Tracer_dev.x, Tracer_dev.y, Tracer_dev.z, Tracer_dev.ElementIndex, xn0, xn1, xn2, velocity_dev, MeshElementArray_device, r, s, t, N1, integrate, h, 0, 0); err = cudaThreadSynchronize(); // Synchronize function that will wait till all threads are done computing .... if(err != cudaSuccess) { fprintf(stderr, "Error in Euler3D. \n"); fprintf(stderr,"CUDA Error: %s \n\n" , cudaGetErrorString(err)); exit(1); } // Do local search ..... LocalSearch3D<<<nblocks, nthreads>>>(Tracer_dev.x, Tracer_dev.y, Tracer_dev.z, MeshElementArray_device, MeshNodeArray_double_device, Tracer_dev.ElementIndex, N1, 0, r, s, t, integrate); err = cudaThreadSynchronize(); // Synchronize function that will wait till all threads are done computing .... if(err != cudaSuccess) { fprintf(stderr, "Error in LocalSearch3D. \n"); fprintf(stderr,"CUDA Error: %s \n\n" , cudaGetErrorString(err)); exit(1); } // Get Velocity .. Adams_Integrate3D<<<nblocks, nthreads>>>(Tracer_dev.x, Tracer_dev.y, Tracer_dev.z, Tracer_dev.ElementIndex, xn0, xn1, xn2, velocity_dev, MeshElementArray_device, r, s, t, N1, integrate, h, 1, 0); } __global__ void Adams_Integrate3D(double *x, double *y, double *z, int *eid, double *xn0, double *xn1, double *xn2, VelData_double vel, Element MeshElementArray_device, double *r, double *s, double *t, int ss, int *integrate, double h, int option, int offset ) { // Option = 0 => Euler method int tid; // Get thread index tid=(blockIdx.x*blockDim.x)+threadIdx.x; if( tid >= ss) { // Redundant thread ... 
return; } else { tid = tid + offset; if (integrate[tid] == 1) { double k[3]; if (option == 0 ) { // Get velocity GetVel_unstruct3D( tlocation[0], k, vel, MeshElementArray_device, eid[tid], r[tid], s[tid], t[tid] ); // This is Euler method with step of h = h/2 // x1 = x1 + (h/2)*k[0]; // y1 = y1 + (h/2)*k[1]; // z1 = z1 + (h/2)*k[2]; x[tid] = x[tid] + (0.5*h)*k[0]; y[tid] = y[tid] + (0.5*h)*k[1]; z[tid] = z[tid] + (0.5*h)*k[2]; xn0[tid] = k[0]; xn1[tid] = k[1]; xn2[tid] = k[2]; } else if (option == 1) { // Get velocity GetVel_unstruct3D( tlocation[1], k, vel, MeshElementArray_device, eid[tid], r[tid], s[tid], t[tid] ); // Corrector Adambashford 2nd order equation. // x1 = x1 + ( (3/2) * (h/2) * k[0] ) - ( (1/2) * (h/2) * xn0[tid] ); // y1 = y1 + ( (3/2) * (h/2) * k[1] ) - ( (1/2) * (h/2) * xn1[tid] ); // z1 = z1 + ( (3/2) * (h/2) * k[2] ) - ( (1/2) * (h/2) * xn2[tid] ); x[tid] = x[tid] + ( (1.5) * (0.5*h) * k[0] ) - ( (0.5) * (0.5*h) * xn0[tid] ); y[tid] = y[tid] + ( (1.5) * (0.5*h) * k[1] ) - ( (0.5) * (0.5*h) * xn1[tid] ); z[tid] = z[tid] + ( (1.5) * (0.5*h) * k[2] ) - ( (0.5) * (0.5*h) * xn2[tid] ); } return; } } } */ __global__ void AdamsBashford_3D_Unstructured(double *x, double *y, double *z, VelData_double vel, Element MeshElementArray_device, int *eid, double *r, double *s, double *t, Node_double MeshNodeArray_double_device, int ss, int *integrate, double h, int offset ) { int tid; // Get thread index tid=(blockIdx.x*blockDim.x)+threadIdx.x; if( tid >= ss) { // Redundant thread ... return; } else { tid = tid + offset; if (integrate[tid] == 1) { double k1[3], k2[3]; double x1,y1,z1; int ID = eid[tid]; x1 = x[tid]; y1 = y[tid]; z1 = z[tid]; // Do Local Search ID = get_local_search_3D(x1, y1, z1, MeshElementArray_device, MeshNodeArray_double_device, ID, r[tid], s[tid], t[tid]); // Get velocity GetVel_unstruct3D( tlocation[0], k1, vel, MeshElementArray_device, ID, r[tid], s[tid], t[tid] ); // This is Euler method with step of h = h/2 x1 = x1 + (h*0.5)*k1[0]; y1 = y1 + (h*0.5)*k1[1]; z1 = z1 + (h*0.5)*k1[2]; // Do Local Search ID = get_local_search_3D(x1, y1, z1, MeshElementArray_device, MeshNodeArray_double_device, ID, r[tid], s[tid], t[tid]); // Get velocity GetVel_unstruct3D( tlocation[1], k2, vel, MeshElementArray_device, ID, r[tid], s[tid], t[tid] ); // Corrector Adambashford 2nd order equation. x1 = x1 + ( (1.5) * (h*0.5) * k2[0] ) - ( (0.5) * (h*0.5) * k1[0] ); y1 = y1 + ( (1.5) * (h*0.5) * k2[1] ) - ( (0.5) * (h*0.5) * k1[1] ); z1 = z1 + ( (1.5) * (h*0.5) * k2[2] ) - ( (0.5) * (h*0.5) * k1[2] ); x[tid] = x1; y[tid] = y1; z[tid] = z1; eid[tid] = ID; } return; } } __global__ void AdamsBashford_2D_Unstructured(double *x, double *y, VelData_double vel, Element MeshElementArray_device, int *eid, double *r, double *s, Node_double MeshNodeArray_double_device, int ss, int *integrate, double h, int offset ) { int tid; // Get thread index tid=(blockIdx.x*blockDim.x)+threadIdx.x; if( tid >= ss) { // Redundant thread ... 
return; } else { tid = tid + offset; if (integrate[tid] == 1) { double k1[2], k2[2]; double x1,y1; int ID = eid[tid]; x1 = x[tid]; y1 = y[tid]; // Do Local Search ID = get_local_search_2D(x1, y1,MeshElementArray_device, MeshNodeArray_double_device, ID, r[tid], s[tid]); // Get velocity GetVel_unstruct2D( tlocation[0], k1, vel, MeshElementArray_device, ID, r[tid], s[tid] ); // This is Euler method with step of h = h/2 x1 = x1 + (h/2)*k1[0]; y1 = y1 + (h/2)*k1[1]; // Do Local Search ID = get_local_search_2D(x1, y1, MeshElementArray_device, MeshNodeArray_double_device, ID, r[tid], s[tid]); // Get velocity GetVel_unstruct2D( tlocation[1], k2, vel, MeshElementArray_device, ID, r[tid], s[tid] ); // Corrector Adambashford 2nd order equation. x1 = x1 + ( (3/2) * (h/2) * k2[0] ) - ( (1/2) * (h/2) * k1[0] ); y1 = y1 + ( (3/2) * (h/2) * k2[1] ) - ( (1/2) * (h/2) * k1[1] ); x[tid] = x1; y[tid] = y1; eid[tid] = ID; } return; } } __global__ void AdamsBashford_2Order_2D_Cartesian( double *x, double *y, VelData_double vel, int ss, int *integrate, int *ld , double h, int offset ) { //extern __shared__ cache[2*POINTS_BLOCKSIZE_MAIN]; // Each thread will use their own cache element twice. Order is thread 0 -> cache[0] and cache[1] // tloc will be in constant memory (Add this feature if doing multiple segments) // intervals represents how many timesteps will be evaluated by a single kernel.(Add this feature if doing multiple segments) // Since the first step involved is Euler, we will carryout integration over half a time step for first case. Then we will discard it completely. This is done for the first step run only. int tid; // Get thread index tid=(blockIdx.x*blockDim.x)+threadIdx.x; if( tid >= ss) { // Redundant thread ... return; } else { tid = tid + offset; if (integrate[tid] == 1) { double k1[2], k2[2]; double x1,y1; x1 = x[tid]; y1 = y[tid]; // Get velocity for x and y position at time t1 GetVel_cartesian2D(x1, y1, tlocation[0], k1, vel, ld[tid] ); //cache[2*threadIdx.x] = k[0]; //cache[2*threadIdx.x + 1] = k[1]; // This is Euler method with step of h = h/2 x1 = x1 + (h/2)*k1[0]; y1 = y1 + (h/2)*k1[1]; // Test if point is outside domain. ld[tid] = TestDomain(x1, y1); // Get velocity at new x and y positions GetVel_cartesian2D( x1, y1, tlocation[1], k2, vel, ld[tid] ); // Corrector Adambashford 2nd order equation. x1 = x1 + ( (3/2) * (h/2) * k2[0] ) - ( (1/2) * (h/2) * k1[0] ); y1 = y1 + ( (3/2) * (h/2) * k2[1] ) - ( (1/2) * (h/2) * k1[1] ); // Test if point is outside domain. ld[tid] = TestDomain(x1, y1); x[tid] = x1; y[tid] = y1; } } } __global__ void AdamsBashford_2Order_3D_Cartesian( double *x, double *y, double *z, VelData_double vel, int ss, int *integrate, int *ld , double h, int offset) { // Each thread will use their own cache element twice. Order is thread 0 -> cache[0] and cache[1] // tloc will be in constant memory (Add this feature if doing multiple segments) // intervals represents how many timesteps will be evaluated by a single kernel.(Add this feature if doing multiple segments) // Since the first step involved is Euler, we will carryout integration over half a time step for first case. Then we will discard it completely. This is done for the first step run only. int tid; // Get thread index tid=(blockIdx.x*blockDim.x)+threadIdx.x; if( tid >= ss) { // Redundant thread ... 
return; } else { tid = tid + offset; if (integrate[tid] == 1) { double k1[3], k2[3]; double x1,y1,z1; x1 = x[tid]; y1 = y[tid]; z1 = z[tid]; // Get velocity for x, y and z position at time t1 GetVel_cartesian3D(x1, y1, z1, tlocation[0], k1, vel, ld[tid] ); // This is Euler method with step of h = h/2 x1 = x1 + (h/2)*k1[0]; y1 = y1 + (h/2)*k1[1]; z1 = z1 + (h/2)*k1[2]; // Test if point is outside domain. ld[tid] = TestDomain3D(x1, y1, z1); // Get velocity at new x, y and z positions GetVel_cartesian3D( x1, y1, z1, tlocation[1], k2, vel, ld[tid] ); // Corrector Adambashford 2nd order equation. x1 = x1 + ( (3/2) * (h/2) * k2[0] ) - ( (1/2) * (h/2) * k1[0] ); y1 = y1 + ( (3/2) * (h/2) * k2[1] ) - ( (1/2) * (h/2) * k1[1] ); z1 = z1 + ( (3/2) * (h/2) * k2[2] ) - ( (1/2) * (h/2) * k1[2] ); // Test if point is outside domain. ld[tid] = TestDomain3D(x1, y1, z1); x[tid] = x1; y[tid] = y1; z[tid] = z1; } return; } } __device__ int TestDomain(double x, double y) { // This function uses constant memory. // Test if point is outside domain. if(x < (XX_Data[0] - TINY) || x > (XX_Data[1] + TINY)) { return(1); } else if(y < (YY_Data[0] - TINY) || y > (YY_Data[1] + TINY)) { return(1); } else { return(0); } } __device__ int TestDomain3D(double x, double y, double z) { // This function uses constant memory. // Test if point is outside domain. if(x < (XX_Data[0] - TINY) || x > (XX_Data[1] + TINY)) { return(1); } else if(y < (YY_Data[0] - TINY) || y > (YY_Data[1] + TINY)) { return(1); } else if (z < (ZZ_Data[0] - TINY) || z > (ZZ_Data[1] + TINY)) { return(1); } else { return(0); } }
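Note that the 2D unstructured kernel and both Cartesian kernels above write the corrector with the integer literals (3/2) and (1/2), which evaluate to 1 and 0 in C/C++, so the second term vanishes and those kernels effectively take another half Euler step; the 3D unstructured kernel already uses 1.5 and 0.5. A standalone sketch of the intended second-order update over a half step, with a hypothetical helper name and k1/k2 standing for the predictor and corrector velocity samples:

// Intended AB2-style corrector over a half step h/2, with the coefficients
// written as floating-point literals so no integer division occurs.
__device__ double ab2_corrector_step(double y, double h, double k1, double k2)
{
    return y + (1.5 * (0.5 * h) * k2) - (0.5 * (0.5 * h) * k1);
}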
d7eae5c0bb2fd7932509746ed39c39e164d2ffa9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void vectorLength(int size, const double *x, const double *y, double *len)
{
    const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
    if (ix < size)
    {
        len[ix] = sqrt(x[ix] * x[ix] + y[ix] * y[ix]);
    }
}
d7eae5c0bb2fd7932509746ed39c39e164d2ffa9.cu
#include "includes.h"

__global__ void vectorLength(int size, const double *x, const double *y, double *len)
{
    const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
    if (ix < size)
    {
        len[ix] = sqrt(x[ix] * x[ix] + y[ix] * y[ix]);
    }
}
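A minimal host-side driver for the kernel above, assuming vectorLength is visible in the same translation unit; the array size, block size, and test values are illustrative assumptions rather than part of the original file.

#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

// Hypothetical usage of vectorLength(): element-wise sqrt(x^2 + y^2).
int main()
{
    const int size = 1 << 20;
    std::vector<double> hx(size, 3.0), hy(size, 4.0), hlen(size, 0.0);

    double *dx, *dy, *dlen;
    cudaMalloc(&dx, size * sizeof(double));
    cudaMalloc(&dy, size * sizeof(double));
    cudaMalloc(&dlen, size * sizeof(double));
    cudaMemcpy(dx, hx.data(), size * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dy, hy.data(), size * sizeof(double), cudaMemcpyHostToDevice);

    const int block = 256;
    const int grid = (size + block - 1) / block;   // enough blocks to cover every element
    vectorLength<<<grid, block>>>(size, dx, dy, dlen);
    cudaDeviceSynchronize();

    cudaMemcpy(hlen.data(), dlen, size * sizeof(double), cudaMemcpyDeviceToHost);
    printf("len[0] = %f (expected 5.0)\n", hlen[0]);

    cudaFree(dx); cudaFree(dy); cudaFree(dlen);
    return 0;
}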
aecdb90932399aa724df7f8363cefa071a5cbf33.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "rw_cuda_edge_list.h" #include "utils.cuh" #include <ATen/hip/HIPContext.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> __device__ int64_t sample_int(int64_t start, int64_t end,hiprandState_t* rand_state){ auto sampled_int = start + (hiprand(rand_state) % (((end + 1) - start))); return sampled_int; } __device__ int64_t sample_neighbor_gpu(int64_t target_node, int64_t jump_node, int64_t padding_index, const torch::PackedTensorAccessor64<int64_t,2> node_edge_index, const torch::PackedTensorAccessor64<int64_t,2> edge_list_indexed, hiprandState_t* rand_state ) { if(target_node != padding_index){ // get the edge range for the target node auto start_index = node_edge_index[target_node][0]; auto end_index = node_edge_index[target_node][1]; // randomly select an index in this range if(start_index == -1 || end_index == -1){ return padding_index; }else{ auto nbr_edge_index = sample_int(start_index,end_index,rand_state); // get the edge at this index auto nbr_id = edge_list_indexed[nbr_edge_index][1]; return nbr_id; } }else{ // restart the walk from first node return jump_node; } } __global__ void uniform_walk_edge_list_gpu(const torch::PackedTensorAccessor64<int64_t,2> walks, const torch::PackedTensorAccessor64<int64_t,2> edge_list_indexed_accessor, const torch::PackedTensorAccessor64<int64_t,2> node_edges_index_accessor, const torch::PackedTensorAccessor64<int64_t,1> target_nodes_accesor, const int walk_length, const int64_t padding_index, const int64_t num_nodes, const int seed, const bool restart ) { // get the thread const auto thread_index = blockIdx.x * blockDim.x + threadIdx.x; // seed rng hiprandState_t rand_state; hiprand_init(seed,thread_index,0,&rand_state); // bound check if(thread_index < num_nodes) { // get the walk array for this node auto walks_for_node = walks[thread_index]; // get the target node int64_t target_node = target_nodes_accesor[thread_index]; // set the jump node according to restart policy int64_t jump_node = 0; if(restart == true){ jump_node = target_node; }else{ jump_node = padding_index; } // add target node as the first node in walk walks_for_node[0] = target_node; // start walk int64_t previous_node = target_node; for (int64_t walk_step=1;walk_step < walk_length;walk_step++){ // sample a neighor int64_t next_node = sample_neighbor_gpu(previous_node, jump_node, padding_index, node_edges_index_accessor, edge_list_indexed_accessor, &rand_state); walks_for_node[walk_step] = next_node; // update previous node previous_node = next_node; } } } __device__ bool is_neighbor(int64_t new_node, int64_t previous_node, const torch::PackedTensorAccessor64<int64_t,2> node_edge_index, const torch::PackedTensorAccessor64<int64_t,2> edge_list_indexed) { // get the edge range for the target node auto start_index = node_edge_index[previous_node][0]; auto end_index = node_edge_index[previous_node][1]; // randomly select an index in this range if(start_index == -1 || end_index == -1){ return false; }else{ for(int64_t i = start_index;i<end_index;i++){ auto node = edge_list_indexed[i][1]; if(node==new_node){ return true; } } return false; } } __global__ void biased_walk_edge_list_gpu(const torch::PackedTensorAccessor64<int64_t,2> walks, const torch::PackedTensorAccessor64<int64_t,2> edge_list_indexed_accessor, const torch::PackedTensorAccessor64<int64_t,2> node_edges_index_accessor, const torch::PackedTensorAccessor64<int64_t,1> target_nodes_accesor, const double p, const 
double q, const int walk_length, const int64_t padding_index, const int64_t num_nodes, const int seed, const bool restart ) { // get the thread const auto thread_index = blockIdx.x * blockDim.x + threadIdx.x; // seed rng hiprandState_t rand_state; hiprand_init(seed,thread_index,0,&rand_state); // normalize rejection probs double max_prob_init = fmax(1.0/p,1); double max_prob = fmax(max_prob_init,1.0/q); double prob_0 = 1.0/p/max_prob; double prob_1 = 1.0/max_prob; double prob_2 = 1.0/q/max_prob; // bound check if(thread_index < num_nodes) { // get the walk array for this node auto walks_for_node = walks[thread_index]; // get the target node int64_t target_node = target_nodes_accesor[thread_index]; // set the jump node according to restart policy int64_t jump_node = 0; if(restart == true){ jump_node = target_node; }else{ jump_node = padding_index; } // add target node as the first node in walk walks_for_node[0] = target_node; // sample the first neighbor walks_for_node[1] = sample_neighbor_gpu(target_node, jump_node, padding_index, node_edges_index_accessor, edge_list_indexed_accessor, &rand_state); // start walk int64_t previous_node = walks_for_node[1]; for(int64_t walk_step = 2; walk_step< walk_length;walk_step++){ int64_t selected_node = -1; // rejection sampling while(true) { // sample a new neighbor int64_t new_node = sample_neighbor_gpu(previous_node, jump_node, padding_index, node_edges_index_accessor, edge_list_indexed_accessor, &rand_state); auto random_prob = hiprand_uniform(&rand_state); // t_node int64_t t_node = walks_for_node[walk_step-2]; // new_node is the same as previous to previous node, so go back. if(new_node == t_node) { if(random_prob < prob_0){ selected_node = new_node; break; } } // if new_node is a padding_idx then restart if(new_node == padding_index){ if(random_prob < prob_0){ selected_node = jump_node; break; } } // else if new_node and t_node are neighbors i.e distance is 1 else if(is_neighbor(new_node,t_node,node_edges_index_accessor,edge_list_indexed_accessor)){ if(random_prob < prob_1) { selected_node = new_node; break; } } // else distance is 2 else if(random_prob < prob_2){ selected_node = new_node; break; } } // end while walks_for_node[walk_step] = selected_node; previous_node = selected_node; } } } torch::Tensor walk_edge_list_gpu(const torch::Tensor *edge_list_indexed, const torch::Tensor *node_edges_idx, const torch::Tensor *target_nodes, const double p, const double q, const int walk_length, const int seed, const int64_t padding_idx, const bool restart ) { CHECK_CUDA((*edge_list_indexed)); CHECK_CUDA((*node_edges_idx)); CHECK_CUDA((*target_nodes)); // construct a tensor to hold the walks auto walk_size = walk_length + 1; auto options = torch::TensorOptions().dtype(torch::kInt64).device(torch::kCUDA,node_edges_idx->device().index()); auto walks = torch::empty({(*target_nodes).size(0),walk_size},options); // create accessors auto walks_accessor = walks.packed_accessor64<int64_t,2>(); auto edge_list_indexed_accessor = edge_list_indexed->packed_accessor64<int64_t,2>(); auto node_edges_index_accessor = node_edges_idx->packed_accessor64<int64_t,2>(); auto target_nodes_accesor = target_nodes->packed_accessor64<int64_t,1>(); // get the number of nodes int64_t num_nodes = (*target_nodes).size(0); // Thread block size int NUM_THREADS = 1024; // Grid size int NUM_BLOCKS = int((num_nodes + NUM_THREADS - 1)/NUM_THREADS); // active stream auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); // perform walks if(p == 1.0 && q == 1.0){ 
hipLaunchKernelGGL(( uniform_walk_edge_list_gpu), dim3(NUM_BLOCKS),dim3(NUM_THREADS),0,stream, walks_accessor, edge_list_indexed_accessor, node_edges_index_accessor, target_nodes_accesor, walk_size, padding_idx, num_nodes, seed, restart ); }else{ hipLaunchKernelGGL(( biased_walk_edge_list_gpu), dim3(NUM_BLOCKS),dim3(NUM_THREADS),0,stream, walks_accessor, edge_list_indexed_accessor, node_edges_index_accessor, target_nodes_accesor, p, q, walk_size, padding_idx, num_nodes, seed, restart ); } return walks; }
aecdb90932399aa724df7f8363cefa071a5cbf33.cu
#include "rw_cuda_edge_list.h" #include "utils.cuh" #include <ATen/cuda/CUDAContext.h> #include <curand.h> #include <curand_kernel.h> __device__ int64_t sample_int(int64_t start, int64_t end,curandState_t* rand_state){ auto sampled_int = start + (curand(rand_state) % (((end + 1) - start))); return sampled_int; } __device__ int64_t sample_neighbor_gpu(int64_t target_node, int64_t jump_node, int64_t padding_index, const torch::PackedTensorAccessor64<int64_t,2> node_edge_index, const torch::PackedTensorAccessor64<int64_t,2> edge_list_indexed, curandState_t* rand_state ) { if(target_node != padding_index){ // get the edge range for the target node auto start_index = node_edge_index[target_node][0]; auto end_index = node_edge_index[target_node][1]; // randomly select an index in this range if(start_index == -1 || end_index == -1){ return padding_index; }else{ auto nbr_edge_index = sample_int(start_index,end_index,rand_state); // get the edge at this index auto nbr_id = edge_list_indexed[nbr_edge_index][1]; return nbr_id; } }else{ // restart the walk from first node return jump_node; } } __global__ void uniform_walk_edge_list_gpu(const torch::PackedTensorAccessor64<int64_t,2> walks, const torch::PackedTensorAccessor64<int64_t,2> edge_list_indexed_accessor, const torch::PackedTensorAccessor64<int64_t,2> node_edges_index_accessor, const torch::PackedTensorAccessor64<int64_t,1> target_nodes_accesor, const int walk_length, const int64_t padding_index, const int64_t num_nodes, const int seed, const bool restart ) { // get the thread const auto thread_index = blockIdx.x * blockDim.x + threadIdx.x; // seed rng curandState_t rand_state; curand_init(seed,thread_index,0,&rand_state); // bound check if(thread_index < num_nodes) { // get the walk array for this node auto walks_for_node = walks[thread_index]; // get the target node int64_t target_node = target_nodes_accesor[thread_index]; // set the jump node according to restart policy int64_t jump_node = 0; if(restart == true){ jump_node = target_node; }else{ jump_node = padding_index; } // add target node as the first node in walk walks_for_node[0] = target_node; // start walk int64_t previous_node = target_node; for (int64_t walk_step=1;walk_step < walk_length;walk_step++){ // sample a neighor int64_t next_node = sample_neighbor_gpu(previous_node, jump_node, padding_index, node_edges_index_accessor, edge_list_indexed_accessor, &rand_state); walks_for_node[walk_step] = next_node; // update previous node previous_node = next_node; } } } __device__ bool is_neighbor(int64_t new_node, int64_t previous_node, const torch::PackedTensorAccessor64<int64_t,2> node_edge_index, const torch::PackedTensorAccessor64<int64_t,2> edge_list_indexed) { // get the edge range for the target node auto start_index = node_edge_index[previous_node][0]; auto end_index = node_edge_index[previous_node][1]; // randomly select an index in this range if(start_index == -1 || end_index == -1){ return false; }else{ for(int64_t i = start_index;i<end_index;i++){ auto node = edge_list_indexed[i][1]; if(node==new_node){ return true; } } return false; } } __global__ void biased_walk_edge_list_gpu(const torch::PackedTensorAccessor64<int64_t,2> walks, const torch::PackedTensorAccessor64<int64_t,2> edge_list_indexed_accessor, const torch::PackedTensorAccessor64<int64_t,2> node_edges_index_accessor, const torch::PackedTensorAccessor64<int64_t,1> target_nodes_accesor, const double p, const double q, const int walk_length, const int64_t padding_index, const int64_t num_nodes, const int seed, const 
bool restart ) { // get the thread const auto thread_index = blockIdx.x * blockDim.x + threadIdx.x; // seed rng curandState_t rand_state; curand_init(seed,thread_index,0,&rand_state); // normalize rejection probs double max_prob_init = fmax(1.0/p,1); double max_prob = fmax(max_prob_init,1.0/q); double prob_0 = 1.0/p/max_prob; double prob_1 = 1.0/max_prob; double prob_2 = 1.0/q/max_prob; // bound check if(thread_index < num_nodes) { // get the walk array for this node auto walks_for_node = walks[thread_index]; // get the target node int64_t target_node = target_nodes_accesor[thread_index]; // set the jump node according to restart policy int64_t jump_node = 0; if(restart == true){ jump_node = target_node; }else{ jump_node = padding_index; } // add target node as the first node in walk walks_for_node[0] = target_node; // sample the first neighbor walks_for_node[1] = sample_neighbor_gpu(target_node, jump_node, padding_index, node_edges_index_accessor, edge_list_indexed_accessor, &rand_state); // start walk int64_t previous_node = walks_for_node[1]; for(int64_t walk_step = 2; walk_step< walk_length;walk_step++){ int64_t selected_node = -1; // rejection sampling while(true) { // sample a new neighbor int64_t new_node = sample_neighbor_gpu(previous_node, jump_node, padding_index, node_edges_index_accessor, edge_list_indexed_accessor, &rand_state); auto random_prob = curand_uniform(&rand_state); // t_node int64_t t_node = walks_for_node[walk_step-2]; // new_node is the same as previous to previous node, so go back. if(new_node == t_node) { if(random_prob < prob_0){ selected_node = new_node; break; } } // if new_node is a padding_idx then restart if(new_node == padding_index){ if(random_prob < prob_0){ selected_node = jump_node; break; } } // else if new_node and t_node are neighbors i.e distance is 1 else if(is_neighbor(new_node,t_node,node_edges_index_accessor,edge_list_indexed_accessor)){ if(random_prob < prob_1) { selected_node = new_node; break; } } // else distance is 2 else if(random_prob < prob_2){ selected_node = new_node; break; } } // end while walks_for_node[walk_step] = selected_node; previous_node = selected_node; } } } torch::Tensor walk_edge_list_gpu(const torch::Tensor *edge_list_indexed, const torch::Tensor *node_edges_idx, const torch::Tensor *target_nodes, const double p, const double q, const int walk_length, const int seed, const int64_t padding_idx, const bool restart ) { CHECK_CUDA((*edge_list_indexed)); CHECK_CUDA((*node_edges_idx)); CHECK_CUDA((*target_nodes)); // construct a tensor to hold the walks auto walk_size = walk_length + 1; auto options = torch::TensorOptions().dtype(torch::kInt64).device(torch::kCUDA,node_edges_idx->device().index()); auto walks = torch::empty({(*target_nodes).size(0),walk_size},options); // create accessors auto walks_accessor = walks.packed_accessor64<int64_t,2>(); auto edge_list_indexed_accessor = edge_list_indexed->packed_accessor64<int64_t,2>(); auto node_edges_index_accessor = node_edges_idx->packed_accessor64<int64_t,2>(); auto target_nodes_accesor = target_nodes->packed_accessor64<int64_t,1>(); // get the number of nodes int64_t num_nodes = (*target_nodes).size(0); // Thread block size int NUM_THREADS = 1024; // Grid size int NUM_BLOCKS = int((num_nodes + NUM_THREADS - 1)/NUM_THREADS); // active stream auto stream = at::cuda::getCurrentCUDAStream(); // perform walks if(p == 1.0 && q == 1.0){ uniform_walk_edge_list_gpu<<<NUM_BLOCKS,NUM_THREADS,0,stream>>>(walks_accessor, edge_list_indexed_accessor, node_edges_index_accessor, 
target_nodes_accesor, walk_size, padding_idx, num_nodes, seed, restart ); }else{ biased_walk_edge_list_gpu<<<NUM_BLOCKS,NUM_THREADS,0,stream>>>(walks_accessor, edge_list_indexed_accessor, node_edges_index_accessor, target_nodes_accesor, p, q, walk_size, padding_idx, num_nodes, seed, restart ); } return walks; }
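The biased walk above accepts or rejects candidate neighbors with node2vec-style probabilities derived from the return parameter p and the in-out parameter q. A small host-side sketch of that normalization, mirroring the arithmetic in biased_walk_edge_list_gpu; the struct and function names are illustrative only.

#include <algorithm>

// Acceptance probabilities used by the rejection sampler:
//   prob_back -> candidate equals the node visited two steps ago (distance 0)
//   prob_near -> candidate is a neighbor of that node (distance 1)
//   prob_far  -> everything else (distance 2)
struct RejectionProbs { double prob_back, prob_near, prob_far; };

RejectionProbs node2vec_rejection_probs(double p, double q)
{
    const double max_prob = std::max({1.0 / p, 1.0, 1.0 / q});
    return { (1.0 / p) / max_prob,    // prob_0 in the kernel
             1.0       / max_prob,    // prob_1
             (1.0 / q) / max_prob };  // prob_2
}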
0610e33ff571a7334e0e063d465db901486d08cb.hip
// !!! This is a file automatically generated by hipify!!!
/**
 * Yuri Gorokhov
 * lab 4-2 - Constant memory test
 */
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include "../include/cuda_util.h"

#define SIZE 2048
#define ITERATIONS 5000000

__constant__ int array[SIZE];
__global__ void read_kernel(int);

int main(void) {
  hipEvent_t start, stop;
  float elapsedTime;

  // Initialize Array
  int* hostArray = (int*)malloc(SIZE * sizeof(int));
  for(int i = 0; i < SIZE; i++) hostArray[i] = i;

  // Copy Array
  cudasafe( hipMemcpyToSymbol(array, hostArray, SIZE * sizeof(int), 0, hipMemcpyHostToDevice), "hipMemcpyToSymbol" );

  for(int n = 1; n <= 16; n++) {
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start,0);
    hipLaunchKernelGGL(( read_kernel), dim3(1),dim3(256), 0, 0, n);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsedTime, start, stop);
    printf("\nN = %i -> %f", n, elapsedTime);
  }
}

__global__ void read_kernel(int n) {
  int a;
  for(int i = 0; i < ITERATIONS; i++) {
    a = array[128*(threadIdx.x % n)];
  }
  __syncthreads();
}
0610e33ff571a7334e0e063d465db901486d08cb.cu
/**
 * Yuri Gorokhov
 * lab 4-2 - Constant memory test
 */
#include <stdio.h>
#include <cuda.h>
#include <math.h>
#include "../include/cuda_util.h"

#define SIZE 2048
#define ITERATIONS 5000000

__constant__ int array[SIZE];
__global__ void read_kernel(int);

int main(void) {
  cudaEvent_t start, stop;
  float elapsedTime;

  // Initialize Array
  int* hostArray = (int*)malloc(SIZE * sizeof(int));
  for(int i = 0; i < SIZE; i++) hostArray[i] = i;

  // Copy Array
  cudasafe( cudaMemcpyToSymbol(array, hostArray, SIZE * sizeof(int), 0, cudaMemcpyHostToDevice), "cudaMemcpyToSymbol" );

  for(int n = 1; n <= 16; n++) {
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    read_kernel<<<1,256>>>(n);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("\nN = %i -> %f", n, elapsedTime);
  }
}

__global__ void read_kernel(int n) {
  int a;
  for(int i = 0; i < ITERATIONS; i++) {
    a = array[128*(threadIdx.x % n)];
  }
  __syncthreads();
}
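Because the local variable a in read_kernel is never consumed, an optimizing compiler is free to delete the whole read loop, in which case the timings above measure an empty kernel. One common workaround, shown as a sketch that reuses the array and ITERATIONS definitions above, is to fold the reads into a value written back to a global sink buffer; the kernel name and the sink parameter are hypothetical additions.

// Variant whose constant-memory reads cannot be optimized away:
// the accumulated value is stored to a (hypothetical) global sink buffer.
__global__ void read_kernel_sink(int n, unsigned int* sink) {
  unsigned int acc = 0;   // unsigned so wraparound is well defined
  for (int i = 0; i < ITERATIONS; i++) {
    acc += array[128*(threadIdx.x % n)];
  }
  sink[threadIdx.x] = acc;   // forces the loads to be materialized
}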
5ef6136042fe8a20222180d72ba4ca0890d644e2.hip
// !!! This is a file automatically generated by hipify!!! /** * @copyright (c) 2012- King Abdullah University of Science and * Technology (KAUST). All rights reserved. **/ /** * @file src/blas_l2/dgemv_mgpu.cu * KBLAS is a high performance CUDA library for subset of BLAS * and LAPACK routines optimized for NVIDIA GPUs. * KBLAS is provided by KAUST. * * @version 3.0.0 * @author Ahmad Abdelfattah * @date 2018-11-14 **/ #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <rocblas.h> #include "gemv_mgpu_core.cuh" #include "gemv_mgpu_offset_core.cuh" #include "defs.h" #if(TARGET_SM >= 30) #define dgemvn_mgpu_bs (64) #define dgemvn_mgpu_ty (4) //#define dgemvn_mgpu_by (2) #define dgemvt_mgpu_bs (64) #define dgemvt_mgpu_ty (4) //#define dgemvt_mgpu_by (2) #else #define dgemvn_mgpu_bs (64) #define dgemvn_mgpu_ty (8) //#define dgemvn_mgpu_by (1) #define dgemvt_mgpu_bs (64) #define dgemvt_mgpu_ty (8) //#define dgemvt_mgpu_by (1) #endif extern "C" int kblas_dscal_async(int n, double alpha, double *x, int incx, hipStream_t stream); extern "C" int kblas_dgemv_mgpu_driver( char trans, int rows, int cols, double alpha, double *dA, int lda, double *dX, int incx, double beta, double *dY, int incy, int gpu_gid, int ngpus, hipStream_t stream = 0) { const double d_zero = 0.0; if(trans == 'n' || trans == 'N') { //******** config parameters const int thread_x = dgemvn_mgpu_bs; const int thread_y = dgemvn_mgpu_ty; const int elements_per_thread = thread_x/(2*thread_y); int grid_y_n = 1 * ngpus; //************************** // scaling with beta //if(gpu_gid == 0)hipblasDscal(rows, beta, dY, incy); if(gpu_gid == 0)kblas_dscal_async(rows, beta, dY, incy, stream); else kblas_dscal_async(rows, d_zero, dY, incy, stream); int cols_ = dgemvn_mgpu_bs * ( (cols/dgemvn_mgpu_bs)/ngpus ); if(gpu_gid < (cols/dgemvn_mgpu_bs)%ngpus) cols_ += dgemvn_mgpu_bs; if(gpu_gid == (cols/dgemvn_mgpu_bs)%ngpus) cols_ += cols%dgemvn_mgpu_bs; int mod_r = rows % dgemvn_mgpu_bs; int mod_c = cols_ % dgemvn_mgpu_bs; if(mod_r == 0) { if(mod_c == 0) { // special case int blocks = rows/dgemvn_mgpu_bs; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, grid_y_n); if(blocks == 0) return 0; hipLaunchKernelGGL(( gemvn_mgpu_special<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, gpu_gid, ngpus); } else { // generic case for columns only const int irregular_cols = mod_c % elements_per_thread; int blocks = rows/dgemvn_mgpu_bs; blocks += 1; // dummy thread block dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, grid_y_n); if(blocks == 0) return 0; switch(irregular_cols) { /** * The kernel for irregular dimensions has an extra template parameter. * This parameter must be among the values listed in the switch-case statement below. * The possible values are in the range 0 - (elements_per_thread-1) * Make sure these values are updated whenever you change the configuration parameters. 
**/ case 0:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 1:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 2:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 3:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 4:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 5:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 6:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 7:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 8:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 9:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 9>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 10:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 10>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 11:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 11>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 12:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 12>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 13:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, 
elements_per_thread, 13>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 14:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 14>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 15:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 15>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; default: printf("DGEMV-N error: improper template parameter. Please read the inline documentation for this function. \n"); return -1; } } } else // mod_r != 0 { if(mod_c == 0) { // generic case for columns only int blocks = (rows/dgemvn_mgpu_bs) + (mod_r != 0); dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, grid_y_n); if(blocks == 0) return 0; hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); } else { // generic case for rows and cols const int irregular_cols = mod_c % elements_per_thread; int blocks = (rows/dgemvn_mgpu_bs) + (mod_r != 0); //printf("gpu_gid = %d, cols_ = %d \n", gpu_gid, cols_); dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, grid_y_n); if(blocks == 0) return 0; switch(irregular_cols) { /** * The kernel for irregular dimensions has an extra template parameter. * This parameter must be among the values listed in the switch-case statement below. * The possible values are in the range 0 - (elements_per_thread-1) * Make sure these values are updated whenever you change the configuration parameters. 
**/ case 0:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 1:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 2:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 3:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 4:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 5:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 6:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 7:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 8:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 9:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 9>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 10:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 10>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 11:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 11>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 12:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 12>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 13:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, 
elements_per_thread, 13>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 14:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 14>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 15:hipLaunchKernelGGL(( gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 15>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; default: printf("DGEMV-N error: improper template parameter. Please read the inline documentation for this function. \n"); return -1; } } } } // end of non-transpose case else if (trans == 't' || trans == 'T' || trans == 'c' || trans == 'C') { int conj; if(trans == 'c' || trans == 'C') conj = 1; else conj = 0; //************ config parameters const int thread_x = dgemvt_mgpu_bs; const int thread_y = dgemvt_mgpu_ty; const int elements_per_thread = thread_x/(2*thread_y); int grid_y_t = 1 * ngpus; //****************************** // scaling with beta //if(gpu_gid == 0)hipblasDscal(cols, beta, dY, incy); if(gpu_gid == 0)kblas_dscal_async(cols, beta, dY, incy, stream); else kblas_dscal_async(cols, d_zero, dY, incy, stream); int cols_ = dgemvt_mgpu_bs * ( (cols/dgemvt_mgpu_bs)/ngpus ); if(gpu_gid < (cols/dgemvt_mgpu_bs)%ngpus) cols_ += dgemvt_mgpu_bs; if(gpu_gid == (cols/dgemvt_mgpu_bs)%ngpus) cols_ += cols%dgemvt_mgpu_bs; int mod_r = rows % dgemvt_mgpu_bs; int mod_c = cols_ % dgemvt_mgpu_bs; if(mod_c == 0) { if(mod_r == 0) { // special case int blocks = cols_/dgemvt_mgpu_bs; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, grid_y_t); if(blocks == 0) return 0; hipLaunchKernelGGL(( gemvt_mgpu_special<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, gpu_gid, ngpus, conj); } else { // mod_r != 0 int blocks = cols_/dgemvt_mgpu_bs; blocks += 1; // dummy thread block dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, grid_y_t); if(blocks == 0) return 0; hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); } } else // mod_c != 0 { const int irregular_cols = mod_c % elements_per_thread; int blocks = cols_/dgemvt_mgpu_bs + (mod_c != 0); dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, grid_y_t); if(blocks == 0) return 0; switch(irregular_cols) { /** * The kernel for irregular dimensions has an extra template parameter. * This parameter must be among the values listed in the switch-case statement below. * The possible values are in the range 0 - (elements_per_thread-1) * Make sure these values are updated whenever you change the configuration parameters. 
**/ case 0:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; case 1:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; case 2:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; case 3:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; case 4:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; case 5:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; case 6:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; case 7:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; case 8:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; case 9:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 9>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; case 10:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 10>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; case 11:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 11>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; case 12:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 12>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; case 13:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 13>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, 
dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; case 14:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 14>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; case 15:hipLaunchKernelGGL(( gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 15>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; default: printf("DGEMV-T error: improper template parameter. Please read the inline documentation for this function. \n"); return -1; } } } else { printf("DGEMV error: Unrecognized transpose mode %c \n", trans); return -1; } return 0; } /*************************************************************************************/ extern "C" int kblas_dgemv_mgpu_driver_offset( char trans, int rows, int cols, double alpha, double *dA, int lda, double *dX, int incx, double beta, double *dY, int incy, int gpu_gid, int ngpus, int offset_r, int offset_c, hipStream_t stream = 0) { const double d_zero = 0.0; if(trans == 'n' || trans == 'N') { //**** Config parameters const int thread_x = dgemvn_mgpu_bs; const int thread_y = dgemvn_mgpu_ty; const int elements_per_thread = thread_x/(2*thread_y); const int grid_y_n = 2 * ngpus; //************************* /** offset necessary calculation **/ int offset_r_ = offset_r % dgemvn_mgpu_bs; int offset_c_ = offset_c % dgemvn_mgpu_bs; int total_blocks_skipped_r = offset_r / dgemvn_mgpu_bs; int total_blocks_skipped_c = offset_c / dgemvn_mgpu_bs; int my_skipped_blocks_r = total_blocks_skipped_r; int my_skipped_blocks_c = total_blocks_skipped_c/ngpus; if(gpu_gid < (total_blocks_skipped_c%ngpus)) my_skipped_blocks_c += 1; int ref_gpu = total_blocks_skipped_c%ngpus; int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus; // Advance pointers accordingly dA += my_skipped_blocks_c * dgemvn_mgpu_bs * lda; dA += total_blocks_skipped_r * dgemvn_mgpu_bs; dX += total_blocks_skipped_c * dgemvn_mgpu_bs * incx; dY += my_skipped_blocks_r * dgemvn_mgpu_bs * incy; rows -= total_blocks_skipped_r * dgemvn_mgpu_bs; cols -= total_blocks_skipped_c * dgemvn_mgpu_bs; /** end offset necessary calculation **/ int nstripes = (cols/dgemvn_mgpu_bs) + ((cols%dgemvn_mgpu_bs) != 0); // scaling with beta if(gpu_gid == 0)kblas_dscal_async(rows-offset_r_, beta, dY+(offset_r_*incy), incy, stream); else kblas_dscal_async(rows-offset_r_, d_zero, dY+(offset_r_*incy), incy, stream); int cols_ = dgemvn_mgpu_bs * ( (cols/dgemvn_mgpu_bs)/ngpus ); if(new_gpu_gid < (cols/dgemvn_mgpu_bs)%ngpus) cols_ += dgemvn_mgpu_bs; if(new_gpu_gid == (cols/dgemvn_mgpu_bs)%ngpus) cols_ += cols%dgemvn_mgpu_bs; int mod_r = rows % dgemvn_mgpu_bs; int mod_c = cols_ % dgemvn_mgpu_bs; if(mod_r == 0 && mod_c == 0) { // special case int blocks = rows/dgemvn_mgpu_bs; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, grid_y_n); if(blocks == 0) return 0; hipLaunchKernelGGL(( gemvn_mgpu_special_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread>) , dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); } else { // generic case for columns only const int irregular_cols = mod_c % elements_per_thread; int blocks = (rows/dgemvn_mgpu_bs) + (mod_r != 0); if(mod_r == 0)blocks += 1; // dummy thread block, will return immediately if mod_r 
== 0 dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, grid_y_n); if(blocks == 0) return 0; switch(irregular_cols) { /** * The kernel for irregular dimensions has an extra template parameter. * This parameter must be among the values listed in the switch-case statement below. * The possible values are in the range 0 - (elements_per_thread-1) * Make sure these values are updated whenever you change the configuration parameters. **/ case 0:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; case 1:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; case 2:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; case 3:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; case 4:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; case 5:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; case 6:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; case 7:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; case 8:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; case 9:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 9>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; case 10:hipLaunchKernelGGL(( 
gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 10>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; case 11:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 11>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; case 12:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 12>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; case 13:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 13>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; case 14:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 14>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; case 15:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 15>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; default: printf("DGEMV-N error: improper template parameter. Please read the inline documentation for this function. 
\n"); return -1; } } } // end of non-transpose case else if (trans == 't' || trans == 'T' || trans == 'c' || trans == 'C') { int conj; if(trans == 'c' || trans == 'C') conj = 1; else conj = 0; //**** Config parameters const int thread_x = dgemvt_mgpu_bs; const int thread_y = dgemvt_mgpu_ty; const int elements_per_thread = thread_x/(2*thread_y); const int grid_y_t = 2 * ngpus; //************************* /** offset necessary calculation **/ int offset_r_ = offset_r % dgemvt_mgpu_bs; int offset_c_ = offset_c % dgemvt_mgpu_bs; int total_blocks_skipped_r = offset_r / dgemvt_mgpu_bs; int total_blocks_skipped_c = offset_c / dgemvt_mgpu_bs; int my_skipped_blocks_r = total_blocks_skipped_r; int my_skipped_blocks_c = total_blocks_skipped_c/ngpus; if(gpu_gid < (total_blocks_skipped_c%ngpus)) my_skipped_blocks_c += 1; int ref_gpu = total_blocks_skipped_c%ngpus; int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus; //if(new_gpu_gid != 3){return 0;} // Advance pointers accordingly dA += my_skipped_blocks_c * dgemvt_mgpu_bs * lda; dA += my_skipped_blocks_r * dgemvt_mgpu_bs; dX += total_blocks_skipped_r * dgemvt_mgpu_bs * incx; dY += total_blocks_skipped_c * dgemvt_mgpu_bs * incy; rows -= total_blocks_skipped_r * dgemvt_mgpu_bs; cols -= total_blocks_skipped_c * dgemvt_mgpu_bs; /** end offset necessary calculation **/ int nstripes = (cols/dgemvt_mgpu_bs) + ((cols%dgemvt_mgpu_bs) != 0); // scaling with beta //if(gpu_gid == 0)hipblasDscal(cols-offset_, beta, dY+(offset_*incy), incy); if(gpu_gid == 0)kblas_dscal_async(cols-offset_c_, beta, dY+(offset_c_*incy), incy, stream); else kblas_dscal_async(cols-offset_r_, d_zero, dY+(offset_r_*incy), incy, stream); int cols_ = dgemvt_mgpu_bs * ( (cols/dgemvt_mgpu_bs)/ngpus ); if(new_gpu_gid < (cols/dgemvt_mgpu_bs)%ngpus) cols_ += dgemvt_mgpu_bs; if(new_gpu_gid == (cols/dgemvt_mgpu_bs)%ngpus) cols_ += cols%dgemvt_mgpu_bs; int mod_r = rows % dgemvt_mgpu_bs; int mod_c = cols_ % dgemvt_mgpu_bs; if(mod_r == 0 && mod_c == 0) { int blocks = cols_/dgemvt_mgpu_bs; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, grid_y_t); if(blocks == 0) return 0; hipLaunchKernelGGL(( gemvt_mgpu_special_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); } else { const int irregular_cols = mod_c % elements_per_thread; int blocks = cols_/dgemvt_mgpu_bs + (mod_c != 0); int gpu_last = (nstripes+ngpus-1)%ngpus; if(mod_c == 0 && new_gpu_gid == gpu_last) blocks += 1; // dummy thread block, will return if mod_c == 0 dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, grid_y_t); if(blocks == 0) return 0; switch(irregular_cols) { /** * The kernel for irregular dimensions has an extra template parameter. * This parameter must be among the values listed in the switch-case statement below. * The possible values are in the range 0 - (elements_per_thread-1) * Make sure these values are updated whenever you change the configuration parameters. 
**/ case 0:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break; case 1:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break; case 2:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break; case 3:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break; case 4:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break; case 5:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break; case 6:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break; case 7:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break; case 8:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break; case 9:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 9>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break; case 10:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 10>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break; case 11:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 11>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, 
new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
                case 12:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 12>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
                case 13:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 13>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
                case 14:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 14>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
                case 15:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 15>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
                default: printf("DGEMV-T error: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
            }
        }
    }
    else
    {
        printf("DGEMV error: Unrecognized transpose mode %c \n", trans);
        return -1;
    }
    return 0;
}
/***********************************************************************************/
extern "C"
int kblas_dgemv_mgpu( char trans, int rows, int cols,
            double alpha, double **dA, int lda,
            double **dX, int incx,
            double beta, double **dY, int incy,
            int ngpus,
            int offset_r, int offset_c)
{
    const int ngpus_local = ngpus;
    if(offset_r == 0 && offset_c == 0)
    {
        for(int i = 0; i < ngpus_local; i++)
        {
            hipSetDevice(gpu_lid[i]);
            kblas_dgemv_mgpu_driver(trans, rows, cols, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, gpu_gid[i], ngpus);
        }
    }
    else
    {
        for(int i = 0; i < ngpus_local; i++)
        {
            hipSetDevice(gpu_lid[i]);
            kblas_dgemv_mgpu_driver_offset(trans, rows, cols, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, gpu_gid[i], ngpus, offset_r, offset_c);
        }
    }

    // wait for gpus to finish
    for(int i = 0; i < ngpus_local; i++)
    {
        hipSetDevice(gpu_lid[i]);
        hipDeviceSynchronize();
    }
    return 0;
}
/*************************************************************************************/
extern "C"
int kblas_dgemv_mgpu_async( char trans, int rows, int cols,
            double alpha, double **dA, int lda,
            double **dX, int incx,
            double beta, double **dY, int incy,
            int ngpus,
            int offset_r, int offset_c,
            hipStream_t stream[MAX_NGPUS][MAX_STREAMS])
{
    const int ngpus_local = ngpus;
    if(offset_r == 0 && offset_c == 0)
    {
        for(int i = 0; i < ngpus_local; i++)
        {
            hipSetDevice(gpu_lid[i]);
            kblas_dgemv_mgpu_driver(trans, rows, cols, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, gpu_gid[i], ngpus, stream[i][0]);
        }
    }
    else
    {
        for(int i = 0; i < ngpus_local; i++)
        {
            hipSetDevice(gpu_lid[i]);
            kblas_dgemv_mgpu_driver_offset(trans, rows, cols, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, gpu_gid[i], ngpus, offset_r, offset_c);
        }
    }
    return 0;
}
/*************************************************************************************/
extern "C"
int get_dgemv_mgpu_bs(char trans)
{
    if(trans == 'n' || trans == 'N')
        return dgemvn_mgpu_bs;
    else if (trans == 't' || trans == 'T' || trans == 'c' || trans == 'C')
        return dgemvt_mgpu_bs;
    else
    {
        printf("Error .. input %c is not supported for gemv \n", trans);
        return -1;
    }
}
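/*************************************************************************************/
/* Illustrative sketch (not part of the original KBLAS source): the multi-GPU drivers
 * above hand out matrix columns to GPUs in a block-cyclic fashion with block size
 * dgemvn_mgpu_bs (or dgemvt_mgpu_bs), which is what the repeated `cols_` computation
 * expresses. The helper below mirrors that arithmetic on the host for a hypothetical
 * (cols, gid, ngpu) triple so the per-GPU share can be checked independently;
 * `sketch_cols_owned_by_gpu`, `gid`, and `ngpu` are names local to this sketch.
 */
static int sketch_cols_owned_by_gpu(int cols, int bs, int gid, int ngpu)
{
    // Full blocks are dealt round-robin: GPU `gid` owns blocks gid, gid+ngpu, gid+2*ngpu, ...
    int cols_ = bs * ( (cols/bs) / ngpu );
    if(gid <  (cols/bs) % ngpu) cols_ += bs;        // one extra full block
    if(gid == (cols/bs) % ngpu) cols_ += cols % bs; // plus the trailing partial block, if any
    return cols_;                                   // e.g. cols=200, bs=64, ngpu=2: gid 0 -> 128, gid 1 -> 72
}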
5ef6136042fe8a20222180d72ba4ca0890d644e2.cu
/** * @copyright (c) 2012- King Abdullah University of Science and * Technology (KAUST). All rights reserved. **/ /** * @file src/blas_l2/dgemv_mgpu.cu * KBLAS is a high performance CUDA library for subset of BLAS * and LAPACK routines optimized for NVIDIA GPUs. * KBLAS is provided by KAUST. * * @version 3.0.0 * @author Ahmad Abdelfattah * @date 2018-11-14 **/ #include <stdio.h> #include <cuda.h> #include <cuda_runtime_api.h> #include <cublas.h> #include "gemv_mgpu_core.cuh" #include "gemv_mgpu_offset_core.cuh" #include "defs.h" #if(TARGET_SM >= 30) #define dgemvn_mgpu_bs (64) #define dgemvn_mgpu_ty (4) //#define dgemvn_mgpu_by (2) #define dgemvt_mgpu_bs (64) #define dgemvt_mgpu_ty (4) //#define dgemvt_mgpu_by (2) #else #define dgemvn_mgpu_bs (64) #define dgemvn_mgpu_ty (8) //#define dgemvn_mgpu_by (1) #define dgemvt_mgpu_bs (64) #define dgemvt_mgpu_ty (8) //#define dgemvt_mgpu_by (1) #endif extern "C" int kblas_dscal_async(int n, double alpha, double *x, int incx, cudaStream_t stream); extern "C" int kblas_dgemv_mgpu_driver( char trans, int rows, int cols, double alpha, double *dA, int lda, double *dX, int incx, double beta, double *dY, int incy, int gpu_gid, int ngpus, cudaStream_t stream = 0) { const double d_zero = 0.0; if(trans == 'n' || trans == 'N') { //******** config parameters const int thread_x = dgemvn_mgpu_bs; const int thread_y = dgemvn_mgpu_ty; const int elements_per_thread = thread_x/(2*thread_y); int grid_y_n = 1 * ngpus; //************************** // scaling with beta //if(gpu_gid == 0)cublasDscal(rows, beta, dY, incy); if(gpu_gid == 0)kblas_dscal_async(rows, beta, dY, incy, stream); else kblas_dscal_async(rows, d_zero, dY, incy, stream); int cols_ = dgemvn_mgpu_bs * ( (cols/dgemvn_mgpu_bs)/ngpus ); if(gpu_gid < (cols/dgemvn_mgpu_bs)%ngpus) cols_ += dgemvn_mgpu_bs; if(gpu_gid == (cols/dgemvn_mgpu_bs)%ngpus) cols_ += cols%dgemvn_mgpu_bs; int mod_r = rows % dgemvn_mgpu_bs; int mod_c = cols_ % dgemvn_mgpu_bs; if(mod_r == 0) { if(mod_c == 0) { // special case int blocks = rows/dgemvn_mgpu_bs; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, grid_y_n); if(blocks == 0) return 0; gemvn_mgpu_special<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, gpu_gid, ngpus); } else { // generic case for columns only const int irregular_cols = mod_c % elements_per_thread; int blocks = rows/dgemvn_mgpu_bs; blocks += 1; // dummy thread block dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, grid_y_n); if(blocks == 0) return 0; switch(irregular_cols) { /** * The kernel for irregular dimensions has an extra template parameter. * This parameter must be among the values listed in the switch-case statement below. * The possible values are in the range 0 - (elements_per_thread-1) * Make sure these values are updated whenever you change the configuration parameters. 
**/ case 0: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 1: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 1><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 2: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 2><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 3: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 3><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 4: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 4><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 5: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 5><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 6: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 6><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 7: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 7><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 8: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 8><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 9: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 9><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 10: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 10><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 11: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 11><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 12: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 12><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 13: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 13><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 14: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 14><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 15: 
gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 15><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; default: printf("DGEMV-N error: improper template parameter. Please read the inline documentation for this function. \n"); return -1; } } } else // mod_r != 0 { if(mod_c == 0) { // generic case for columns only int blocks = (rows/dgemvn_mgpu_bs) + (mod_r != 0); dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, grid_y_n); if(blocks == 0) return 0; gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); } else { // generic case for rows and cols const int irregular_cols = mod_c % elements_per_thread; int blocks = (rows/dgemvn_mgpu_bs) + (mod_r != 0); //printf("gpu_gid = %d, cols_ = %d \n", gpu_gid, cols_); dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, grid_y_n); if(blocks == 0) return 0; switch(irregular_cols) { /** * The kernel for irregular dimensions has an extra template parameter. * This parameter must be among the values listed in the switch-case statement below. * The possible values are in the range 0 - (elements_per_thread-1) * Make sure these values are updated whenever you change the configuration parameters. **/ case 0: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 1: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 1><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 2: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 2><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 3: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 3><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 4: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 4><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 5: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 5><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 6: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 6><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 7: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 7><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 8: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 8><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 
9: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 9><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 10: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 10><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 11: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 11><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 12: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 12><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 13: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 13><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 14: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 14><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; case 15: gemvn_mgpu_generic<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 15><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break; default: printf("DGEMV-N error: improper template parameter. Please read the inline documentation for this function. \n"); return -1; } } } } // end of non-transpose case else if (trans == 't' || trans == 'T' || trans == 'c' || trans == 'C') { int conj; if(trans == 'c' || trans == 'C') conj = 1; else conj = 0; //************ config parameters const int thread_x = dgemvt_mgpu_bs; const int thread_y = dgemvt_mgpu_ty; const int elements_per_thread = thread_x/(2*thread_y); int grid_y_t = 1 * ngpus; //****************************** // scaling with beta //if(gpu_gid == 0)cublasDscal(cols, beta, dY, incy); if(gpu_gid == 0)kblas_dscal_async(cols, beta, dY, incy, stream); else kblas_dscal_async(cols, d_zero, dY, incy, stream); int cols_ = dgemvt_mgpu_bs * ( (cols/dgemvt_mgpu_bs)/ngpus ); if(gpu_gid < (cols/dgemvt_mgpu_bs)%ngpus) cols_ += dgemvt_mgpu_bs; if(gpu_gid == (cols/dgemvt_mgpu_bs)%ngpus) cols_ += cols%dgemvt_mgpu_bs; int mod_r = rows % dgemvt_mgpu_bs; int mod_c = cols_ % dgemvt_mgpu_bs; if(mod_c == 0) { if(mod_r == 0) { // special case int blocks = cols_/dgemvt_mgpu_bs; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, grid_y_t); if(blocks == 0) return 0; gemvt_mgpu_special<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, gpu_gid, ngpus, conj); } else { // mod_r != 0 int blocks = cols_/dgemvt_mgpu_bs; blocks += 1; // dummy thread block dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, grid_y_t); if(blocks == 0) return 0; gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); } } else // mod_c != 0 { const int irregular_cols = mod_c % elements_per_thread; int blocks = cols_/dgemvt_mgpu_bs + (mod_c != 0); dim3 
dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, grid_y_t); if(blocks == 0) return 0; switch(irregular_cols) { /** * The kernel for irregular dimensions has an extra template parameter. * This parameter must be among the values listed in the switch-case statement below. * The possible values are in the range 0 - (elements_per_thread-1) * Make sure these values are updated whenever you change the configuration parameters. **/ case 0: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; case 1: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 1><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; case 2: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 2><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; case 3: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 3><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; case 4: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 4><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; case 5: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 5><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; case 6: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 6><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; case 7: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 7><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; case 8: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 8><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; case 9: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 9><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; case 10: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 10><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; case 11: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 11><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; case 12: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 12><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; case 13: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 13><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, 
dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; case 14: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 14><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; case 15: gemvt_mgpu_generic<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 15><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break; default: printf("DGEMV-T error: improper template parameter. Please read the inline documentation for this function. \n"); return -1; } } } else { printf("DGEMV error: Unrecognized transpose mode %c \n", trans); return -1; } return 0; } /*************************************************************************************/ extern "C" int kblas_dgemv_mgpu_driver_offset( char trans, int rows, int cols, double alpha, double *dA, int lda, double *dX, int incx, double beta, double *dY, int incy, int gpu_gid, int ngpus, int offset_r, int offset_c, cudaStream_t stream = 0) { const double d_zero = 0.0; if(trans == 'n' || trans == 'N') { //**** Config parameters const int thread_x = dgemvn_mgpu_bs; const int thread_y = dgemvn_mgpu_ty; const int elements_per_thread = thread_x/(2*thread_y); const int grid_y_n = 2 * ngpus; //************************* /** offset necessary calculation **/ int offset_r_ = offset_r % dgemvn_mgpu_bs; int offset_c_ = offset_c % dgemvn_mgpu_bs; int total_blocks_skipped_r = offset_r / dgemvn_mgpu_bs; int total_blocks_skipped_c = offset_c / dgemvn_mgpu_bs; int my_skipped_blocks_r = total_blocks_skipped_r; int my_skipped_blocks_c = total_blocks_skipped_c/ngpus; if(gpu_gid < (total_blocks_skipped_c%ngpus)) my_skipped_blocks_c += 1; int ref_gpu = total_blocks_skipped_c%ngpus; int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus; // Advance pointers accordingly dA += my_skipped_blocks_c * dgemvn_mgpu_bs * lda; dA += total_blocks_skipped_r * dgemvn_mgpu_bs; dX += total_blocks_skipped_c * dgemvn_mgpu_bs * incx; dY += my_skipped_blocks_r * dgemvn_mgpu_bs * incy; rows -= total_blocks_skipped_r * dgemvn_mgpu_bs; cols -= total_blocks_skipped_c * dgemvn_mgpu_bs; /** end offset necessary calculation **/ int nstripes = (cols/dgemvn_mgpu_bs) + ((cols%dgemvn_mgpu_bs) != 0); // scaling with beta if(gpu_gid == 0)kblas_dscal_async(rows-offset_r_, beta, dY+(offset_r_*incy), incy, stream); else kblas_dscal_async(rows-offset_r_, d_zero, dY+(offset_r_*incy), incy, stream); int cols_ = dgemvn_mgpu_bs * ( (cols/dgemvn_mgpu_bs)/ngpus ); if(new_gpu_gid < (cols/dgemvn_mgpu_bs)%ngpus) cols_ += dgemvn_mgpu_bs; if(new_gpu_gid == (cols/dgemvn_mgpu_bs)%ngpus) cols_ += cols%dgemvn_mgpu_bs; int mod_r = rows % dgemvn_mgpu_bs; int mod_c = cols_ % dgemvn_mgpu_bs; if(mod_r == 0 && mod_c == 0) { // special case int blocks = rows/dgemvn_mgpu_bs; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, grid_y_n); if(blocks == 0) return 0; gemvn_mgpu_special_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread> <<<dimGrid, dimBlock, 0, stream>>> (rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); } else { // generic case for columns only const int irregular_cols = mod_c % elements_per_thread; int blocks = (rows/dgemvn_mgpu_bs) + (mod_r != 0); if(mod_r == 0)blocks += 1; // dummy thread block, will return immediately if mod_r == 0 dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, grid_y_n); if(blocks 
== 0) return 0; switch(irregular_cols) { /** * The kernel for irregular dimensions has an extra template parameter. * This parameter must be among the values listed in the switch-case statement below. * The possible values are in the range 0 - (elements_per_thread-1) * Make sure these values are updated whenever you change the configuration parameters. **/ case 0: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; case 1: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 1><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; case 2: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 2><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; case 3: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 3><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; case 4: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 4><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; case 5: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 5><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; case 6: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 6><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; case 7: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 7><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; case 8: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 8><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; case 9: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 9><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; case 10: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 10><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; case 11: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 11><<<dimGrid, dimBlock, 0, 
stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; case 12: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 12><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; case 13: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 13><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; case 14: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 14><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; case 15: gemvn_mgpu_generic_offset<double, dgemvn_mgpu_bs, dgemvn_mgpu_bs, dgemvn_mgpu_ty, elements_per_thread, 15><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break; default: printf("DGEMV-N error: improper template parameter. Please read the inline documentation for this function. \n"); return -1; } } } // end of non-transpose case else if (trans == 't' || trans == 'T' || trans == 'c' || trans == 'C') { int conj; if(trans == 'c' || trans == 'C') conj = 1; else conj = 0; //**** Config parameters const int thread_x = dgemvt_mgpu_bs; const int thread_y = dgemvt_mgpu_ty; const int elements_per_thread = thread_x/(2*thread_y); const int grid_y_t = 2 * ngpus; //************************* /** offset necessary calculation **/ int offset_r_ = offset_r % dgemvt_mgpu_bs; int offset_c_ = offset_c % dgemvt_mgpu_bs; int total_blocks_skipped_r = offset_r / dgemvt_mgpu_bs; int total_blocks_skipped_c = offset_c / dgemvt_mgpu_bs; int my_skipped_blocks_r = total_blocks_skipped_r; int my_skipped_blocks_c = total_blocks_skipped_c/ngpus; if(gpu_gid < (total_blocks_skipped_c%ngpus)) my_skipped_blocks_c += 1; int ref_gpu = total_blocks_skipped_c%ngpus; int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus; //if(new_gpu_gid != 3){return 0;} // Advance pointers accordingly dA += my_skipped_blocks_c * dgemvt_mgpu_bs * lda; dA += my_skipped_blocks_r * dgemvt_mgpu_bs; dX += total_blocks_skipped_r * dgemvt_mgpu_bs * incx; dY += total_blocks_skipped_c * dgemvt_mgpu_bs * incy; rows -= total_blocks_skipped_r * dgemvt_mgpu_bs; cols -= total_blocks_skipped_c * dgemvt_mgpu_bs; /** end offset necessary calculation **/ int nstripes = (cols/dgemvt_mgpu_bs) + ((cols%dgemvt_mgpu_bs) != 0); // scaling with beta //if(gpu_gid == 0)cublasDscal(cols-offset_, beta, dY+(offset_*incy), incy); if(gpu_gid == 0)kblas_dscal_async(cols-offset_c_, beta, dY+(offset_c_*incy), incy, stream); else kblas_dscal_async(cols-offset_r_, d_zero, dY+(offset_r_*incy), incy, stream); int cols_ = dgemvt_mgpu_bs * ( (cols/dgemvt_mgpu_bs)/ngpus ); if(new_gpu_gid < (cols/dgemvt_mgpu_bs)%ngpus) cols_ += dgemvt_mgpu_bs; if(new_gpu_gid == (cols/dgemvt_mgpu_bs)%ngpus) cols_ += cols%dgemvt_mgpu_bs; int mod_r = rows % dgemvt_mgpu_bs; int mod_c = cols_ % dgemvt_mgpu_bs; if(mod_r == 0 && mod_c == 0) { int blocks = cols_/dgemvt_mgpu_bs; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, grid_y_t); if(blocks == 0) return 0; gemvt_mgpu_special_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, 
elements_per_thread><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); } else { const int irregular_cols = mod_c % elements_per_thread; int blocks = cols_/dgemvt_mgpu_bs + (mod_c != 0); int gpu_last = (nstripes+ngpus-1)%ngpus; if(mod_c == 0 && new_gpu_gid == gpu_last) blocks += 1; // dummy thread block, will return if mod_c == 0 dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, grid_y_t); if(blocks == 0) return 0; switch(irregular_cols) { /** * The kernel for irregular dimensions has an extra template parameter. * This parameter must be among the values listed in the switch-case statement below. * The possible values are in the range 0 - (elements_per_thread-1) * Make sure these values are updated whenever you change the configuration parameters. **/ case 0: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break; case 1: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 1><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break; case 2: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 2><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break; case 3: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 3><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break; case 4: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 4><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break; case 5: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 5><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break; case 6: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 6><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break; case 7: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 7><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break; case 8: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 8><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break; case 9: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 9><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, 
offset_c_, conj); break; case 10: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 10><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break; case 11: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 11><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break; case 12: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 12><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break; case 13: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 13><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break; case 14: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 14><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break; case 15: gemvt_mgpu_generic_offset<double, dgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 15><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break; default: printf("DGEMV-T error: improper template parameter. Please read the inline documentation for this function. 
\n"); return -1; } } } else { printf("DGEMV error: Unrecognized transpose mode %c \n", trans); return -1; } return 0; } /***********************************************************************************/ extern "C" int kblas_dgemv_mgpu( char trans, int rows, int cols, double alpha, double **dA, int lda, double **dX, int incx, double beta, double **dY, int incy, int ngpus, int offset_r, int offset_c) { const int ngpus_local = ngpus; if(offset_r == 0 && offset_c == 0) { for(int i = 0; i < ngpus_local; i++) { cudaSetDevice(gpu_lid[i]); kblas_dgemv_mgpu_driver(trans, rows, cols, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, gpu_gid[i], ngpus); } } else { for(int i = 0; i < ngpus_local; i++) { cudaSetDevice(gpu_lid[i]); kblas_dgemv_mgpu_driver_offset(trans, rows, cols, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, gpu_gid[i], ngpus, offset_r, offset_c); } } // wait for gpus to finish for(int i = 0; i < ngpus_local; i++) { cudaSetDevice(gpu_lid[i]); cudaDeviceSynchronize(); } return 0; } /*************************************************************************************/ extern "C" int kblas_dgemv_mgpu_async( char trans, int rows, int cols, double alpha, double **dA, int lda, double **dX, int incx, double beta, double **dY, int incy, int ngpus, int offset_r, int offset_c, cudaStream_t stream[MAX_NGPUS][MAX_STREAMS]) { const int ngpus_local = ngpus; if(offset_r == 0 && offset_c == 0) { for(int i = 0; i < ngpus_local; i++) { cudaSetDevice(gpu_lid[i]); kblas_dgemv_mgpu_driver(trans, rows, cols, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, gpu_gid[i], ngpus, stream[i][0]); } } else { for(int i = 0; i < ngpus_local; i++) { cudaSetDevice(gpu_lid[i]); kblas_dgemv_mgpu_driver_offset(trans, rows, cols, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, gpu_gid[i], ngpus, offset_r, offset_c); } } return 0; } /*************************************************************************************/ extern "C" int get_dgemv_mgpu_bs(char trans) { if(trans == 'n' || trans == 'N') return dgemvn_mgpu_bs; else if (trans == 't' || trans == 'T' || trans == 'c' || trans == 'C') return dgemvt_mgpu_bs; else {printf("Error .. input %c is not supported for gemv \n", trans); return -1;} }
d184f4a1ff6b3122037df8aed05b7eabbca01011.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include <time.h> #include <math.h> #include <vector> __global__ void MonteKarloPi(int * niter, int *count, double* x, double* y) { for (int i = 0; i < *niter; ++i) { //get random points double z = (x[i]* x[i])+ (y[i]* y[i]); //check to see if point is in unit circle if (z <= 1) { ++*count; } } } int main() { const int niter = 10000; int i; int count = 0; double pi; int* dev_niter, *dev_count; int size = sizeof(int); double x[niter], *dev_x; double y[niter], *dev_y; const size_t x_size = sizeof(double) * size_t(niter); srand(time(NULL)); for (int i = 0; i < niter; i++) { x[i] = (double)rand() / RAND_MAX; y[i] = (double)rand() / RAND_MAX; } hipMalloc((void**)&dev_niter, size); hipMalloc((void**)&dev_count, size); hipMalloc((void**)&dev_x, x_size); hipMalloc((void**)&dev_y, x_size); hipMemcpy(dev_niter, &niter, size, hipMemcpyHostToDevice); hipMemcpy(dev_count, &count, size, hipMemcpyHostToDevice); hipMemcpy(dev_x, x, x_size, hipMemcpyHostToDevice); hipMemcpy(dev_y, y, x_size, hipMemcpyHostToDevice); MonteKarloPi << < 1, 1 >> > (dev_niter, dev_count, dev_x, dev_y); hipMemcpy( &count, dev_count, size, hipMemcpyDeviceToHost); pi = ((double)count / (double)niter) * 4.0; printf("Pi: %f\n", pi); return 0; }
d184f4a1ff6b3122037df8aed05b7eabbca01011.cu
 #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <curand.h> #include <time.h> #include <math.h> #include <vector> __global__ void MonteKarloPi(int * niter, int *count, double* x, double* y) { for (int i = 0; i < *niter; ++i) { //get random points double z = (x[i]* x[i])+ (y[i]* y[i]); //check to see if point is in unit circle if (z <= 1) { ++*count; } } } int main() { const int niter = 10000; int i; int count = 0; double pi; int* dev_niter, *dev_count; int size = sizeof(int); double x[niter], *dev_x; double y[niter], *dev_y; const size_t x_size = sizeof(double) * size_t(niter); srand(time(NULL)); for (int i = 0; i < niter; i++) { x[i] = (double)rand() / RAND_MAX; y[i] = (double)rand() / RAND_MAX; } cudaMalloc((void**)&dev_niter, size); cudaMalloc((void**)&dev_count, size); cudaMalloc((void**)&dev_x, x_size); cudaMalloc((void**)&dev_y, x_size); cudaMemcpy(dev_niter, &niter, size, cudaMemcpyHostToDevice); cudaMemcpy(dev_count, &count, size, cudaMemcpyHostToDevice); cudaMemcpy(dev_x, x, x_size, cudaMemcpyHostToDevice); cudaMemcpy(dev_y, y, x_size, cudaMemcpyHostToDevice); MonteKarloPi << < 1, 1 >> > (dev_niter, dev_count, dev_x, dev_y); cudaMemcpy( &count, dev_count, size, cudaMemcpyDeviceToHost); pi = ((double)count / (double)niter) * 4.0; printf("Pi: %f\n", pi); return 0; }
e105986e954140330121f82f6139060d41eec2fd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> /** * Funo que realiza a soma de a + b e retorna um inteiro. * __device__ apenas o dispositivo pode chamar esta funo. */ __device__ int addem( int a, int b ) { return a + b; } /** * Funo que vai ser chamada do host para o dispositivo. * realiza a soma de a + b e guarda o resultado no ponteiro passado */ __global__ void add( int a, int b, int *c ) { *c = addem( a, b ); } int main(void){ /** variavel de resposta **/ int c; /** ponteiro a ser passado para as funes no dispositivo **/ int *dev_c; /** alocao de memria no dispositivo **/ hipMalloc((void**)&dev_c, sizeof(int)); /** chamada a funo add para ser executada no dispositivo **/ hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, 2,7,dev_c); /** cpia do conteudo do ponteiro dev_c para a variavel c **/ hipMemcpy(&c,dev_c, sizeof(int),hipMemcpyDeviceToHost); printf( "2 + 7 = %d\n", c ); /** liberao de memoria alocada no dispositivo **/ hipFree(dev_c); return 0; }
e105986e954140330121f82f6139060d41eec2fd.cu
#include <stdio.h>
#include <stdlib.h>

/**
 * Function that adds a + b and returns an integer.
 * __device__: only the device can call this function.
 */
__device__ int addem( int a, int b ) {
    return a + b;
}

/**
 * Function called from the host to run on the device.
 * Adds a + b and stores the result in the pointer passed in.
 */
__global__ void add( int a, int b, int *c ) {
    *c = addem( a, b );
}

int main(void){
    /** result variable **/
    int c;
    /** pointer to be passed to the functions on the device **/
    int *dev_c;

    /** allocate memory on the device **/
    cudaMalloc((void**)&dev_c, sizeof(int));

    /** call the add function, to be executed on the device **/
    add<<<1,1>>>(2,7,dev_c);

    /** copy the contents pointed to by dev_c into the variable c **/
    cudaMemcpy(&c,dev_c, sizeof(int),cudaMemcpyDeviceToHost);

    printf( "2 + 7 = %d\n", c );

    /** free the memory allocated on the device **/
    cudaFree(dev_c);
    return 0;
}
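// For reference, the hipify translation visible in the pair above maps the CUDA
// triple-chevron launch onto hipLaunchKernelGGL as
//   kernel<<<grid, block, sharedMemBytes, stream>>>(args...)
//     -> hipLaunchKernelGGL(kernel, grid, block, sharedMemBytes, stream, args...)
// so in add<<<1,1>>>(2,7,dev_c) the implicit shared-memory size and stream become the
// explicit "0, 0" arguments of the HIP call.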
521fb4be2f0c5a3a61a298b2bbcbcbc3f34d06be.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/SpatialAdaptiveMaxPooling.cu" #else #include "../common.h" // 4d tensor B x D x H x W void THNN_(SpatialAdaptiveMaxPooling_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, THCIndexTensor *indices, int osizeW, int osizeH) { THCUNN_assertSameGPU(state, 3, input, output, indices); THCIndex_t *indices_data; scalar_t *output_data; scalar_t *input_data; THCUNN_argCheck(state, !input->is_empty() && (input->dim() == 3 || input->dim() == 4), 2, input, "non-empty 3D or 4D (batch mode) tensor expected for input, but got: %s"); if (input->dim() == 3) { int64_t sizeD = input->size(0); int64_t isizeH = input->size(1); int64_t isizeW = input->size(2); int64_t istrideD = input->stride(0); int64_t istrideH = input->stride(1); int64_t istrideW = input->stride(2); input_data = THCTensor_(data)(state, input); THCTensor_(resize3d)(state, output, sizeD, osizeH, osizeW); THCIndexTensor_(resize3d)(state, indices, sizeD, osizeH, osizeW); indices_data = THCIndexTensor_(data)(state, indices); output_data = THCTensor_(data)(state, output); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 1 : blocksH; dim3 blocks(sizeD, blocksH); dim3 threads(32, 8); // run maxpool kernel hipLaunchKernelGGL(( adaptivemaxpool) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), input_data, output_data, indices_data, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); THCudaCheck(hipGetLastError()); } else { input = THCTensor_(newContiguous)(state, input); int64_t sizeB = input->size(0); int64_t sizeD = input->size(1); int64_t isizeH = input->size(2); int64_t isizeW = input->size(3); int64_t istrideD = input->stride(1); int64_t istrideH = input->stride(2); int64_t istrideW = input->stride(3); input_data = THCTensor_(data)(state, input); THCTensor_(resize4d)(state, output, sizeB, sizeD, osizeH, osizeW); THCIndexTensor_(resize4d)(state, indices, sizeB, sizeD, osizeH, osizeW); indices_data = THCIndexTensor_(data)(state, indices); output_data = THCTensor_(data)(state, output); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 
1 : blocksH; dim3 blocks(sizeB*sizeD, blocksH); dim3 threads(32, 8); // run maxpool kernel hipLaunchKernelGGL(( adaptivemaxpool) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), input_data, output_data, indices_data, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); THCudaCheck(hipGetLastError()); // clean THCTensor_(free)(state, input); } } void THNN_(SpatialAdaptiveMaxPooling_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, THCIndexTensor *indices) { bool atomic = true; // suboptimal, but without atomic it doesn't pass the tests THCUNN_assertSameGPU(state, 4, input, indices, gradOutput, gradInput); THCIndex_t *indices_data; scalar_t *gradInput_data; scalar_t *gradOutput_data; gradOutput = THCTensor_(newContiguous)(state, gradOutput); if (input->dim() == 3) { int64_t sizeD = input->size(0); int64_t isizeH = input->size(1); int64_t isizeW = input->size(2); int64_t osizeH = gradOutput->size(1); int64_t osizeW = gradOutput->size(2); //bool atomic = (isizeH%osizeH != 0) || (isizeW%osizeW != 0); THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); indices_data = THCIndexTensor_(data)(state, indices); gradOutput_data = THCTensor_(data)(state, gradOutput); gradInput_data = THCTensor_(data)(state, gradInput); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 1 : blocksH; dim3 blocks(sizeD, blocksH); dim3 threads(32, 8); if(atomic) { // run updateGradInput kernel, accumulate gradients atomically hipLaunchKernelGGL(( atomicadaptivemaxgradinput) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); } else { // run updateGradInput kernel hipLaunchKernelGGL(( atomicadaptivemaxgradinput) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); } THCudaCheck(hipGetLastError()); } else { int64_t sizeB = input->size(0); int64_t sizeD = input->size(1); int64_t isizeH = input->size(2); int64_t isizeW = input->size(3); int64_t osizeH = gradOutput->size(2); int64_t osizeW = gradOutput->size(3); //bool atomic = (isizeH%osizeH != 0) || (isizeW%osizeW != 0); THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); indices_data = THCIndexTensor_(data)(state, indices); gradOutput_data = THCTensor_(data)(state, gradOutput); gradInput_data = THCTensor_(data)(state, gradInput); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 1 : blocksH; dim3 blocks(sizeB*sizeD, blocksH); dim3 threads(32, 8); if(atomic) { // run updateGradInput kernel, accumulate gradients atomically hipLaunchKernelGGL(( atomicadaptivemaxgradinput) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); } else { // run updateGradInput kernel, accumulate gradients atomically hipLaunchKernelGGL(( adaptivemaxgradinput) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); } THCudaCheck(hipGetLastError()); } // clean THCTensor_(free)(state,gradOutput); } #endif
521fb4be2f0c5a3a61a298b2bbcbcbc3f34d06be.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/SpatialAdaptiveMaxPooling.cu" #else #include "../common.h" // 4d tensor B x D x H x W void THNN_(SpatialAdaptiveMaxPooling_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, THCIndexTensor *indices, int osizeW, int osizeH) { THCUNN_assertSameGPU(state, 3, input, output, indices); THCIndex_t *indices_data; scalar_t *output_data; scalar_t *input_data; THCUNN_argCheck(state, !input->is_empty() && (input->dim() == 3 || input->dim() == 4), 2, input, "non-empty 3D or 4D (batch mode) tensor expected for input, but got: %s"); if (input->dim() == 3) { int64_t sizeD = input->size(0); int64_t isizeH = input->size(1); int64_t isizeW = input->size(2); int64_t istrideD = input->stride(0); int64_t istrideH = input->stride(1); int64_t istrideW = input->stride(2); input_data = THCTensor_(data)(state, input); THCTensor_(resize3d)(state, output, sizeD, osizeH, osizeW); THCIndexTensor_(resize3d)(state, indices, sizeD, osizeH, osizeW); indices_data = THCIndexTensor_(data)(state, indices); output_data = THCTensor_(data)(state, output); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 1 : blocksH; dim3 blocks(sizeD, blocksH); dim3 threads(32, 8); // run maxpool kernel adaptivemaxpool <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (input_data, output_data, indices_data, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); THCudaCheck(cudaGetLastError()); } else { input = THCTensor_(newContiguous)(state, input); int64_t sizeB = input->size(0); int64_t sizeD = input->size(1); int64_t isizeH = input->size(2); int64_t isizeW = input->size(3); int64_t istrideD = input->stride(1); int64_t istrideH = input->stride(2); int64_t istrideW = input->stride(3); input_data = THCTensor_(data)(state, input); THCTensor_(resize4d)(state, output, sizeB, sizeD, osizeH, osizeW); THCIndexTensor_(resize4d)(state, indices, sizeB, sizeD, osizeH, osizeW); indices_data = THCIndexTensor_(data)(state, indices); output_data = THCTensor_(data)(state, output); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 1 : blocksH; dim3 blocks(sizeB*sizeD, blocksH); dim3 threads(32, 8); // run maxpool kernel adaptivemaxpool <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (input_data, output_data, indices_data, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); THCudaCheck(cudaGetLastError()); // clean THCTensor_(free)(state, input); } } void THNN_(SpatialAdaptiveMaxPooling_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, THCIndexTensor *indices) { bool atomic = true; // suboptimal, but without atomic it doesn't pass the tests THCUNN_assertSameGPU(state, 4, input, indices, gradOutput, gradInput); THCIndex_t *indices_data; scalar_t *gradInput_data; scalar_t *gradOutput_data; gradOutput = THCTensor_(newContiguous)(state, gradOutput); if (input->dim() == 3) { int64_t sizeD = input->size(0); int64_t isizeH = input->size(1); int64_t isizeW = input->size(2); int64_t osizeH = gradOutput->size(1); int64_t osizeW = gradOutput->size(2); //bool atomic = (isizeH%osizeH != 0) || (isizeW%osizeW != 0); THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); indices_data = THCIndexTensor_(data)(state, indices); gradOutput_data = THCTensor_(data)(state, gradOutput); gradInput_data = THCTensor_(data)(state, gradInput); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 
1 : blocksH; dim3 blocks(sizeD, blocksH); dim3 threads(32, 8); if(atomic) { // run updateGradInput kernel, accumulate gradients atomically atomicadaptivemaxgradinput <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); } else { // run updateGradInput kernel atomicadaptivemaxgradinput <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); } THCudaCheck(cudaGetLastError()); } else { int64_t sizeB = input->size(0); int64_t sizeD = input->size(1); int64_t isizeH = input->size(2); int64_t isizeW = input->size(3); int64_t osizeH = gradOutput->size(2); int64_t osizeW = gradOutput->size(3); //bool atomic = (isizeH%osizeH != 0) || (isizeW%osizeW != 0); THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); indices_data = THCIndexTensor_(data)(state, indices); gradOutput_data = THCTensor_(data)(state, gradOutput); gradInput_data = THCTensor_(data)(state, gradInput); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 1 : blocksH; dim3 blocks(sizeB*sizeD, blocksH); dim3 threads(32, 8); if(atomic) { // run updateGradInput kernel, accumulate gradients atomically atomicadaptivemaxgradinput <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); } else { // run updateGradInput kernel, accumulate gradients atomically adaptivemaxgradinput <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); } THCudaCheck(cudaGetLastError()); } // clean THCTensor_(free)(state,gradOutput); } #endif
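// The adaptivemaxpool / adaptivemaxgradinput kernels launched above are defined in a header
// included via ../common.h, so their bodies are not visible in this file. The standard index
// mapping used by adaptive pooling, where each output cell reduces over a variable-sized input
// window, is sketched below as a host-side illustration; the helper names are assumptions and
// not code from THCUNN.
#include <cstdio>

static inline int start_index(int o, int osize, int isize) { return (o * isize) / osize; }
static inline int end_index(int o, int osize, int isize)   { return ((o + 1) * isize + osize - 1) / osize; }

int main()
{
    // Example: adaptively pooling a 10-wide row down to 3 outputs.
    const int isize = 10, osize = 3;
    for (int o = 0; o < osize; ++o)
        printf("output %d reduces over input [%d, %d)\n",
               o, start_index(o, osize, isize), end_index(o, osize, isize));
    // Prints the windows [0,4), [3,7), [6,10): they may overlap and always cover the input.
    return 0;
}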
e7e9d54f4fbb29a592e3ef3fa2617c922152855b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ __global__ void Copy( TColor *dst, int imageW, int imageH ){ const int ix = blockDim.x * blockIdx.x + threadIdx.x; const int iy = blockDim.y * blockIdx.y + threadIdx.y; //Add half of a texel to always address exact texel centers const float x = (float)ix + 0.5f; const float y = (float)iy + 0.5f; if(ix < imageW && iy < imageH){ float4 fresult = tex2D(texImage, x, y); dst[imageW * iy + ix] = make_color(fresult.x, fresult.y, fresult.z, 0); } } extern "C" void cuda_Copy(TColor *d_dst, int imageW, int imageH) { dim3 threads(BLOCKDIM_X, BLOCKDIM_Y); dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y)); hipLaunchKernelGGL(( Copy), dim3(grid), dim3(threads), 0, 0, d_dst, imageW, imageH); }
e7e9d54f4fbb29a592e3ef3fa2617c922152855b.cu
/*
 * Copyright 1993-2010 NVIDIA Corporation.  All rights reserved.
 *
 * Please refer to the NVIDIA end user license agreement (EULA) associated
 * with this source code for terms and conditions that govern your use of
 * this software. Any use, reproduction, disclosure, or distribution of
 * this software and related documentation outside the terms of the EULA
 * is strictly prohibited.
 *
 */

__global__ void Copy( TColor *dst, int imageW, int imageH ){
    const int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const int iy = blockDim.y * blockIdx.y + threadIdx.y;
    //Add half of a texel to always address exact texel centers
    const float x = (float)ix + 0.5f;
    const float y = (float)iy + 0.5f;

    if(ix < imageW && iy < imageH){
        float4 fresult = tex2D(texImage, x, y);
        dst[imageW * iy + ix] = make_color(fresult.x, fresult.y, fresult.z, 0);
    }
}

extern "C" void cuda_Copy(TColor *d_dst, int imageW, int imageH)
{
    dim3 threads(BLOCKDIM_X, BLOCKDIM_Y);
    dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y));

    Copy<<<grid, threads>>>(d_dst, imageW, imageH);
}
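// The grid computation above relies on iDivUp, which is defined elsewhere in the SDK sample.
// It is the usual round-up integer division used so the grid covers an image whose dimensions
// are not multiples of the block size; a minimal version is sketched below as an assumption
// about what the surrounding code provides.
static inline int iDivUp_sketch(int a, int b)
{
    return (a + b - 1) / b;   // e.g. iDivUp_sketch(1000, 16) == 63 blocks of 16 threads
}
// The "+ 0.5f" on the kernel's coordinates addresses texel centers: with unnormalized
// texture coordinates, tex2D samples texel (i, j) exactly at (i + 0.5, j + 0.5).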
f775b5bf3ed417950d68ae1155bb9055a0d35d3c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <cutil.h> #include "patusrt.h" // forward_decls --> __global__ void initialize(double * u_0_0, double * u_0_1, double a, int x_max, int y_max, int z_max); __global__ void upstream_5_3d(double * * u_0_1_out, double * u_0_0, double * u_0_1, double a, int x_max, int y_max, int z_max); // <-- int main (int argc, char** argv) { int i; hipError_t res; // prepare grids // declare_grids --> double * u_0_1_out; double * u_0_0; double * u_0_1; if ((argc!=4)) { printf("Wrong number of parameters. Syntax:\n%s <x_max> <y_max> <z_max>\n", argv[0]); exit(-1); } int x_max = atoi(argv[1]); int y_max = atoi(argv[2]); int z_max = atoi(argv[3]); // <-- // allocate_grids --> u_0_0=((double * )malloc(((((x_max+10)*(y_max+10))*(z_max+10))*sizeof (double)))); u_0_1=((double * )malloc(((((x_max+10)*(y_max+10))*(z_max+10))*sizeof (double)))); // <-- // declare_GPU_grids --> double * u_0_1_out_gpu; double * u_0_0_gpu; double * u_0_1_gpu; dim3 thds(1, 1, 1); dim3 blks(x_max, (y_max*z_max), 1); // <-- // allocate_GPU_grids --> hipMalloc(((void * * )( & u_0_1_gpu)), ((((x_max+10)*(y_max+10))*(z_max+10))*sizeof (double))); hipMalloc(((void * * )( & u_0_0_gpu)), ((((x_max+10)*(y_max+10))*(z_max+10))*sizeof (double))); hipMalloc(((void * * )( & u_0_1_out_gpu)), ((((x_max+10)*(y_max+10))*(z_max+10))*sizeof (double * ))); // <-- // copy_grids_to_GPU --> hipMemcpy(((void * )u_0_1_gpu), ((void * )u_0_1), ((((x_max+10)*(y_max+10))*(z_max+10))*sizeof (double)), hipMemcpyHostToDevice); hipMemcpy(((void * )u_0_0_gpu), ((void * )u_0_0), ((((x_max+10)*(y_max+10))*(z_max+10))*sizeof (double)), hipMemcpyHostToDevice); // <-- // initialize_grids --> hipLaunchKernelGGL(( initialize), dim3(blks), dim3(thds), 0, 0, u_0_0_gpu, u_0_1_gpu, 0.1, x_max, y_max, z_max); // <-- hipDeviceSynchronize (); res = hipGetLastError (); if (res != hipSuccess) { printf ("CUDA Error [Initialization]: %s.\n", hipGetErrorString (res)); } long nFlopsPerStencil = 22; long nGridPointsCount = 5 * ((x_max*y_max)*z_max); long nBytesTransferred = 5 * (((((x_max+10)*(y_max+10))*(z_max+10))*sizeof (double))+(((x_max*y_max)*z_max)*sizeof (double))); // warm up // compute_stencil --> hipLaunchKernelGGL(( upstream_5_3d), dim3(blks), dim3(thds), 0, 0, ( & u_0_1_out_gpu), u_0_0_gpu, u_0_1_gpu, 0.2, x_max, y_max, z_max); // <-- hipDeviceSynchronize (); res = hipGetLastError (); if (res != hipSuccess) { printf ("CUDA Error [Stencil]: %s.\n", hipGetErrorString (res)); } // run the benchmark tic (); for (i = 0; i < 5; i++) { // compute_stencil --> hipLaunchKernelGGL(( upstream_5_3d), dim3(blks), dim3(thds), 0, 0, ( & u_0_1_out_gpu), u_0_0_gpu, u_0_1_gpu, 0.30000000000000004, x_max, y_max, z_max); // <-- hipDeviceSynchronize (); } toc (nFlopsPerStencil, nGridPointsCount, nBytesTransferred); // free memory // deallocate_grids --> hipFree(((void * )u_0_1_gpu)); hipFree(((void * )u_0_0_gpu)); hipFree(((void * )u_0_1_out_gpu)); free(u_0_0); free(u_0_1); // <-- hipDeviceReset (); return EXIT_SUCCESS; }
f775b5bf3ed417950d68ae1155bb9055a0d35d3c.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <cutil.h> #include "patusrt.h" // forward_decls --> __global__ void initialize(double * u_0_0, double * u_0_1, double a, int x_max, int y_max, int z_max); __global__ void upstream_5_3d(double * * u_0_1_out, double * u_0_0, double * u_0_1, double a, int x_max, int y_max, int z_max); // <-- int main (int argc, char** argv) { int i; cudaError_t res; // prepare grids // declare_grids --> double * u_0_1_out; double * u_0_0; double * u_0_1; if ((argc!=4)) { printf("Wrong number of parameters. Syntax:\n%s <x_max> <y_max> <z_max>\n", argv[0]); exit(-1); } int x_max = atoi(argv[1]); int y_max = atoi(argv[2]); int z_max = atoi(argv[3]); // <-- // allocate_grids --> u_0_0=((double * )malloc(((((x_max+10)*(y_max+10))*(z_max+10))*sizeof (double)))); u_0_1=((double * )malloc(((((x_max+10)*(y_max+10))*(z_max+10))*sizeof (double)))); // <-- // declare_GPU_grids --> double * u_0_1_out_gpu; double * u_0_0_gpu; double * u_0_1_gpu; dim3 thds(1, 1, 1); dim3 blks(x_max, (y_max*z_max), 1); // <-- // allocate_GPU_grids --> cudaMalloc(((void * * )( & u_0_1_gpu)), ((((x_max+10)*(y_max+10))*(z_max+10))*sizeof (double))); cudaMalloc(((void * * )( & u_0_0_gpu)), ((((x_max+10)*(y_max+10))*(z_max+10))*sizeof (double))); cudaMalloc(((void * * )( & u_0_1_out_gpu)), ((((x_max+10)*(y_max+10))*(z_max+10))*sizeof (double * ))); // <-- // copy_grids_to_GPU --> cudaMemcpy(((void * )u_0_1_gpu), ((void * )u_0_1), ((((x_max+10)*(y_max+10))*(z_max+10))*sizeof (double)), cudaMemcpyHostToDevice); cudaMemcpy(((void * )u_0_0_gpu), ((void * )u_0_0), ((((x_max+10)*(y_max+10))*(z_max+10))*sizeof (double)), cudaMemcpyHostToDevice); // <-- // initialize_grids --> initialize<<<blks, thds>>>(u_0_0_gpu, u_0_1_gpu, 0.1, x_max, y_max, z_max); // <-- cudaThreadSynchronize (); res = cudaGetLastError (); if (res != cudaSuccess) { printf ("CUDA Error [Initialization]: %s.\n", cudaGetErrorString (res)); } long nFlopsPerStencil = 22; long nGridPointsCount = 5 * ((x_max*y_max)*z_max); long nBytesTransferred = 5 * (((((x_max+10)*(y_max+10))*(z_max+10))*sizeof (double))+(((x_max*y_max)*z_max)*sizeof (double))); // warm up // compute_stencil --> upstream_5_3d<<<blks, thds>>>(( & u_0_1_out_gpu), u_0_0_gpu, u_0_1_gpu, 0.2, x_max, y_max, z_max); // <-- cudaThreadSynchronize (); res = cudaGetLastError (); if (res != cudaSuccess) { printf ("CUDA Error [Stencil]: %s.\n", cudaGetErrorString (res)); } // run the benchmark tic (); for (i = 0; i < 5; i++) { // compute_stencil --> upstream_5_3d<<<blks, thds>>>(( & u_0_1_out_gpu), u_0_0_gpu, u_0_1_gpu, 0.30000000000000004, x_max, y_max, z_max); // <-- cudaThreadSynchronize (); } toc (nFlopsPerStencil, nGridPointsCount, nBytesTransferred); // free memory // deallocate_grids --> cudaFree(((void * )u_0_1_gpu)); cudaFree(((void * )u_0_0_gpu)); cudaFree(((void * )u_0_1_out_gpu)); free(u_0_0); free(u_0_1); // <-- cudaThreadExit (); return EXIT_SUCCESS; }
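// tic()/toc() come from patusrt.h, which is not shown here, so exactly what toc() reports is an
// assumption. Given the three quantities the benchmark passes in, a typical report would be
// computed roughly as below. Note that nGridPointsCount and nBytesTransferred already include
// the factor of 5 timesteps from the benchmark loop, so no extra multiplication is needed.
#include <cstdio>

void report_sketch(double seconds, long nFlopsPerStencil, long nGridPointsCount, long nBytesTransferred)
{
    double gflops = (double)nFlopsPerStencil * (double)nGridPointsCount / seconds * 1e-9;
    double gbytes = (double)nBytesTransferred / seconds * 1e-9;
    printf("Performance: %.3f GFlop/s, %.3f GB/s\n", gflops, gbytes);
}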
ef04d897d7ddb2e752b84476592b814c54525d1e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/hip/SortStable.h> #include <ATen/Dispatch.h> #include <ATen/core/Array.h> #include <ATen/core/TensorBase.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/detail/KernelUtils.h> #include <ATen/hip/cub.cuh> #include <ATen/hip/detail/OffsetCalculator.cuh> #include <ATen/native/hip/SortUtils.cuh> #include <ATen/native/hip/SortingCommon.cuh> #include <c10/core/DeviceArray.h> #include <limits> namespace at::native { namespace { struct offset_t { int stride; int begin; __device__ int operator[](int i) { return stride * (begin + i); } }; // Segmented sort by full sort algorithm:. // Say we are sorting a (2, 3) tensor. We have in flattened form: // values 0.4 1.2 5.3 6.2 1.3 2.3 // indices 0 1 2 0 1 2 // segment_id 0 0 0 1 1 1 // First we sort by values, globally: // values 6.2 5.3 2.3 1.2 1.3 0.4 // indices 0 2 2 1 1 0 // segment_id 1 0 1 0 1 0 // Then we stable sort by segment id: // values 5.3 1.2 0.4 6.2 2.3 1.3 // indices 2 1 0 0 2 1 // segment_id 0 0 0 1 1 1 // This method can only work if the slice we are sorting (`dim`) is // innermost, and both values and indices are contiguous. We do this // by re-arranging the input into this form as needed, which will // unfortunately allocate memory if the request is not in this form. // Vectorized sort is slower than iterated sort if the number of // slices is small (since we're sorting twice, instead of invoking a // smaller sort `numSlices` times), but the cub sort // implementation here is a catch-all, so we're not looking for // efficiency, but instead correctness. template <typename scalar_t> __global__ void sort_postprocess_kernel( const scalar_t* in, scalar_t* out, int64_t* index, const int2* i_s_ptr, int nsegments, int nsort) { CUDA_KERNEL_LOOP(i, nsegments * nsort) { int segment = i / nsort; int j = i % nsort; int offset = segment * nsort; const scalar_t* in_ = in + offset; scalar_t* out_ = out + offset; int64_t* index_ = index + offset; const int2* i_s_ptr_ = i_s_ptr + offset; int idx = i_s_ptr_[j].y; index_[j] = idx; out_[j] = in_[idx]; } } C10_LAUNCH_BOUNDS_1(at::cuda::detail::CUDA_NUM_THREADS) __global__ void fill_index_and_segment_kernel( int2* data, int numel, at::cuda::detail::IntDivider<uint32_t> nsort_divider) { CUDA_KERNEL_LOOP(idx, numel) { auto div_mod = nsort_divider.divmod(idx); auto segment = static_cast<int>(div_mod.div); auto sort = static_cast<int>(div_mod.mod); data[idx] = int2{segment, sort}; } } C10_LAUNCH_BOUNDS_1(at::cuda::detail::CUDA_NUM_THREADS) __global__ void fill_reverse_indices_kernel( int64_t* data, int numel, at::cuda::detail::IntDivider<uint32_t> nsort_divider) { CUDA_KERNEL_LOOP(idx, numel) { data[idx] = nsort_divider.mod(idx); } } template <typename scalar_t> inline void segmented_sort_large_segments( const int64_t nsegments, const int64_t nsort, const int64_t n, const bool descending, const scalar_t* self_ptr, scalar_t* values_ptr, int64_t* indices_ptr) { using namespace at::cuda::detail; auto allocator = at::cuda::getCUDADeviceAllocator(); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 block = CUDA_NUM_THREADS; dim3 grid = GET_BLOCKS(nsort); c10::DeviceArray<int64_t> indices(*allocator, nsort); at::cuda::detail::IntDivider<uint32_t> nsort_divider(nsort); hipLaunchKernelGGL(( fill_reverse_indices_kernel), dim3(grid), dim3(block), 0, stream, indices.get(), nsort, nsort_divider); const int64_t* initial_indices = indices.get(); for (auto i : 
c10::irange(nsegments)) { at::cuda::cub::radix_sort_pairs<scalar_t, int64_t>( self_ptr, values_ptr, initial_indices, indices_ptr, nsort, descending); indices_ptr += nsort; self_ptr += nsort; values_ptr += nsort; } } template <typename scalar_t> inline void segmented_sort_pairs_by_full_sort( const int64_t nsegments, const int64_t nsort, const int64_t n, const bool descending, const scalar_t* const self_ptr, scalar_t* const values_ptr, int64_t* const indices_ptr) { int64_t segment_bits = std::max<int64_t>( 1L, static_cast<int64_t>(::ceil(std::log2(nsegments)))); const auto numel = nsort * nsegments; auto cuda_allocator = at::cuda::getCUDADeviceAllocator(); auto indices_and_segment = cuda_allocator->allocate(numel * sizeof(int2)); auto i_s_ptr = static_cast<int2*>(indices_and_segment.get()); using namespace at::cuda::detail; dim3 block = CUDA_NUM_THREADS; dim3 grid = GET_BLOCKS(numel); auto stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(); at::cuda::detail::IntDivider<uint32_t> nsort_divider(nsort); hipLaunchKernelGGL(( fill_index_and_segment_kernel), dim3(grid), dim3(block), 0, stream, i_s_ptr, numel, nsort_divider); auto indices_and_segment2 = cuda_allocator->allocate(nsegments * nsort * sizeof(int2)); auto i_s_ptr2 = static_cast<int2*>(indices_and_segment2.get()); at::cuda::cub::radix_sort_pairs<scalar_t, int2>( self_ptr, nullptr, i_s_ptr, i_s_ptr2, n, descending); TORCH_INTERNAL_ASSERT(segment_bits <= 32); // sort on lower 32bits, i.e. segment index at::cuda::cub::radix_sort_keys<int64_t>( reinterpret_cast<int64_t*>(i_s_ptr2), reinterpret_cast<int64_t*>(i_s_ptr), n, false, 0, segment_bits); hipLaunchKernelGGL(( sort_postprocess_kernel), dim3((n + 511) / 512), dim3(512), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_ptr, values_ptr, indices_ptr, i_s_ptr, nsegments, nsort); } template <typename scalar_t> void segmented_sort_pairs( int64_t nsegments, int64_t nsort, int64_t n, bool descending, const scalar_t* self_ptr, scalar_t* values_ptr, int64_t* indices_ptr) { const auto numel = nsort * nsegments; auto cuda_allocator = at::cuda::getCUDADeviceAllocator(); auto reverse_indices = cuda_allocator->allocate(numel * sizeof(int64_t)); int64_t* reverse_indices_ptr = static_cast<int64_t*>(reverse_indices.get()); using namespace at::cuda::detail; dim3 block = CUDA_NUM_THREADS; dim3 grid = GET_BLOCKS(numel); auto stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(); at::cuda::detail::IntDivider<uint32_t> nsort_divider(nsort); hipLaunchKernelGGL(( fill_reverse_indices_kernel), dim3(grid), dim3(block), 0, stream, reverse_indices_ptr, numel, nsort_divider); at::cuda::cub::segmented_sort_pairs( self_ptr, values_ptr, reverse_indices_ptr, indices_ptr, n, nsegments, offset_t{(int)nsort, 0}, offset_t{(int)nsort, 1}, descending); } } // namespace void launch_stable_sort_kernel( const TensorBase& self, int64_t dim, bool descending, const TensorBase& values, const TensorBase& indices) { const auto numel = self.numel(); if (numel == 0) { return; } int64_t numel_or_intmax = ::min(numel, static_cast<int64_t>(std::numeric_limits<int>::max())); int64_t nsort = self.size(dim); int64_t nbatch = (numel_or_intmax / nsort) * nsort; TORCH_CHECK(nbatch > 0, "Cannot sort dimension of length ", nsort); int64_t* indices_ptr = indices.data_ptr<int64_t>(); #if (defined(USE_ROCM) && ROCM_VERSION < 40500) constexpr bool is_rocm_bf16_sort_unsupported = true; #else constexpr bool is_rocm_bf16_sort_unsupported = false; #endif AT_DISPATCH_ALL_TYPES_AND3( kBool, kHalf, kBFloat16, self.scalar_type(), "sort", 
[&] { c10::guts::if_constexpr<!( is_rocm_bf16_sort_unsupported && std::is_same<scalar_t, c10::BFloat16>::value)>( [&](auto _) { const scalar_t* self_ptr = self.data_ptr<scalar_t>(); scalar_t* values_ptr = values.data_ptr<scalar_t>(); int64_t remaining = _(numel); while (remaining > 0) { int64_t n = ::min(remaining, nbatch); int64_t nsegments = n / nsort; if (nsegments == 1 || nsort >= 1000000) { // rough heuristics where even a single // sort occupies GPU segmented_sort_large_segments( nsegments, nsort, n, descending, self_ptr, values_ptr, indices_ptr); } else if (nsegments < 128) { segmented_sort_pairs_by_full_sort( nsegments, nsort, n, descending, self_ptr, values_ptr, indices_ptr); } else { segmented_sort_pairs( nsegments, nsort, n, descending, self_ptr, values_ptr, indices_ptr); } remaining -= n; self_ptr += n; values_ptr += n; indices_ptr += n; } }, [&](auto _) { TORCH_CHECK(_(false), "BFloat16 is not supported on ROCm < 4.5"); }); }); } } // namespace at::native
ef04d897d7ddb2e752b84476592b814c54525d1e.cu
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/cuda/SortStable.h> #include <ATen/Dispatch.h> #include <ATen/core/Array.h> #include <ATen/core/TensorBase.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/detail/KernelUtils.h> #include <ATen/cuda/cub.cuh> #include <ATen/cuda/detail/OffsetCalculator.cuh> #include <ATen/native/cuda/SortUtils.cuh> #include <ATen/native/cuda/SortingCommon.cuh> #include <c10/core/DeviceArray.h> #include <limits> namespace at::native { namespace { struct offset_t { int stride; int begin; __device__ int operator[](int i) { return stride * (begin + i); } }; // Segmented sort by full sort algorithm:. // Say we are sorting a (2, 3) tensor. We have in flattened form: // values 0.4 1.2 5.3 6.2 1.3 2.3 // indices 0 1 2 0 1 2 // segment_id 0 0 0 1 1 1 // First we sort by values, globally: // values 6.2 5.3 2.3 1.2 1.3 0.4 // indices 0 2 2 1 1 0 // segment_id 1 0 1 0 1 0 // Then we stable sort by segment id: // values 5.3 1.2 0.4 6.2 2.3 1.3 // indices 2 1 0 0 2 1 // segment_id 0 0 0 1 1 1 // This method can only work if the slice we are sorting (`dim`) is // innermost, and both values and indices are contiguous. We do this // by re-arranging the input into this form as needed, which will // unfortunately allocate memory if the request is not in this form. // Vectorized sort is slower than iterated sort if the number of // slices is small (since we're sorting twice, instead of invoking a // smaller sort `numSlices` times), but the cub sort // implementation here is a catch-all, so we're not looking for // efficiency, but instead correctness. template <typename scalar_t> __global__ void sort_postprocess_kernel( const scalar_t* in, scalar_t* out, int64_t* index, const int2* i_s_ptr, int nsegments, int nsort) { CUDA_KERNEL_LOOP(i, nsegments * nsort) { int segment = i / nsort; int j = i % nsort; int offset = segment * nsort; const scalar_t* in_ = in + offset; scalar_t* out_ = out + offset; int64_t* index_ = index + offset; const int2* i_s_ptr_ = i_s_ptr + offset; int idx = i_s_ptr_[j].y; index_[j] = idx; out_[j] = in_[idx]; } } C10_LAUNCH_BOUNDS_1(at::cuda::detail::CUDA_NUM_THREADS) __global__ void fill_index_and_segment_kernel( int2* data, int numel, at::cuda::detail::IntDivider<uint32_t> nsort_divider) { CUDA_KERNEL_LOOP(idx, numel) { auto div_mod = nsort_divider.divmod(idx); auto segment = static_cast<int>(div_mod.div); auto sort = static_cast<int>(div_mod.mod); data[idx] = int2{segment, sort}; } } C10_LAUNCH_BOUNDS_1(at::cuda::detail::CUDA_NUM_THREADS) __global__ void fill_reverse_indices_kernel( int64_t* data, int numel, at::cuda::detail::IntDivider<uint32_t> nsort_divider) { CUDA_KERNEL_LOOP(idx, numel) { data[idx] = nsort_divider.mod(idx); } } template <typename scalar_t> inline void segmented_sort_large_segments( const int64_t nsegments, const int64_t nsort, const int64_t n, const bool descending, const scalar_t* self_ptr, scalar_t* values_ptr, int64_t* indices_ptr) { using namespace at::cuda::detail; auto allocator = at::cuda::getCUDADeviceAllocator(); auto stream = at::cuda::getCurrentCUDAStream(); dim3 block = CUDA_NUM_THREADS; dim3 grid = GET_BLOCKS(nsort); c10::DeviceArray<int64_t> indices(*allocator, nsort); at::cuda::detail::IntDivider<uint32_t> nsort_divider(nsort); fill_reverse_indices_kernel<<<grid, block, 0, stream>>>( indices.get(), nsort, nsort_divider); const int64_t* initial_indices = indices.get(); for (auto i : c10::irange(nsegments)) { at::cuda::cub::radix_sort_pairs<scalar_t, int64_t>( self_ptr, values_ptr, initial_indices, 
indices_ptr, nsort, descending); indices_ptr += nsort; self_ptr += nsort; values_ptr += nsort; } } template <typename scalar_t> inline void segmented_sort_pairs_by_full_sort( const int64_t nsegments, const int64_t nsort, const int64_t n, const bool descending, const scalar_t* const self_ptr, scalar_t* const values_ptr, int64_t* const indices_ptr) { int64_t segment_bits = std::max<int64_t>( 1L, static_cast<int64_t>(std::ceil(std::log2(nsegments)))); const auto numel = nsort * nsegments; auto cuda_allocator = at::cuda::getCUDADeviceAllocator(); auto indices_and_segment = cuda_allocator->allocate(numel * sizeof(int2)); auto i_s_ptr = static_cast<int2*>(indices_and_segment.get()); using namespace at::cuda::detail; dim3 block = CUDA_NUM_THREADS; dim3 grid = GET_BLOCKS(numel); auto stream = c10::cuda::getCurrentCUDAStream(); at::cuda::detail::IntDivider<uint32_t> nsort_divider(nsort); fill_index_and_segment_kernel<<<grid, block, 0, stream>>>( i_s_ptr, numel, nsort_divider); auto indices_and_segment2 = cuda_allocator->allocate(nsegments * nsort * sizeof(int2)); auto i_s_ptr2 = static_cast<int2*>(indices_and_segment2.get()); at::cuda::cub::radix_sort_pairs<scalar_t, int2>( self_ptr, nullptr, i_s_ptr, i_s_ptr2, n, descending); TORCH_INTERNAL_ASSERT(segment_bits <= 32); // sort on lower 32bits, i.e. segment index at::cuda::cub::radix_sort_keys<int64_t>( reinterpret_cast<int64_t*>(i_s_ptr2), reinterpret_cast<int64_t*>(i_s_ptr), n, false, 0, segment_bits); sort_postprocess_kernel<<< (n + 511) / 512, 512, 0, at::cuda::getCurrentCUDAStream()>>>( self_ptr, values_ptr, indices_ptr, i_s_ptr, nsegments, nsort); } template <typename scalar_t> void segmented_sort_pairs( int64_t nsegments, int64_t nsort, int64_t n, bool descending, const scalar_t* self_ptr, scalar_t* values_ptr, int64_t* indices_ptr) { const auto numel = nsort * nsegments; auto cuda_allocator = at::cuda::getCUDADeviceAllocator(); auto reverse_indices = cuda_allocator->allocate(numel * sizeof(int64_t)); int64_t* reverse_indices_ptr = static_cast<int64_t*>(reverse_indices.get()); using namespace at::cuda::detail; dim3 block = CUDA_NUM_THREADS; dim3 grid = GET_BLOCKS(numel); auto stream = c10::cuda::getCurrentCUDAStream(); at::cuda::detail::IntDivider<uint32_t> nsort_divider(nsort); fill_reverse_indices_kernel<<<grid, block, 0, stream>>>( reverse_indices_ptr, numel, nsort_divider); at::cuda::cub::segmented_sort_pairs( self_ptr, values_ptr, reverse_indices_ptr, indices_ptr, n, nsegments, offset_t{(int)nsort, 0}, offset_t{(int)nsort, 1}, descending); } } // namespace void launch_stable_sort_kernel( const TensorBase& self, int64_t dim, bool descending, const TensorBase& values, const TensorBase& indices) { const auto numel = self.numel(); if (numel == 0) { return; } int64_t numel_or_intmax = std::min(numel, static_cast<int64_t>(std::numeric_limits<int>::max())); int64_t nsort = self.size(dim); int64_t nbatch = (numel_or_intmax / nsort) * nsort; TORCH_CHECK(nbatch > 0, "Cannot sort dimension of length ", nsort); int64_t* indices_ptr = indices.data_ptr<int64_t>(); #if (defined(USE_ROCM) && ROCM_VERSION < 40500) constexpr bool is_rocm_bf16_sort_unsupported = true; #else constexpr bool is_rocm_bf16_sort_unsupported = false; #endif AT_DISPATCH_ALL_TYPES_AND3( kBool, kHalf, kBFloat16, self.scalar_type(), "sort", [&] { c10::guts::if_constexpr<!( is_rocm_bf16_sort_unsupported && std::is_same<scalar_t, c10::BFloat16>::value)>( [&](auto _) { const scalar_t* self_ptr = self.data_ptr<scalar_t>(); scalar_t* values_ptr = values.data_ptr<scalar_t>(); int64_t 
remaining = _(numel); while (remaining > 0) { int64_t n = std::min(remaining, nbatch); int64_t nsegments = n / nsort; if (nsegments == 1 || nsort >= 1000000) { // rough heuristics where even a single // sort occupies GPU segmented_sort_large_segments( nsegments, nsort, n, descending, self_ptr, values_ptr, indices_ptr); } else if (nsegments < 128) { segmented_sort_pairs_by_full_sort( nsegments, nsort, n, descending, self_ptr, values_ptr, indices_ptr); } else { segmented_sort_pairs( nsegments, nsort, n, descending, self_ptr, values_ptr, indices_ptr); } remaining -= n; self_ptr += n; values_ptr += n; indices_ptr += n; } }, [&](auto _) { TORCH_CHECK(_(false), "BFloat16 is not supported on ROCm < 4.5"); }); }); } } // namespace at::native
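// segmented_sort_pairs_by_full_sort above packs (segment, sort index) into an int2, then
// radix-sorts the keys reinterpreted as int64_t over only the lowest segment_bits bits.
// That works because int2{segment, sort} lays out .x first, and on the little-endian GPU
// .x therefore occupies the low 32 bits of the 8-byte value. A host-side illustration,
// written purely for explanation and not taken from this file:
#include <cstdint>
#include <cstdio>
#include <cstring>

int main()
{
    struct { int x, y; } v = { /*segment*/ 3, /*sort index*/ 7 };
    int64_t key;
    std::memcpy(&key, &v, sizeof(key));
    // On a little-endian host this prints 0x0000000700000003: the segment id sits in the
    // low 32 bits, so radix-sorting only those bits is a stable sort by segment.
    printf("key = 0x%016llx\n", (unsigned long long)key);
    return 0;
}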
22de5d9794827cb897c8bce6dd51ca8f2a4ab6f5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void abs_kerneld(double *v, int n) { int x(threadIdx.x + blockDim.x * blockIdx.x); if (x >= n) return; v[x] = ::abs(v[x]); }
22de5d9794827cb897c8bce6dd51ca8f2a4ab6f5.cu
#include "includes.h" __global__ void abs_kerneld(double *v, int n) { int x(threadIdx.x + blockDim.x * blockIdx.x); if (x >= n) return; v[x] = ::abs(v[x]); }
f0ff234c8116ab55afa13e50639354ec60365654.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. 
// You should wrap your allocation and copying statements like we've done in the // code we're supplying you. Here is an example of the unsafe way to allocate // memory on the GPU: // // hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "utils.h" __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // TODO // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. //int r = blockIdx.x; //int c = threadIdx.x; int r = blockIdx.x*blockDim.x + threadIdx.x; int c = blockIdx.y*blockDim.y + threadIdx.y; if(r>=numRows || c>=numCols) return; float result = 0.f; //For every value in the filter around the pixel (c, r) for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) { for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) { //Find the global image position for this filter position //clamp to boundary of the image int image_r = min(max(r + filter_r, 0), (numRows - 1)); int image_c = min(max(c + filter_c, 0), (numCols - 1)); float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]); float filter_value = filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2]; result += image_value * filter_value; } } outputChannel[r*numCols + c] = result; } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // TODO // // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. 
You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } //int r = blockIdx.x; //int c = threadIdx.x; int r = blockIdx.x*blockDim.x + threadIdx.x; int c = blockIdx.y*blockDim.y + threadIdx.y; if(r>=numRows || c>=numCols) return; int index = r*numCols + c; redChannel[index] = inputImageRGBA[index].x; greenChannel[index] = inputImageRGBA[index].y; blueChannel[index] = inputImageRGBA[index].z; } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. __global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { //const int2 thread_2D_pos = make_int2(blockIdx.x, threadIdx.x); const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); //const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; const int thread_1D_pos = thread_2D_pos.x*numCols + thread_2D_pos.y; //make sure we don't try and access memory outside the image //by having any threads mapped there return early //if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) // return; if (thread_2D_pos.x >= numRows || thread_2D_pos.y >= numCols) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //TODO: //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with hipMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth)); //TODO: //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice); //Remember to use checkCudaErrors! 
checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { //TODO: Set reasonable block size (i.e., number of threads per block) //const dim3 blockSize(numCols, 1, 1); const dim3 blockSize(32, 32, 1); //const dim3 blockSize(32, 1, 1); //TODO: //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. //const dim3 gridSize(numRows, 1, 1); const dim3 gridSize((numRows+31)/32, (numCols+31)/32, 1); //const dim3 gridSize((numRows+31)/32, numCols, 1); //TODO: Launch a kernel for separating the RGBA image into different color channels hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //TODO: Call your convolution kernel here 3 times, once for each color channel. hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); // Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(hipFree(d_red)); checkCudaErrors(hipFree(d_green)); checkCudaErrors(hipFree(d_blue)); checkCudaErrors(hipFree(d_filter)); }
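// The comments above refer to a "sequential reference solution" for the exact clamping
// semantics, but that reference code is not included in this file. The sketch below is a
// plain CPU version of the same clamped, weighted-average blur for a single channel, written
// only to make the gaussian_blur kernel's logic explicit; the function name and signature
// are illustrative assumptions, not part of the assignment code.
#include <algorithm>

void blur_channel_reference(const unsigned char* in, unsigned char* out,
                            int numRows, int numCols,
                            const float* filter, int filterWidth)
{
    const int half = filterWidth / 2;
    for (int r = 0; r < numRows; ++r) {
        for (int c = 0; c < numCols; ++c) {
            float result = 0.f;
            for (int fr = -half; fr <= half; ++fr) {
                for (int fc = -half; fc <= half; ++fc) {
                    // Clamp neighbor coordinates to the image bounds, as the GPU kernel does.
                    int ir = std::min(std::max(r + fr, 0), numRows - 1);
                    int ic = std::min(std::max(c + fc, 0), numCols - 1);
                    result += filter[(fr + half) * filterWidth + (fc + half)] * in[ir * numCols + ic];
                }
            }
            out[r * numCols + c] = static_cast<unsigned char>(result);
        }
    }
}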
f0ff234c8116ab55afa13e50639354ec60365654.cu
// Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. // You should wrap your allocation and copying statements like we've done in the // code we're supplying you. 
Here is an example of the unsafe way to allocate // memory on the GPU: // // cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "utils.h" __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // TODO // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. //int r = blockIdx.x; //int c = threadIdx.x; int r = blockIdx.x*blockDim.x + threadIdx.x; int c = blockIdx.y*blockDim.y + threadIdx.y; if(r>=numRows || c>=numCols) return; float result = 0.f; //For every value in the filter around the pixel (c, r) for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) { for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) { //Find the global image position for this filter position //clamp to boundary of the image int image_r = min(max(r + filter_r, 0), (numRows - 1)); int image_c = min(max(c + filter_c, 0), (numCols - 1)); float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]); float filter_value = filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2]; result += image_value * filter_value; } } outputChannel[r*numCols + c] = result; } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // TODO // // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. 
You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } //int r = blockIdx.x; //int c = threadIdx.x; int r = blockIdx.x*blockDim.x + threadIdx.x; int c = blockIdx.y*blockDim.y + threadIdx.y; if(r>=numRows || c>=numCols) return; int index = r*numCols + c; redChannel[index] = inputImageRGBA[index].x; greenChannel[index] = inputImageRGBA[index].y; blueChannel[index] = inputImageRGBA[index].z; } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. __global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { //const int2 thread_2D_pos = make_int2(blockIdx.x, threadIdx.x); const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); //const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; const int thread_1D_pos = thread_2D_pos.x*numCols + thread_2D_pos.y; //make sure we don't try and access memory outside the image //by having any threads mapped there return early //if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) // return; if (thread_2D_pos.x >= numRows || thread_2D_pos.y >= numCols) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //TODO: //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with cudaMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth)); //TODO: //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice); //Remember to use checkCudaErrors! 
checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { //TODO: Set reasonable block size (i.e., number of threads per block) //const dim3 blockSize(numCols, 1, 1); const dim3 blockSize(32, 32, 1); //const dim3 blockSize(32, 1, 1); //TODO: //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. //const dim3 gridSize(numRows, 1, 1); const dim3 gridSize((numRows+31)/32, (numCols+31)/32, 1); //const dim3 gridSize((numRows+31)/32, numCols, 1); //TODO: Launch a kernel for separating the RGBA image into different color channels separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); //TODO: Call your convolution kernel here 3 times, once for each color channel. gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); // Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. recombineChannels<<<gridSize, blockSize>>>(d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(cudaFree(d_red)); checkCudaErrors(cudaFree(d_green)); checkCudaErrors(cudaFree(d_blue)); checkCudaErrors(cudaFree(d_filter)); }
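The homework comments above describe the blur as a weighted average whose out-of-image neighbours are clamped to the border, and they defer the exact clamping semantics to a sequential reference solution that is not included in this row. A plausible host-side sketch of that reference behaviour, matching the min/max clamping used in the gaussian_blur kernel, is given below; the function name channelConvolveCPU is illustrative only.

#include <algorithm>

// Illustrative CPU reference for one colour channel: centre the filterWidth x
// filterWidth weights on each pixel, clamp neighbour coordinates to the image
// border, accumulate in float, and truncate to unsigned char (as the kernel does).
void channelConvolveCPU(const unsigned char* in, unsigned char* out,
                        int numRows, int numCols,
                        const float* filter, int filterWidth) {
  const int half = filterWidth / 2;
  for (int r = 0; r < numRows; ++r) {
    for (int c = 0; c < numCols; ++c) {
      float result = 0.f;
      for (int fr = -half; fr <= half; ++fr) {
        for (int fc = -half; fc <= half; ++fc) {
          const int ir = std::min(std::max(r + fr, 0), numRows - 1);  // clamp row
          const int ic = std::min(std::max(c + fc, 0), numCols - 1);  // clamp column
          result += filter[(fr + half) * filterWidth + (fc + half)] *
                    static_cast<float>(in[ir * numCols + ic]);
        }
      }
      out[r * numCols + c] = static_cast<unsigned char>(result);
    }
  }
}

A per-pixel comparison against a reference of this kind, within a small threshold, is how the assignment text says submissions are checked.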
4890cbbbe568cadef32cb44dd1601807af5dfc87.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/cuda_allocator.h" #include "core/providers/cuda/cuda_common.h" #include "core/providers/cuda/atomic/common.cuh" #include "core/providers/cuda/reduction/reduction_utils.cuh" #include "orttraining/training_ops/cuda/math/isfinite.cuh" #include "orttraining/training_ops/cuda/optimizer/common.h" #include "orttraining/training_ops/cuda/optimizer/common.cuh" #include "orttraining/training_ops/cuda/optimizer/lamb.h" namespace onnxruntime { namespace cuda { template <typename T1, typename T2, typename T3> __device__ __forceinline__ void _LambComputeDirectionRule( const T1& g_scale, const T1& w, const T2& g, const T3& m1, const T3& m2, const float& alpha, const float& beta, const float& lambda, const float& epsilon, const float& alpha_correction, const float& beta_correction, T2& d, T3& m1_new, T3& m2_new) { // Actual gradient. The scale is a product of loss' scale and // global gradient norm (if the norm > 1). const T1 g_unscaled = T1(g) / g_scale; // A constant in Lamb's equation. const T1 one = T1(1.0f); // Update exponentially-averaged historical gradient const T1 m1_new_tmp = alpha * static_cast<T1>(m1) + (one - alpha) * g_unscaled; // Update exponentially-averaged historical squared gradient const T1 m2_new_tmp = beta * static_cast<T1>(m2) + (one - beta) * g_unscaled * g_unscaled; // Compute unbiased 1st-order momentom. // The value alpha_correction is usually (1-alpha^t), // where t is the number of executed training iterations. const T1 m1_new_tmp_corrected = m1_new_tmp / alpha_correction; // Compute unbiased 2nd-order momentom. // The value beta_correction is usually (1-beta^t), // where t is the number of executed training iterations. const T1 m2_new_tmp_corrected = m2_new_tmp / beta_correction; // Save regularized update direction to output. const T1 d_tmp = lambda * w + m1_new_tmp_corrected / (_Sqrt(m2_new_tmp_corrected) + epsilon); // Things are updated only if the direction is finite. 
if (_IsFiniteScalar(d_tmp)) { d = d_tmp; m1_new = m1_new_tmp; m2_new = m2_new_tmp; } else { d = T2(0); m1_new = m1; m2_new = m2; } } template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> __global__ void _LambComputeDirectionImpl( const T1* weights, const T2* grads, const T3* moment_1, const T3* moment_2, const T1* loss_scale, const T_GRAD_NORM* g_norm, float alpha, float beta, float lambda, float epsilon, float max_norm, float alpha_correction, float beta_correction, T2* update_direction, T3* moment_1_out, T3* moment_2_out, CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); const T1 scale = _ComputeGradScale<T1, T_GRAD_NORM, T1>(loss_scale, g_norm, max_norm); _LambComputeDirectionRule( scale, weights[id], grads[id], moment_1[id], moment_2[id], alpha, beta, lambda, epsilon, alpha_correction, beta_correction, update_direction[id], moment_1_out[id], moment_2_out[id]); } template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> void LambComputeDirection( hipStream_t stream, const T1* weights, const T2* grads, const T3* moment_1, const T3* moment_2, const T1* loss_scale, const T_GRAD_NORM* grad_norm, float alpha, float beta, float lambda, float epsilon, float max_norm, float alpha_correction, float beta_correction, T2* update_direction, T3* moment_1_out, T3* moment_2_out, size_t count) { int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock)); CUDA_LONG N = static_cast<CUDA_LONG>(count); hipLaunchKernelGGL(( _LambComputeDirectionImpl<T1, T2, T3, T_GRAD_NORM>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, weights, grads, moment_1, moment_2, loss_scale, grad_norm, alpha, beta, lambda, epsilon, max_norm, alpha_correction, beta_correction, update_direction, moment_1_out, moment_2_out, N); } #define SPECIALIZED_LAMB_COMPUTE_DIRECTION(T1, T2, T3, T_GRAD_NORM) \ template void LambComputeDirection( \ hipStream_t stream, \ const T1* weights, \ const T2* grads, \ const T3* moment_1, \ const T3* moment_2, \ const T1* loss_scale, \ const T_GRAD_NORM* grad_norm, \ float alpha, \ float beta, \ float lambda, \ float epsilon, \ float max_norm, \ float alpha_correction, \ float beta_correction, \ T2* weights_out, \ T3* moment_1_out, \ T3* moment_2_out, \ size_t count); SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, float, float, float) SPECIALIZED_LAMB_COMPUTE_DIRECTION(double, double, double, double) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, half, half) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, half, float) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, float, half) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, float, float) #if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, nv_bfloat16, nv_bfloat16) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, nv_bfloat16, float) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, float, nv_bfloat16) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, float, float) #endif template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> __device__ __forceinline__ void _LambUpdateRule( const T1 eta, const float ratio_min, const float ratio_max, const T2 r_norm, const T2 w_norm, const T2 w, const T3 d, T2* w_new, T3* g_new, T_MIXED_PRECISION_FP* w_mixed_precision_new) { // Confidence coefficeint of this update. const T2 ratio = (w_norm != T2(0.0f) && r_norm != T2(0.0f)) ? 
T2(eta) * _Max(T2(ratio_min), _Min(T2(ratio_max), _Sqrt(w_norm / r_norm))) : T2(eta); // Compute delta using the saved update direction. const T2 delta = -ratio * T2(d); const T2 w_new_tmp = w + delta; if (_IsFiniteScalar(w_new_tmp)) { if (g_new) { *g_new = T3(delta); } if (w_new) { *w_new = w_new_tmp; if (w_mixed_precision_new) { *w_mixed_precision_new = T_MIXED_PRECISION_FP(w_new_tmp); } } } else { if (g_new) { *g_new = T3(0); } if (w_new) { *w_new = w; if (w_mixed_precision_new) { *w_mixed_precision_new = T_MIXED_PRECISION_FP(w); } } } } template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> __global__ void _LambUpdateImpl( const T1* eta, const float ratio_min, const float ratio_max, const T2* r_norm, const T2* w_norm, const T2* weights, const T3* update_direction, T2* weights_out, T3* gradients_out, T_MIXED_PRECISION_FP* mixed_precision_weights_out, CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); _LambUpdateRule( *eta, ratio_min, ratio_max, *r_norm, *w_norm, weights[id], update_direction[id], weights_out != nullptr ? weights_out + id : nullptr, gradients_out != nullptr ? gradients_out + id : nullptr, mixed_precision_weights_out != nullptr ? mixed_precision_weights_out + id : nullptr); } template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> void LambUpdate( hipStream_t stream, const T1* eta, const float ratio_min, const float ratio_max, const T2* r_norm, const T2* w_norm, const T2* weights, const T3* update_direction, T2* weights_out, T3* gradients_out, T_MIXED_PRECISION_FP* mixed_precision_weights_out, size_t count) { int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock)); CUDA_LONG N = static_cast<CUDA_LONG>(count); hipLaunchKernelGGL(( _LambUpdateImpl<T1, T2, T3, T_MIXED_PRECISION_FP>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, eta, ratio_min, ratio_max, r_norm, w_norm, weights, update_direction, weights_out, gradients_out, mixed_precision_weights_out, N); } #define INSTANTIATE_LAMB_UPDATE(T1, T2, T3, T_MIXED_PRECISION_FP) \ template void LambUpdate( \ hipStream_t stream, \ const T1* eta, \ const float ratio_min, \ const float ratio_max, \ const T2* r_norm, \ const T2* w_norm, \ const T2* weights, \ const T3* update_direction, \ T2* weights_out, \ T3* gradients_out, \ T_MIXED_PRECISION_FP* mixed_precision_weights_out, \ size_t count); INSTANTIATE_LAMB_UPDATE(float, float, float, half) INSTANTIATE_LAMB_UPDATE(double, double, double, half) INSTANTIATE_LAMB_UPDATE(half, float, half, half) INSTANTIATE_LAMB_UPDATE(float, float, half, half) #if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_LAMB_UPDATE(float, float, float, nv_bfloat16) INSTANTIATE_LAMB_UPDATE(double, double, double, nv_bfloat16) INSTANTIATE_LAMB_UPDATE(nv_bfloat16, float, nv_bfloat16, nv_bfloat16) INSTANTIATE_LAMB_UPDATE(float, float, nv_bfloat16, nv_bfloat16) #endif template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> __global__ void LambMultiTensorComputeDirectionImpl( ChunkGroup<6> chunk_group, const T1* loss_scale, const T_GRAD_NORM* g_norm, const float lambda, const float alpha, const float beta, const float epsilon, const float max_norm, const float alpha_correction, const float beta_correction) { const int group_index = chunk_group.block_index_to_tensor_group_index[blockIdx.x]; const int tensor_size = chunk_group.tensor_sizes[group_index]; const int chunk_size = chunk_group.chunk_size; const int chunk_start = 
chunk_group.block_index_to_chunk_start_index[blockIdx.x]; const T1* w = reinterpret_cast<const T1*>(chunk_group.tensor_ptrs[0][group_index]) + chunk_start; T2* g = reinterpret_cast<T2*>(chunk_group.tensor_ptrs[1][group_index]) + chunk_start; const T3* m1 = reinterpret_cast<const T3*>(chunk_group.tensor_ptrs[2][group_index]) + chunk_start; const T3* m2 = reinterpret_cast<const T3*>(chunk_group.tensor_ptrs[3][group_index]) + chunk_start; T3* m1_new = reinterpret_cast<T3*>(chunk_group.tensor_ptrs[4][group_index]) + chunk_start; T3* m2_new = reinterpret_cast<T3*>(chunk_group.tensor_ptrs[5][group_index]) + chunk_start; const T1 scale = _ComputeGradScale<T1, T_GRAD_NORM, T1>(loss_scale, g_norm, max_norm); #pragma unroll for (int i = threadIdx.x; i < chunk_size && i + chunk_start < tensor_size; i += blockDim.x) { _LambComputeDirectionRule( scale, w[i], g[i], m1[i], m2[i], alpha, beta, lambda, epsilon, alpha_correction, beta_correction, g[i], m1_new[i], m2_new[i]); } } template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> void LambMultiTensorComputeDirectionFunctor<T1, T2, T3, T_GRAD_NORM>::operator()( hipStream_t stream, ChunkGroup<6> chunk_group, const T1* loss_scale, const T_GRAD_NORM* g_norm, const float lambda, const float alpha, const float beta, const float epsilon, const float max_norm, const float alpha_correction, const float beta_correction) { const int thread_count = ChunkGroup<6>::thread_count_per_block; const int block_count = chunk_group.chunk_count; hipLaunchKernelGGL(( LambMultiTensorComputeDirectionImpl<T1, T2, T3>), dim3(block_count), dim3(thread_count), 0, stream, chunk_group, loss_scale, g_norm, lambda, alpha, beta, epsilon, max_norm, alpha_correction, beta_correction); } #define INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(T1, T2, T3, T_GRAD_NORM) \ template void LambMultiTensorComputeDirectionFunctor<T1, T2, T3, T_GRAD_NORM>::operator()( \ hipStream_t stream, \ ChunkGroup<6> chunk_group, \ const T1* loss_scale, \ const T_GRAD_NORM* g_norm, \ const float lambda, \ const float alpha, \ const float beta, \ const float epsilon, \ const float max_norm, \ const float alpha_correction, \ const float beta_correction); INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, float, float, float) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(double, double, double, double) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, half, half) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, half, float) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, float, half) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, float, float) #if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, nv_bfloat16, nv_bfloat16) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, nv_bfloat16, float) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, float, nv_bfloat16) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, float, float) #endif template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> __global__ void LambMultiTensorUpdateImpl( ChunkGroup<7> chunk_group, const T1* eta, const float ratio_min, const float ratio_max) { const int group_index = chunk_group.block_index_to_tensor_group_index[blockIdx.x]; const int tensor_size = chunk_group.tensor_sizes[group_index]; const int chunk_size = chunk_group.chunk_size; const int chunk_start = chunk_group.block_index_to_chunk_start_index[blockIdx.x]; const T2* w_norm = 
reinterpret_cast<const T2*>(chunk_group.tensor_ptrs[0][group_index]); const T2* r_norm = reinterpret_cast<const T2*>(chunk_group.tensor_ptrs[1][group_index]); const T2* w = reinterpret_cast<const T2*>(chunk_group.tensor_ptrs[2][group_index]) + chunk_start; const T3* d = reinterpret_cast<const T3*>(chunk_group.tensor_ptrs[3][group_index]) + chunk_start; T2* w_new = chunk_group.tensor_ptrs[4][group_index] != nullptr ? reinterpret_cast<T2*>(chunk_group.tensor_ptrs[4][group_index]) + chunk_start : nullptr; T3* g_new = chunk_group.tensor_ptrs[5][group_index] != nullptr ? reinterpret_cast<T3*>(chunk_group.tensor_ptrs[5][group_index]) + chunk_start : nullptr; T_MIXED_PRECISION_FP* w_mixed_precision_new = chunk_group.tensor_ptrs[6][group_index] != nullptr ? reinterpret_cast<T_MIXED_PRECISION_FP*>(chunk_group.tensor_ptrs[6][group_index]) + chunk_start : nullptr; for (int i = threadIdx.x; i < chunk_size && i + chunk_start < tensor_size; i += blockDim.x) { _LambUpdateRule( *eta, ratio_min, ratio_max, *r_norm, *w_norm, w[i], d[i], w_new != nullptr ? w_new + i : nullptr, g_new != nullptr ? g_new + i : nullptr, w_mixed_precision_new != nullptr ? w_mixed_precision_new + i : nullptr); } } template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> void LambMultiTensorUpdateFunctor<T1, T2, T3, T_MIXED_PRECISION_FP>::operator()( hipStream_t stream, ChunkGroup<7> chunk_group, const T1* eta, const float ratio_min, const float ratio_max) { const int thread_count = ChunkGroup<7>::thread_count_per_block; const int block_count = chunk_group.chunk_count; hipLaunchKernelGGL(( LambMultiTensorUpdateImpl<T1, T2, T3, T_MIXED_PRECISION_FP>), dim3(block_count), dim3(thread_count), 0, stream, chunk_group, eta, ratio_min, ratio_max); } #define INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(T1, T2, T3, T_MIXED_PRECISION_FP) \ template void LambMultiTensorUpdateFunctor<T1, T2, T3, T_MIXED_PRECISION_FP>::operator()( \ hipStream_t stream, \ ChunkGroup<7> chunk_group, \ const T1* eta, \ const float ratio_min, \ const float ratio_max); INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, float, half) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(double, double, double, half) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(half, float, half, half) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, half, half) #if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, float, nv_bfloat16) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(double, double, double, nv_bfloat16) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(nv_bfloat16, float, nv_bfloat16, nv_bfloat16) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, nv_bfloat16, nv_bfloat16) #endif // w_buffer[i], d_buffer[i] is used to store the squared sum of all elements processed by the i-th block. 
// sync_range_and_lock is used for a well ordered reduction over blocks spanning the same tensor template <typename TIn1, typename TIn2, typename TOut1, typename TOut2, typename TBuf> __launch_bounds__(ChunkGroup<4>::thread_count_per_block) __global__ void LambMultiTensorReductionImpl( ChunkGroup<4> chunk_group, TOut1* w_buffer, TOut2* d_buffer, LambMultiTensorSyncRangeAndLock* sync_range_and_lock) { const int group_index = chunk_group.block_index_to_tensor_group_index[blockIdx.x]; const int tensor_size = chunk_group.tensor_sizes[group_index]; const int chunk_size = chunk_group.chunk_size; const int chunk_start = chunk_group.block_index_to_chunk_start_index[blockIdx.x]; const TIn1* w = reinterpret_cast<const TIn1*>(chunk_group.tensor_ptrs[0][group_index]) + chunk_start; const TIn2* d = reinterpret_cast<const TIn2*>(chunk_group.tensor_ptrs[1][group_index]) + chunk_start; TOut1* w_norm = reinterpret_cast<TOut1*>(chunk_group.tensor_ptrs[2][group_index]); TOut2* d_norm = reinterpret_cast<TOut2*>(chunk_group.tensor_ptrs[3][group_index]); TBuf d_sum = TBuf(0.f); TBuf w_sum = TBuf(0.f); constexpr int load_count_per_thread = 4; for (int i = threadIdx.x; i < chunk_size && i + chunk_start < tensor_size; i += blockDim.x * load_count_per_thread) { #pragma unroll for (int j = 0; j < load_count_per_thread; ++j) { const int index_in_chunk = i + j * blockDim.x; const int index_in_tensor = chunk_start + index_in_chunk; if (index_in_chunk < chunk_size && index_in_tensor < tensor_size) { const TBuf w_element = TBuf(w[index_in_chunk]); const TBuf d_element = TBuf(d[index_in_chunk]); w_sum += w_element * w_element; d_sum += d_element * d_element; } } } // Thread count in a block must be a multiple of GPU_WARP_SIZE. #pragma unroll for (int stride = GPU_WARP_SIZE / 2; stride > 0; stride /= 2) { w_sum += WARP_SHFL_DOWN(w_sum, stride); d_sum += WARP_SHFL_DOWN(d_sum, stride); } const int warp_count_in_block = blockDim.x / GPU_WARP_SIZE; const int lid = threadIdx.x % GPU_WARP_SIZE; const int wid = threadIdx.x / GPU_WARP_SIZE; // Shape is 2 x warp_count_in_block. 
extern __shared__ unsigned char shared_memory_[]; TBuf* shared_memory = reinterpret_cast<TBuf*>(shared_memory_); TBuf* w_shared_memory_ = shared_memory; TBuf* d_shared_memory_ = shared_memory + warp_count_in_block; if (lid == 0) { w_shared_memory_[wid] = w_sum; d_shared_memory_[wid] = d_sum; } __syncthreads(); #pragma unroll for (int stride = warp_count_in_block / 2; stride > 0; stride /= 2) { if (threadIdx.x < stride) { w_shared_memory_[threadIdx.x] += w_shared_memory_[threadIdx.x + stride]; d_shared_memory_[threadIdx.x] += d_shared_memory_[threadIdx.x + stride]; } __syncthreads(); } // ascertain the range of blocks with the associated tensor // note: if non-ordered reduction is OK, then atomicAdd over blocks could suffice const int leading_block_in_tensor = sync_range_and_lock[group_index].leading_block; const int num_blocks_in_tensor = sync_range_and_lock[group_index].number_blocks; if (num_blocks_in_tensor == 1) { if (threadIdx.x == 0) { *w_norm = TOut1(w_shared_memory_[0]); *d_norm = TOut2(d_shared_memory_[0]); } return; } if (threadIdx.x == 0) { w_buffer[blockIdx.x] = w_shared_memory_[0]; d_buffer[blockIdx.x] = d_shared_memory_[0]; } __threadfence(); __syncthreads(); // use lock to determine if this is last block for given tensor __shared__ bool is_last_block_done; if (threadIdx.x == 0) { int* p_lock = &sync_range_and_lock[group_index].completed_blocks; int counter = atomicAdd(p_lock, 1); is_last_block_done = (counter == num_blocks_in_tensor - 1); } __syncthreads(); // only last block to finish for associated tensor enters below if (is_last_block_done) { const int pow2_bound = least_pow2_bound(num_blocks_in_tensor); int blockid = leading_block_in_tensor + threadIdx.x; for (int stride = pow2_bound / 2; stride > 0; stride /= 2) { if (threadIdx.x < stride && threadIdx.x + stride < num_blocks_in_tensor) { w_buffer[blockid] += w_buffer[blockid + stride]; d_buffer[blockid] += d_buffer[blockid + stride]; } __syncthreads(); } if (threadIdx.x == 0) { *w_norm = TOut1(w_buffer[leading_block_in_tensor]); *d_norm = TOut2(d_buffer[leading_block_in_tensor]); } } } CudaKernel::CudaAsyncBuffer<LambMultiTensorSyncRangeAndLock> compute_tensor_range_and_lock(ChunkGroup<4> chunk_group, const CudaKernel& kernel) { const int num_blocks = chunk_group.chunk_count; // sync_range_and_lock is a struct consisting of (start_block, num_blocks, lock) for each tensor // Note: Adding such info to chunk group causes overflow (unless max tensors is reduced) const int max_tensors = ChunkGroup<4>::max_tensor_group_count; LambMultiTensorSyncRangeAndLock initial = {0, 0, 0}; CudaKernel::CudaAsyncBuffer<LambMultiTensorSyncRangeAndLock> sync_range_and_lock(&kernel, initial, max_tensors); for (int block_index = num_blocks - 1; block_index >= 0; block_index--) { int tensor_index = chunk_group.block_index_to_tensor_group_index[block_index]; auto& tensor_block_span = sync_range_and_lock.CpuPtr()[tensor_index]; tensor_block_span.leading_block = block_index; tensor_block_span.number_blocks++; } sync_range_and_lock.CopyToGpu(); return sync_range_and_lock; } template <typename TIn1, typename TIn2, typename TOut1, typename TOut2, typename TBuf> void LambMultiTensorReductionFunctor<TIn1, TIn2, TOut1, TOut2, TBuf>::operator()(hipStream_t stream, ChunkGroup<4> chunk_group, const CudaKernel& kernel, void* reduction_buffer, size_t reduction_buffer_size) { // thread count per block. constexpr int thread_count = ChunkGroup<4>::thread_count_per_block; // shared memory's size per block. 
const int shared_memory_size = thread_count / GPU_WARP_SIZE * 2 * sizeof(TBuf); // Enforce assumptions used inside this reduction CUDA kernel. ORT_ENFORCE(thread_count % GPU_WARP_SIZE == 0); ORT_ENFORCE((thread_count & (thread_count - 1)) == 0); const int num_blocks = chunk_group.chunk_count; const size_t w_buffer_size = num_blocks * sizeof(TOut1); const size_t d_buffer_size = num_blocks * sizeof(TOut2); ORT_ENFORCE(w_buffer_size + d_buffer_size <= reduction_buffer_size); TOut1* w_buffer = reinterpret_cast<TOut1*>(reduction_buffer); TOut2* d_buffer = reinterpret_cast<TOut2*>(w_buffer + num_blocks); auto sync_range_and_lock = compute_tensor_range_and_lock(chunk_group, kernel); hipLaunchKernelGGL(( LambMultiTensorReductionImpl<TIn1, TIn2, TOut1, TOut2, TBuf>), dim3(chunk_group.chunk_count), dim3(thread_count), shared_memory_size, stream, chunk_group, w_buffer, d_buffer, sync_range_and_lock.GpuPtr()); } #define INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(TIn1, TIn2, TOut1, TOut2, TBuf) \ template void LambMultiTensorReductionFunctor<TIn1, TIn2, TOut1, TOut2, TBuf>::operator()(hipStream_t stream, ChunkGroup<4> chunk_group, const CudaKernel& kernel, void* reduction_buffer, size_t reduction_buffer_size); INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, float, float, float, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(double, double, double, double, double) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, half, float, half, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, half, float, float, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(half, half, half, half, float) #if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, nv_bfloat16, float, nv_bfloat16, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, nv_bfloat16, float, float, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(nv_bfloat16, nv_bfloat16, nv_bfloat16, nv_bfloat16, float) #endif } // namespace cuda } // namespace onnxruntime
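The kernels in this row spread the LAMB arithmetic across several templated device functions. To make the math easier to follow, here is a scalar host-side sketch of the same two stages (direction computation, then the trust-ratio update). The names lamb_direction, lamb_update and LambScalarState are illustrative; the sketch also omits the loss-scale division of the gradient, the finiteness guards, and the mixed-precision weight copies that the real kernels perform.

#include <algorithm>
#include <cmath>

// Scalar sketch of the per-element LAMB math implemented by the kernels above.
struct LambScalarState {
  float m1;  // exponentially averaged gradient
  float m2;  // exponentially averaged squared gradient
};

// Stage 1: update the moments and form the regularized update direction d.
float lamb_direction(float w, float g, LambScalarState& s,
                     float alpha, float beta, float lambda, float epsilon,
                     float alpha_correction, float beta_correction) {
  s.m1 = alpha * s.m1 + (1.f - alpha) * g;
  s.m2 = beta * s.m2 + (1.f - beta) * g * g;
  const float m1_hat = s.m1 / alpha_correction;  // alpha_correction is usually 1 - alpha^t
  const float m2_hat = s.m2 / beta_correction;   // beta_correction is usually 1 - beta^t
  return lambda * w + m1_hat / (std::sqrt(m2_hat) + epsilon);
}

// Stage 2: scale d by the clamped trust ratio and step the weight.
float lamb_update(float w, float d, float eta,
                  float w_norm, float r_norm,
                  float ratio_min, float ratio_max) {
  const float ratio = (w_norm != 0.f && r_norm != 0.f)
      ? eta * std::max(ratio_min, std::min(ratio_max, std::sqrt(w_norm / r_norm)))
      : eta;
  return w - ratio * d;  // the kernel stores delta = -ratio * d and writes w + delta
}

In the multi-tensor variants the same rules are applied chunk by chunk, with w_norm and r_norm produced per tensor by the reduction kernel further down.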
4890cbbbe568cadef32cb44dd1601807af5dfc87.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/cuda_allocator.h" #include "core/providers/cuda/cuda_common.h" #include "core/providers/cuda/atomic/common.cuh" #include "core/providers/cuda/reduction/reduction_utils.cuh" #include "orttraining/training_ops/cuda/math/isfinite.cuh" #include "orttraining/training_ops/cuda/optimizer/common.h" #include "orttraining/training_ops/cuda/optimizer/common.cuh" #include "orttraining/training_ops/cuda/optimizer/lamb.h" namespace onnxruntime { namespace cuda { template <typename T1, typename T2, typename T3> __device__ __forceinline__ void _LambComputeDirectionRule( const T1& g_scale, const T1& w, const T2& g, const T3& m1, const T3& m2, const float& alpha, const float& beta, const float& lambda, const float& epsilon, const float& alpha_correction, const float& beta_correction, T2& d, T3& m1_new, T3& m2_new) { // Actual gradient. The scale is a product of loss' scale and // global gradient norm (if the norm > 1). const T1 g_unscaled = T1(g) / g_scale; // A constant in Lamb's equation. const T1 one = T1(1.0f); // Update exponentially-averaged historical gradient const T1 m1_new_tmp = alpha * static_cast<T1>(m1) + (one - alpha) * g_unscaled; // Update exponentially-averaged historical squared gradient const T1 m2_new_tmp = beta * static_cast<T1>(m2) + (one - beta) * g_unscaled * g_unscaled; // Compute unbiased 1st-order momentom. // The value alpha_correction is usually (1-alpha^t), // where t is the number of executed training iterations. const T1 m1_new_tmp_corrected = m1_new_tmp / alpha_correction; // Compute unbiased 2nd-order momentom. // The value beta_correction is usually (1-beta^t), // where t is the number of executed training iterations. const T1 m2_new_tmp_corrected = m2_new_tmp / beta_correction; // Save regularized update direction to output. const T1 d_tmp = lambda * w + m1_new_tmp_corrected / (_Sqrt(m2_new_tmp_corrected) + epsilon); // Things are updated only if the direction is finite. 
if (_IsFiniteScalar(d_tmp)) { d = d_tmp; m1_new = m1_new_tmp; m2_new = m2_new_tmp; } else { d = T2(0); m1_new = m1; m2_new = m2; } } template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> __global__ void _LambComputeDirectionImpl( const T1* weights, const T2* grads, const T3* moment_1, const T3* moment_2, const T1* loss_scale, const T_GRAD_NORM* g_norm, float alpha, float beta, float lambda, float epsilon, float max_norm, float alpha_correction, float beta_correction, T2* update_direction, T3* moment_1_out, T3* moment_2_out, CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); const T1 scale = _ComputeGradScale<T1, T_GRAD_NORM, T1>(loss_scale, g_norm, max_norm); _LambComputeDirectionRule( scale, weights[id], grads[id], moment_1[id], moment_2[id], alpha, beta, lambda, epsilon, alpha_correction, beta_correction, update_direction[id], moment_1_out[id], moment_2_out[id]); } template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> void LambComputeDirection( cudaStream_t stream, const T1* weights, const T2* grads, const T3* moment_1, const T3* moment_2, const T1* loss_scale, const T_GRAD_NORM* grad_norm, float alpha, float beta, float lambda, float epsilon, float max_norm, float alpha_correction, float beta_correction, T2* update_direction, T3* moment_1_out, T3* moment_2_out, size_t count) { int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock)); CUDA_LONG N = static_cast<CUDA_LONG>(count); _LambComputeDirectionImpl<T1, T2, T3, T_GRAD_NORM><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( weights, grads, moment_1, moment_2, loss_scale, grad_norm, alpha, beta, lambda, epsilon, max_norm, alpha_correction, beta_correction, update_direction, moment_1_out, moment_2_out, N); } #define SPECIALIZED_LAMB_COMPUTE_DIRECTION(T1, T2, T3, T_GRAD_NORM) \ template void LambComputeDirection( \ cudaStream_t stream, \ const T1* weights, \ const T2* grads, \ const T3* moment_1, \ const T3* moment_2, \ const T1* loss_scale, \ const T_GRAD_NORM* grad_norm, \ float alpha, \ float beta, \ float lambda, \ float epsilon, \ float max_norm, \ float alpha_correction, \ float beta_correction, \ T2* weights_out, \ T3* moment_1_out, \ T3* moment_2_out, \ size_t count); SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, float, float, float) SPECIALIZED_LAMB_COMPUTE_DIRECTION(double, double, double, double) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, half, half) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, half, float) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, float, half) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, float, float) #if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, nv_bfloat16, nv_bfloat16) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, nv_bfloat16, float) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, float, nv_bfloat16) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, float, float) #endif template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> __device__ __forceinline__ void _LambUpdateRule( const T1 eta, const float ratio_min, const float ratio_max, const T2 r_norm, const T2 w_norm, const T2 w, const T3 d, T2* w_new, T3* g_new, T_MIXED_PRECISION_FP* w_mixed_precision_new) { // Confidence coefficeint of this update. const T2 ratio = (w_norm != T2(0.0f) && r_norm != T2(0.0f)) ? 
T2(eta) * _Max(T2(ratio_min), _Min(T2(ratio_max), _Sqrt(w_norm / r_norm))) : T2(eta); // Compute delta using the saved update direction. const T2 delta = -ratio * T2(d); const T2 w_new_tmp = w + delta; if (_IsFiniteScalar(w_new_tmp)) { if (g_new) { *g_new = T3(delta); } if (w_new) { *w_new = w_new_tmp; if (w_mixed_precision_new) { *w_mixed_precision_new = T_MIXED_PRECISION_FP(w_new_tmp); } } } else { if (g_new) { *g_new = T3(0); } if (w_new) { *w_new = w; if (w_mixed_precision_new) { *w_mixed_precision_new = T_MIXED_PRECISION_FP(w); } } } } template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> __global__ void _LambUpdateImpl( const T1* eta, const float ratio_min, const float ratio_max, const T2* r_norm, const T2* w_norm, const T2* weights, const T3* update_direction, T2* weights_out, T3* gradients_out, T_MIXED_PRECISION_FP* mixed_precision_weights_out, CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); _LambUpdateRule( *eta, ratio_min, ratio_max, *r_norm, *w_norm, weights[id], update_direction[id], weights_out != nullptr ? weights_out + id : nullptr, gradients_out != nullptr ? gradients_out + id : nullptr, mixed_precision_weights_out != nullptr ? mixed_precision_weights_out + id : nullptr); } template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> void LambUpdate( cudaStream_t stream, const T1* eta, const float ratio_min, const float ratio_max, const T2* r_norm, const T2* w_norm, const T2* weights, const T3* update_direction, T2* weights_out, T3* gradients_out, T_MIXED_PRECISION_FP* mixed_precision_weights_out, size_t count) { int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock)); CUDA_LONG N = static_cast<CUDA_LONG>(count); _LambUpdateImpl<T1, T2, T3, T_MIXED_PRECISION_FP><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( eta, ratio_min, ratio_max, r_norm, w_norm, weights, update_direction, weights_out, gradients_out, mixed_precision_weights_out, N); } #define INSTANTIATE_LAMB_UPDATE(T1, T2, T3, T_MIXED_PRECISION_FP) \ template void LambUpdate( \ cudaStream_t stream, \ const T1* eta, \ const float ratio_min, \ const float ratio_max, \ const T2* r_norm, \ const T2* w_norm, \ const T2* weights, \ const T3* update_direction, \ T2* weights_out, \ T3* gradients_out, \ T_MIXED_PRECISION_FP* mixed_precision_weights_out, \ size_t count); INSTANTIATE_LAMB_UPDATE(float, float, float, half) INSTANTIATE_LAMB_UPDATE(double, double, double, half) INSTANTIATE_LAMB_UPDATE(half, float, half, half) INSTANTIATE_LAMB_UPDATE(float, float, half, half) #if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_LAMB_UPDATE(float, float, float, nv_bfloat16) INSTANTIATE_LAMB_UPDATE(double, double, double, nv_bfloat16) INSTANTIATE_LAMB_UPDATE(nv_bfloat16, float, nv_bfloat16, nv_bfloat16) INSTANTIATE_LAMB_UPDATE(float, float, nv_bfloat16, nv_bfloat16) #endif template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> __global__ void LambMultiTensorComputeDirectionImpl( ChunkGroup<6> chunk_group, const T1* loss_scale, const T_GRAD_NORM* g_norm, const float lambda, const float alpha, const float beta, const float epsilon, const float max_norm, const float alpha_correction, const float beta_correction) { const int group_index = chunk_group.block_index_to_tensor_group_index[blockIdx.x]; const int tensor_size = chunk_group.tensor_sizes[group_index]; const int chunk_size = chunk_group.chunk_size; const int chunk_start = chunk_group.block_index_to_chunk_start_index[blockIdx.x]; 
const T1* w = reinterpret_cast<const T1*>(chunk_group.tensor_ptrs[0][group_index]) + chunk_start; T2* g = reinterpret_cast<T2*>(chunk_group.tensor_ptrs[1][group_index]) + chunk_start; const T3* m1 = reinterpret_cast<const T3*>(chunk_group.tensor_ptrs[2][group_index]) + chunk_start; const T3* m2 = reinterpret_cast<const T3*>(chunk_group.tensor_ptrs[3][group_index]) + chunk_start; T3* m1_new = reinterpret_cast<T3*>(chunk_group.tensor_ptrs[4][group_index]) + chunk_start; T3* m2_new = reinterpret_cast<T3*>(chunk_group.tensor_ptrs[5][group_index]) + chunk_start; const T1 scale = _ComputeGradScale<T1, T_GRAD_NORM, T1>(loss_scale, g_norm, max_norm); #pragma unroll for (int i = threadIdx.x; i < chunk_size && i + chunk_start < tensor_size; i += blockDim.x) { _LambComputeDirectionRule( scale, w[i], g[i], m1[i], m2[i], alpha, beta, lambda, epsilon, alpha_correction, beta_correction, g[i], m1_new[i], m2_new[i]); } } template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> void LambMultiTensorComputeDirectionFunctor<T1, T2, T3, T_GRAD_NORM>::operator()( cudaStream_t stream, ChunkGroup<6> chunk_group, const T1* loss_scale, const T_GRAD_NORM* g_norm, const float lambda, const float alpha, const float beta, const float epsilon, const float max_norm, const float alpha_correction, const float beta_correction) { const int thread_count = ChunkGroup<6>::thread_count_per_block; const int block_count = chunk_group.chunk_count; LambMultiTensorComputeDirectionImpl<T1, T2, T3><<<block_count, thread_count, 0, stream>>>( chunk_group, loss_scale, g_norm, lambda, alpha, beta, epsilon, max_norm, alpha_correction, beta_correction); } #define INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(T1, T2, T3, T_GRAD_NORM) \ template void LambMultiTensorComputeDirectionFunctor<T1, T2, T3, T_GRAD_NORM>::operator()( \ cudaStream_t stream, \ ChunkGroup<6> chunk_group, \ const T1* loss_scale, \ const T_GRAD_NORM* g_norm, \ const float lambda, \ const float alpha, \ const float beta, \ const float epsilon, \ const float max_norm, \ const float alpha_correction, \ const float beta_correction); INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, float, float, float) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(double, double, double, double) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, half, half) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, half, float) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, float, half) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, float, float) #if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, nv_bfloat16, nv_bfloat16) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, nv_bfloat16, float) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, float, nv_bfloat16) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, float, float) #endif template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> __global__ void LambMultiTensorUpdateImpl( ChunkGroup<7> chunk_group, const T1* eta, const float ratio_min, const float ratio_max) { const int group_index = chunk_group.block_index_to_tensor_group_index[blockIdx.x]; const int tensor_size = chunk_group.tensor_sizes[group_index]; const int chunk_size = chunk_group.chunk_size; const int chunk_start = chunk_group.block_index_to_chunk_start_index[blockIdx.x]; const T2* w_norm = reinterpret_cast<const T2*>(chunk_group.tensor_ptrs[0][group_index]); const T2* r_norm = 
reinterpret_cast<const T2*>(chunk_group.tensor_ptrs[1][group_index]); const T2* w = reinterpret_cast<const T2*>(chunk_group.tensor_ptrs[2][group_index]) + chunk_start; const T3* d = reinterpret_cast<const T3*>(chunk_group.tensor_ptrs[3][group_index]) + chunk_start; T2* w_new = chunk_group.tensor_ptrs[4][group_index] != nullptr ? reinterpret_cast<T2*>(chunk_group.tensor_ptrs[4][group_index]) + chunk_start : nullptr; T3* g_new = chunk_group.tensor_ptrs[5][group_index] != nullptr ? reinterpret_cast<T3*>(chunk_group.tensor_ptrs[5][group_index]) + chunk_start : nullptr; T_MIXED_PRECISION_FP* w_mixed_precision_new = chunk_group.tensor_ptrs[6][group_index] != nullptr ? reinterpret_cast<T_MIXED_PRECISION_FP*>(chunk_group.tensor_ptrs[6][group_index]) + chunk_start : nullptr; for (int i = threadIdx.x; i < chunk_size && i + chunk_start < tensor_size; i += blockDim.x) { _LambUpdateRule( *eta, ratio_min, ratio_max, *r_norm, *w_norm, w[i], d[i], w_new != nullptr ? w_new + i : nullptr, g_new != nullptr ? g_new + i : nullptr, w_mixed_precision_new != nullptr ? w_mixed_precision_new + i : nullptr); } } template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> void LambMultiTensorUpdateFunctor<T1, T2, T3, T_MIXED_PRECISION_FP>::operator()( cudaStream_t stream, ChunkGroup<7> chunk_group, const T1* eta, const float ratio_min, const float ratio_max) { const int thread_count = ChunkGroup<7>::thread_count_per_block; const int block_count = chunk_group.chunk_count; LambMultiTensorUpdateImpl<T1, T2, T3, T_MIXED_PRECISION_FP><<<block_count, thread_count, 0, stream>>>( chunk_group, eta, ratio_min, ratio_max); } #define INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(T1, T2, T3, T_MIXED_PRECISION_FP) \ template void LambMultiTensorUpdateFunctor<T1, T2, T3, T_MIXED_PRECISION_FP>::operator()( \ cudaStream_t stream, \ ChunkGroup<7> chunk_group, \ const T1* eta, \ const float ratio_min, \ const float ratio_max); INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, float, half) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(double, double, double, half) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(half, float, half, half) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, half, half) #if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, float, nv_bfloat16) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(double, double, double, nv_bfloat16) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(nv_bfloat16, float, nv_bfloat16, nv_bfloat16) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, nv_bfloat16, nv_bfloat16) #endif // w_buffer[i], d_buffer[i] is used to store the squared sum of all elements processed by the i-th block. 
// sync_range_and_lock is used for a well ordered reduction over blocks spanning the same tensor template <typename TIn1, typename TIn2, typename TOut1, typename TOut2, typename TBuf> __launch_bounds__(ChunkGroup<4>::thread_count_per_block) __global__ void LambMultiTensorReductionImpl( ChunkGroup<4> chunk_group, TOut1* w_buffer, TOut2* d_buffer, LambMultiTensorSyncRangeAndLock* sync_range_and_lock) { const int group_index = chunk_group.block_index_to_tensor_group_index[blockIdx.x]; const int tensor_size = chunk_group.tensor_sizes[group_index]; const int chunk_size = chunk_group.chunk_size; const int chunk_start = chunk_group.block_index_to_chunk_start_index[blockIdx.x]; const TIn1* w = reinterpret_cast<const TIn1*>(chunk_group.tensor_ptrs[0][group_index]) + chunk_start; const TIn2* d = reinterpret_cast<const TIn2*>(chunk_group.tensor_ptrs[1][group_index]) + chunk_start; TOut1* w_norm = reinterpret_cast<TOut1*>(chunk_group.tensor_ptrs[2][group_index]); TOut2* d_norm = reinterpret_cast<TOut2*>(chunk_group.tensor_ptrs[3][group_index]); TBuf d_sum = TBuf(0.f); TBuf w_sum = TBuf(0.f); constexpr int load_count_per_thread = 4; for (int i = threadIdx.x; i < chunk_size && i + chunk_start < tensor_size; i += blockDim.x * load_count_per_thread) { #pragma unroll for (int j = 0; j < load_count_per_thread; ++j) { const int index_in_chunk = i + j * blockDim.x; const int index_in_tensor = chunk_start + index_in_chunk; if (index_in_chunk < chunk_size && index_in_tensor < tensor_size) { const TBuf w_element = TBuf(w[index_in_chunk]); const TBuf d_element = TBuf(d[index_in_chunk]); w_sum += w_element * w_element; d_sum += d_element * d_element; } } } // Thread count in a block must be a multiple of GPU_WARP_SIZE. #pragma unroll for (int stride = GPU_WARP_SIZE / 2; stride > 0; stride /= 2) { w_sum += WARP_SHFL_DOWN(w_sum, stride); d_sum += WARP_SHFL_DOWN(d_sum, stride); } const int warp_count_in_block = blockDim.x / GPU_WARP_SIZE; const int lid = threadIdx.x % GPU_WARP_SIZE; const int wid = threadIdx.x / GPU_WARP_SIZE; // Shape is 2 x warp_count_in_block. 
extern __shared__ unsigned char shared_memory_[]; TBuf* shared_memory = reinterpret_cast<TBuf*>(shared_memory_); TBuf* w_shared_memory_ = shared_memory; TBuf* d_shared_memory_ = shared_memory + warp_count_in_block; if (lid == 0) { w_shared_memory_[wid] = w_sum; d_shared_memory_[wid] = d_sum; } __syncthreads(); #pragma unroll for (int stride = warp_count_in_block / 2; stride > 0; stride /= 2) { if (threadIdx.x < stride) { w_shared_memory_[threadIdx.x] += w_shared_memory_[threadIdx.x + stride]; d_shared_memory_[threadIdx.x] += d_shared_memory_[threadIdx.x + stride]; } __syncthreads(); } // ascertain the range of blocks with the associated tensor // note: if non-ordered reduction is OK, then atomicAdd over blocks could suffice const int leading_block_in_tensor = sync_range_and_lock[group_index].leading_block; const int num_blocks_in_tensor = sync_range_and_lock[group_index].number_blocks; if (num_blocks_in_tensor == 1) { if (threadIdx.x == 0) { *w_norm = TOut1(w_shared_memory_[0]); *d_norm = TOut2(d_shared_memory_[0]); } return; } if (threadIdx.x == 0) { w_buffer[blockIdx.x] = w_shared_memory_[0]; d_buffer[blockIdx.x] = d_shared_memory_[0]; } __threadfence(); __syncthreads(); // use lock to determine if this is last block for given tensor __shared__ bool is_last_block_done; if (threadIdx.x == 0) { int* p_lock = &sync_range_and_lock[group_index].completed_blocks; int counter = atomicAdd(p_lock, 1); is_last_block_done = (counter == num_blocks_in_tensor - 1); } __syncthreads(); // only last block to finish for associated tensor enters below if (is_last_block_done) { const int pow2_bound = least_pow2_bound(num_blocks_in_tensor); int blockid = leading_block_in_tensor + threadIdx.x; for (int stride = pow2_bound / 2; stride > 0; stride /= 2) { if (threadIdx.x < stride && threadIdx.x + stride < num_blocks_in_tensor) { w_buffer[blockid] += w_buffer[blockid + stride]; d_buffer[blockid] += d_buffer[blockid + stride]; } __syncthreads(); } if (threadIdx.x == 0) { *w_norm = TOut1(w_buffer[leading_block_in_tensor]); *d_norm = TOut2(d_buffer[leading_block_in_tensor]); } } } CudaKernel::CudaAsyncBuffer<LambMultiTensorSyncRangeAndLock> compute_tensor_range_and_lock(ChunkGroup<4> chunk_group, const CudaKernel& kernel) { const int num_blocks = chunk_group.chunk_count; // sync_range_and_lock is a struct consisting of (start_block, num_blocks, lock) for each tensor // Note: Adding such info to chunk group causes overflow (unless max tensors is reduced) const int max_tensors = ChunkGroup<4>::max_tensor_group_count; LambMultiTensorSyncRangeAndLock initial = {0, 0, 0}; CudaKernel::CudaAsyncBuffer<LambMultiTensorSyncRangeAndLock> sync_range_and_lock(&kernel, initial, max_tensors); for (int block_index = num_blocks - 1; block_index >= 0; block_index--) { int tensor_index = chunk_group.block_index_to_tensor_group_index[block_index]; auto& tensor_block_span = sync_range_and_lock.CpuPtr()[tensor_index]; tensor_block_span.leading_block = block_index; tensor_block_span.number_blocks++; } sync_range_and_lock.CopyToGpu(); return sync_range_and_lock; } template <typename TIn1, typename TIn2, typename TOut1, typename TOut2, typename TBuf> void LambMultiTensorReductionFunctor<TIn1, TIn2, TOut1, TOut2, TBuf>::operator()(cudaStream_t stream, ChunkGroup<4> chunk_group, const CudaKernel& kernel, void* reduction_buffer, size_t reduction_buffer_size) { // thread count per block. constexpr int thread_count = ChunkGroup<4>::thread_count_per_block; // shared memory's size per block. 
const int shared_memory_size = thread_count / GPU_WARP_SIZE * 2 * sizeof(TBuf); // Enforce assumptions used inside this reduction CUDA kernel. ORT_ENFORCE(thread_count % GPU_WARP_SIZE == 0); ORT_ENFORCE((thread_count & (thread_count - 1)) == 0); const int num_blocks = chunk_group.chunk_count; const size_t w_buffer_size = num_blocks * sizeof(TOut1); const size_t d_buffer_size = num_blocks * sizeof(TOut2); ORT_ENFORCE(w_buffer_size + d_buffer_size <= reduction_buffer_size); TOut1* w_buffer = reinterpret_cast<TOut1*>(reduction_buffer); TOut2* d_buffer = reinterpret_cast<TOut2*>(w_buffer + num_blocks); auto sync_range_and_lock = compute_tensor_range_and_lock(chunk_group, kernel); LambMultiTensorReductionImpl<TIn1, TIn2, TOut1, TOut2, TBuf><<<chunk_group.chunk_count, thread_count, shared_memory_size, stream>>>( chunk_group, w_buffer, d_buffer, sync_range_and_lock.GpuPtr()); } #define INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(TIn1, TIn2, TOut1, TOut2, TBuf) \ template void LambMultiTensorReductionFunctor<TIn1, TIn2, TOut1, TOut2, TBuf>::operator()(cudaStream_t stream, ChunkGroup<4> chunk_group, const CudaKernel& kernel, void* reduction_buffer, size_t reduction_buffer_size); INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, float, float, float, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(double, double, double, double, double) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, half, float, half, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, half, float, float, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(half, half, half, half, float) #if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, nv_bfloat16, float, nv_bfloat16, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, nv_bfloat16, float, float, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(nv_bfloat16, nv_bfloat16, nv_bfloat16, nv_bfloat16, float) #endif } // namespace cuda } // namespace onnxruntime
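LambMultiTensorReductionImpl combines a warp-level shuffle reduction (WARP_SHFL_DOWN) with a shared-memory pass over the per-warp partial sums, followed by a lock-ordered combination of per-block partials. For the intra-block part of that pattern, a minimal standalone sketch in plain CUDA is shown below; warpReduceSum and blockReduceSum are illustrative names, a 32-thread warp is assumed (the kernel above uses GPU_WARP_SIZE), and __shfl_down_sync stands in for the onnxruntime WARP_SHFL_DOWN wrapper. The sketch assumes blockDim.x is a multiple of 32 and at most 1024.

#include <cuda_runtime.h>

// Warp-level reduction: each step folds the value held by the lane `offset`
// positions higher into the current lane.
__inline__ __device__ float warpReduceSum(float v) {
  for (int offset = 16; offset > 0; offset >>= 1)
    v += __shfl_down_sync(0xffffffffu, v, offset);
  return v;
}

// Block-level reduction: one partial per warp goes through shared memory,
// then the first warp reduces those partials. Result is valid in thread 0.
__inline__ __device__ float blockReduceSum(float v) {
  __shared__ float warp_sums[32];          // enough slots for 1024 / 32 warps
  const int lane = threadIdx.x % 32;
  const int warp = threadIdx.x / 32;

  v = warpReduceSum(v);                    // reduce within each warp
  if (lane == 0) warp_sums[warp] = v;      // lane 0 publishes the warp total
  __syncthreads();

  const int num_warps = blockDim.x / 32;
  v = (threadIdx.x < num_warps) ? warp_sums[lane] : 0.f;
  if (warp == 0) v = warpReduceSum(v);     // first warp folds the warp totals
  return v;
}

The inter-block part of the kernel above goes further: each block writes its partial to w_buffer/d_buffer, an atomicAdd on a per-tensor counter identifies the last block to finish, and that block folds the per-block partials in a fixed order so the reduction stays well ordered.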
68dc8f5006870ef3b4191cdb7de6894b1a86c911.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "nids.h" //CKJUNG, 19.03.22 NIDS functions __device__ int lookup2D(int* trie, int col, int row) { if(row == -1) row = 0; return trie[row*MAXC + col]; } __device__ int lookup1D(int* arr, int point) { if(point == -1) point = 0; return arr[point]; } __global__ void testtt(void) { START_YLW printf("_______________[%s] HELLO!!!__________________\n", __FUNCTION__); END } // CKJUNG, 19.01.30 [NF#3:NIDS]------------------------------------- __global__ void nids(struct mempool** mempool, uint32_t* pkt_cnt, int** d_dstTrie, int** d_dstFailure, int** d_dstOutput, struct portGroup *d_pg) { int i; __shared__ unsigned char xlatcase[256]; int tid = blockDim.x * blockIdx.x + threadIdx.x; int pktid = tid / THD_PER_PKT; int dataid = tid % THD_PER_PKT; struct pkt_buf* buf = NULL; __shared__ struct pkt_buf* buf_pool[512]; struct mempool* mini_mempool = NULL; if(pktid < 512) mini_mempool = mempool[pktid]; if(threadIdx.x == 0){ for(i = 0; i < 256; i++) xlatcase[i] = (unsigned char)TOUPPER(i); // Init xlatcase : Convert Lower to Upper for(int i = 0; i < 512; i++) buf_pool[i] = NULL; } #if 0 if(threadIdx.x == NF_T_NUM - 1){ START_RED printf("[%s] threadIdx.x %d is alive!\n", __FUNCTION__, threadIdx.x); END } #endif while(true) { //Persistent Kernel __syncthreads(); if(pktid < 512){ if(dataid == 0) buf_pool[pktid] = pkt_buf_extract(mini_mempool, 1); __syncthreads(); buf = buf_pool[pktid]; if(buf != NULL){ #if 1 struct udphdr* udph = (struct udphdr *)(buf->data + sizeof(struct ethhdr) + sizeof(struct iphdr)); // Extract "portNUM" & "Length of payload" int dst_port = NTOHS(udph->dest); //int payload_len = (int)NTOHS(udph->len) - sizeof(struct udphdr); int payload_len = PKT_DATA_SIZE - 42; unsigned char* payload = buf->data + sizeof(struct ethhdr) + sizeof(struct iphdr) + sizeof(struct udphdr) + dataid*DATA_PER_THD; int y, r, s, cnt = 0; r = 0; int ret = 0; int curPoint = sizeof(struct ethhdr) + sizeof(struct iphdr) + sizeof(struct udphdr) + dataid*DATA_PER_THD; int tmp = NTOHS(udph->len); if(d_pg->dstPortMap[dst_port] == NULL){ }else{ int *tmp_trie = d_dstTrie[dst_port]; int *tmp_failure = d_dstFailure[dst_port]; int *tmp_output = d_dstOutput[dst_port]; while(y + curPoint < PKT_SIZE){ if(payload[y]>='a' && payload[y]<='z') // Convert Lower to Upper payload[y] = xlatcase[payload[y]]; // string matching with Trie and Failure link while((s = lookup2D(tmp_trie, payload[y], r)) == -1){ r = lookup1D(tmp_failure, r); } if(s == 0){ if(y >= payload_len/THD_PER_PKT) break; } r = s; ret = lookup1D(tmp_output, r); cnt += ret; y++; } } //memset(payload, 0, 10); //memcpy(payload + 2, &dst_port, sizeof(int)); //memset(payload + 6, 0, 4); #endif __syncthreads(); if(dataid == 0){ buf->app_idx = 2; buf = NULL; buf_pool[pktid] = NULL; } } } } } // ~CKJUNG, ---------------------------------------------------------- extern "C" void initialize_nids(struct mempool** mempool, uint32_t *pkt_cnt) { #if 1 // CKJUNG, 19.03.22 [NF #3: NIDS] Setting DST TRIEs, Failures, Outputs ///////////////////////// char buf[30]; char *tok; int portNum; int i, j; queue<int> q; printf("____[Initialize]__NF #3__NIDS__\n"); // DRAM : TRIEs, Failures, Outputs struct portGroup pg; memset(&pg, 0, sizeof(struct portGroup)); pg.dstTrie = (int**)calloc(sizeof(int*),MAX_PORTS); pg.dstFailure = (int**)calloc(sizeof(int*),MAX_PORTS); pg.dstOutput = (int**)calloc(sizeof(int*),MAX_PORTS); // GDDR : TRIEs, Failures, Outputs struct portGroup *d_pg; 
ASSERTRT(hipMalloc((void**)&d_pg, sizeof(struct portGroup))); ASSERTRT(hipMemset(d_pg, 0, sizeof(struct portGroup))); // [TODO] 19.03.22. How to access "Double pointer in struct which is in GPU?" int **d_dstTrie; int **d_dstFailure; int **d_dstOutput; ASSERTRT(hipMalloc((void**)&d_dstTrie, sizeof(int*)*MAX_PORTS)); ASSERTRT(hipMalloc((void**)&d_dstFailure, sizeof(int*)*MAX_PORTS)); ASSERTRT(hipMalloc((void**)&d_dstOutput, sizeof(int*)*MAX_PORTS)); FILE* fp = fopen("./apps/lib/ck_dst_trie.txt","r"); while((fgets(buf, LINE_LENGTH, fp)) != NULL) { if(!strcmp(buf, " ")||!strcmp(buf, "\n")) continue; // CKJUNG, For port Num tok = strtok(buf, " "); if(!strcmp(tok, "dst")){ portNum = atoi(strtok(NULL, " ")); }else if(!strcmp(tok, "src")){ portNum = atoi(strtok(NULL, " ")); }else{ // Gen or After portNum int Depth = atoi(buf); if(Depth == 0) // If meaningless then continue,, continue; // CKJUNG, Initialize Array int arr[Depth][MAXC]; for(i = 0; i < Depth; i++) for(j = 0; j < MAXC; j++) arr[i][j] = -1; // ~CKJUNG pg.dstOutput[portNum] = (int*)malloc(sizeof(int)*(Depth+1)); for(i = 0; i < Depth+1; i++) (pg.dstOutput[portNum])[i] = 0; // CKJUNG, Fill the Array int prev = -1; int ptnLen = 0; int numPtn = 1; for(i = 0; i < Depth; i++) { int stateNum; int row, col; fgets(buf, LINE_LENGTH, fp); //printf("buf: %s\n", buf); stateNum = atoi(strtok(buf, ":")); tok = strtok(NULL, ":"); row = atoi(strtok(tok, " ")); if(prev > row){ ptnLen = 1; numPtn++; pg.dstOutput[portNum][stateNum-1] = 1; // Filling Output vector 1. }else if(i == Depth-1){ ptnLen++; pg.dstOutput[portNum][stateNum] = 1; // Filling Output vector 2. }else{ ptnLen++; } prev = row; col = atoi(strtok(NULL, " ")); arr[row][col] = stateNum; } // 1st Row should be filled by "zeroes". // Because they are root nodes for(i = 0; i < MAXC; i++) if(arr[0][i] == -1) arr[0][i] = 0; //CKJUNG, [TODO, 19.02.18 16:43] Making failure State // Initialize Failure link as -1 int oo; pg.dstFailure[portNum] = (int*)malloc(sizeof(int)*(Depth+1)); for(oo = 0; oo < Depth+1; oo++) (pg.dstFailure[portNum])[oo] = -1; // Initiailize Failure link of root node as 0(root) int ch; for(ch = 0; ch < MAXC; ch++) { // If root node has some child nodes if(arr[0][ch] != 0) { (pg.dstFailure[portNum])[arr[0][ch]] = 0; q.push(arr[0][ch]); } } while(q.size()) { int state = q.front(); if(state >= Depth) break; q.pop(); for(ch = 0; ch < MAXC; ch++) { if(arr[state][ch] != -1) { int failure = (pg.dstFailure[portNum])[state]; while(arr[failure][ch] == -1) failure = (pg.dstFailure[portNum])[failure]; failure = arr[failure][ch]; (pg.dstFailure[portNum])[arr[state][ch]] = failure; (pg.dstOutput[portNum])[arr[state][ch]] += (pg.dstOutput[portNum])[failure]; q.push(arr[state][ch]); } } } //[THINK] Every time we malloc here, we get NEW ADDRESS for each TRIE, CKJUNG pg.dstTrie[portNum] = (int*)malloc(sizeof(int)*Depth*MAXC); for(i = 0; i < Depth; i++) for(j = 0; j < MAXC; j++) pg.dstTrie[portNum][i*MAXC+j] = arr[i][j]; //[THINK] We SHOULDN'T FREE "brr" until the end of the program!!, CKJUNG pg.dstPortMap[portNum] = 1; // Set portMap pg.dstTrieDepth[portNum] = Depth; } // We've read all } ///////////////////////////// CKJUNG, Copy Tries to GPU /////////////////////////////////////// int *tmp_trie[MAX_PORTS]; int *tmp_failure[MAX_PORTS]; int *tmp_output[MAX_PORTS]; for(i = 0; i < MAX_PORTS; i++){ if(pg.dstPortMap[i] == 1){ // If "this port" has a TRIE, // CKJUNG, hipMemcpy "PortMap" & "Depth" for dst ASSERTRT(hipMemcpy(&(d_pg->dstPortMap[i]),&(pg.dstPortMap[i]), sizeof(int), hipMemcpyHostToDevice)); 
ASSERTRT(hipMemcpy(&(d_pg->dstTrieDepth[i]),&(pg.dstTrieDepth[i]), sizeof(int), hipMemcpyHostToDevice)); // CKJUNG, hipMalloc "Trie" & "Failure" & "Output" for GDDR ASSERTRT(hipMalloc((void**)&tmp_trie[i], (pg.dstTrieDepth[i])*MAXC*sizeof(int))); ASSERTRT(hipMalloc((void**)&tmp_failure[i], (pg.dstTrieDepth[i]+1)*sizeof(int))); ASSERTRT(hipMalloc((void**)&tmp_output[i], (pg.dstTrieDepth[i]+1)*sizeof(int))); // CKJUNG, hipMemcpy "Trie" & "Failure" & Output" to GDDR ASSERTRT(hipMemcpy(tmp_trie[i], pg.dstTrie[i], (pg.dstTrieDepth[i])*MAXC*sizeof(int), hipMemcpyHostToDevice)); ASSERTRT(hipMemcpy(tmp_failure[i], pg.dstFailure[i], (pg.dstTrieDepth[i]+1)*sizeof(int), hipMemcpyHostToDevice)); ASSERTRT(hipMemcpy(tmp_output[i], pg.dstOutput[i], (pg.dstTrieDepth[i]+1)*sizeof(int), hipMemcpyHostToDevice)); } } ASSERTRT(hipMemcpy(d_dstTrie, tmp_trie, sizeof(int*)*MAX_PORTS, hipMemcpyHostToDevice)); ASSERTRT(hipMemcpy(d_dstFailure, tmp_failure, sizeof(int*)*MAX_PORTS, hipMemcpyHostToDevice)); ASSERTRT(hipMemcpy(d_dstOutput, tmp_output, sizeof(int*)*MAX_PORTS, hipMemcpyHostToDevice)); hipStream_t cuda_stream4; ASSERT_CUDA(hipStreamCreateWithFlags(&cuda_stream4,hipStreamNonBlocking)); START_BLU printf("[NIDS] # of Thread Blocks : %d, # of Threads : %d\n", NF_TB_NUM, NF_T_NUM); END hipLaunchKernelGGL(( nids), dim3(NF_TB_NUM), dim3(NF_T_NUM), 0, cuda_stream4 , mempool, pkt_cnt, d_dstTrie, d_dstFailure, d_dstOutput, d_pg); START_GRN printf("[Done]____[Initialize]__NF #3__NIDS__\n"); printf("[NIDS] %s\n", hipGetErrorName(hipGetLastError())); END // ~ CKJUNG ///////////////////////////////////////////////////////////////////////////// #endif }
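// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the nids() kernel walks
// an Aho-Corasick automaton stored as a flattened goto table (one row of MAXC
// columns per state) plus a failure array, with state -1 standing in for the
// root. The host-side loop below reproduces that matching rule; MAXC_SKETCH and
// the table pointers are illustrative assumptions, and it presumes row 0 has
// been back-filled with 0 (root self-loops), as initialize_nids() does.
static const int MAXC_SKETCH = 256;

static int goto_lookup_sketch(const int* trie, int state, unsigned char c) {
  if (state == -1) state = 0;               // -1 behaves like the root, as in lookup2D()
  return trie[state * MAXC_SKETCH + c];
}

static int count_matches_sketch(const int* trie, const int* failure, const int* output,
                                const unsigned char* text, int len) {
  int hits = 0;
  int r = 0;
  for (int y = 0; y < len; ++y) {
    int s;
    while ((s = goto_lookup_sketch(trie, r, text[y])) == -1) {
      r = failure[(r == -1) ? 0 : r];       // follow failure links until a goto edge exists
    }
    r = s;
    hits += output[r];                      // output[] counts patterns ending at this state
  }
  return hits;
}
// ----------------------------------------------------------------------------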
68dc8f5006870ef3b4191cdb7de6894b1a86c911.cu
#include "nids.h" //CKJUNG, 19.03.22 NIDS functions __device__ int lookup2D(int* trie, int col, int row) { if(row == -1) row = 0; return trie[row*MAXC + col]; } __device__ int lookup1D(int* arr, int point) { if(point == -1) point = 0; return arr[point]; } __global__ void testtt(void) { START_YLW printf("_______________[%s] HELLO!!!__________________\n", __FUNCTION__); END } // CKJUNG, 19.01.30 [NF#3:NIDS]------------------------------------- __global__ void nids(struct mempool** mempool, uint32_t* pkt_cnt, int** d_dstTrie, int** d_dstFailure, int** d_dstOutput, struct portGroup *d_pg) { int i; __shared__ unsigned char xlatcase[256]; int tid = blockDim.x * blockIdx.x + threadIdx.x; int pktid = tid / THD_PER_PKT; int dataid = tid % THD_PER_PKT; struct pkt_buf* buf = NULL; __shared__ struct pkt_buf* buf_pool[512]; struct mempool* mini_mempool = NULL; if(pktid < 512) mini_mempool = mempool[pktid]; if(threadIdx.x == 0){ for(i = 0; i < 256; i++) xlatcase[i] = (unsigned char)TOUPPER(i); // Init xlatcase : Convert Lower to Upper for(int i = 0; i < 512; i++) buf_pool[i] = NULL; } #if 0 if(threadIdx.x == NF_T_NUM - 1){ START_RED printf("[%s] threadIdx.x %d is alive!\n", __FUNCTION__, threadIdx.x); END } #endif while(true) { //Persistent Kernel __syncthreads(); if(pktid < 512){ if(dataid == 0) buf_pool[pktid] = pkt_buf_extract(mini_mempool, 1); __syncthreads(); buf = buf_pool[pktid]; if(buf != NULL){ #if 1 struct udphdr* udph = (struct udphdr *)(buf->data + sizeof(struct ethhdr) + sizeof(struct iphdr)); // Extract "portNUM" & "Length of payload" int dst_port = NTOHS(udph->dest); //int payload_len = (int)NTOHS(udph->len) - sizeof(struct udphdr); int payload_len = PKT_DATA_SIZE - 42; unsigned char* payload = buf->data + sizeof(struct ethhdr) + sizeof(struct iphdr) + sizeof(struct udphdr) + dataid*DATA_PER_THD; int y, r, s, cnt = 0; r = 0; int ret = 0; int curPoint = sizeof(struct ethhdr) + sizeof(struct iphdr) + sizeof(struct udphdr) + dataid*DATA_PER_THD; int tmp = NTOHS(udph->len); if(d_pg->dstPortMap[dst_port] == NULL){ }else{ int *tmp_trie = d_dstTrie[dst_port]; int *tmp_failure = d_dstFailure[dst_port]; int *tmp_output = d_dstOutput[dst_port]; while(y + curPoint < PKT_SIZE){ if(payload[y]>='a' && payload[y]<='z') // Convert Lower to Upper payload[y] = xlatcase[payload[y]]; // string matching with Trie and Failure link while((s = lookup2D(tmp_trie, payload[y], r)) == -1){ r = lookup1D(tmp_failure, r); } if(s == 0){ if(y >= payload_len/THD_PER_PKT) break; } r = s; ret = lookup1D(tmp_output, r); cnt += ret; y++; } } //memset(payload, 0, 10); //memcpy(payload + 2, &dst_port, sizeof(int)); //memset(payload + 6, 0, 4); #endif __syncthreads(); if(dataid == 0){ buf->app_idx = 2; buf = NULL; buf_pool[pktid] = NULL; } } } } } // ~CKJUNG, ---------------------------------------------------------- extern "C" void initialize_nids(struct mempool** mempool, uint32_t *pkt_cnt) { #if 1 // CKJUNG, 19.03.22 [NF #3: NIDS] Setting DST TRIEs, Failures, Outputs ///////////////////////// char buf[30]; char *tok; int portNum; int i, j; queue<int> q; printf("____[Initialize]__NF #3__NIDS__\n"); // DRAM : TRIEs, Failures, Outputs struct portGroup pg; memset(&pg, 0, sizeof(struct portGroup)); pg.dstTrie = (int**)calloc(sizeof(int*),MAX_PORTS); pg.dstFailure = (int**)calloc(sizeof(int*),MAX_PORTS); pg.dstOutput = (int**)calloc(sizeof(int*),MAX_PORTS); // GDDR : TRIEs, Failures, Outputs struct portGroup *d_pg; ASSERTRT(cudaMalloc((void**)&d_pg, sizeof(struct portGroup))); ASSERTRT(cudaMemset(d_pg, 0, sizeof(struct 
portGroup))); // [TODO] 19.03.22. How to access "Double pointer in struct which is in GPU?" int **d_dstTrie; int **d_dstFailure; int **d_dstOutput; ASSERTRT(cudaMalloc((void**)&d_dstTrie, sizeof(int*)*MAX_PORTS)); ASSERTRT(cudaMalloc((void**)&d_dstFailure, sizeof(int*)*MAX_PORTS)); ASSERTRT(cudaMalloc((void**)&d_dstOutput, sizeof(int*)*MAX_PORTS)); FILE* fp = fopen("./apps/lib/ck_dst_trie.txt","r"); while((fgets(buf, LINE_LENGTH, fp)) != NULL) { if(!strcmp(buf, " ")||!strcmp(buf, "\n")) continue; // CKJUNG, For port Num tok = strtok(buf, " "); if(!strcmp(tok, "dst")){ portNum = atoi(strtok(NULL, " ")); }else if(!strcmp(tok, "src")){ portNum = atoi(strtok(NULL, " ")); }else{ // Gen or After portNum int Depth = atoi(buf); if(Depth == 0) // If meaningless then continue,, continue; // CKJUNG, Initialize Array int arr[Depth][MAXC]; for(i = 0; i < Depth; i++) for(j = 0; j < MAXC; j++) arr[i][j] = -1; // ~CKJUNG pg.dstOutput[portNum] = (int*)malloc(sizeof(int)*(Depth+1)); for(i = 0; i < Depth+1; i++) (pg.dstOutput[portNum])[i] = 0; // CKJUNG, Fill the Array int prev = -1; int ptnLen = 0; int numPtn = 1; for(i = 0; i < Depth; i++) { int stateNum; int row, col; fgets(buf, LINE_LENGTH, fp); //printf("buf: %s\n", buf); stateNum = atoi(strtok(buf, ":")); tok = strtok(NULL, ":"); row = atoi(strtok(tok, " ")); if(prev > row){ ptnLen = 1; numPtn++; pg.dstOutput[portNum][stateNum-1] = 1; // Filling Output vector 1. }else if(i == Depth-1){ ptnLen++; pg.dstOutput[portNum][stateNum] = 1; // Filling Output vector 2. }else{ ptnLen++; } prev = row; col = atoi(strtok(NULL, " ")); arr[row][col] = stateNum; } // 1st Row should be filled by "zeroes". // Because they are root nodes for(i = 0; i < MAXC; i++) if(arr[0][i] == -1) arr[0][i] = 0; //CKJUNG, [TODO, 19.02.18 16:43] Making failure State // Initialize Failure link as -1 int oo; pg.dstFailure[portNum] = (int*)malloc(sizeof(int)*(Depth+1)); for(oo = 0; oo < Depth+1; oo++) (pg.dstFailure[portNum])[oo] = -1; // Initiailize Failure link of root node as 0(root) int ch; for(ch = 0; ch < MAXC; ch++) { // If root node has some child nodes if(arr[0][ch] != 0) { (pg.dstFailure[portNum])[arr[0][ch]] = 0; q.push(arr[0][ch]); } } while(q.size()) { int state = q.front(); if(state >= Depth) break; q.pop(); for(ch = 0; ch < MAXC; ch++) { if(arr[state][ch] != -1) { int failure = (pg.dstFailure[portNum])[state]; while(arr[failure][ch] == -1) failure = (pg.dstFailure[portNum])[failure]; failure = arr[failure][ch]; (pg.dstFailure[portNum])[arr[state][ch]] = failure; (pg.dstOutput[portNum])[arr[state][ch]] += (pg.dstOutput[portNum])[failure]; q.push(arr[state][ch]); } } } //[THINK] Every time we malloc here, we get NEW ADDRESS for each TRIE, CKJUNG pg.dstTrie[portNum] = (int*)malloc(sizeof(int)*Depth*MAXC); for(i = 0; i < Depth; i++) for(j = 0; j < MAXC; j++) pg.dstTrie[portNum][i*MAXC+j] = arr[i][j]; //[THINK] We SHOULDN'T FREE "brr" until the end of the program!!, CKJUNG pg.dstPortMap[portNum] = 1; // Set portMap pg.dstTrieDepth[portNum] = Depth; } // We've read all } ///////////////////////////// CKJUNG, Copy Tries to GPU /////////////////////////////////////// int *tmp_trie[MAX_PORTS]; int *tmp_failure[MAX_PORTS]; int *tmp_output[MAX_PORTS]; for(i = 0; i < MAX_PORTS; i++){ if(pg.dstPortMap[i] == 1){ // If "this port" has a TRIE, // CKJUNG, cudaMemcpy "PortMap" & "Depth" for dst ASSERTRT(cudaMemcpy(&(d_pg->dstPortMap[i]),&(pg.dstPortMap[i]), sizeof(int), cudaMemcpyHostToDevice)); ASSERTRT(cudaMemcpy(&(d_pg->dstTrieDepth[i]),&(pg.dstTrieDepth[i]), sizeof(int), 
cudaMemcpyHostToDevice)); // CKJUNG, cudaMalloc "Trie" & "Failure" & "Output" for GDDR ASSERTRT(cudaMalloc((void**)&tmp_trie[i], (pg.dstTrieDepth[i])*MAXC*sizeof(int))); ASSERTRT(cudaMalloc((void**)&tmp_failure[i], (pg.dstTrieDepth[i]+1)*sizeof(int))); ASSERTRT(cudaMalloc((void**)&tmp_output[i], (pg.dstTrieDepth[i]+1)*sizeof(int))); // CKJUNG, cudaMemcpy "Trie" & "Failure" & Output" to GDDR ASSERTRT(cudaMemcpy(tmp_trie[i], pg.dstTrie[i], (pg.dstTrieDepth[i])*MAXC*sizeof(int), cudaMemcpyHostToDevice)); ASSERTRT(cudaMemcpy(tmp_failure[i], pg.dstFailure[i], (pg.dstTrieDepth[i]+1)*sizeof(int), cudaMemcpyHostToDevice)); ASSERTRT(cudaMemcpy(tmp_output[i], pg.dstOutput[i], (pg.dstTrieDepth[i]+1)*sizeof(int), cudaMemcpyHostToDevice)); } } ASSERTRT(cudaMemcpy(d_dstTrie, tmp_trie, sizeof(int*)*MAX_PORTS, cudaMemcpyHostToDevice)); ASSERTRT(cudaMemcpy(d_dstFailure, tmp_failure, sizeof(int*)*MAX_PORTS, cudaMemcpyHostToDevice)); ASSERTRT(cudaMemcpy(d_dstOutput, tmp_output, sizeof(int*)*MAX_PORTS, cudaMemcpyHostToDevice)); cudaStream_t cuda_stream4; ASSERT_CUDA(cudaStreamCreateWithFlags(&cuda_stream4,cudaStreamNonBlocking)); START_BLU printf("[NIDS] # of Thread Blocks : %d, # of Threads : %d\n", NF_TB_NUM, NF_T_NUM); END nids<<< NF_TB_NUM, NF_T_NUM, 0, cuda_stream4 >>> (mempool, pkt_cnt, d_dstTrie, d_dstFailure, d_dstOutput, d_pg); START_GRN printf("[Done]____[Initialize]__NF #3__NIDS__\n"); printf("[NIDS] %s\n", cudaGetErrorName(cudaGetLastError())); END // ~ CKJUNG ///////////////////////////////////////////////////////////////////////////// #endif }
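// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): initialize_nids() builds
// the failure links breadth-first - every child of the root fails back to the
// root, and a deeper state fails to wherever its parent's failure state goes on
// the same character. The compact host sketch below shows that construction for
// a goto table with the same row-per-state layout; the names and std::vector
// containers are assumptions made only for illustration.
#include <queue>
#include <vector>

static void build_failure_links_sketch(std::vector<int>& trie,   // num_states rows of maxc columns, -1 = no edge
                                       std::vector<int>& fail,   // one entry per state
                                       int maxc) {
  std::queue<int> q;
  for (int c = 0; c < maxc; ++c) {
    int s = trie[c];                         // row 0 is the root
    if (s > 0) { fail[s] = 0; q.push(s); }   // depth-1 states fail to the root
    else trie[c] = 0;                        // back-fill missing root edges with root self-loops
  }
  while (!q.empty()) {
    int u = q.front(); q.pop();
    for (int c = 0; c < maxc; ++c) {
      int v = trie[u * maxc + c];
      if (v == -1) continue;
      int f = fail[u];
      while (trie[f * maxc + c] == -1) f = fail[f];  // terminates: the root row has no -1 entries
      fail[v] = trie[f * maxc + c];
      q.push(v);
    }
  }
}
// ----------------------------------------------------------------------------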
4b29663e246ac82d74cb6360e9897a201b4b2460.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ------------------------------------------------------------- // CUDPP -- CUDA Data Parallel Primitives library // ------------------------------------------------------------- // $Revision$ // $Date$ // ------------------------------------------------------------- // This source code is distributed under the terms of license.txt // in the root directory of this source distribution. // ------------------------------------------------------------- #include "cudpp_globals.h" #include "cudpp_radixsort.h" #include "scan_cta.cu" #include "cudpp.h" #include <stdio.h> #include "cudpp_util.h" #include <math.h> #include "sharedmem.h" #ifdef __DEVICE_EMULATION__ #define __EMUSYNC __syncthreads() #else #define __EMUSYNC #endif /** * @file * sort_cta.cu * * @brief CUDPP CTA-level sort routines */ /** \addtogroup cudpp_cta * @{ */ /** @name Radix Sort Functions * @{ */ typedef unsigned int uint; /** * @brief Flips bits of single-precision floating-point number (parameterized by doFlip) * * flip a float for sorting * finds SIGN of fp number. * if it's 1 (negative float), it flips all bits * if it's 0 (positive float), it flips the sign only * @param[in] f floating-point input (passed as unsigned int) * @see floatUnflip **/ template <bool doFlip> __device__ uint floatFlip(uint f) { if (doFlip) { uint mask = -int(f >> 31) | 0x80000000; return f ^ mask; } else return f; } /** * @brief Reverses bit-flip of single-precision floating-point number (parameterized by doFlip) * * flip a float back (invert FloatFlip) * signed was flipped from above, so: * if sign is 1 (negative), it flips the sign bit back * if sign is 0 (positive), it flips all bits back * @param[in] f floating-point input (passed as unsigned int) * @see floatFlip **/ template <bool doFlip> __device__ uint floatUnflip(uint f) { if (doFlip) { uint mask = ((f >> 31) - 1) | 0x80000000; return f ^ mask; } else return f; } /** * @brief Scans one warp quickly, optimized for 32-element warps, using shared memory * * Scans each warp in parallel ("warp-scan"), one element per thread. 
* uses 2 numElements of shared memory per thread (64 numElements per warp) * * @param[in] val Elements per thread to scan * @param[in,out] sData **/ template<class T, int maxlevel> __device__ T scanwarp(T val, volatile T* sData) { // The following is the same as 2 * WARP_SIZE * warpId + threadInWarp = // 64*(threadIdx.x >> 5) + (threadIdx.x & (WARP_SIZE - 1)) int idx = 2 * threadIdx.x - (threadIdx.x & (WARP_SIZE - 1)); sData[idx] = 0; idx += WARP_SIZE; T t = sData[idx] = val; __EMUSYNC; #ifdef __DEVICE_EMULATION__ t = sData[idx - 1]; __EMUSYNC; sData[idx] += t; __EMUSYNC; t = sData[idx - 2]; __EMUSYNC; sData[idx] += t; __EMUSYNC; t = sData[idx - 4]; __EMUSYNC; sData[idx] += t; __EMUSYNC; t = sData[idx - 8]; __EMUSYNC; sData[idx] += t; __EMUSYNC; t = sData[idx - 16]; __EMUSYNC; sData[idx] += t; __EMUSYNC; #else if (0 <= maxlevel) { sData[idx] = t = t + sData[idx - 1]; } __EMUSYNC; if (1 <= maxlevel) { sData[idx] = t = t + sData[idx - 2]; } __EMUSYNC; if (2 <= maxlevel) { sData[idx] = t = t + sData[idx - 4]; } __EMUSYNC; if (3 <= maxlevel) { sData[idx] = t = t + sData[idx - 8]; } __EMUSYNC; if (4 <= maxlevel) { sData[idx] = t = t + sData[idx -16]; } __EMUSYNC; #endif return sData[idx] - val; // convert inclusive -> exclusive } /** * @brief Scans 4*CTA_SIZE unsigned ints in a block * * scan4 scans 4*CTA_SIZE numElements in a block (4 per * thread), using a warp-scan algorithm * * @param[in] idata 4-vector of integers to scan **/ __device__ uint4 scan4(uint4 idata) { extern __shared__ uint ptr[]; uint idx = threadIdx.x; uint4 val4 = idata; uint sum[3]; sum[0] = val4.x; sum[1] = val4.y + sum[0]; sum[2] = val4.z + sum[1]; uint val = val4.w + sum[2]; val = scanwarp<uint, 4>(val, ptr); __syncthreads(); if ((idx & (WARP_SIZE - 1)) == WARP_SIZE - 1) { ptr[idx >> 5] = val + val4.w + sum[2]; } __syncthreads(); #ifndef __DEVICE_EMULATION__ if (idx < WARP_SIZE) #endif { ptr[idx] = scanwarp<uint, 2>(ptr[idx], ptr); } __syncthreads(); val += ptr[idx >> 5]; val4.x = val; val4.y = val + sum[0]; val4.z = val + sum[1]; val4.w = val + sum[2]; return val4; } /** * @brief Computes output position for each thread given predicate; trues come first then falses * * Rank is the core of the radix sort loop. Given a predicate, it * computes the output position for each thread in an ordering where all * True threads come first, followed by all False threads. * This version handles 4 predicates per thread; hence, "rank4". * * @param[in] preds true/false values for each of the 4 elements in this thread * * @todo is the description of "preds" correct? **/ template <int ctasize> __device__ uint4 rank4(uint4 preds) { uint4 address = scan4(preds); __shared__ uint numtrue; if (threadIdx.x == ctasize-1) { numtrue = address.w + preds.w; } __syncthreads(); uint4 rank; uint idx = threadIdx.x << 2; rank.x = (preds.x) ? address.x : numtrue + idx - address.x; rank.y = (preds.y) ? address.y : numtrue + idx + 1 - address.y; rank.z = (preds.z) ? address.z : numtrue + idx + 2 - address.z; rank.w = (preds.w) ? 
address.w : numtrue + idx + 3 - address.w; return rank; } /** * @brief Sorts one block * * Uses rank to sort one bit at a time: Sorts a block according * to bits startbit -> nbits + startbit * @param[in,out] key * @param[in,out] value **/ template<uint nbits, uint startbit> __device__ void radixSortBlock(uint4 &key, uint4 &value) { extern __shared__ uint sMem1[]; for(uint shift = startbit; shift < (startbit + nbits); ++shift) { uint4 lsb; lsb.x = !((key.x >> shift) & 0x1); lsb.y = !((key.y >> shift) & 0x1); lsb.z = !((key.z >> shift) & 0x1); lsb.w = !((key.w >> shift) & 0x1); uint4 r = rank4<256>(lsb); #if 1 // This arithmetic strides the ranks across 4 SORT_CTA_SIZE regions sMem1[(r.x & 3) * SORT_CTA_SIZE + (r.x >> 2)] = key.x; sMem1[(r.y & 3) * SORT_CTA_SIZE + (r.y >> 2)] = key.y; sMem1[(r.z & 3) * SORT_CTA_SIZE + (r.z >> 2)] = key.z; sMem1[(r.w & 3) * SORT_CTA_SIZE + (r.w >> 2)] = key.w; __syncthreads(); // The above allows us to read without 4-way bank conflicts: key.x = sMem1[threadIdx.x]; key.y = sMem1[threadIdx.x + SORT_CTA_SIZE]; key.z = sMem1[threadIdx.x + 2 * SORT_CTA_SIZE]; key.w = sMem1[threadIdx.x + 3 * SORT_CTA_SIZE]; __syncthreads(); sMem1[(r.x & 3) * SORT_CTA_SIZE + (r.x >> 2)] = value.x; sMem1[(r.y & 3) * SORT_CTA_SIZE + (r.y >> 2)] = value.y; sMem1[(r.z & 3) * SORT_CTA_SIZE + (r.z >> 2)] = value.z; sMem1[(r.w & 3) * SORT_CTA_SIZE + (r.w >> 2)] = value.w; __syncthreads(); value.x = sMem1[threadIdx.x]; value.y = sMem1[threadIdx.x + SORT_CTA_SIZE]; value.z = sMem1[threadIdx.x + 2 * SORT_CTA_SIZE]; value.w = sMem1[threadIdx.x + 3 * SORT_CTA_SIZE]; #else sMem1[r.x] = key.x; sMem1[r.y] = key.y; sMem1[r.z] = key.z; sMem1[r.w] = key.w; __syncthreads(); // This access has 4-way bank conflicts key = sMem[threadIdx.x]; __syncthreads(); sMem1[r.x] = value.x; sMem1[r.y] = value.y; sMem1[r.z] = value.z; sMem1[r.w] = value.w; __syncthreads(); value = sMem[threadIdx.x]; #endif __syncthreads(); } } /** * @brief Sorts one block. Key-only version. * * Uses rank to sort one bit at a time: Sorts a block according * to bits startbit -> nbits + startbit * @param[in,out] key **/ template<uint nbits, uint startbit> __device__ void radixSortBlockKeysOnly(uint4 &key) { extern __shared__ uint sMem1[]; for(uint shift = startbit; shift < (startbit + nbits); ++shift) { uint4 lsb; lsb.x = !((key.x >> shift) & 0x1); lsb.y = !((key.y >> shift) & 0x1); lsb.z = !((key.z >> shift) & 0x1); lsb.w = !((key.w >> shift) & 0x1); uint4 r = rank4<256>(lsb); #if 1 // This arithmetic strides the ranks across 4 CTA_SIZE regions sMem1[(r.x & 3) * SORT_CTA_SIZE + (r.x >> 2)] = key.x; sMem1[(r.y & 3) * SORT_CTA_SIZE + (r.y >> 2)] = key.y; sMem1[(r.z & 3) * SORT_CTA_SIZE + (r.z >> 2)] = key.z; sMem1[(r.w & 3) * SORT_CTA_SIZE + (r.w >> 2)] = key.w; __syncthreads(); // The above allows us to read without 4-way bank conflicts: key.x = sMem1[threadIdx.x]; key.y = sMem1[threadIdx.x + SORT_CTA_SIZE]; key.z = sMem1[threadIdx.x + 2 * SORT_CTA_SIZE]; key.w = sMem1[threadIdx.x + 3 * SORT_CTA_SIZE]; #else sMem1[r.x] = key.x; sMem1[r.y] = key.y; sMem1[r.z] = key.z; sMem1[r.w] = key.w; __syncthreads(); // This access has 4-way bank conflicts key = sMem[threadIdx.x]; #endif __syncthreads(); } } /** @} */ // end radix sort functions /** @} */ // end cudpp_cta
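// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): floatFlip() maps IEEE-754
// floats onto unsigned ints so that plain unsigned comparison matches float
// ordering (a negative has all of its bits flipped, a positive only its sign
// bit), and floatUnflip() undoes the mapping. The host check below demonstrates
// both properties; bits_of_sketch() uses memcpy instead of a cast purely to
// keep the host code well defined.
#include <cassert>
#include <cstdint>
#include <cstring>

static uint32_t float_flip_sketch(uint32_t f) {
  uint32_t mask = (uint32_t)(-(int32_t)(f >> 31)) | 0x80000000u;  // all ones for negatives, sign bit for positives
  return f ^ mask;
}

static uint32_t float_unflip_sketch(uint32_t f) {
  uint32_t mask = ((f >> 31) - 1u) | 0x80000000u;                 // inverse of the mask above
  return f ^ mask;
}

static uint32_t bits_of_sketch(float x) { uint32_t u; std::memcpy(&u, &x, sizeof(u)); return u; }

static void check_float_flip_sketch() {
  const float vals[] = { -3.5f, -0.0f, 0.0f, 1.25f, 7.0f };       // already in ascending float order
  for (int i = 0; i + 1 < 5; ++i)
    assert(float_flip_sketch(bits_of_sketch(vals[i])) <= float_flip_sketch(bits_of_sketch(vals[i + 1])));
  for (int i = 0; i < 5; ++i)
    assert(float_unflip_sketch(float_flip_sketch(bits_of_sketch(vals[i]))) == bits_of_sketch(vals[i]));
}
// ----------------------------------------------------------------------------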
4b29663e246ac82d74cb6360e9897a201b4b2460.cu
// ------------------------------------------------------------- // CUDPP -- CUDA Data Parallel Primitives library // ------------------------------------------------------------- // $Revision$ // $Date$ // ------------------------------------------------------------- // This source code is distributed under the terms of license.txt // in the root directory of this source distribution. // ------------------------------------------------------------- #include "cudpp_globals.h" #include "cudpp_radixsort.h" #include "scan_cta.cu" #include "cudpp.h" #include <stdio.h> #include "cudpp_util.h" #include <math.h> #include "sharedmem.h" #ifdef __DEVICE_EMULATION__ #define __EMUSYNC __syncthreads() #else #define __EMUSYNC #endif /** * @file * sort_cta.cu * * @brief CUDPP CTA-level sort routines */ /** \addtogroup cudpp_cta * @{ */ /** @name Radix Sort Functions * @{ */ typedef unsigned int uint; /** * @brief Flips bits of single-precision floating-point number (parameterized by doFlip) * * flip a float for sorting * finds SIGN of fp number. * if it's 1 (negative float), it flips all bits * if it's 0 (positive float), it flips the sign only * @param[in] f floating-point input (passed as unsigned int) * @see floatUnflip **/ template <bool doFlip> __device__ uint floatFlip(uint f) { if (doFlip) { uint mask = -int(f >> 31) | 0x80000000; return f ^ mask; } else return f; } /** * @brief Reverses bit-flip of single-precision floating-point number (parameterized by doFlip) * * flip a float back (invert FloatFlip) * signed was flipped from above, so: * if sign is 1 (negative), it flips the sign bit back * if sign is 0 (positive), it flips all bits back * @param[in] f floating-point input (passed as unsigned int) * @see floatFlip **/ template <bool doFlip> __device__ uint floatUnflip(uint f) { if (doFlip) { uint mask = ((f >> 31) - 1) | 0x80000000; return f ^ mask; } else return f; } /** * @brief Scans one warp quickly, optimized for 32-element warps, using shared memory * * Scans each warp in parallel ("warp-scan"), one element per thread. 
* uses 2 numElements of shared memory per thread (64 numElements per warp) * * @param[in] val Elements per thread to scan * @param[in,out] sData **/ template<class T, int maxlevel> __device__ T scanwarp(T val, volatile T* sData) { // The following is the same as 2 * WARP_SIZE * warpId + threadInWarp = // 64*(threadIdx.x >> 5) + (threadIdx.x & (WARP_SIZE - 1)) int idx = 2 * threadIdx.x - (threadIdx.x & (WARP_SIZE - 1)); sData[idx] = 0; idx += WARP_SIZE; T t = sData[idx] = val; __EMUSYNC; #ifdef __DEVICE_EMULATION__ t = sData[idx - 1]; __EMUSYNC; sData[idx] += t; __EMUSYNC; t = sData[idx - 2]; __EMUSYNC; sData[idx] += t; __EMUSYNC; t = sData[idx - 4]; __EMUSYNC; sData[idx] += t; __EMUSYNC; t = sData[idx - 8]; __EMUSYNC; sData[idx] += t; __EMUSYNC; t = sData[idx - 16]; __EMUSYNC; sData[idx] += t; __EMUSYNC; #else if (0 <= maxlevel) { sData[idx] = t = t + sData[idx - 1]; } __EMUSYNC; if (1 <= maxlevel) { sData[idx] = t = t + sData[idx - 2]; } __EMUSYNC; if (2 <= maxlevel) { sData[idx] = t = t + sData[idx - 4]; } __EMUSYNC; if (3 <= maxlevel) { sData[idx] = t = t + sData[idx - 8]; } __EMUSYNC; if (4 <= maxlevel) { sData[idx] = t = t + sData[idx -16]; } __EMUSYNC; #endif return sData[idx] - val; // convert inclusive -> exclusive } /** * @brief Scans 4*CTA_SIZE unsigned ints in a block * * scan4 scans 4*CTA_SIZE numElements in a block (4 per * thread), using a warp-scan algorithm * * @param[in] idata 4-vector of integers to scan **/ __device__ uint4 scan4(uint4 idata) { extern __shared__ uint ptr[]; uint idx = threadIdx.x; uint4 val4 = idata; uint sum[3]; sum[0] = val4.x; sum[1] = val4.y + sum[0]; sum[2] = val4.z + sum[1]; uint val = val4.w + sum[2]; val = scanwarp<uint, 4>(val, ptr); __syncthreads(); if ((idx & (WARP_SIZE - 1)) == WARP_SIZE - 1) { ptr[idx >> 5] = val + val4.w + sum[2]; } __syncthreads(); #ifndef __DEVICE_EMULATION__ if (idx < WARP_SIZE) #endif { ptr[idx] = scanwarp<uint, 2>(ptr[idx], ptr); } __syncthreads(); val += ptr[idx >> 5]; val4.x = val; val4.y = val + sum[0]; val4.z = val + sum[1]; val4.w = val + sum[2]; return val4; } /** * @brief Computes output position for each thread given predicate; trues come first then falses * * Rank is the core of the radix sort loop. Given a predicate, it * computes the output position for each thread in an ordering where all * True threads come first, followed by all False threads. * This version handles 4 predicates per thread; hence, "rank4". * * @param[in] preds true/false values for each of the 4 elements in this thread * * @todo is the description of "preds" correct? **/ template <int ctasize> __device__ uint4 rank4(uint4 preds) { uint4 address = scan4(preds); __shared__ uint numtrue; if (threadIdx.x == ctasize-1) { numtrue = address.w + preds.w; } __syncthreads(); uint4 rank; uint idx = threadIdx.x << 2; rank.x = (preds.x) ? address.x : numtrue + idx - address.x; rank.y = (preds.y) ? address.y : numtrue + idx + 1 - address.y; rank.z = (preds.z) ? address.z : numtrue + idx + 2 - address.z; rank.w = (preds.w) ? 
address.w : numtrue + idx + 3 - address.w; return rank; } /** * @brief Sorts one block * * Uses rank to sort one bit at a time: Sorts a block according * to bits startbit -> nbits + startbit * @param[in,out] key * @param[in,out] value **/ template<uint nbits, uint startbit> __device__ void radixSortBlock(uint4 &key, uint4 &value) { extern __shared__ uint sMem1[]; for(uint shift = startbit; shift < (startbit + nbits); ++shift) { uint4 lsb; lsb.x = !((key.x >> shift) & 0x1); lsb.y = !((key.y >> shift) & 0x1); lsb.z = !((key.z >> shift) & 0x1); lsb.w = !((key.w >> shift) & 0x1); uint4 r = rank4<256>(lsb); #if 1 // This arithmetic strides the ranks across 4 SORT_CTA_SIZE regions sMem1[(r.x & 3) * SORT_CTA_SIZE + (r.x >> 2)] = key.x; sMem1[(r.y & 3) * SORT_CTA_SIZE + (r.y >> 2)] = key.y; sMem1[(r.z & 3) * SORT_CTA_SIZE + (r.z >> 2)] = key.z; sMem1[(r.w & 3) * SORT_CTA_SIZE + (r.w >> 2)] = key.w; __syncthreads(); // The above allows us to read without 4-way bank conflicts: key.x = sMem1[threadIdx.x]; key.y = sMem1[threadIdx.x + SORT_CTA_SIZE]; key.z = sMem1[threadIdx.x + 2 * SORT_CTA_SIZE]; key.w = sMem1[threadIdx.x + 3 * SORT_CTA_SIZE]; __syncthreads(); sMem1[(r.x & 3) * SORT_CTA_SIZE + (r.x >> 2)] = value.x; sMem1[(r.y & 3) * SORT_CTA_SIZE + (r.y >> 2)] = value.y; sMem1[(r.z & 3) * SORT_CTA_SIZE + (r.z >> 2)] = value.z; sMem1[(r.w & 3) * SORT_CTA_SIZE + (r.w >> 2)] = value.w; __syncthreads(); value.x = sMem1[threadIdx.x]; value.y = sMem1[threadIdx.x + SORT_CTA_SIZE]; value.z = sMem1[threadIdx.x + 2 * SORT_CTA_SIZE]; value.w = sMem1[threadIdx.x + 3 * SORT_CTA_SIZE]; #else sMem1[r.x] = key.x; sMem1[r.y] = key.y; sMem1[r.z] = key.z; sMem1[r.w] = key.w; __syncthreads(); // This access has 4-way bank conflicts key = sMem[threadIdx.x]; __syncthreads(); sMem1[r.x] = value.x; sMem1[r.y] = value.y; sMem1[r.z] = value.z; sMem1[r.w] = value.w; __syncthreads(); value = sMem[threadIdx.x]; #endif __syncthreads(); } } /** * @brief Sorts one block. Key-only version. * * Uses rank to sort one bit at a time: Sorts a block according * to bits startbit -> nbits + startbit * @param[in,out] key **/ template<uint nbits, uint startbit> __device__ void radixSortBlockKeysOnly(uint4 &key) { extern __shared__ uint sMem1[]; for(uint shift = startbit; shift < (startbit + nbits); ++shift) { uint4 lsb; lsb.x = !((key.x >> shift) & 0x1); lsb.y = !((key.y >> shift) & 0x1); lsb.z = !((key.z >> shift) & 0x1); lsb.w = !((key.w >> shift) & 0x1); uint4 r = rank4<256>(lsb); #if 1 // This arithmetic strides the ranks across 4 CTA_SIZE regions sMem1[(r.x & 3) * SORT_CTA_SIZE + (r.x >> 2)] = key.x; sMem1[(r.y & 3) * SORT_CTA_SIZE + (r.y >> 2)] = key.y; sMem1[(r.z & 3) * SORT_CTA_SIZE + (r.z >> 2)] = key.z; sMem1[(r.w & 3) * SORT_CTA_SIZE + (r.w >> 2)] = key.w; __syncthreads(); // The above allows us to read without 4-way bank conflicts: key.x = sMem1[threadIdx.x]; key.y = sMem1[threadIdx.x + SORT_CTA_SIZE]; key.z = sMem1[threadIdx.x + 2 * SORT_CTA_SIZE]; key.w = sMem1[threadIdx.x + 3 * SORT_CTA_SIZE]; #else sMem1[r.x] = key.x; sMem1[r.y] = key.y; sMem1[r.z] = key.z; sMem1[r.w] = key.w; __syncthreads(); // This access has 4-way bank conflicts key = sMem[threadIdx.x]; #endif __syncthreads(); } } /** @} */ // end radix sort functions /** @} */ // end cudpp_cta
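// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): rank4() implements a
// "split" - an exclusive scan of the predicate gives every true element its
// output slot, and false elements are packed after numtrue at
// (numtrue + index - scan). The serial sketch below applies the same rule to
// one bit of a key array, which is what a single radixSortBlock() pass does;
// the function name and std::vector usage are illustrative assumptions.
#include <cstdint>
#include <vector>

static std::vector<uint32_t> split_by_bit_sketch(const std::vector<uint32_t>& keys, int bit) {
  const size_t n = keys.size();
  std::vector<uint32_t> scan(n), out(n);
  uint32_t running = 0;
  for (size_t i = 0; i < n; ++i) {            // exclusive scan of the predicate
    scan[i] = running;
    running += !((keys[i] >> bit) & 1u);      // predicate is true when the bit is 0
  }
  const uint32_t numtrue = running;
  for (size_t i = 0; i < n; ++i) {
    uint32_t rank = !((keys[i] >> bit) & 1u)
                        ? scan[i]                            // trues keep their scanned position
                        : numtrue + (uint32_t)i - scan[i];   // falses follow, in stable order
    out[rank] = keys[i];
  }
  return out;                                 // keys with bit==0 first, then bit==1, both stable
}
// ----------------------------------------------------------------------------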
80f315906183cb839d65d5c6737a6cd55ac8264d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "stdio.h" #include "stdint.h" #include "vector" #include "cuckoo_1h1p.h" #include "cuda_util.h" #include <chrono> #include <thread> namespace Cuckoo1h1p { unsigned ComputeMaxIterations(const unsigned n, const unsigned table_size, const unsigned num_functions) { float lg_input_size = (float)(log((double)n) / log(2.0)); // #define CONSTANT_ITERATIONS //#ifdef CONSTANT_ITERATIONS // // Set the maximum number of iterations to 7lg(N). // const unsigned MAX_ITERATION_CONSTANT = 7; // unsigned max_iterations = MAX_ITERATION_CONSTANT * lg_input_size; //#else // Use an empirical formula for determining what the maximum number of // iterations should be. Works OK in most situations. float load_factor = float(n) / table_size; float ln_load_factor = (float)(log(load_factor) / log(2.71828183)); unsigned max_iterations = (unsigned)(4.0 * ceil(-1.0 / (0.028255 + 1.1594772 * ln_load_factor) * lg_input_size)); //#endif return max_iterations; } //! Makes an 64-bit Entry out of a key-value pair for the hash table. inline __device__ __host__ KeyValue make_entry(unsigned key, unsigned value) { return (KeyValue(key) << 32) + value; } //! Returns the key of an Entry. inline __device__ __host__ unsigned get_key(KeyValue entry) { return (unsigned)(entry >> 32); } //! Returns the value of an Entry. inline __device__ __host__ unsigned get_value(KeyValue entry) { return (unsigned)(entry & 0xffffffff); } inline __device__ __host__ unsigned stash_hash_function(const unsigned key) { //TODO:: might need to change type sig return (2720648079 ^ key + 13) % kStashSize; } // 32 bit Murmur3 hash __device__ uint hash(uint k, uint capacity) { k ^= k >> 16; k *= 0x85ebca6b; k ^= k >> 13; k *= 0xc2b2ae35; k ^= k >> 16; return k & (capacity - 1); } //! Determine where to insert the key next. The hash functions are used in round-robin order. __device__ unsigned determine_next_location(const unsigned table_size, const unsigned key, const unsigned previous_location) { uint next_location = hash(key, table_size); if (next_location == previous_location) { return (next_location + 1) & (table_size - 1); } return next_location; } // Create a hash table. For linear probing, this is just an array of KeyValues KeyValue *create_hashtable(uint capacity, uint** stash_count) { // Allocate memory KeyValue *hashtable; hipMalloc(&hashtable, sizeof(KeyValue) * (capacity + kStashSize)); // Initialize hash table to empty static_assert(kEmpty == 0xffffffff, "memset expected kEmpty=0xffffffff"); hipMemset(hashtable, 0xff, sizeof(KeyValue) * (capacity + kStashSize)); CUDA_SAFE_CALL(hipMalloc((void**)stash_count, sizeof(uint))); CUDA_SAFE_CALL(hipMemset(*stash_count, 0, sizeof(uint))); //printf("Hash table created successfully"); //std::chrono::seconds dura(5); //std::this_thread::sleep_for(dura); return hashtable; } // Insert the key/values in kvs into the hashtable __global__ void gpu_hashtable_insert(KeyValue *hashtable, uint capacity, uint max_iteration_attempts, const KeyValue *kvs, unsigned int numkvs, uint *stash_count, uint *fail_count) { unsigned int threadid = blockIdx.x * blockDim.x + threadIdx.x; if (threadid < numkvs) { KeyValue entry = kvs[threadid]; unsigned key = get_key(entry); unsigned prev_key = key; // The key is always inserted into its first slot at the start. uint location = hash(key, capacity); //printf("%u\n", max_iteration_attempts); // Keep inserting until an empty slot is found or the eviction chain grows too large. 
for (unsigned its = 1; its <= max_iteration_attempts; its++) { // Insert the new entry. prev_key = key; entry = atomicExch(&hashtable[location], entry); key = get_key(entry); // If no key was evicted or this key is already present, we're done. if (key == kEmpty || prev_key == key) { // *iterations_used = its; return; } // Otherwise, determine where the evicted key will go. location = determine_next_location(capacity, key, location); } //printf("failed insert with key %u whose prev_key is %u \n", key, prev_key); if (key != kEmpty) { //printf("failed insert will stash now after max_iter = %u \n", max_iteration_attempts); // Shove it into the stash. uint slot = stash_hash_function(key); KeyValue *stash = hashtable + capacity; KeyValue replaced_entry = atomicCAS((stash + slot), kvEmpty, entry); if (replaced_entry == kvEmpty) { atomicAdd(stash_count, 1); } else if (get_key(replaced_entry) != key) { atomicAdd(fail_count, 1); } } } } void insert_hashtable(KeyValue *pHashTable, Logger* logger, uint capacity, uint max_iteration_attempts, const KeyValue *kvs, uint num_kvs, uint *d_stash_count) { // Copy the keyvalues to the GPU KeyValue *device_kvs; CUDA_SAFE_CALL(hipMalloc(&device_kvs, sizeof(KeyValue) * num_kvs)); hipMemcpy(device_kvs, kvs, sizeof(KeyValue) * num_kvs, hipMemcpyHostToDevice); // Have CUDA calculate the thread block size int mingridsize; int threadblocksize; hipOccupancyMaxPotentialBlockSize(&mingridsize, &threadblocksize, gpu_hashtable_insert, 0, 0); // Create events for GPU timing hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); // unsigned* d_stash_count = NULL; // CUDA_SAFE_CALL(hipMalloc((void**)&d_stash_count, sizeof(uint))); // CUDA_SAFE_CALL(hipMemset(d_stash_count, 0, sizeof(uint))); unsigned* d_fail_count = NULL; CUDA_SAFE_CALL(hipMalloc((void**)&d_fail_count, sizeof(uint))); CUDA_SAFE_CALL(hipMemset(d_fail_count, 0, sizeof(uint))); // Insert all the keys into the hash table int gridsize = ((uint) num_kvs + threadblocksize - 1) / threadblocksize; hipLaunchKernelGGL(( gpu_hashtable_insert) , dim3(gridsize), dim3(threadblocksize), 0, 0, pHashTable, capacity, max_iteration_attempts, device_kvs, (uint) num_kvs, d_stash_count, d_fail_count); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); float seconds = milliseconds / 1000.0f; printf(" GPU inserted %d items in %f ms (%f million keys/second) \n", num_kvs, milliseconds, num_kvs / (double) seconds / 1000000.0f); // Copy out the stash size. uint stash_count; CUDA_SAFE_CALL(hipMemcpy(&stash_count, d_stash_count, sizeof(unsigned), hipMemcpyDeviceToHost)); if (stash_count != 0) { printf(" stash count is %u\n", stash_count); } // Copy out the stash size. 
uint fail_count; CUDA_SAFE_CALL(hipMemcpy(&fail_count, d_fail_count, sizeof(unsigned), hipMemcpyDeviceToHost)); if (fail_count != 0) { printf(" fail count is %u\n", fail_count); } logger->logInsert(capacity, num_kvs * 1.0 / capacity, num_kvs, milliseconds, max_iteration_attempts, stash_count, fail_count); hipFree(d_fail_count); hipFree(device_kvs); } // Lookup keys in the hashtable, and return the values __global__ void gpu_hashtable_lookup(KeyValue *hashtable, uint capacity, KeyValue *kvs, unsigned int numkvs, uint* stash_count) { unsigned int threadid = blockIdx.x * blockDim.x + threadIdx.x; if (threadid < numkvs) { uint key = get_key(kvs[threadid]); uint hash_val = hash(key, capacity); KeyValue slot0 = hashtable[hash_val]; if (get_key(slot0) == key) { kvs[threadid] = slot0; return; } KeyValue slot1 = hashtable[(hash_val + 1) & (capacity - 1)]; if (get_key(slot1) == key) { kvs[threadid] = slot1; return; } if (*stash_count) { uint slot = stash_hash_function(key); KeyValue *stash = hashtable + capacity; KeyValue entry = stash[slot]; if (get_key(entry) == key) { kvs[threadid] = entry; return; } } kvs[threadid] = make_entry(key, kEmpty); } } void lookup_hashtable(KeyValue *pHashTable, Logger* logger, uint capacity, KeyValue *kvs, uint num_kvs, uint* stash_count) { // Copy the keyvalues to the GPU KeyValue *device_kvs; hipMalloc(&device_kvs, sizeof(KeyValue) * num_kvs); hipMemcpy(device_kvs, kvs, sizeof(KeyValue) * num_kvs, hipMemcpyHostToDevice); // Have CUDA calculate the thread block size int mingridsize; int threadblocksize; hipOccupancyMaxPotentialBlockSize(&mingridsize, &threadblocksize, gpu_hashtable_insert, 0, 0); // Create events for GPU timing hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); // Insert all the keys into the hash table int gridsize = ((uint) num_kvs + threadblocksize - 1) / threadblocksize; hipLaunchKernelGGL(( gpu_hashtable_lookup) , dim3(gridsize), dim3(threadblocksize) , 0, 0, pHashTable, capacity, device_kvs, (uint) num_kvs, stash_count); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); float seconds = milliseconds / 1000.0f; printf(" GPU lookup %d items in %f ms (%f million keys/second)\n", num_kvs, milliseconds, num_kvs / (double) seconds / 1000000.0f); logger->logLookup(num_kvs, milliseconds); hipFree(device_kvs); } // Delete each key in kvs from the hash table, if the key exists // A deleted key is left in the hash table, but its value is set to kEmpty // Deleted keys are not reused; once a key is assigned a slot, it never moves __global__ void gpu_hashtable_delete(KeyValue *hashtable, uint capacity, const KeyValue *kvs, unsigned int numkvs, uint* stash_count) { unsigned int threadid = blockIdx.x * blockDim.x + threadIdx.x; if (threadid < numkvs) { uint key = get_key(kvs[threadid]); // TODO fix!!! 
uint hash_val = hash(key, capacity); KeyValue slot0 = hashtable[hash_val]; if (get_key(slot0) == key) { hashtable[hash_val] = kvEmpty; return; } hash_val = (hash_val + 1) & (capacity - 1); KeyValue slot1 = hashtable[hash_val]; if (get_key(slot1) == key) { hashtable[hash_val] = kvEmpty; return; } if (*stash_count) { uint slot = stash_hash_function(key); KeyValue *stash = hashtable + capacity; KeyValue entry = stash[slot]; if (get_key(entry) == key) { stash[slot] = kvEmpty; return; } } } } void delete_hashtable(KeyValue *pHashTable, Logger* logger, uint capacity, const KeyValue *kvs, uint num_kvs, uint* stash_count) { // Copy the keyvalues to the GPU KeyValue *device_kvs; hipMalloc(&device_kvs, sizeof(KeyValue) * num_kvs); hipMemcpy(device_kvs, kvs, sizeof(KeyValue) * num_kvs, hipMemcpyHostToDevice); // Have CUDA calculate the thread block size int mingridsize; int threadblocksize; hipOccupancyMaxPotentialBlockSize(&mingridsize, &threadblocksize, gpu_hashtable_insert, 0, 0); // Create events for GPU timing hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); // Insert all the keys into the hash table int gridsize = ((uint) num_kvs + threadblocksize - 1) / threadblocksize; hipLaunchKernelGGL(( gpu_hashtable_delete) , dim3(gridsize), dim3(threadblocksize) , 0, 0, pHashTable, capacity, device_kvs, (uint) num_kvs, stash_count); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); float seconds = milliseconds / 1000.0f; printf(" GPU delete %d items in %f ms (%f million keys/second)\n", num_kvs, milliseconds, num_kvs / (double) seconds / 1000000.0f); logger->logDelete(num_kvs, milliseconds); hipFree(device_kvs); } // Iterate over every item in the hashtable; return non-empty key/values __global__ void gpu_iterate_hashtable(KeyValue *pHashTable, uint capacity, KeyValue *kvs, uint *kvs_size) { unsigned int threadid = blockIdx.x * blockDim.x + threadIdx.x; if (threadid < capacity) { if (get_key(pHashTable[threadid]) != kEmpty) { uint value = get_value(pHashTable[threadid]); if (value != kEmpty) { uint size = atomicAdd(kvs_size, 1); kvs[size] = pHashTable[threadid]; } } } } std::vector <KeyValue> iterate_hashtable(KeyValue *pHashTable, uint capacity) { uint *device_num_kvs; hipMalloc(&device_num_kvs, sizeof(uint)); hipMemset(device_num_kvs, 0, sizeof(uint)); KeyValue *device_kvs; hipMalloc(&device_kvs, sizeof(KeyValue) * kNumKeyValues); int mingridsize; int threadblocksize; hipOccupancyMaxPotentialBlockSize(&mingridsize, &threadblocksize, gpu_iterate_hashtable, 0, 0); int gridsize = (kHashTableCapacity + threadblocksize - 1) / threadblocksize; hipLaunchKernelGGL(( gpu_iterate_hashtable) , dim3(gridsize), dim3(threadblocksize) , 0, 0, pHashTable, capacity, device_kvs, device_num_kvs); uint num_kvs; hipMemcpy(&num_kvs, device_num_kvs, sizeof(uint), hipMemcpyDeviceToHost); std::vector <KeyValue> kvs; kvs.resize(num_kvs); hipMemcpy(kvs.data(), device_kvs, sizeof(KeyValue) * num_kvs, hipMemcpyDeviceToHost); hipFree(device_kvs); hipFree(device_num_kvs); return kvs; } // Free the memory of the hashtable void destroy_hashtable(KeyValue *pHashTable) { hipFree(pHashTable); } }
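// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): hash() above is the
// 32-bit Murmur3 finalizer followed by `& (capacity - 1)`, which is only a
// valid substitute for `% capacity` when capacity is a power of two. The host
// helper below reuses that finalizer and asserts the precondition before
// masking; the names are illustrative assumptions.
#include <cassert>
#include <cstdint>

static uint32_t murmur3_fmix_sketch(uint32_t k) {
  k ^= k >> 16; k *= 0x85ebca6bu;
  k ^= k >> 13; k *= 0xc2b2ae35u;
  k ^= k >> 16;
  return k;
}

static uint32_t slot_of_sketch(uint32_t key, uint32_t capacity) {
  assert(capacity != 0 && (capacity & (capacity - 1)) == 0);  // table size must be a power of two
  return murmur3_fmix_sketch(key) & (capacity - 1);           // cheap modulo for power-of-two sizes
}
// ----------------------------------------------------------------------------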
80f315906183cb839d65d5c6737a6cd55ac8264d.cu
#include "stdio.h" #include "stdint.h" #include "vector" #include "cuckoo_1h1p.h" #include "cuda_util.h" #include <chrono> #include <thread> namespace Cuckoo1h1p { unsigned ComputeMaxIterations(const unsigned n, const unsigned table_size, const unsigned num_functions) { float lg_input_size = (float)(log((double)n) / log(2.0)); // #define CONSTANT_ITERATIONS //#ifdef CONSTANT_ITERATIONS // // Set the maximum number of iterations to 7lg(N). // const unsigned MAX_ITERATION_CONSTANT = 7; // unsigned max_iterations = MAX_ITERATION_CONSTANT * lg_input_size; //#else // Use an empirical formula for determining what the maximum number of // iterations should be. Works OK in most situations. float load_factor = float(n) / table_size; float ln_load_factor = (float)(log(load_factor) / log(2.71828183)); unsigned max_iterations = (unsigned)(4.0 * ceil(-1.0 / (0.028255 + 1.1594772 * ln_load_factor) * lg_input_size)); //#endif return max_iterations; } //! Makes an 64-bit Entry out of a key-value pair for the hash table. inline __device__ __host__ KeyValue make_entry(unsigned key, unsigned value) { return (KeyValue(key) << 32) + value; } //! Returns the key of an Entry. inline __device__ __host__ unsigned get_key(KeyValue entry) { return (unsigned)(entry >> 32); } //! Returns the value of an Entry. inline __device__ __host__ unsigned get_value(KeyValue entry) { return (unsigned)(entry & 0xffffffff); } inline __device__ __host__ unsigned stash_hash_function(const unsigned key) { //TODO:: might need to change type sig return (2720648079 ^ key + 13) % kStashSize; } // 32 bit Murmur3 hash __device__ uint hash(uint k, uint capacity) { k ^= k >> 16; k *= 0x85ebca6b; k ^= k >> 13; k *= 0xc2b2ae35; k ^= k >> 16; return k & (capacity - 1); } //! Determine where to insert the key next. The hash functions are used in round-robin order. __device__ unsigned determine_next_location(const unsigned table_size, const unsigned key, const unsigned previous_location) { uint next_location = hash(key, table_size); if (next_location == previous_location) { return (next_location + 1) & (table_size - 1); } return next_location; } // Create a hash table. For linear probing, this is just an array of KeyValues KeyValue *create_hashtable(uint capacity, uint** stash_count) { // Allocate memory KeyValue *hashtable; cudaMalloc(&hashtable, sizeof(KeyValue) * (capacity + kStashSize)); // Initialize hash table to empty static_assert(kEmpty == 0xffffffff, "memset expected kEmpty=0xffffffff"); cudaMemset(hashtable, 0xff, sizeof(KeyValue) * (capacity + kStashSize)); CUDA_SAFE_CALL(cudaMalloc((void**)stash_count, sizeof(uint))); CUDA_SAFE_CALL(cudaMemset(*stash_count, 0, sizeof(uint))); //printf("Hash table created successfully"); //std::chrono::seconds dura(5); //std::this_thread::sleep_for(dura); return hashtable; } // Insert the key/values in kvs into the hashtable __global__ void gpu_hashtable_insert(KeyValue *hashtable, uint capacity, uint max_iteration_attempts, const KeyValue *kvs, unsigned int numkvs, uint *stash_count, uint *fail_count) { unsigned int threadid = blockIdx.x * blockDim.x + threadIdx.x; if (threadid < numkvs) { KeyValue entry = kvs[threadid]; unsigned key = get_key(entry); unsigned prev_key = key; // The key is always inserted into its first slot at the start. uint location = hash(key, capacity); //printf("%u\n", max_iteration_attempts); // Keep inserting until an empty slot is found or the eviction chain grows too large. for (unsigned its = 1; its <= max_iteration_attempts; its++) { // Insert the new entry. 
prev_key = key; entry = atomicExch(&hashtable[location], entry); key = get_key(entry); // If no key was evicted or this key is already present, we're done. if (key == kEmpty || prev_key == key) { // *iterations_used = its; return; } // Otherwise, determine where the evicted key will go. location = determine_next_location(capacity, key, location); } //printf("failed insert with key %u whose prev_key is %u \n", key, prev_key); if (key != kEmpty) { //printf("failed insert will stash now after max_iter = %u \n", max_iteration_attempts); // Shove it into the stash. uint slot = stash_hash_function(key); KeyValue *stash = hashtable + capacity; KeyValue replaced_entry = atomicCAS((stash + slot), kvEmpty, entry); if (replaced_entry == kvEmpty) { atomicAdd(stash_count, 1); } else if (get_key(replaced_entry) != key) { atomicAdd(fail_count, 1); } } } } void insert_hashtable(KeyValue *pHashTable, Logger* logger, uint capacity, uint max_iteration_attempts, const KeyValue *kvs, uint num_kvs, uint *d_stash_count) { // Copy the keyvalues to the GPU KeyValue *device_kvs; CUDA_SAFE_CALL(cudaMalloc(&device_kvs, sizeof(KeyValue) * num_kvs)); cudaMemcpy(device_kvs, kvs, sizeof(KeyValue) * num_kvs, cudaMemcpyHostToDevice); // Have CUDA calculate the thread block size int mingridsize; int threadblocksize; cudaOccupancyMaxPotentialBlockSize(&mingridsize, &threadblocksize, gpu_hashtable_insert, 0, 0); // Create events for GPU timing cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); // unsigned* d_stash_count = NULL; // CUDA_SAFE_CALL(cudaMalloc((void**)&d_stash_count, sizeof(uint))); // CUDA_SAFE_CALL(cudaMemset(d_stash_count, 0, sizeof(uint))); unsigned* d_fail_count = NULL; CUDA_SAFE_CALL(cudaMalloc((void**)&d_fail_count, sizeof(uint))); CUDA_SAFE_CALL(cudaMemset(d_fail_count, 0, sizeof(uint))); // Insert all the keys into the hash table int gridsize = ((uint) num_kvs + threadblocksize - 1) / threadblocksize; gpu_hashtable_insert <<<gridsize, threadblocksize>>>(pHashTable, capacity, max_iteration_attempts, device_kvs, (uint) num_kvs, d_stash_count, d_fail_count); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); float seconds = milliseconds / 1000.0f; printf(" GPU inserted %d items in %f ms (%f million keys/second) \n", num_kvs, milliseconds, num_kvs / (double) seconds / 1000000.0f); // Copy out the stash size. uint stash_count; CUDA_SAFE_CALL(cudaMemcpy(&stash_count, d_stash_count, sizeof(unsigned), cudaMemcpyDeviceToHost)); if (stash_count != 0) { printf(" stash count is %u\n", stash_count); } // Copy out the stash size. 
uint fail_count; CUDA_SAFE_CALL(cudaMemcpy(&fail_count, d_fail_count, sizeof(unsigned), cudaMemcpyDeviceToHost)); if (fail_count != 0) { printf(" fail count is %u\n", fail_count); } logger->logInsert(capacity, num_kvs * 1.0 / capacity, num_kvs, milliseconds, max_iteration_attempts, stash_count, fail_count); cudaFree(d_fail_count); cudaFree(device_kvs); } // Lookup keys in the hashtable, and return the values __global__ void gpu_hashtable_lookup(KeyValue *hashtable, uint capacity, KeyValue *kvs, unsigned int numkvs, uint* stash_count) { unsigned int threadid = blockIdx.x * blockDim.x + threadIdx.x; if (threadid < numkvs) { uint key = get_key(kvs[threadid]); uint hash_val = hash(key, capacity); KeyValue slot0 = hashtable[hash_val]; if (get_key(slot0) == key) { kvs[threadid] = slot0; return; } KeyValue slot1 = hashtable[(hash_val + 1) & (capacity - 1)]; if (get_key(slot1) == key) { kvs[threadid] = slot1; return; } if (*stash_count) { uint slot = stash_hash_function(key); KeyValue *stash = hashtable + capacity; KeyValue entry = stash[slot]; if (get_key(entry) == key) { kvs[threadid] = entry; return; } } kvs[threadid] = make_entry(key, kEmpty); } } void lookup_hashtable(KeyValue *pHashTable, Logger* logger, uint capacity, KeyValue *kvs, uint num_kvs, uint* stash_count) { // Copy the keyvalues to the GPU KeyValue *device_kvs; cudaMalloc(&device_kvs, sizeof(KeyValue) * num_kvs); cudaMemcpy(device_kvs, kvs, sizeof(KeyValue) * num_kvs, cudaMemcpyHostToDevice); // Have CUDA calculate the thread block size int mingridsize; int threadblocksize; cudaOccupancyMaxPotentialBlockSize(&mingridsize, &threadblocksize, gpu_hashtable_insert, 0, 0); // Create events for GPU timing cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); // Insert all the keys into the hash table int gridsize = ((uint) num_kvs + threadblocksize - 1) / threadblocksize; gpu_hashtable_lookup <<< gridsize, threadblocksize >>> (pHashTable, capacity, device_kvs, (uint) num_kvs, stash_count); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); float seconds = milliseconds / 1000.0f; printf(" GPU lookup %d items in %f ms (%f million keys/second)\n", num_kvs, milliseconds, num_kvs / (double) seconds / 1000000.0f); logger->logLookup(num_kvs, milliseconds); cudaFree(device_kvs); } // Delete each key in kvs from the hash table, if the key exists // A deleted key is left in the hash table, but its value is set to kEmpty // Deleted keys are not reused; once a key is assigned a slot, it never moves __global__ void gpu_hashtable_delete(KeyValue *hashtable, uint capacity, const KeyValue *kvs, unsigned int numkvs, uint* stash_count) { unsigned int threadid = blockIdx.x * blockDim.x + threadIdx.x; if (threadid < numkvs) { uint key = get_key(kvs[threadid]); // TODO fix!!! 
uint hash_val = hash(key, capacity); KeyValue slot0 = hashtable[hash_val]; if (get_key(slot0) == key) { hashtable[hash_val] = kvEmpty; return; } hash_val = (hash_val + 1) & (capacity - 1); KeyValue slot1 = hashtable[hash_val]; if (get_key(slot1) == key) { hashtable[hash_val] = kvEmpty; return; } if (*stash_count) { uint slot = stash_hash_function(key); KeyValue *stash = hashtable + capacity; KeyValue entry = stash[slot]; if (get_key(entry) == key) { stash[slot] = kvEmpty; return; } } } } void delete_hashtable(KeyValue *pHashTable, Logger* logger, uint capacity, const KeyValue *kvs, uint num_kvs, uint* stash_count) { // Copy the keyvalues to the GPU KeyValue *device_kvs; cudaMalloc(&device_kvs, sizeof(KeyValue) * num_kvs); cudaMemcpy(device_kvs, kvs, sizeof(KeyValue) * num_kvs, cudaMemcpyHostToDevice); // Have CUDA calculate the thread block size int mingridsize; int threadblocksize; cudaOccupancyMaxPotentialBlockSize(&mingridsize, &threadblocksize, gpu_hashtable_insert, 0, 0); // Create events for GPU timing cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); // Insert all the keys into the hash table int gridsize = ((uint) num_kvs + threadblocksize - 1) / threadblocksize; gpu_hashtable_delete <<< gridsize, threadblocksize >>> (pHashTable, capacity, device_kvs, (uint) num_kvs, stash_count); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); float seconds = milliseconds / 1000.0f; printf(" GPU delete %d items in %f ms (%f million keys/second)\n", num_kvs, milliseconds, num_kvs / (double) seconds / 1000000.0f); logger->logDelete(num_kvs, milliseconds); cudaFree(device_kvs); } // Iterate over every item in the hashtable; return non-empty key/values __global__ void gpu_iterate_hashtable(KeyValue *pHashTable, uint capacity, KeyValue *kvs, uint *kvs_size) { unsigned int threadid = blockIdx.x * blockDim.x + threadIdx.x; if (threadid < capacity) { if (get_key(pHashTable[threadid]) != kEmpty) { uint value = get_value(pHashTable[threadid]); if (value != kEmpty) { uint size = atomicAdd(kvs_size, 1); kvs[size] = pHashTable[threadid]; } } } } std::vector <KeyValue> iterate_hashtable(KeyValue *pHashTable, uint capacity) { uint *device_num_kvs; cudaMalloc(&device_num_kvs, sizeof(uint)); cudaMemset(device_num_kvs, 0, sizeof(uint)); KeyValue *device_kvs; cudaMalloc(&device_kvs, sizeof(KeyValue) * kNumKeyValues); int mingridsize; int threadblocksize; cudaOccupancyMaxPotentialBlockSize(&mingridsize, &threadblocksize, gpu_iterate_hashtable, 0, 0); int gridsize = (kHashTableCapacity + threadblocksize - 1) / threadblocksize; gpu_iterate_hashtable <<< gridsize, threadblocksize >>> (pHashTable, capacity, device_kvs, device_num_kvs); uint num_kvs; cudaMemcpy(&num_kvs, device_num_kvs, sizeof(uint), cudaMemcpyDeviceToHost); std::vector <KeyValue> kvs; kvs.resize(num_kvs); cudaMemcpy(kvs.data(), device_kvs, sizeof(KeyValue) * num_kvs, cudaMemcpyDeviceToHost); cudaFree(device_kvs); cudaFree(device_num_kvs); return kvs; } // Free the memory of the hashtable void destroy_hashtable(KeyValue *pHashTable) { cudaFree(pHashTable); } }
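// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): gpu_hashtable_insert()
// keeps swapping the incoming entry with whatever occupies its slot, follows
// the evicted key to its alternate slot, and gives up into the stash after
// max_iteration_attempts evictions. The serial host sketch below walks the same
// eviction chain with a std::vector standing in for the device table; the
// hashing lambda and the empty-entry sentinel are illustrative assumptions.
#include <cstdint>
#include <utility>
#include <vector>

static bool cuckoo_insert_sketch(std::vector<uint64_t>& table, uint32_t capacity,
                                 uint64_t entry, unsigned max_iterations,
                                 uint64_t empty_entry) {
  auto key_of = [](uint64_t e) { return (uint32_t)(e >> 32); };  // same packing as make_entry/get_key
  auto hash_of = [capacity](uint32_t k) {
    k ^= k >> 16; k *= 0x85ebca6bu; k ^= k >> 13; k *= 0xc2b2ae35u; k ^= k >> 16;
    return k & (capacity - 1);
  };

  uint32_t key = key_of(entry);
  uint32_t loc = hash_of(key);
  for (unsigned it = 0; it < max_iterations; ++it) {
    std::swap(entry, table[loc]);                             // evict whatever currently lives here
    uint32_t evicted = key_of(entry);
    if (entry == empty_entry || evicted == key) return true;  // slot was free, or the key was already present
    key = evicted;                                            // keep going with the displaced entry
    uint32_t next = hash_of(key);
    loc = (next == loc) ? ((next + 1) & (capacity - 1)) : next;  // same rule as determine_next_location()
  }
  return false;                                               // caller would now try the stash, as the kernel does
}
// ----------------------------------------------------------------------------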
62b788f4219fa1c55fe1c6f0701f9c236d431073.hip
// !!! This is a file automatically generated by hipify!!! /* This version is "NO Streaming" version. 12/16 Try streaming! */ #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <time.h> // #define TIME // #define CUDA_NVPROF const int BLOCKING_FACTOR = 32; // 32, 16, 8, 4, 2 const int INF = ((1 << 30) - 1); // Global var stored in Data Section. // const int V = 40010; void input(char* inFileName); void output(char* outFileName); void print_ans(int num_V, char* ans_file); void block_FW(int B); void block_FW_Large_N(int B); int ceil(int a, int b); // void cal(int n, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height); // Shared memory: For each block, each thread brings d[i][j] to s[i][j] ! // // extern __shared__ int S[]; __device__ inline int Addr(int matrixIdx, int i, int j, int N){ return( N*N*matrixIdx + i*N + j); } // W: width, H: height // __device__ inline int Addr2(int matrixIdx, int i, int j, int W, int H){ // return( W*H*matrixIdx + i*W + j); // } // TODO: Bank Conflict! // TRY pahse1: Let thread(Idx.x, Idx.y) access in diagonally! Same WARP NO bank conflict. // PHASE 1 : ONE Block do k iterations with B*B threads. // __global__ void cal(int* device_Dist, int n, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height){ __global__ void cal(int* device_Dist, int n, int B, int Round, int block_start_x, int block_start_y){ __shared__ int S[32*32]; int i = block_start_y*B + threadIdx.y; int j = block_start_x*B + threadIdx.x; if(i<n && j<n){ // S[ (i%B)*B + (j%B) ] = device_Dist[i*n + j]; S[ Addr(0,threadIdx.y, threadIdx.x, B) ] = device_Dist[Addr(0,i,j,n)]; // S[Addr(0, (i%B), (j%B), B)] = device_Dist[Addr(0,i,j,n)]; // S[ (i%B)*(B+1) + (j%(B+1)) ] = device_Dist[i*n + j]; // __syncthreads(); // This for-loop CANNOT be serialize! // for (int k = Round * B; k < (Round + 1) * B && k < n; ++k) { for (int iter = 0; iter<B && Round*B+iter <n; iter++){ __syncthreads(); if (S[Addr(0, threadIdx.y, iter, B)]+ S[Addr(0, iter, threadIdx.x, B)] < S[Addr(0,threadIdx.y, threadIdx.x, B)] ) { S[Addr(0,threadIdx.y, threadIdx.x, B)] = S[Addr(0, threadIdx.y, iter, B)]+ S[Addr(0, iter, threadIdx.x, B)]; } } device_Dist[Addr(0,i,j,n)] = S[Addr(0,threadIdx.y, threadIdx.x, B)]; }// end if(i<n && j<n ) } // Why cal3 don't need sync_threads() and can perform all correct? // Each thread do k calculation (O(k)) // __global__ void cal3(int* device_Dist, int n, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height){ __global__ void cal3(int* device_Dist, int n, int B, int Round, int block_start_x, int block_start_y){ __shared__ int S[32*32*3]; int i = block_start_y* B + blockIdx.y * B + threadIdx.y; int j = block_start_x* B + blockIdx.x * B + threadIdx.x; // S[Addr(1, threadIdx.y, ((Round*B + threadIdx.x)%B), B)] = device_Dist[Addr(0,i,(Round*B + threadIdx.x),n)]; // S[Addr(2, ((Round*B + threadIdx.y)%B), threadIdx.x, B)] = device_Dist[Addr(0,(Round*B + threadIdx.y),j,n)]; if(i<n && (Round*B + threadIdx.x) <n) S[Addr(1, threadIdx.y, ((Round*B + threadIdx.x)%B), B)] = device_Dist[Addr(0,i,(Round*B + threadIdx.x),n)]; if(j<n && (Round*B + threadIdx.y)<n) S[Addr(2, ((Round*B + threadIdx.y)%B), threadIdx.x, B)] = device_Dist[Addr(0,(Round*B + threadIdx.y),j,n)]; if(i<n && j<n){ // For each thread, calculate one edge. S[ Addr(0,threadIdx.y, threadIdx.x, B) ] = device_Dist[Addr(0,i,j,n)]; __syncthreads(); // This for-loop CANNOT be parallelize! 
// for (int k = Round * B; k < (Round + 1) * B && k < n; ++k) { /// KEY!! Don't USE % on K. for (int iter = 0; iter<B && Round*B+iter <n; iter++){ //k = Round * B; k < (Round + 1) * B && k < n; ++k) { // __syncthreads(); // if (S[Addr(1, (i%B), (k%B), B)]+ S[Addr(2, (k%B), (j%B), B)] < S[Addr(0, (i%B), (j%B), B)] ) { // S[Addr(0, (i%B), (j%B), B)] = S[Addr(1, (i%B), (k%B), B)]+ S[Addr(2, (k%B), (j%B), B)]; // } // i , k // k , j // i , j if (S[Addr(1, threadIdx.y, iter, B)]+ S[Addr(2, iter, threadIdx.x, B)] < S[Addr(0,threadIdx.y, threadIdx.x, B)] ) { S[Addr(0,threadIdx.y, threadIdx.x, B)] = S[Addr(1, threadIdx.y, iter, B)]+ S[Addr(2, iter, threadIdx.x, B)]; } // if (S[Addr(1, threadIdx.y, (k%B), B)]+ S[Addr(2, (k%B), threadIdx.x, B)] < S[Addr(0,threadIdx.y, threadIdx.x, B)] ) { // S[Addr(0,threadIdx.y, threadIdx.x, B)] = S[Addr(1, threadIdx.y, (k%B), B)]+ S[Addr(2, (k%B), threadIdx.x, B)]; // } } device_Dist[Addr(0,i,j,n)] = S[Addr(0,threadIdx.y, threadIdx.x, B)]; } } int n, m; // static int Dist[V][V]; int* Dist; int main(int argc, char* argv[]) { #ifdef TIME // struct timespec start, end, temp; struct timespec total_starttime; struct timespec total_temp; struct timespec start; struct timespec end; struct timespec temp; double IO_time=0.0; double Total_time = 0.0; clock_gettime(CLOCK_MONOTONIC, &total_starttime); clock_gettime(CLOCK_MONOTONIC, &start); #endif input(argv[1]); #ifdef TIME clock_gettime(CLOCK_MONOTONIC, &end); if ((end.tv_nsec - start.tv_nsec) < 0) { temp.tv_sec = end.tv_sec-start.tv_sec-1; temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec; } else { temp.tv_sec = end.tv_sec - start.tv_sec; temp.tv_nsec = end.tv_nsec - start.tv_nsec; } IO_time += temp.tv_sec + (double) temp.tv_nsec / 1000000000.0; #endif // printf("%f second on input\n", time_used); // we have num_v, num_e, adj_matrix (Dist[V][V]) now // int B = 512; // Note: Since B*B threads, maximum B : 32 (MAX 1024 threads per block) int B; // B = 32; // 16: faster .(WHY?) communication. MAX: 32 B = BLOCKING_FACTOR; // B = 7; // int B = 4; // blocking factor. // if(n>=5000) block_FW_Large_N(B); // else block_FW(B); block_FW(B); // if(n>=5000) block_FW_Large_N(16); // else block_FW(32); #ifdef TIME clock_gettime(CLOCK_MONOTONIC, &start); #endif output(argv[2]); #ifdef TIME clock_gettime(CLOCK_MONOTONIC, &end); // IO Time if ((end.tv_nsec - start.tv_nsec) < 0) { temp.tv_sec = end.tv_sec-start.tv_sec-1; temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec; } else { temp.tv_sec = end.tv_sec - start.tv_sec; temp.tv_nsec = end.tv_nsec - start.tv_nsec; } // Total Time if ((end.tv_nsec - total_starttime.tv_nsec) < 0) { total_temp.tv_sec = end.tv_sec-total_starttime.tv_sec-1; total_temp.tv_nsec = 1000000000 + end.tv_nsec - total_starttime.tv_nsec; } else { total_temp.tv_sec = end.tv_sec - total_starttime.tv_sec; total_temp.tv_nsec = end.tv_nsec - total_starttime.tv_nsec; } IO_time += temp.tv_sec + (double) temp.tv_nsec / 1000000000.0; Total_time = total_temp.tv_sec + (double) total_temp.tv_nsec / 1000000000.0; #endif #ifdef TIME printf("IO Time: %.8f seconds\n", IO_time); printf("Total Time: %.8f seconds\n",Total_time); #endif // Communicatoin time: (Memcpy H2D, D2H). 
// printf("Computation Time: %.8f\n",); //GPU Kernel // print_ans(n); // print_ans(n, argv[3]); // output(argv[2]); return 0; } void input(char* infile) { FILE* file = fopen(infile, "rb"); fread(&n, sizeof(int), 1, file); // n = num_vertices fread(&m, sizeof(int), 1, file); // m = num_edges printf("V: %d, E: %d\n",n,m); Dist = (int*) malloc(sizeof(int)*n*n); // Initialize adjacency matrix for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { if (i == j) { Dist[i*n+j] = 0; // Dist[i][j] = 0; } else { Dist[i*n+j] = INF; // Dist[i][j] = INF; } } } // Sequentially read input edges and fill them into adj matrix. int pair[3]; for (int i = 0; i < m; ++i) { fread(pair, sizeof(int), 3, file); // Dist[pair[0]][pair[1]] = pair[2]; Dist[ pair[0]*n+ pair[1]] = pair[2]; } fclose(file); } void print_ans(int num_V, char* ans_file){ FILE* file = fopen(ans_file, "rb"); int* Ans = (int*)malloc(sizeof(int)*n*n); fread(Ans, sizeof(int), n*n, file); for(int i=0; i<num_V*num_V; i++){ if(Dist[i] != Ans[i]){ printf("Wrong at offset %d, expected %d but get %d\n", i*4, Ans[i], Dist[i]); printf("Fron %d to %d , cost: %d\n", (i/n), (i%n), Ans[i] ); } // printf("offset %d val %d, ans: %d\n", i*4, Dist[i], Ans[i]); } } void output(char* outFileName) { FILE* outfile = fopen(outFileName, "w"); for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { if (Dist[i*n+j] >= INF) Dist[i*n+j] = INF; } fwrite(Dist+i*n, sizeof(int), n, outfile); } fclose(outfile); } int ceil(int a, int b) { return (a + b - 1) / b; } // 1204: Idea1 : one stream with 9 serialize kernel launch? // memory to pass to GPU: B, r, r, r, 1, 1. ALL constant! No memory copy. void block_FW(int B) { // printf("Blocking factor: %d (num of pixel(adj entries) in a Block)\n",B); // printf(" %d * %d block\n",B,B); int round = ceil(n, B); // hipMemcpy(); int *device_Dist; // hipMalloc(&device_Dist, V * V* sizeof(unsigned int)); hipMalloc(&device_Dist, n * n* sizeof(unsigned int)); #ifdef TIME hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); #endif // hipMemcpy(...) copy source image to device (mask matrix if necessary) hipMemcpy(device_Dist, Dist, n* n*sizeof(unsigned int), hipMemcpyHostToDevice); #ifdef TIME hipEventRecord(stop); hipEventSynchronize(stop); // WAIT until 'stop' complete. float Comm_time; // H2D hipEventElapsedTime(&Comm_time, start, stop); // printf("Took %.8f milliseconds on computation.",time); #endif // printf("Initial matrix: \n"); // print_ans(n); // 2*2 threadIdx.x from 0 to 1, Idx.y from 0 to 1 dim3 num_threads(B,B); #ifdef TIME hipEvent_t compt_start, compt_stop; hipEventCreate(&compt_start); hipEventCreate(&compt_stop); hipEventRecord(compt_start); #endif #ifdef CUDA_NVPROF hipProfilerStart(); #endif for (int r = 0; r < round; ++r) { // printf("%d %d\n", r, round); fflush(stdout); /* Phase 1*/ // EX: 3*3 Blocks. 
At iteration k (round r), send D(r,r) // cal<<< 1, num_threads , sizeof(int)*B*(B+1)>>> (device_Dist, n, B, r, r, r, 1, 1); //hipLaunchKernelGGL(( cal), dim3(1), dim3(num_threads) , sizeof(int)*B*B*3, 0, device_Dist, n, B, r, r, r, 1, 1); hipLaunchKernelGGL(( cal), dim3(1), dim3(num_threads) , sizeof(int)*B*B*3, 0, device_Dist, n, B, r, r, r); /* Phase 2*/ // hipProfilerStart(); if(r !=0){ dim3 nB(1,r); //hipLaunchKernelGGL(( cal3), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3, 0, device_Dist, n, B, r, r, 0, r, 1); hipLaunchKernelGGL(( cal3), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3, 0, device_Dist, n, B, r, r, 0); } if(round -r-1 !=0){ dim3 nB(1,round - r - 1); //hipLaunchKernelGGL(( cal3), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3, 0, device_Dist, n, B, r, r, r + 1, round - r - 1, 1); hipLaunchKernelGGL(( cal3), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3, 0, device_Dist, n, B, r, r, r + 1); } //////////// HEIGHT blocks (width == 1) ///////////// if(r!=0){ dim3 nB(r,1); // hipLaunchKernelGGL(( cal3), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3 , 0, device_Dist, n, B, r, 0, r, 1, r); hipLaunchKernelGGL(( cal3), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3 , 0, device_Dist, n, B, r, 0, r); } if(round-r-1 !=0) { dim3 nB(round - r - 1,1); //hipLaunchKernelGGL(( cal3), dim3(nB) , dim3(num_threads), sizeof(int)*B*B*3 , 0, device_Dist, n, B, r, r + 1, r, 1, round - r - 1); hipLaunchKernelGGL(( cal3), dim3(nB) , dim3(num_threads), sizeof(int)*B*B*3 , 0, device_Dist, n, B, r, r + 1, r); } // hipProfilerStop(); /* Phase 3*/ // => USE 2D block! // block // pivot block x y blocks // hipProfilerStart(); if(r != 0){ dim3 nB(r,r); //hipLaunchKernelGGL(( cal3), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3 , 0, device_Dist, n, B, r, 0, 0, r, r); hipLaunchKernelGGL(( cal3), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3 , 0, device_Dist, n, B, r, 0, 0); } if(r !=0 && (round-r-1) !=0){ dim3 nB(r,(round-r-1)); //hipLaunchKernelGGL(( cal3), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3 , 0, device_Dist, n, B, r, 0, r + 1, round - r - 1, r); hipLaunchKernelGGL(( cal3), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3 , 0, device_Dist, n, B, r, 0, r + 1); } if(r !=0 && round-r-1 !=0){ dim3 nB((round-r-1),r); //hipLaunchKernelGGL(( cal3), dim3(nB) ,dim3(num_threads) , sizeof(int)*B*B*3 , 0, device_Dist, n, B, r, r + 1, 0, r, round - r - 1); hipLaunchKernelGGL(( cal3), dim3(nB) ,dim3(num_threads) , sizeof(int)*B*B*3 , 0, device_Dist, n, B, r, r + 1, 0); } if(round-r-1 !=0){ dim3 nB_p3(round - r - 1, round - r - 1); //hipLaunchKernelGGL(( cal3), dim3(nB_p3), dim3(num_threads), sizeof(int)*B*B*3 , 0, device_Dist, n, B, r, r + 1, r + 1, round - r - 1, round - r - 1); hipLaunchKernelGGL(( cal3), dim3(nB_p3), dim3(num_threads), sizeof(int)*B*B*3 , 0, device_Dist, n, B, r, r + 1, r + 1); } // hipProfilerStop(); } #ifdef CUDA_NVPROF hipProfilerStop(); #endif #ifdef TIME hipEventRecord(compt_stop); hipEventSynchronize(compt_stop); // WAIT until 'stop' complete. float compt_time; hipEventElapsedTime(&compt_time, compt_start, compt_stop); printf("Computation Time: %.8f seconds\n",compt_time/1000); #endif #ifdef TIME hipEventRecord(start); #endif hipMemcpy(Dist, device_Dist, n * n *sizeof(unsigned int), hipMemcpyDeviceToHost); #ifdef TIME hipEventRecord(stop); hipEventSynchronize(stop); // WAIT until 'stop' complete. 
float D2H_Comm_time; hipEventElapsedTime(&D2H_Comm_time, start, stop); printf("Memory Copy Time: %.8f seconds\n", (D2H_Comm_time + Comm_time ) /1000); #endif } // // For Large n.: Don't use Synchronize. // // n > 5000 // void block_FW_Large_N(int B) { // printf("Blocking factor: %d (num of pixel(adj entries) in a Block)\n",B); // printf(" %d * %d block\n",B,B); // int round = ceil(n, B); // // #ifdef TIME // // hipEvent_t start, stop; // // hipEventCreate(&start); // // hipEventCreate(&stop); // // hipEventRecord(start); // // #endif // // hipMemcpy(); // int *device_Dist; // // hipMalloc(&device_Dist, V * V* sizeof(unsigned int)); // hipMalloc(&device_Dist, n * n* sizeof(unsigned int)); // // hipMemcpy(...) copy source image to device (mask matrix if necessary) // hipMemcpy(device_Dist, Dist, n* n*sizeof(unsigned int), hipMemcpyHostToDevice); // #ifdef TIME // hipEvent_t start, stop; // hipEventCreate(&start); // hipEventCreate(&stop); // hipEventRecord(start); // #endif // // printf("Initial matrix: \n"); // // print_ans(n); // // 2*2 threadIdx.x from 0 to 1, Idx.y from 0 to 1 // dim3 num_threads(B,B); // /////// CREATE 4 STREAMS /////////// // const int num_streams = 4; // hipStream_t streams[num_streams]; // float *data[num_streams]; // for (int i = 0; i < num_streams; i++) { // hipStreamCreate(&streams[i]); // } // // hipDeviceReset(); // for (int r = 0; r < round; ++r) { // // printf("%d %d\n", r, round); // fflush(stdout); // /* Phase 1*/ // // EX: 3*3 Blocks. At iteration k (round r), send D(r,r) // // cal<<< 1, num_threads , sizeof(int)*B*(B+1)>>> (device_Dist, n, B, r, r, r, 1, 1); // cal<<< 1, num_threads , sizeof(int)*B*B>>> (device_Dist, n, B, r, r, r, 1, 1); // // hipDeviceSynchronize(); // /* Phase 2*/ // ////////////// WIDTH blocks (height == 1) ///////////////// // // if(r !=0){ // // dim3 nB(1,r); // // cal2<<< nB, num_threads , sizeof(int)*B*B*3>>>(device_Dist, n, B, r, r, 0, r, 1); // // } // // if(round -r-1 !=0){ // // dim3 nB(1,round - r - 1); // // cal2<<< nB, num_threads , sizeof(int)*B*B*3>>>(device_Dist, n, B, r, r, r + 1, round - r - 1, 1); // // } // // //////////// HEIGHT blocks (width == 1) ///////////// // // if(r!=0){ // // dim3 nB(r,1); // // cal2<<< nB, num_threads , sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, 0, r, 1, r); // // } // // if(round-r-1 !=0) { // // dim3 nB(round - r - 1,1); // // cal2<<< nB , num_threads, sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, r + 1, r, 1, round - r - 1); // // } // if(r !=0){ // dim3 nB(1,r); // cal3<<< nB, num_threads , sizeof(int)*B*B*3,streams[0]>>>(device_Dist, n, B, r, r, 0, r, 1); // } // if(round -r-1 !=0){ // dim3 nB(1,round - r - 1); // cal3<<< nB, num_threads , sizeof(int)*B*B*3,streams[1]>>>(device_Dist, n, B, r, r, r + 1, round - r - 1, 1); // } // //////////// HEIGHT blocks (width == 1) ///////////// // if(r!=0){ // dim3 nB(r,1); // cal3<<< nB, num_threads , sizeof(int)*B*B*3 ,streams[2]>>>(device_Dist, n, B, r, 0, r, 1, r); // } // if(round-r-1 !=0) { // dim3 nB(round - r - 1,1); // cal3<<< nB , num_threads, sizeof(int)*B*B*3,streams[3] >>>(device_Dist, n, B, r, r + 1, r, 1, round - r - 1); // } // // hipDeviceSynchronize(); // /* Phase 3*/ // => USE 2D block! 
// // block // // pivot block x y blocks // if(r != 0){ // dim3 nB(r,r); // cal3<<< nB, num_threads , sizeof(int)*B*B*3 ,streams[0] >>>(device_Dist, n, B, r, 0, 0, r, r); // } // if(r !=0 && (round-r-1) !=0){ // dim3 nB(r,(round-r-1)); // cal3<<< nB, num_threads , sizeof(int)*B*B*3 ,streams[1] >>>(device_Dist, n, B, r, 0, r + 1, round - r - 1, r); // } // if(r !=0 && round-r-1 !=0){ // dim3 nB((round-r-1),r); // cal3<<< nB ,num_threads , sizeof(int)*B*B*3 ,streams[2] >>>(device_Dist, n, B, r, r + 1, 0, r, round - r - 1); // } // if(round-r-1 !=0){ // dim3 nB_p3(round - r - 1, round - r - 1); // cal3<<< nB_p3, num_threads, sizeof(int)*B*B*3 ,streams[3] >>>(device_Dist, n, B, r, r + 1, r + 1, round - r - 1, round - r - 1); // } // // hipDeviceSynchronize(); // } // // hipMemcpy(Dist, device_Dist, n * n *sizeof(unsigned int), hipMemcpyDeviceToHost); // #ifdef TIME // hipEventRecord(stop); // hipEventSynchronize(stop); // WAIT until 'stop' complete. // float time; // hipEventElapsedTime(&time, start, stop); // // printf("Took %.8f milliseconds",time); // printf("Took %.8f seconds",time/1000); // #endif // hipMemcpy(Dist, device_Dist, n * n *sizeof(unsigned int), hipMemcpyDeviceToHost); // }
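// Illustrative host reference for the blocked GPU phases above: a plain O(n^3)
// Floyd-Warshall over the same flattened n*n distance array. Relaxations through a
// missing edge (>= INF) are skipped, matching the INF = (1 << 30) - 1 convention used in
// this file; the function name is a stand-in and is not referenced by the original code.
static void host_floyd_warshall_reference(int* dist, int num_v, int inf) {
    for (int k = 0; k < num_v; ++k) {
        for (int i = 0; i < num_v; ++i) {
            int dik = dist[i * num_v + k];
            if (dik >= inf) continue;                       // no path i -> k yet
            for (int j = 0; j < num_v; ++j) {
                int dkj = dist[k * num_v + j];
                if (dkj >= inf) continue;                   // no path k -> j yet
                if (dik + dkj < dist[i * num_v + j]) {
                    dist[i * num_v + j] = dik + dkj;        // relax through vertex k
                }
            }
        }
    }
}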
62b788f4219fa1c55fe1c6f0701f9c236d431073.cu
/* This version is "NO Streaming" version. 12/16 Try streaming! */ #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <cuda_profiler_api.h> #include <time.h> // #define TIME // #define CUDA_NVPROF const int BLOCKING_FACTOR = 32; // 32, 16, 8, 4, 2 const int INF = ((1 << 30) - 1); // Global var stored in Data Section. // const int V = 40010; void input(char* inFileName); void output(char* outFileName); void print_ans(int num_V, char* ans_file); void block_FW(int B); void block_FW_Large_N(int B); int ceil(int a, int b); // void cal(int n, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height); // Shared memory: For each block, each thread brings d[i][j] to s[i][j] ! // // extern __shared__ int S[]; __device__ inline int Addr(int matrixIdx, int i, int j, int N){ return( N*N*matrixIdx + i*N + j); } // W: width, H: height // __device__ inline int Addr2(int matrixIdx, int i, int j, int W, int H){ // return( W*H*matrixIdx + i*W + j); // } // TODO: Bank Conflict! // TRY pahse1: Let thread(Idx.x, Idx.y) access in diagonally! Same WARP NO bank conflict. // PHASE 1 : ONE Block do k iterations with B*B threads. // __global__ void cal(int* device_Dist, int n, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height){ __global__ void cal(int* device_Dist, int n, int B, int Round, int block_start_x, int block_start_y){ __shared__ int S[32*32]; int i = block_start_y*B + threadIdx.y; int j = block_start_x*B + threadIdx.x; if(i<n && j<n){ // S[ (i%B)*B + (j%B) ] = device_Dist[i*n + j]; S[ Addr(0,threadIdx.y, threadIdx.x, B) ] = device_Dist[Addr(0,i,j,n)]; // S[Addr(0, (i%B), (j%B), B)] = device_Dist[Addr(0,i,j,n)]; // S[ (i%B)*(B+1) + (j%(B+1)) ] = device_Dist[i*n + j]; // __syncthreads(); // This for-loop CANNOT be serialize! // for (int k = Round * B; k < (Round + 1) * B && k < n; ++k) { for (int iter = 0; iter<B && Round*B+iter <n; iter++){ __syncthreads(); if (S[Addr(0, threadIdx.y, iter, B)]+ S[Addr(0, iter, threadIdx.x, B)] < S[Addr(0,threadIdx.y, threadIdx.x, B)] ) { S[Addr(0,threadIdx.y, threadIdx.x, B)] = S[Addr(0, threadIdx.y, iter, B)]+ S[Addr(0, iter, threadIdx.x, B)]; } } device_Dist[Addr(0,i,j,n)] = S[Addr(0,threadIdx.y, threadIdx.x, B)]; }// end if(i<n && j<n ) } // Why cal3 don't need sync_threads() and can perform all correct? // Each thread do k calculation (O(k)) // __global__ void cal3(int* device_Dist, int n, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height){ __global__ void cal3(int* device_Dist, int n, int B, int Round, int block_start_x, int block_start_y){ __shared__ int S[32*32*3]; int i = block_start_y* B + blockIdx.y * B + threadIdx.y; int j = block_start_x* B + blockIdx.x * B + threadIdx.x; // S[Addr(1, threadIdx.y, ((Round*B + threadIdx.x)%B), B)] = device_Dist[Addr(0,i,(Round*B + threadIdx.x),n)]; // S[Addr(2, ((Round*B + threadIdx.y)%B), threadIdx.x, B)] = device_Dist[Addr(0,(Round*B + threadIdx.y),j,n)]; if(i<n && (Round*B + threadIdx.x) <n) S[Addr(1, threadIdx.y, ((Round*B + threadIdx.x)%B), B)] = device_Dist[Addr(0,i,(Round*B + threadIdx.x),n)]; if(j<n && (Round*B + threadIdx.y)<n) S[Addr(2, ((Round*B + threadIdx.y)%B), threadIdx.x, B)] = device_Dist[Addr(0,(Round*B + threadIdx.y),j,n)]; if(i<n && j<n){ // For each thread, calculate one edge. S[ Addr(0,threadIdx.y, threadIdx.x, B) ] = device_Dist[Addr(0,i,j,n)]; __syncthreads(); // This for-loop CANNOT be parallelize! // for (int k = Round * B; k < (Round + 1) * B && k < n; ++k) { /// KEY!! 
Don't USE % on K. for (int iter = 0; iter<B && Round*B+iter <n; iter++){ //k = Round * B; k < (Round + 1) * B && k < n; ++k) { // __syncthreads(); // if (S[Addr(1, (i%B), (k%B), B)]+ S[Addr(2, (k%B), (j%B), B)] < S[Addr(0, (i%B), (j%B), B)] ) { // S[Addr(0, (i%B), (j%B), B)] = S[Addr(1, (i%B), (k%B), B)]+ S[Addr(2, (k%B), (j%B), B)]; // } // i , k // k , j // i , j if (S[Addr(1, threadIdx.y, iter, B)]+ S[Addr(2, iter, threadIdx.x, B)] < S[Addr(0,threadIdx.y, threadIdx.x, B)] ) { S[Addr(0,threadIdx.y, threadIdx.x, B)] = S[Addr(1, threadIdx.y, iter, B)]+ S[Addr(2, iter, threadIdx.x, B)]; } // if (S[Addr(1, threadIdx.y, (k%B), B)]+ S[Addr(2, (k%B), threadIdx.x, B)] < S[Addr(0,threadIdx.y, threadIdx.x, B)] ) { // S[Addr(0,threadIdx.y, threadIdx.x, B)] = S[Addr(1, threadIdx.y, (k%B), B)]+ S[Addr(2, (k%B), threadIdx.x, B)]; // } } device_Dist[Addr(0,i,j,n)] = S[Addr(0,threadIdx.y, threadIdx.x, B)]; } } int n, m; // static int Dist[V][V]; int* Dist; int main(int argc, char* argv[]) { #ifdef TIME // struct timespec start, end, temp; struct timespec total_starttime; struct timespec total_temp; struct timespec start; struct timespec end; struct timespec temp; double IO_time=0.0; double Total_time = 0.0; clock_gettime(CLOCK_MONOTONIC, &total_starttime); clock_gettime(CLOCK_MONOTONIC, &start); #endif input(argv[1]); #ifdef TIME clock_gettime(CLOCK_MONOTONIC, &end); if ((end.tv_nsec - start.tv_nsec) < 0) { temp.tv_sec = end.tv_sec-start.tv_sec-1; temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec; } else { temp.tv_sec = end.tv_sec - start.tv_sec; temp.tv_nsec = end.tv_nsec - start.tv_nsec; } IO_time += temp.tv_sec + (double) temp.tv_nsec / 1000000000.0; #endif // printf("%f second on input\n", time_used); // we have num_v, num_e, adj_matrix (Dist[V][V]) now // int B = 512; // Note: Since B*B threads, maximum B : 32 (MAX 1024 threads per block) int B; // B = 32; // 16: faster .(WHY?) communication. MAX: 32 B = BLOCKING_FACTOR; // B = 7; // int B = 4; // blocking factor. // if(n>=5000) block_FW_Large_N(B); // else block_FW(B); block_FW(B); // if(n>=5000) block_FW_Large_N(16); // else block_FW(32); #ifdef TIME clock_gettime(CLOCK_MONOTONIC, &start); #endif output(argv[2]); #ifdef TIME clock_gettime(CLOCK_MONOTONIC, &end); // IO Time if ((end.tv_nsec - start.tv_nsec) < 0) { temp.tv_sec = end.tv_sec-start.tv_sec-1; temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec; } else { temp.tv_sec = end.tv_sec - start.tv_sec; temp.tv_nsec = end.tv_nsec - start.tv_nsec; } // Total Time if ((end.tv_nsec - total_starttime.tv_nsec) < 0) { total_temp.tv_sec = end.tv_sec-total_starttime.tv_sec-1; total_temp.tv_nsec = 1000000000 + end.tv_nsec - total_starttime.tv_nsec; } else { total_temp.tv_sec = end.tv_sec - total_starttime.tv_sec; total_temp.tv_nsec = end.tv_nsec - total_starttime.tv_nsec; } IO_time += temp.tv_sec + (double) temp.tv_nsec / 1000000000.0; Total_time = total_temp.tv_sec + (double) total_temp.tv_nsec / 1000000000.0; #endif #ifdef TIME printf("IO Time: %.8f seconds\n", IO_time); printf("Total Time: %.8f seconds\n",Total_time); #endif // Communicatoin time: (Memcpy H2D, D2H). 
// printf("Computation Time: %.8f\n",); //GPU Kernel // print_ans(n); // print_ans(n, argv[3]); // output(argv[2]); return 0; } void input(char* infile) { FILE* file = fopen(infile, "rb"); fread(&n, sizeof(int), 1, file); // n = num_vertices fread(&m, sizeof(int), 1, file); // m = num_edges printf("V: %d, E: %d\n",n,m); Dist = (int*) malloc(sizeof(int)*n*n); // Initialize adjacency matrix for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { if (i == j) { Dist[i*n+j] = 0; // Dist[i][j] = 0; } else { Dist[i*n+j] = INF; // Dist[i][j] = INF; } } } // Sequentially read input edges and fill them into adj matrix. int pair[3]; for (int i = 0; i < m; ++i) { fread(pair, sizeof(int), 3, file); // Dist[pair[0]][pair[1]] = pair[2]; Dist[ pair[0]*n+ pair[1]] = pair[2]; } fclose(file); } void print_ans(int num_V, char* ans_file){ FILE* file = fopen(ans_file, "rb"); int* Ans = (int*)malloc(sizeof(int)*n*n); fread(Ans, sizeof(int), n*n, file); for(int i=0; i<num_V*num_V; i++){ if(Dist[i] != Ans[i]){ printf("Wrong at offset %d, expected %d but get %d\n", i*4, Ans[i], Dist[i]); printf("Fron %d to %d , cost: %d\n", (i/n), (i%n), Ans[i] ); } // printf("offset %d val %d, ans: %d\n", i*4, Dist[i], Ans[i]); } } void output(char* outFileName) { FILE* outfile = fopen(outFileName, "w"); for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { if (Dist[i*n+j] >= INF) Dist[i*n+j] = INF; } fwrite(Dist+i*n, sizeof(int), n, outfile); } fclose(outfile); } int ceil(int a, int b) { return (a + b - 1) / b; } // 1204: Idea1 : one stream with 9 serialize kernel launch? // memory to pass to GPU: B, r, r, r, 1, 1. ALL constant! No memory copy. void block_FW(int B) { // printf("Blocking factor: %d (num of pixel(adj entries) in a Block)\n",B); // printf(" %d * %d block\n",B,B); int round = ceil(n, B); // cudaMemcpy(); int *device_Dist; // cudaMalloc(&device_Dist, V * V* sizeof(unsigned int)); cudaMalloc(&device_Dist, n * n* sizeof(unsigned int)); #ifdef TIME cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); #endif // cudaMemcpy(...) copy source image to device (mask matrix if necessary) cudaMemcpy(device_Dist, Dist, n* n*sizeof(unsigned int), cudaMemcpyHostToDevice); #ifdef TIME cudaEventRecord(stop); cudaEventSynchronize(stop); // WAIT until 'stop' complete. float Comm_time; // H2D cudaEventElapsedTime(&Comm_time, start, stop); // printf("Took %.8f milliseconds on computation.",time); #endif // printf("Initial matrix: \n"); // print_ans(n); // 2*2 threadIdx.x from 0 to 1, Idx.y from 0 to 1 dim3 num_threads(B,B); #ifdef TIME cudaEvent_t compt_start, compt_stop; cudaEventCreate(&compt_start); cudaEventCreate(&compt_stop); cudaEventRecord(compt_start); #endif #ifdef CUDA_NVPROF cudaProfilerStart(); #endif for (int r = 0; r < round; ++r) { // printf("%d %d\n", r, round); fflush(stdout); /* Phase 1*/ // EX: 3*3 Blocks. 
At iteration k (round r), send D(r,r) // cal<<< 1, num_threads , sizeof(int)*B*(B+1)>>> (device_Dist, n, B, r, r, r, 1, 1); // cal<<< 1, num_threads , sizeof(int)*B*B*3>>> (device_Dist, n, B, r, r, r, 1, 1); cal<<< 1, num_threads , sizeof(int)*B*B*3>>> (device_Dist, n, B, r, r, r); /* Phase 2*/ // cudaProfilerStart(); if(r !=0){ dim3 nB(1,r); // cal3<<< nB, num_threads , sizeof(int)*B*B*3>>>(device_Dist, n, B, r, r, 0, r, 1); cal3<<< nB, num_threads , sizeof(int)*B*B*3>>>(device_Dist, n, B, r, r, 0); } if(round -r-1 !=0){ dim3 nB(1,round - r - 1); // cal3<<< nB, num_threads , sizeof(int)*B*B*3>>>(device_Dist, n, B, r, r, r + 1, round - r - 1, 1); cal3<<< nB, num_threads , sizeof(int)*B*B*3>>>(device_Dist, n, B, r, r, r + 1); } //////////// HEIGHT blocks (width == 1) ///////////// if(r!=0){ dim3 nB(r,1); // cal3<<< nB, num_threads , sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, 0, r, 1, r); cal3<<< nB, num_threads , sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, 0, r); } if(round-r-1 !=0) { dim3 nB(round - r - 1,1); // cal3<<< nB , num_threads, sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, r + 1, r, 1, round - r - 1); cal3<<< nB , num_threads, sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, r + 1, r); } // cudaProfilerStop(); /* Phase 3*/ // => USE 2D block! // 計算其他的 block // 和pivot block 在 x 軸和 y 軸都沒有交集的 blocks! // cudaProfilerStart(); if(r != 0){ dim3 nB(r,r); // cal3<<< nB, num_threads , sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, 0, 0, r, r); cal3<<< nB, num_threads , sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, 0, 0); } if(r !=0 && (round-r-1) !=0){ dim3 nB(r,(round-r-1)); // cal3<<< nB, num_threads , sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, 0, r + 1, round - r - 1, r); cal3<<< nB, num_threads , sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, 0, r + 1); } if(r !=0 && round-r-1 !=0){ dim3 nB((round-r-1),r); // cal3<<< nB ,num_threads , sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, r + 1, 0, r, round - r - 1); cal3<<< nB ,num_threads , sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, r + 1, 0); } if(round-r-1 !=0){ dim3 nB_p3(round - r - 1, round - r - 1); // cal3<<< nB_p3, num_threads, sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, r + 1, r + 1, round - r - 1, round - r - 1); cal3<<< nB_p3, num_threads, sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, r + 1, r + 1); } // cudaProfilerStop(); } #ifdef CUDA_NVPROF cudaProfilerStop(); #endif #ifdef TIME cudaEventRecord(compt_stop); cudaEventSynchronize(compt_stop); // WAIT until 'stop' complete. float compt_time; cudaEventElapsedTime(&compt_time, compt_start, compt_stop); printf("Computation Time: %.8f seconds\n",compt_time/1000); #endif #ifdef TIME cudaEventRecord(start); #endif cudaMemcpy(Dist, device_Dist, n * n *sizeof(unsigned int), cudaMemcpyDeviceToHost); #ifdef TIME cudaEventRecord(stop); cudaEventSynchronize(stop); // WAIT until 'stop' complete. float D2H_Comm_time; cudaEventElapsedTime(&D2H_Comm_time, start, stop); printf("Memory Copy Time: %.8f seconds\n", (D2H_Comm_time + Comm_time ) /1000); #endif } // // For Large n.: Don't use Synchronize. // // n > 5000 // void block_FW_Large_N(int B) { // printf("Blocking factor: %d (num of pixel(adj entries) in a Block)\n",B); // printf(" %d * %d block\n",B,B); // int round = ceil(n, B); // // #ifdef TIME // // cudaEvent_t start, stop; // // cudaEventCreate(&start); // // cudaEventCreate(&stop); // // cudaEventRecord(start); // // #endif // // cudaMemcpy(); // int *device_Dist; // // cudaMalloc(&device_Dist, V * V* sizeof(unsigned int)); // cudaMalloc(&device_Dist, n * n* sizeof(unsigned int)); // // cudaMemcpy(...) 
copy source image to device (mask matrix if necessary) // cudaMemcpy(device_Dist, Dist, n* n*sizeof(unsigned int), cudaMemcpyHostToDevice); // #ifdef TIME // cudaEvent_t start, stop; // cudaEventCreate(&start); // cudaEventCreate(&stop); // cudaEventRecord(start); // #endif // // printf("Initial matrix: \n"); // // print_ans(n); // // 2*2 threadIdx.x from 0 to 1, Idx.y from 0 to 1 // dim3 num_threads(B,B); // /////// CREATE 4 STREAMS /////////// // const int num_streams = 4; // cudaStream_t streams[num_streams]; // float *data[num_streams]; // for (int i = 0; i < num_streams; i++) { // cudaStreamCreate(&streams[i]); // } // // cudaDeviceReset(); // for (int r = 0; r < round; ++r) { // // printf("%d %d\n", r, round); // fflush(stdout); // /* Phase 1*/ // // EX: 3*3 Blocks. At iteration k (round r), send D(r,r) // // cal<<< 1, num_threads , sizeof(int)*B*(B+1)>>> (device_Dist, n, B, r, r, r, 1, 1); // cal<<< 1, num_threads , sizeof(int)*B*B>>> (device_Dist, n, B, r, r, r, 1, 1); // // cudaDeviceSynchronize(); // /* Phase 2*/ // ////////////// WIDTH blocks (height == 1) ///////////////// // // if(r !=0){ // // dim3 nB(1,r); // // cal2<<< nB, num_threads , sizeof(int)*B*B*3>>>(device_Dist, n, B, r, r, 0, r, 1); // // } // // if(round -r-1 !=0){ // // dim3 nB(1,round - r - 1); // // cal2<<< nB, num_threads , sizeof(int)*B*B*3>>>(device_Dist, n, B, r, r, r + 1, round - r - 1, 1); // // } // // //////////// HEIGHT blocks (width == 1) ///////////// // // if(r!=0){ // // dim3 nB(r,1); // // cal2<<< nB, num_threads , sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, 0, r, 1, r); // // } // // if(round-r-1 !=0) { // // dim3 nB(round - r - 1,1); // // cal2<<< nB , num_threads, sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, r + 1, r, 1, round - r - 1); // // } // if(r !=0){ // dim3 nB(1,r); // cal3<<< nB, num_threads , sizeof(int)*B*B*3,streams[0]>>>(device_Dist, n, B, r, r, 0, r, 1); // } // if(round -r-1 !=0){ // dim3 nB(1,round - r - 1); // cal3<<< nB, num_threads , sizeof(int)*B*B*3,streams[1]>>>(device_Dist, n, B, r, r, r + 1, round - r - 1, 1); // } // //////////// HEIGHT blocks (width == 1) ///////////// // if(r!=0){ // dim3 nB(r,1); // cal3<<< nB, num_threads , sizeof(int)*B*B*3 ,streams[2]>>>(device_Dist, n, B, r, 0, r, 1, r); // } // if(round-r-1 !=0) { // dim3 nB(round - r - 1,1); // cal3<<< nB , num_threads, sizeof(int)*B*B*3,streams[3] >>>(device_Dist, n, B, r, r + 1, r, 1, round - r - 1); // } // // cudaDeviceSynchronize(); // /* Phase 3*/ // => USE 2D block! // // 計算其他的 block // // 和pivot block 在 x 軸和 y 軸都沒有交集的 blocks! // if(r != 0){ // dim3 nB(r,r); // cal3<<< nB, num_threads , sizeof(int)*B*B*3 ,streams[0] >>>(device_Dist, n, B, r, 0, 0, r, r); // } // if(r !=0 && (round-r-1) !=0){ // dim3 nB(r,(round-r-1)); // cal3<<< nB, num_threads , sizeof(int)*B*B*3 ,streams[1] >>>(device_Dist, n, B, r, 0, r + 1, round - r - 1, r); // } // if(r !=0 && round-r-1 !=0){ // dim3 nB((round-r-1),r); // cal3<<< nB ,num_threads , sizeof(int)*B*B*3 ,streams[2] >>>(device_Dist, n, B, r, r + 1, 0, r, round - r - 1); // } // if(round-r-1 !=0){ // dim3 nB_p3(round - r - 1, round - r - 1); // cal3<<< nB_p3, num_threads, sizeof(int)*B*B*3 ,streams[3] >>>(device_Dist, n, B, r, r + 1, r + 1, round - r - 1, round - r - 1); // } // // cudaDeviceSynchronize(); // } // // cudaMemcpy(Dist, device_Dist, n * n *sizeof(unsigned int), cudaMemcpyDeviceToHost); // #ifdef TIME // cudaEventRecord(stop); // cudaEventSynchronize(stop); // WAIT until 'stop' complete. 
// float time; // cudaEventElapsedTime(&time, start, stop); // // printf("Took %.8f milliseconds",time); // printf("Took %.8f seconds",time/1000); // #endif // cudaMemcpy(Dist, device_Dist, n * n *sizeof(unsigned int), cudaMemcpyDeviceToHost); // }
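// Two observations about the launches in block_FW above, with a conventional debugging
// aid sketched below. First, every cal/cal3 launch passes sizeof(int)*B*B*3 bytes of
// dynamic shared memory, but both kernels declare statically sized __shared__ arrays, so
// that argument appears to go unused. Second, none of the launches are error-checked; a
// macro such as the one below (a sketch, the name is arbitrary) placed after each launch
// during debugging would surface bad grid dimensions or runtime faults. The
// cudaDeviceSynchronize() call serializes the phases, so it is for debugging only.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Report both launch-time errors (bad grid / shared-memory size) and asynchronous
// execution errors after a kernel launch.
#define CHECK_LAUNCH()                                                      \
    do {                                                                    \
        cudaError_t err_ = cudaGetLastError();                              \
        if (err_ == cudaSuccess) err_ = cudaDeviceSynchronize();            \
        if (err_ != cudaSuccess) {                                          \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                     \
                    cudaGetErrorString(err_), __FILE__, __LINE__);          \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)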
7f39baa560fc20845e65003847d0475e21ee39c7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cfloat> #include "caffe2/core/context_gpu.h" #include "softmax_with_loss_op.h" namespace caffe2 { namespace { __global__ void LabelCrossEntropyKernel( const int N, const int D, const float* Pdata, const int* labeldata, const float* weights, float* Ydata) { CUDA_1D_KERNEL_LOOP(i, N) { CUDA_KERNEL_ASSERT(labeldata[i] >= 0 && labeldata[i] < D); float weight = weights ? weights[i] : 1.0; Ydata[i] = -logf(max(Pdata[i * D + labeldata[i]], FLT_MIN)) * weight; } } __global__ void LabelCrossEntropyGradientKernel( const int N, const int D, const float* Pdata, const int* labeldata, float* dXdata, const float *weights) { if (weights == NULL) { CUDA_1D_KERNEL_LOOP(i, N) { int idx = i * D + labeldata[i]; dXdata[idx] = Pdata[idx] - 1.; } } else { CUDA_1D_KERNEL_LOOP(i, N) { int idx = i * D + labeldata[i]; dXdata[idx] = Pdata[idx] - 1.; float weight = weights[i]; for(int d=0; d<D; d++) { int idx = i * D + d; dXdata[idx] *= weight; } } } } __global__ void RowMaxKernel(const int num, const int D, const float* data, float* out) { CUDA_1D_KERNEL_LOOP(index, num) { float maxval = -FLT_MAX; for (int d = 0; d < D; ++d) { maxval = max(data[index * D + d], maxval); } out[index] = maxval; } } __global__ void SpatialSoftmaxKernel(const int num, const int D, const int W, const int H, const float* Xdata, float* Pdata) { CUDA_1D_KERNEL_LOOP(index, num * W * H) { int x = index % W; int y = (index / W) % H; int i = index / W / H; // Subtract max on each cell for numerical reasons float max_val = -FLT_MAX; for(int c = 0; c < D; ++c) { int idx = i * (H * W * D) + c * (H * W) + y * W + x; max_val = max(max_val, Xdata[idx]); } // Exponentiate float expsum = 0.0f; for(int c = 0; c < D; ++c) { int idx = i * (H * W * D) + c * (H * W) + y * W + x; float expx = exp(Xdata[idx] - max_val); Pdata[idx] = expx; expsum += expx; } // Normalize for(int c=0; c<D; ++c) { int idx = i * (H * W * D) + c * (H * W) + y * W + x; Pdata[idx] /= expsum; } } } #define DONTCARE (-1) __global__ void SpatialCrossEntropyLossKernel(const int N, const int D, const int W, const int H, const float* Pdata, const int* label_data, const float *weights, float* loss_data, float* weight_data) { CUDA_1D_KERNEL_LOOP(index, N * W * H) { int x = index % W; int y = (index / W) % H; int i = index / W / H; const int label = static_cast<int>(label_data[index]); if (label != DONTCARE) { float weight = (weights == NULL ? 
1.0 : weights[index]); loss_data[index] = -log(max( Pdata[i * W * H * D + label * W * H + y * W + x], 1e-20f)) * weight; weight_data[index] = weight; } else { loss_data[index] = 0; weight_data[index] = 0; } } } __global__ void SpatialSoftmaxLossGradientKernel(const int N, const int D, const int W, const int H, const int* label_data, const float* weights, float* dX_data, float* weights_) { CUDA_1D_KERNEL_LOOP(index, N * W * H) { int x = index % W; int y = (index / W) % H; int i = index / W / H; const int label = static_cast<int>(label_data[index]); if (label != DONTCARE) { int data_idx = i * (H * W * D) + label * (H * W) + y * W + x; dX_data[data_idx] -= 1.0; if (weights != NULL) { float weight = weights[index]; for (int c = 0; c < D; ++c) { int data_idx = i * (H * W * D) + c * (H * W) + y * W + x; dX_data[data_idx] *= weight; } weights_[index] = weight; } else { weights_[index] = 1.0; } } else { // Ignore-label, so set all gradients for this positions // tp zero for (int c = 0; c < D; ++c) { int data_idx = i * (H * W * D) + c * (H * W) + y * W + x; dX_data[data_idx] = 0.0; } weights_[index] = 0.0; } } } __global__ void SoftmaxNormalizeKernel( const int nthreads, const int D, const float* Pdata, const float* scales, float* out) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index / D; out[index] = Pdata[index] / scales[n]; } } void Softmax(const int N, const int D, const float* logits, const int* labels, const float* sum_multiplier, float* scales, float* probs, CUDAContext* context) { const int size = N * D; hipLaunchKernelGGL(( RowMaxKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), N, D, logits, scales); // Put the intermediate result X - max(X) into Y context->Copy<float, CUDAContext, CUDAContext>(size, logits, probs); // Subtract the scale math::Gemm<float, CUDAContext>(CblasNoTrans, CblasNoTrans, N, D, 1, -1, scales, sum_multiplier, 1, probs, context); // Exponentiation math::Exp<float, CUDAContext>(size, probs, probs, context); // Sum exponentiated values math::Gemv<float, CUDAContext>(CblasNoTrans, N, D, 1, probs, sum_multiplier, 0, scales, context); // Normalize hipLaunchKernelGGL(( SoftmaxNormalizeKernel), dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), size, D, probs, scales, probs); } } // namespace template<> bool SoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Logits auto& T = Input(1); // Labels / targets auto* P = Output(0); // Probabilities from softmax auto* avg_loss = Output(1); // Average loss const float* weights = (InputSize() > 2 ? 
Input(2).data<float>() : NULL); int N = X.dim32(0); int D = X.dim32(1); P->ResizeLike(X); total_weight_ptr_.Resize(1); if (!spatial_mode_) { DCHECK_EQ(X.ndim(), 2); DCHECK((T.ndim() == 1) || (T.ndim() == 2 && T.dim32(1) == 1)); DCHECK_EQ(T.dim32(0), N); avg_loss->Resize(vector<TIndex>()); if (losses_.size() != N) { losses_.Resize(N); } if (sum_multiplier_.size() != D) { sum_multiplier_.Resize(D); math::Set<float, CUDAContext>( D, 1.f, sum_multiplier_.mutable_data<float>(), &context_); } Softmax(N, D, X.data<float>(), T.data<int>(), sum_multiplier_.data<float>(), losses_.mutable_data<float>(), P->mutable_data<float>(), &context_); // Compute label xent loss per example hipLaunchKernelGGL(( LabelCrossEntropyKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, P->data<float>(), T.data<int>(), weights, losses_.mutable_data<float>()); float total_weight = N; if (weights) { // Sum weights math::Sum<float, CUDAContext>(N, weights, total_weight_ptr_.mutable_data<float>(), &context_); hipMemcpyAsync(&total_weight, total_weight_ptr_.data<float>(), sizeof(float), hipMemcpyDeviceToHost, context_.cuda_stream()); } // Sum of all losses float* avg_loss_data = avg_loss->mutable_data<float>(); math::Sum<float, CUDAContext>( losses_.size(), losses_.data<float>(), avg_loss_data, &context_); // Average of input batch size math::Scale<float, CUDAContext>( 1, scale_ / total_weight, avg_loss_data, avg_loss_data, &context_); } else { DCHECK_EQ(X.ndim(), 4); DCHECK_EQ(T.ndim(), 3); DCHECK_EQ(T.dim32(0), N); int H = X.dim32(2); int W = X.dim32(3); if (losses_.size() != N * W * H) { losses_.Resize(N * W * H); } if (weights_.size() != N * W * H) { weights_.Resize(N * W * H); } const float* Xdata = X.data<float>(); float* Pdata = P->mutable_data<float>(); // Softmax for each x,y location hipLaunchKernelGGL(( SpatialSoftmaxKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, W, H, Xdata, Pdata); // Cross entropy avg_loss->Resize(vector<TIndex>()); float* avg_loss_data = avg_loss->mutable_data<float>(); math::Set<float, CUDAContext>(1, 0.0f, avg_loss_data, &context_); const int* label_data = T.data<int>(); math::Set<float, CUDAContext>( 1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_); hipLaunchKernelGGL(( SpatialCrossEntropyLossKernel), dim3(CAFFE_GET_BLOCKS(N * W * H)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, W, H, P->data<float>(), label_data, weights, losses_.mutable_data<float>(), weights_.mutable_data<float>()); // Somewhat awkward scalar passing from device to host float h_total_weight; math::Sum<float, CUDAContext>( weights_.size(), weights_.data<float>(), total_weight_ptr_.mutable_data<float>(), &context_); hipMemcpyAsync(&h_total_weight, total_weight_ptr_.data<float>(), sizeof(float), hipMemcpyDeviceToHost, context_.cuda_stream()); math::Sum<float, CUDAContext>( losses_.size(), losses_.data<float>(), avg_loss_data, &context_); // Final scaling if (h_total_weight > 0) { math::Scale<float, CUDAContext>( 1, scale_ / h_total_weight, avg_loss_data, avg_loss_data, &context_); } } return true; } template<> bool SoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Logits auto& T = Input(1); // Labels / targets // Input(2) is weights, if given auto& P = Input(InputSize() - 2); // Probabilities from softmax auto& d_avg_loss = Input(InputSize() - 1); // Gradient w.r.t. avg loss const float* weights = (InputSize() > 4 ? 
Input(2).data<float>() : NULL); auto* dX = Output(0); int N = X.dim32(0); int D = X.dim32(1); dX->ResizeLike(X); total_weight_ptr_.Resize(1); if (!spatial_mode_) { DCHECK_EQ(X.ndim(), 2); DCHECK((T.ndim() == 1) || (T.ndim() == 2 && T.dim32(1) == 1)); DCHECK_EQ(T.dim32(0), N); // Copy softmax probabilities into dX context_.Copy<float, CUDAContext, CUDAContext>( P.size(), P.data<float>(), dX->mutable_data<float>()); // Subtract 1 from labeled positions hipLaunchKernelGGL(( LabelCrossEntropyGradientKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, P.data<float>(), T.data<int>(), dX->mutable_data<float>(), weights); float total_weight = N; if (weights) { // Sum weights math::Sum<float, CUDAContext>( N, weights, total_weight_ptr_.mutable_data<float>(), &context_); hipMemcpyAsync(&total_weight, total_weight_ptr_.data<float>(), sizeof(float), hipMemcpyDeviceToHost, context_.cuda_stream()); } // Scale by d_avg_loss / N math::Scale<float, CUDAContext>( dX->size(), scale_ / total_weight, dX->data<float>(), dX->mutable_data<float>(), &context_); math::Scale<float, CUDAContext>( dX->size(), d_avg_loss.data<float>(), dX->data<float>(), dX->mutable_data<float>(), &context_); } else { // Spatial mode, compute softmax for each x, y location DCHECK_EQ(X.ndim(), 4); DCHECK_EQ(T.ndim(), 3); int H = X.dim32(2); int W = X.dim32(3); dX->ResizeLike(X); if (weights_.size() != N * W * H) { weights_.Resize(N * W * H); } const float* Pdata = P.data<float>(); float* dX_data = dX->mutable_data<float>(); const int* label_data = T.data<int>(); const float* d_avg_loss_data = d_avg_loss.data<float>(); // Copy softmax probabilities into dX. All but the neuron // corresponding to the correct label has gradient equaling e(x_j) // which is the probability under softmax. context_.Copy<float, CUDAContext, CUDAContext>(P.size(), Pdata, dX_data); math::Set<float, CUDAContext>( 1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_); hipLaunchKernelGGL(( SpatialSoftmaxLossGradientKernel), dim3(CAFFE_GET_BLOCKS(N * W * H)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, W, H, label_data, weights, dX_data, weights_.mutable_data<float>()); math::Sum<float, CUDAContext>( weights_.size(), weights_.data<float>(), total_weight_ptr_.mutable_data<float>(), &context_); // Somewhat awkward scalar passing from device to host float h_total_weight; hipMemcpyAsync(&h_total_weight, total_weight_ptr_.data<float>(), sizeof(float), hipMemcpyDeviceToHost, context_.cuda_stream()); // Final scaling if (h_total_weight > 0) { math::Scale<float, CUDAContext>( dX->size(), scale_ / h_total_weight, dX->data<float>(), dX->mutable_data<float>(), &context_); } math::Scale<float, CUDAContext>( dX->size(), d_avg_loss.data<float>(), dX->data<float>(), dX->mutable_data<float>(), &context_); } return true; } namespace { REGISTER_CUDA_OPERATOR(SoftmaxWithLoss, SoftmaxWithLossOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(SoftmaxWithLossGradient, SoftmaxWithLossGradientOp<float, CUDAContext>); } // namespace } // namespace caffe2
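// Host reference for the per-row computation performed by the Softmax() helper and
// LabelCrossEntropyKernel above: subtract the row maximum for numerical stability,
// exponentiate, normalize, and take the clamped negative log probability of the labeled
// class (the kernels clamp with FLT_MIN / 1e-20f; per-example weights are omitted here).
// The function name is a stand-in for illustration only.
#include <algorithm>
#include <cmath>
#include <vector>

static float row_softmax_xent_reference(const float* logits, int D, int label) {
    float max_val = *std::max_element(logits, logits + D);   // subtract the row max
    std::vector<float> p(D);
    float sum = 0.f;
    for (int d = 0; d < D; ++d) {
        p[d] = std::exp(logits[d] - max_val);
        sum += p[d];
    }
    float prob = p[label] / sum;                              // normalized probability
    return -std::log(std::max(prob, 1e-20f));                 // clamped negative log-likelihood
}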
7f39baa560fc20845e65003847d0475e21ee39c7.cu
#include <cfloat> #include "caffe2/core/context_gpu.h" #include "softmax_with_loss_op.h" namespace caffe2 { namespace { __global__ void LabelCrossEntropyKernel( const int N, const int D, const float* Pdata, const int* labeldata, const float* weights, float* Ydata) { CUDA_1D_KERNEL_LOOP(i, N) { CUDA_KERNEL_ASSERT(labeldata[i] >= 0 && labeldata[i] < D); float weight = weights ? weights[i] : 1.0; Ydata[i] = -logf(max(Pdata[i * D + labeldata[i]], FLT_MIN)) * weight; } } __global__ void LabelCrossEntropyGradientKernel( const int N, const int D, const float* Pdata, const int* labeldata, float* dXdata, const float *weights) { if (weights == NULL) { CUDA_1D_KERNEL_LOOP(i, N) { int idx = i * D + labeldata[i]; dXdata[idx] = Pdata[idx] - 1.; } } else { CUDA_1D_KERNEL_LOOP(i, N) { int idx = i * D + labeldata[i]; dXdata[idx] = Pdata[idx] - 1.; float weight = weights[i]; for(int d=0; d<D; d++) { int idx = i * D + d; dXdata[idx] *= weight; } } } } __global__ void RowMaxKernel(const int num, const int D, const float* data, float* out) { CUDA_1D_KERNEL_LOOP(index, num) { float maxval = -FLT_MAX; for (int d = 0; d < D; ++d) { maxval = max(data[index * D + d], maxval); } out[index] = maxval; } } __global__ void SpatialSoftmaxKernel(const int num, const int D, const int W, const int H, const float* Xdata, float* Pdata) { CUDA_1D_KERNEL_LOOP(index, num * W * H) { int x = index % W; int y = (index / W) % H; int i = index / W / H; // Subtract max on each cell for numerical reasons float max_val = -FLT_MAX; for(int c = 0; c < D; ++c) { int idx = i * (H * W * D) + c * (H * W) + y * W + x; max_val = max(max_val, Xdata[idx]); } // Exponentiate float expsum = 0.0f; for(int c = 0; c < D; ++c) { int idx = i * (H * W * D) + c * (H * W) + y * W + x; float expx = exp(Xdata[idx] - max_val); Pdata[idx] = expx; expsum += expx; } // Normalize for(int c=0; c<D; ++c) { int idx = i * (H * W * D) + c * (H * W) + y * W + x; Pdata[idx] /= expsum; } } } #define DONTCARE (-1) __global__ void SpatialCrossEntropyLossKernel(const int N, const int D, const int W, const int H, const float* Pdata, const int* label_data, const float *weights, float* loss_data, float* weight_data) { CUDA_1D_KERNEL_LOOP(index, N * W * H) { int x = index % W; int y = (index / W) % H; int i = index / W / H; const int label = static_cast<int>(label_data[index]); if (label != DONTCARE) { float weight = (weights == NULL ? 
1.0 : weights[index]); loss_data[index] = -log(max( Pdata[i * W * H * D + label * W * H + y * W + x], 1e-20f)) * weight; weight_data[index] = weight; } else { loss_data[index] = 0; weight_data[index] = 0; } } } __global__ void SpatialSoftmaxLossGradientKernel(const int N, const int D, const int W, const int H, const int* label_data, const float* weights, float* dX_data, float* weights_) { CUDA_1D_KERNEL_LOOP(index, N * W * H) { int x = index % W; int y = (index / W) % H; int i = index / W / H; const int label = static_cast<int>(label_data[index]); if (label != DONTCARE) { int data_idx = i * (H * W * D) + label * (H * W) + y * W + x; dX_data[data_idx] -= 1.0; if (weights != NULL) { float weight = weights[index]; for (int c = 0; c < D; ++c) { int data_idx = i * (H * W * D) + c * (H * W) + y * W + x; dX_data[data_idx] *= weight; } weights_[index] = weight; } else { weights_[index] = 1.0; } } else { // Ignore-label, so set all gradients for this positions // tp zero for (int c = 0; c < D; ++c) { int data_idx = i * (H * W * D) + c * (H * W) + y * W + x; dX_data[data_idx] = 0.0; } weights_[index] = 0.0; } } } __global__ void SoftmaxNormalizeKernel( const int nthreads, const int D, const float* Pdata, const float* scales, float* out) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index / D; out[index] = Pdata[index] / scales[n]; } } void Softmax(const int N, const int D, const float* logits, const int* labels, const float* sum_multiplier, float* scales, float* probs, CUDAContext* context) { const int size = N * D; RowMaxKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(N, D, logits, scales); // Put the intermediate result X - max(X) into Y context->Copy<float, CUDAContext, CUDAContext>(size, logits, probs); // Subtract the scale math::Gemm<float, CUDAContext>(CblasNoTrans, CblasNoTrans, N, D, 1, -1, scales, sum_multiplier, 1, probs, context); // Exponentiation math::Exp<float, CUDAContext>(size, probs, probs, context); // Sum exponentiated values math::Gemv<float, CUDAContext>(CblasNoTrans, N, D, 1, probs, sum_multiplier, 0, scales, context); // Normalize SoftmaxNormalizeKernel<<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( size, D, probs, scales, probs); } } // namespace template<> bool SoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Logits auto& T = Input(1); // Labels / targets auto* P = Output(0); // Probabilities from softmax auto* avg_loss = Output(1); // Average loss const float* weights = (InputSize() > 2 ? 
Input(2).data<float>() : NULL); int N = X.dim32(0); int D = X.dim32(1); P->ResizeLike(X); total_weight_ptr_.Resize(1); if (!spatial_mode_) { DCHECK_EQ(X.ndim(), 2); DCHECK((T.ndim() == 1) || (T.ndim() == 2 && T.dim32(1) == 1)); DCHECK_EQ(T.dim32(0), N); avg_loss->Resize(vector<TIndex>()); if (losses_.size() != N) { losses_.Resize(N); } if (sum_multiplier_.size() != D) { sum_multiplier_.Resize(D); math::Set<float, CUDAContext>( D, 1.f, sum_multiplier_.mutable_data<float>(), &context_); } Softmax(N, D, X.data<float>(), T.data<int>(), sum_multiplier_.data<float>(), losses_.mutable_data<float>(), P->mutable_data<float>(), &context_); // Compute label xent loss per example LabelCrossEntropyKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, P->data<float>(), T.data<int>(), weights, losses_.mutable_data<float>()); float total_weight = N; if (weights) { // Sum weights math::Sum<float, CUDAContext>(N, weights, total_weight_ptr_.mutable_data<float>(), &context_); cudaMemcpyAsync(&total_weight, total_weight_ptr_.data<float>(), sizeof(float), cudaMemcpyDeviceToHost, context_.cuda_stream()); } // Sum of all losses float* avg_loss_data = avg_loss->mutable_data<float>(); math::Sum<float, CUDAContext>( losses_.size(), losses_.data<float>(), avg_loss_data, &context_); // Average of input batch size math::Scale<float, CUDAContext>( 1, scale_ / total_weight, avg_loss_data, avg_loss_data, &context_); } else { DCHECK_EQ(X.ndim(), 4); DCHECK_EQ(T.ndim(), 3); DCHECK_EQ(T.dim32(0), N); int H = X.dim32(2); int W = X.dim32(3); if (losses_.size() != N * W * H) { losses_.Resize(N * W * H); } if (weights_.size() != N * W * H) { weights_.Resize(N * W * H); } const float* Xdata = X.data<float>(); float* Pdata = P->mutable_data<float>(); // Softmax for each x,y location SpatialSoftmaxKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, W, H, Xdata, Pdata); // Cross entropy avg_loss->Resize(vector<TIndex>()); float* avg_loss_data = avg_loss->mutable_data<float>(); math::Set<float, CUDAContext>(1, 0.0f, avg_loss_data, &context_); const int* label_data = T.data<int>(); math::Set<float, CUDAContext>( 1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_); SpatialCrossEntropyLossKernel<<<CAFFE_GET_BLOCKS(N * W * H), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, W, H, P->data<float>(), label_data, weights, losses_.mutable_data<float>(), weights_.mutable_data<float>()); // Somewhat awkward scalar passing from device to host float h_total_weight; math::Sum<float, CUDAContext>( weights_.size(), weights_.data<float>(), total_weight_ptr_.mutable_data<float>(), &context_); cudaMemcpyAsync(&h_total_weight, total_weight_ptr_.data<float>(), sizeof(float), cudaMemcpyDeviceToHost, context_.cuda_stream()); math::Sum<float, CUDAContext>( losses_.size(), losses_.data<float>(), avg_loss_data, &context_); // Final scaling if (h_total_weight > 0) { math::Scale<float, CUDAContext>( 1, scale_ / h_total_weight, avg_loss_data, avg_loss_data, &context_); } } return true; } template<> bool SoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Logits auto& T = Input(1); // Labels / targets // Input(2) is weights, if given auto& P = Input(InputSize() - 2); // Probabilities from softmax auto& d_avg_loss = Input(InputSize() - 1); // Gradient w.r.t. avg loss const float* weights = (InputSize() > 4 ? 
Input(2).data<float>() : NULL); auto* dX = Output(0); int N = X.dim32(0); int D = X.dim32(1); dX->ResizeLike(X); total_weight_ptr_.Resize(1); if (!spatial_mode_) { DCHECK_EQ(X.ndim(), 2); DCHECK((T.ndim() == 1) || (T.ndim() == 2 && T.dim32(1) == 1)); DCHECK_EQ(T.dim32(0), N); // Copy softmax probabilities into dX context_.Copy<float, CUDAContext, CUDAContext>( P.size(), P.data<float>(), dX->mutable_data<float>()); // Subtract 1 from labeled positions LabelCrossEntropyGradientKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, P.data<float>(), T.data<int>(), dX->mutable_data<float>(), weights); float total_weight = N; if (weights) { // Sum weights math::Sum<float, CUDAContext>( N, weights, total_weight_ptr_.mutable_data<float>(), &context_); cudaMemcpyAsync(&total_weight, total_weight_ptr_.data<float>(), sizeof(float), cudaMemcpyDeviceToHost, context_.cuda_stream()); } // Scale by d_avg_loss / N math::Scale<float, CUDAContext>( dX->size(), scale_ / total_weight, dX->data<float>(), dX->mutable_data<float>(), &context_); math::Scale<float, CUDAContext>( dX->size(), d_avg_loss.data<float>(), dX->data<float>(), dX->mutable_data<float>(), &context_); } else { // Spatial mode, compute softmax for each x, y location DCHECK_EQ(X.ndim(), 4); DCHECK_EQ(T.ndim(), 3); int H = X.dim32(2); int W = X.dim32(3); dX->ResizeLike(X); if (weights_.size() != N * W * H) { weights_.Resize(N * W * H); } const float* Pdata = P.data<float>(); float* dX_data = dX->mutable_data<float>(); const int* label_data = T.data<int>(); const float* d_avg_loss_data = d_avg_loss.data<float>(); // Copy softmax probabilities into dX. All but the neuron // corresponding to the correct label has gradient equaling e(x_j) // which is the probability under softmax. context_.Copy<float, CUDAContext, CUDAContext>(P.size(), Pdata, dX_data); math::Set<float, CUDAContext>( 1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_); SpatialSoftmaxLossGradientKernel<<<CAFFE_GET_BLOCKS(N * W * H), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, W, H, label_data, weights, dX_data, weights_.mutable_data<float>()); math::Sum<float, CUDAContext>( weights_.size(), weights_.data<float>(), total_weight_ptr_.mutable_data<float>(), &context_); // Somewhat awkward scalar passing from device to host float h_total_weight; cudaMemcpyAsync(&h_total_weight, total_weight_ptr_.data<float>(), sizeof(float), cudaMemcpyDeviceToHost, context_.cuda_stream()); // Final scaling if (h_total_weight > 0) { math::Scale<float, CUDAContext>( dX->size(), scale_ / h_total_weight, dX->data<float>(), dX->mutable_data<float>(), &context_); } math::Scale<float, CUDAContext>( dX->size(), d_avg_loss.data<float>(), dX->data<float>(), dX->mutable_data<float>(), &context_); } return true; } namespace { REGISTER_CUDA_OPERATOR(SoftmaxWithLoss, SoftmaxWithLossOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(SoftmaxWithLossGradient, SoftmaxWithLossGradientOp<float, CUDAContext>); } // namespace } // namespace caffe2
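// Both RunOnDevice() implementations above copy the accumulated weight to the host with
// cudaMemcpyAsync on context_.cuda_stream() and read it immediately afterwards on the
// host ("somewhat awkward scalar passing", as the code notes). Unless synchronization is
// guaranteed elsewhere in the framework, that read can race the asynchronous copy. A
// conventional guard is sketched below with a plain stream handle standing in for
// context_.cuda_stream(); the helper name is illustrative, not an API of this operator.
#include <cuda_runtime.h>

static inline float fetch_scalar_from_device(const float* d_scalar, cudaStream_t stream) {
    float h_scalar = 0.f;
    cudaMemcpyAsync(&h_scalar, d_scalar, sizeof(float),
                    cudaMemcpyDeviceToHost, stream);
    cudaStreamSynchronize(stream);   // make sure the value has landed before using it
    return h_scalar;
}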
9edac1e659b4065d92493ef3c6e443026d972fc9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2019, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************<GINKGO LICENSE>*******************************/ #include "core/preconditioner/jacobi_kernels.hpp" #include <ginkgo/config.hpp> #include <ginkgo/core/base/exception_helpers.hpp> #include "core/base/extended_float.hpp" #include "core/preconditioner/jacobi_utils.hpp" #include "core/synthesizer/implementation_selection.hpp" #include "cuda/base/math.hpp" #include "cuda/base/types.hpp" #include "cuda/components/cooperative_groups.cuh" #include "cuda/components/diagonal_block_manipulation.cuh" #include "cuda/components/thread_ids.cuh" #include "cuda/components/uninitialized_array.hpp" #include "cuda/components/warp_blas.cuh" #include "cuda/preconditioner/jacobi_common.hpp" namespace gko { namespace kernels { namespace cuda { /** * @brief The Jacobi preconditioner namespace. 
* @ref Jacobi * @ingroup jacobi */ namespace jacobi { namespace kernel { template <int max_block_size, typename ReducedType, typename Group, typename ValueType, typename IndexType> __device__ __forceinline__ bool validate_precision_reduction_feasibility( Group &__restrict__ group, IndexType block_size, ValueType *__restrict__ row, ValueType *__restrict__ work, size_type stride) { using gko::detail::float_traits; // save original data and reduce precision if (group.thread_rank() < block_size) { #pragma unroll for (auto i = 0u; i < max_block_size; ++i) { if (i >= block_size) { break; } work[i * stride + group.thread_rank()] = row[i]; row[i] = static_cast<ValueType>(static_cast<ReducedType>(row[i])); } } // compute the condition number auto perm = group.thread_rank(); auto trans_perm = perm; auto block_cond = compute_infinity_norm<max_block_size>(group, block_size, block_size, row); auto succeeded = invert_block<max_block_size>(group, block_size, row, perm, trans_perm); block_cond *= compute_infinity_norm<max_block_size>(group, block_size, block_size, row); // restore original data if (group.thread_rank() < block_size) { #pragma unroll for (auto i = 0u; i < max_block_size; ++i) { if (i >= block_size) { break; } row[i] = work[i * stride + group.thread_rank()]; } } return succeeded && block_cond >= 1.0 && block_cond * float_traits<remove_complex<ValueType>>::eps < 1e-3; } template <int max_block_size, int subwarp_size, int warps_per_block, typename ValueType, typename IndexType> __global__ void __launch_bounds__(warps_per_block *cuda_config::warp_size) generate(size_type num_rows, const IndexType *__restrict__ row_ptrs, const IndexType *__restrict__ col_idxs, const ValueType *__restrict__ values, ValueType *__restrict__ block_data, preconditioner::block_interleaved_storage_scheme<IndexType> storage_scheme, const IndexType *__restrict__ block_ptrs, size_type num_blocks) { const auto block_id = thread::get_subwarp_id<subwarp_size, warps_per_block>(); const auto block = group::this_thread_block(); ValueType row[max_block_size]; __shared__ UninitializedArray<ValueType, max_block_size * warps_per_block> workspace; csr::extract_transposed_diag_blocks<max_block_size, warps_per_block>( block, cuda_config::warp_size / subwarp_size, row_ptrs, col_idxs, values, block_ptrs, num_blocks, row, 1, workspace + threadIdx.z * max_block_size); const auto subwarp = group::tiled_partition<subwarp_size>(block); if (block_id < num_blocks) { const auto block_size = block_ptrs[block_id + 1] - block_ptrs[block_id]; auto perm = subwarp.thread_rank(); auto trans_perm = subwarp.thread_rank(); invert_block<max_block_size>(subwarp, block_size, row, perm, trans_perm); copy_matrix<max_block_size, and_transpose>( subwarp, block_size, row, 1, perm, trans_perm, block_data + storage_scheme.get_global_block_offset(block_id), storage_scheme.get_stride()); } } template <int max_block_size, int subwarp_size, int warps_per_block, typename ValueType, typename IndexType> __global__ void __launch_bounds__(warps_per_block *cuda_config::warp_size) adaptive_generate( size_type num_rows, const IndexType *__restrict__ row_ptrs, const IndexType *__restrict__ col_idxs, const ValueType *__restrict__ values, remove_complex<ValueType> accuracy, ValueType *__restrict__ block_data, preconditioner::block_interleaved_storage_scheme<IndexType> storage_scheme, remove_complex<ValueType> *__restrict__ conditioning, precision_reduction *__restrict__ block_precisions, const IndexType *__restrict__ block_ptrs, size_type num_blocks) { // extract blocks const 
auto block_id = thread::get_subwarp_id<subwarp_size, warps_per_block>(); const auto block = group::this_thread_block(); ValueType row[max_block_size]; __shared__ UninitializedArray<ValueType, max_block_size * warps_per_block> workspace; csr::extract_transposed_diag_blocks<max_block_size, warps_per_block>( block, cuda_config::warp_size / subwarp_size, row_ptrs, col_idxs, values, block_ptrs, num_blocks, row, 1, workspace + threadIdx.z * max_block_size); // compute inverse and figure out the correct precision const auto subwarp = group::tiled_partition<subwarp_size>(block); const auto block_size = block_id < num_blocks ? block_ptrs[block_id + 1] - block_ptrs[block_id] : 0; auto perm = subwarp.thread_rank(); auto trans_perm = subwarp.thread_rank(); auto prec_descriptor = ~uint32{}; if (block_id < num_blocks) { auto block_cond = compute_infinity_norm<max_block_size>( subwarp, block_size, block_size, row); invert_block<max_block_size>(subwarp, block_size, row, perm, trans_perm); block_cond *= compute_infinity_norm<max_block_size>(subwarp, block_size, block_size, row); conditioning[block_id] = block_cond; const auto prec = block_precisions[block_id]; prec_descriptor = preconditioner::detail::precision_reduction_descriptor::singleton( prec); if (prec == precision_reduction::autodetect()) { using preconditioner::detail::get_supported_storage_reductions; prec_descriptor = get_supported_storage_reductions<ValueType>( accuracy, block_cond, [&subwarp, &block_size, &row, &block_data, &storage_scheme, &block_id] { using target = reduce_precision<ValueType>; return validate_precision_reduction_feasibility< max_block_size, target>( subwarp, block_size, row, block_data + storage_scheme.get_global_block_offset(block_id), storage_scheme.get_stride()); }, [&subwarp, &block_size, &row, &block_data, &storage_scheme, &block_id] { using target = reduce_precision<reduce_precision<ValueType>>; return validate_precision_reduction_feasibility< max_block_size, target>( subwarp, block_size, row, block_data + storage_scheme.get_global_block_offset(block_id), storage_scheme.get_stride()); }); } } // make sure all blocks in the group have the same precision const auto warp = group::tiled_partition<cuda_config::warp_size>(block); const auto prec = preconditioner::detail::get_optimal_storage_reduction(reduce( warp, prec_descriptor, [](uint32 x, uint32 y) { return x & y; })); // store the block back into memory if (block_id < num_blocks) { block_precisions[block_id] = prec; GKO_PRECONDITIONER_JACOBI_RESOLVE_PRECISION( ValueType, prec, copy_matrix<max_block_size, and_transpose>( subwarp, block_size, row, 1, perm, trans_perm, reinterpret_cast<resolved_precision *>( block_data + storage_scheme.get_group_offset(block_id)) + storage_scheme.get_block_offset(block_id), storage_scheme.get_stride())); } } } // namespace kernel namespace { template <int warps_per_block, int max_block_size, typename ValueType, typename IndexType> void generate(syn::value_list<int, max_block_size>, const matrix::Csr<ValueType, IndexType> *mtx, remove_complex<ValueType> accuracy, ValueType *block_data, const preconditioner::block_interleaved_storage_scheme<IndexType> &storage_scheme, remove_complex<ValueType> *conditioning, precision_reduction *block_precisions, const IndexType *block_ptrs, size_type num_blocks) { constexpr int subwarp_size = get_larger_power(max_block_size); constexpr int blocks_per_warp = cuda_config::warp_size / subwarp_size; const dim3 grid_size(ceildiv(num_blocks, warps_per_block * blocks_per_warp), 1, 1); const dim3 
block_size(subwarp_size, blocks_per_warp, warps_per_block); if (block_precisions) { hipLaunchKernelGGL(( kernel::adaptive_generate<max_block_size, subwarp_size, warps_per_block>) , dim3(grid_size), dim3(block_size), 0, 0, mtx->get_size()[0], mtx->get_const_row_ptrs(), mtx->get_const_col_idxs(), as_cuda_type(mtx->get_const_values()), as_cuda_type(accuracy), as_cuda_type(block_data), storage_scheme, as_cuda_type(conditioning), block_precisions, block_ptrs, num_blocks); } else { hipLaunchKernelGGL(( kernel::generate<max_block_size, subwarp_size, warps_per_block>) , dim3(grid_size), dim3(block_size), 0, 0, mtx->get_size()[0], mtx->get_const_row_ptrs(), mtx->get_const_col_idxs(), as_cuda_type(mtx->get_const_values()), as_cuda_type(block_data), storage_scheme, block_ptrs, num_blocks); } } GKO_ENABLE_IMPLEMENTATION_SELECTION(select_generate, generate); } // namespace template <typename ValueType, typename IndexType> void generate(std::shared_ptr<const CudaExecutor> exec, const matrix::Csr<ValueType, IndexType> *system_matrix, size_type num_blocks, uint32 max_block_size, remove_complex<ValueType> accuracy, const preconditioner::block_interleaved_storage_scheme<IndexType> &storage_scheme, Array<remove_complex<ValueType>> &conditioning, Array<precision_reduction> &block_precisions, const Array<IndexType> &block_pointers, Array<ValueType> &blocks) { select_generate(compiled_kernels(), [&](int compiled_block_size) { return max_block_size <= compiled_block_size; }, syn::value_list<int, cuda_config::min_warps_per_block>(), syn::type_list<>(), system_matrix, accuracy, blocks.get_data(), storage_scheme, conditioning.get_data(), block_precisions.get_data(), block_pointers.get_const_data(), num_blocks); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_JACOBI_GENERATE_KERNEL); } // namespace jacobi } // namespace cuda } // namespace kernels } // namespace gko
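The adaptive path above keeps a reduced-precision block only when the condition-number test in validate_precision_reduction_feasibility passes. Here is a minimal host-side restatement of that acceptance test, using std::numeric_limits as a stand-in for Ginkgo's float_traits (an assumption on my part; the function name is illustrative):

#include <limits>

// Accept the reduced-precision block only if inversion succeeded, the estimated
// condition number is sane (>= 1), and cond * eps stays below the 1e-3 threshold
// used in the kernel above.
template <typename ValueType>
bool precision_reduction_feasible(bool inversion_succeeded, double block_cond) {
    const double eps = std::numeric_limits<ValueType>::epsilon();
    return inversion_succeeded && block_cond >= 1.0 && block_cond * eps < 1e-3;
}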
9edac1e659b4065d92493ef3c6e443026d972fc9.cu
/*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2019, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************<GINKGO LICENSE>*******************************/ #include "core/preconditioner/jacobi_kernels.hpp" #include <ginkgo/config.hpp> #include <ginkgo/core/base/exception_helpers.hpp> #include "core/base/extended_float.hpp" #include "core/preconditioner/jacobi_utils.hpp" #include "core/synthesizer/implementation_selection.hpp" #include "cuda/base/math.hpp" #include "cuda/base/types.hpp" #include "cuda/components/cooperative_groups.cuh" #include "cuda/components/diagonal_block_manipulation.cuh" #include "cuda/components/thread_ids.cuh" #include "cuda/components/uninitialized_array.hpp" #include "cuda/components/warp_blas.cuh" #include "cuda/preconditioner/jacobi_common.hpp" namespace gko { namespace kernels { namespace cuda { /** * @brief The Jacobi preconditioner namespace. 
* @ref Jacobi * @ingroup jacobi */ namespace jacobi { namespace kernel { template <int max_block_size, typename ReducedType, typename Group, typename ValueType, typename IndexType> __device__ __forceinline__ bool validate_precision_reduction_feasibility( Group &__restrict__ group, IndexType block_size, ValueType *__restrict__ row, ValueType *__restrict__ work, size_type stride) { using gko::detail::float_traits; // save original data and reduce precision if (group.thread_rank() < block_size) { #pragma unroll for (auto i = 0u; i < max_block_size; ++i) { if (i >= block_size) { break; } work[i * stride + group.thread_rank()] = row[i]; row[i] = static_cast<ValueType>(static_cast<ReducedType>(row[i])); } } // compute the condition number auto perm = group.thread_rank(); auto trans_perm = perm; auto block_cond = compute_infinity_norm<max_block_size>(group, block_size, block_size, row); auto succeeded = invert_block<max_block_size>(group, block_size, row, perm, trans_perm); block_cond *= compute_infinity_norm<max_block_size>(group, block_size, block_size, row); // restore original data if (group.thread_rank() < block_size) { #pragma unroll for (auto i = 0u; i < max_block_size; ++i) { if (i >= block_size) { break; } row[i] = work[i * stride + group.thread_rank()]; } } return succeeded && block_cond >= 1.0 && block_cond * float_traits<remove_complex<ValueType>>::eps < 1e-3; } template <int max_block_size, int subwarp_size, int warps_per_block, typename ValueType, typename IndexType> __global__ void __launch_bounds__(warps_per_block *cuda_config::warp_size) generate(size_type num_rows, const IndexType *__restrict__ row_ptrs, const IndexType *__restrict__ col_idxs, const ValueType *__restrict__ values, ValueType *__restrict__ block_data, preconditioner::block_interleaved_storage_scheme<IndexType> storage_scheme, const IndexType *__restrict__ block_ptrs, size_type num_blocks) { const auto block_id = thread::get_subwarp_id<subwarp_size, warps_per_block>(); const auto block = group::this_thread_block(); ValueType row[max_block_size]; __shared__ UninitializedArray<ValueType, max_block_size * warps_per_block> workspace; csr::extract_transposed_diag_blocks<max_block_size, warps_per_block>( block, cuda_config::warp_size / subwarp_size, row_ptrs, col_idxs, values, block_ptrs, num_blocks, row, 1, workspace + threadIdx.z * max_block_size); const auto subwarp = group::tiled_partition<subwarp_size>(block); if (block_id < num_blocks) { const auto block_size = block_ptrs[block_id + 1] - block_ptrs[block_id]; auto perm = subwarp.thread_rank(); auto trans_perm = subwarp.thread_rank(); invert_block<max_block_size>(subwarp, block_size, row, perm, trans_perm); copy_matrix<max_block_size, and_transpose>( subwarp, block_size, row, 1, perm, trans_perm, block_data + storage_scheme.get_global_block_offset(block_id), storage_scheme.get_stride()); } } template <int max_block_size, int subwarp_size, int warps_per_block, typename ValueType, typename IndexType> __global__ void __launch_bounds__(warps_per_block *cuda_config::warp_size) adaptive_generate( size_type num_rows, const IndexType *__restrict__ row_ptrs, const IndexType *__restrict__ col_idxs, const ValueType *__restrict__ values, remove_complex<ValueType> accuracy, ValueType *__restrict__ block_data, preconditioner::block_interleaved_storage_scheme<IndexType> storage_scheme, remove_complex<ValueType> *__restrict__ conditioning, precision_reduction *__restrict__ block_precisions, const IndexType *__restrict__ block_ptrs, size_type num_blocks) { // extract blocks const 
auto block_id = thread::get_subwarp_id<subwarp_size, warps_per_block>(); const auto block = group::this_thread_block(); ValueType row[max_block_size]; __shared__ UninitializedArray<ValueType, max_block_size * warps_per_block> workspace; csr::extract_transposed_diag_blocks<max_block_size, warps_per_block>( block, cuda_config::warp_size / subwarp_size, row_ptrs, col_idxs, values, block_ptrs, num_blocks, row, 1, workspace + threadIdx.z * max_block_size); // compute inverse and figure out the correct precision const auto subwarp = group::tiled_partition<subwarp_size>(block); const auto block_size = block_id < num_blocks ? block_ptrs[block_id + 1] - block_ptrs[block_id] : 0; auto perm = subwarp.thread_rank(); auto trans_perm = subwarp.thread_rank(); auto prec_descriptor = ~uint32{}; if (block_id < num_blocks) { auto block_cond = compute_infinity_norm<max_block_size>( subwarp, block_size, block_size, row); invert_block<max_block_size>(subwarp, block_size, row, perm, trans_perm); block_cond *= compute_infinity_norm<max_block_size>(subwarp, block_size, block_size, row); conditioning[block_id] = block_cond; const auto prec = block_precisions[block_id]; prec_descriptor = preconditioner::detail::precision_reduction_descriptor::singleton( prec); if (prec == precision_reduction::autodetect()) { using preconditioner::detail::get_supported_storage_reductions; prec_descriptor = get_supported_storage_reductions<ValueType>( accuracy, block_cond, [&subwarp, &block_size, &row, &block_data, &storage_scheme, &block_id] { using target = reduce_precision<ValueType>; return validate_precision_reduction_feasibility< max_block_size, target>( subwarp, block_size, row, block_data + storage_scheme.get_global_block_offset(block_id), storage_scheme.get_stride()); }, [&subwarp, &block_size, &row, &block_data, &storage_scheme, &block_id] { using target = reduce_precision<reduce_precision<ValueType>>; return validate_precision_reduction_feasibility< max_block_size, target>( subwarp, block_size, row, block_data + storage_scheme.get_global_block_offset(block_id), storage_scheme.get_stride()); }); } } // make sure all blocks in the group have the same precision const auto warp = group::tiled_partition<cuda_config::warp_size>(block); const auto prec = preconditioner::detail::get_optimal_storage_reduction(reduce( warp, prec_descriptor, [](uint32 x, uint32 y) { return x & y; })); // store the block back into memory if (block_id < num_blocks) { block_precisions[block_id] = prec; GKO_PRECONDITIONER_JACOBI_RESOLVE_PRECISION( ValueType, prec, copy_matrix<max_block_size, and_transpose>( subwarp, block_size, row, 1, perm, trans_perm, reinterpret_cast<resolved_precision *>( block_data + storage_scheme.get_group_offset(block_id)) + storage_scheme.get_block_offset(block_id), storage_scheme.get_stride())); } } } // namespace kernel namespace { template <int warps_per_block, int max_block_size, typename ValueType, typename IndexType> void generate(syn::value_list<int, max_block_size>, const matrix::Csr<ValueType, IndexType> *mtx, remove_complex<ValueType> accuracy, ValueType *block_data, const preconditioner::block_interleaved_storage_scheme<IndexType> &storage_scheme, remove_complex<ValueType> *conditioning, precision_reduction *block_precisions, const IndexType *block_ptrs, size_type num_blocks) { constexpr int subwarp_size = get_larger_power(max_block_size); constexpr int blocks_per_warp = cuda_config::warp_size / subwarp_size; const dim3 grid_size(ceildiv(num_blocks, warps_per_block * blocks_per_warp), 1, 1); const dim3 
block_size(subwarp_size, blocks_per_warp, warps_per_block); if (block_precisions) { kernel::adaptive_generate<max_block_size, subwarp_size, warps_per_block> <<<grid_size, block_size, 0, 0>>>( mtx->get_size()[0], mtx->get_const_row_ptrs(), mtx->get_const_col_idxs(), as_cuda_type(mtx->get_const_values()), as_cuda_type(accuracy), as_cuda_type(block_data), storage_scheme, as_cuda_type(conditioning), block_precisions, block_ptrs, num_blocks); } else { kernel::generate<max_block_size, subwarp_size, warps_per_block> <<<grid_size, block_size, 0, 0>>>( mtx->get_size()[0], mtx->get_const_row_ptrs(), mtx->get_const_col_idxs(), as_cuda_type(mtx->get_const_values()), as_cuda_type(block_data), storage_scheme, block_ptrs, num_blocks); } } GKO_ENABLE_IMPLEMENTATION_SELECTION(select_generate, generate); } // namespace template <typename ValueType, typename IndexType> void generate(std::shared_ptr<const CudaExecutor> exec, const matrix::Csr<ValueType, IndexType> *system_matrix, size_type num_blocks, uint32 max_block_size, remove_complex<ValueType> accuracy, const preconditioner::block_interleaved_storage_scheme<IndexType> &storage_scheme, Array<remove_complex<ValueType>> &conditioning, Array<precision_reduction> &block_precisions, const Array<IndexType> &block_pointers, Array<ValueType> &blocks) { select_generate(compiled_kernels(), [&](int compiled_block_size) { return max_block_size <= compiled_block_size; }, syn::value_list<int, cuda_config::min_warps_per_block>(), syn::type_list<>(), system_matrix, accuracy, blocks.get_data(), storage_scheme, conditioning.get_data(), block_precisions.get_data(), block_pointers.get_const_data(), num_blocks); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_JACOBI_GENERATE_KERNEL); } // namespace jacobi } // namespace cuda } // namespace kernels } // namespace gko
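Apart from the hipify banner and the hip_runtime include, the main difference between this file and its HIP counterpart above is the kernel-launch syntax. A minimal standalone sketch of that mapping (axpy is a made-up kernel, not part of Ginkgo):

__global__ void axpy(int n, float a, const float* x, float* y) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) y[i] += a * x[i];
}

void launch_cuda(int n, float a, const float* x, float* y) {
    // CUDA triple-chevron form: grid, block, shared-memory bytes, stream.
    axpy<<<(n + 255) / 256, 256, 0, 0>>>(n, a, x, y);
}

// hipify rewrites the launch above into the HIP form (needs <hip/hip_runtime.h>):
//   hipLaunchKernelGGL(axpy, dim3((n + 255) / 256), dim3(256), 0, 0, n, a, x, y);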
16769ced3a9db6f27233acaf1092a802949ef128.hip
// !!! This is a file automatically generated by hipify!!!
#ifndef SYNC_AND_CHECK_CUDA_ERRORS
#define SYNC_AND_CHECK_CUDA_ERRORS {hipStreamSynchronize(0); hipError_t x = hipGetLastError(); if ((x) != hipSuccess) { printf("Error: %s\n", hipGetErrorString(x)); fclose(stdout); exit(1); }}
#endif

#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <string>

#include "World.h"
#include "utils.hpp"

int main(void) {
    time_t tm;
    time(&tm);

    freopen("output.log", "a", stdout);

    time_t start_time;
    time(&start_time);
    clock_t start_clock = clock();
    struct tm* s_time = localtime(&start_time);
    printf("================= Running. time = %04d/%02d/%02d %02d:%02d:%02d =================\n",
           s_time->tm_year + 1900, s_time->tm_mon + 1, s_time->tm_mday,
           s_time->tm_hour, s_time->tm_min, s_time->tm_sec);

    CUDAInfo();

    World* w;
    hipMallocManaged(&w, sizeof(World));
    w->init(1 * make_int2(256, 128), 4, 64);

    printf("Building scene\n\t");
    w->build_scene();
    printf("\tOK\n");

    // Tracing
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);

    w->render_scene(64 * 64);

    hipEventRecord(stop, 0);
    hipStreamSynchronize(0);
    float _time;
    hipEventElapsedTime(&_time, start, stop);
    printf("Tracing time: %.2f ms\n\n", _time);

    // Saving image
    w->save_image("./kek.bmp");

    w->clear();
    hipFree(w);

    clock_t end_clock = clock();
    printf("TIME ELAPSED: %lf\n", (end_clock - start_clock) / 1000.0);
    fclose(stdout);
    return 0;
}
16769ced3a9db6f27233acaf1092a802949ef128.cu
#ifndef SYNC_AND_CHECK_CUDA_ERRORS
#define SYNC_AND_CHECK_CUDA_ERRORS {cudaStreamSynchronize(0); cudaError_t x = cudaGetLastError(); if ((x) != cudaSuccess) { printf("Error: %s\n", cudaGetErrorString(x)); fclose(stdout); exit(1); }}
#endif

#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <string>

#include "World.h"
#include "utils.hpp"

int main(void) {
    time_t tm;
    time(&tm);

    freopen("output.log", "a", stdout);

    time_t start_time;
    time(&start_time);
    clock_t start_clock = clock();
    struct tm* s_time = localtime(&start_time);
    printf("================= Running. time = %04d/%02d/%02d %02d:%02d:%02d =================\n",
           s_time->tm_year + 1900, s_time->tm_mon + 1, s_time->tm_mday,
           s_time->tm_hour, s_time->tm_min, s_time->tm_sec);

    CUDAInfo();

    World* w;
    cudaMallocManaged(&w, sizeof(World));
    w->init(1 * make_int2(256, 128), 4, 64);

    printf("Building scene\n\t");
    w->build_scene();
    printf("\tOK\n");

    // Tracing
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    w->render_scene(64 * 64);

    cudaEventRecord(stop, 0);
    cudaStreamSynchronize(0);
    float _time;
    cudaEventElapsedTime(&_time, start, stop);
    printf("Tracing time: %.2f ms\n\n", _time);

    // Saving image
    w->save_image("./kek.bmp");

    w->clear();
    cudaFree(w);

    clock_t end_clock = clock();
    printf("TIME ELAPSED: %lf\n", (end_clock - start_clock) / 1000.0);
    fclose(stdout);
    return 0;
}
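Both versions of this main() time the render with the event pattern below. This is a generic restatement (the callback and helper name are placeholders, not project code); note that cudaEventSynchronize(stop) is the more targeted wait, whereas the file synchronizes the whole default stream.

#include <cuda_runtime.h>

// Record start/stop events around a region and return the elapsed milliseconds.
float time_region_ms(void (*region)(void)) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    region();                       // e.g. the render_scene call above
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);     // wait only for the stop event
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}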
6ffe4e890fd25432c6b750cd5c104f521a00fd6a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" const float PI = 3.14159265359; const float HALFPI = 0.5*PI; texture<unsigned char, hipTextureType3D, hipReadModeElementType> tcExpData; texture<float, hipTextureType2D, hipReadModeElementType> tfG; // texture to store scattering vectors; typedef struct { int iNPixelJ, iNPixelK; float fPixelJ, fPixelK; float afCoordOrigin[3]; float afNorm[3]; float afJVector[3]; float afKVector[3]; float fNRot, fAngleStart,fAngleEnd; } DetInfo; __global__ void display_rand(float* afRandom, int iNRand){ int i = blockIdx.x*blockDim.x + threadIdx.x; printf("=%d=",i); if (i<iNRand){ printf(" %f ||", afRandom[i]); } }
6ffe4e890fd25432c6b750cd5c104f521a00fd6a.cu
#include "includes.h" const float PI = 3.14159265359; const float HALFPI = 0.5*PI; texture<unsigned char, cudaTextureType3D, cudaReadModeElementType> tcExpData; texture<float, cudaTextureType2D, cudaReadModeElementType> tfG; // texture to store scattering vectors; typedef struct { int iNPixelJ, iNPixelK; float fPixelJ, fPixelK; float afCoordOrigin[3]; float afNorm[3]; float afJVector[3]; float afKVector[3]; float fNRot, fAngleStart,fAngleEnd; } DetInfo; __global__ void display_rand(float* afRandom, int iNRand){ int i = blockIdx.x*blockDim.x + threadIdx.x; printf("=%d=",i); if (i<iNRand){ printf(" %f ||", afRandom[i]); } }
72947aed4a6634a219519e341acdee87d7625b48.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "SteerForAlignmentCUDA.cuh" extern "C" { __host__ void SteerForAlignmentKernelBindTextures( float4 const* pdBPosition, float4 const* pdBDirection, uint const numB ); __host__ void SteerForAlignmentKernelUnindTextures( void ); __global__ void SteerForAlignmentCUDAKernel( float4 const* pdPosition, float4 const* pdDirection, float4 * pdSteering, size_t const numA, uint const* pdKNNIndices, size_t const k, uint const numB, float const minDistance, float const maxDistance, float const cosMaxAngle, float const fWeight, uint * pdAppliedKernels, uint const doNotApplyWith ); } using namespace OpenSteer; SteerForAlignmentCUDA::SteerForAlignmentCUDA( AgentGroup * pAgentGroup, KNNData * pKNNData, AgentGroup * pOtherGroup, float const minDistance, float const maxDistance, float const cosMaxAngle, float const fWeight, uint const doNotApplyWith ) : AbstractCUDAKernel( pAgentGroup, fWeight, doNotApplyWith ), m_pKNNData( pKNNData ), m_pOtherGroup( pOtherGroup ), m_fMinDistance( minDistance ), m_fMaxDistance( maxDistance ), m_fCosMaxAngle( cosMaxAngle ) { // Nothing to do. } void SteerForAlignmentCUDA::init( void ) { // Nothing to do. } void SteerForAlignmentCUDA::run( void ) { dim3 grid = gridDim(); dim3 block = blockDim(); float4 const* pdAPosition = m_pAgentGroupData->pdPosition(); float4 const* pdADirection = m_pAgentGroupData->pdDirection(); float4 * pdASteering = m_pAgentGroupData->pdSteering(); uint const& numA = getNumAgents(); uint const* pdKNNIndices = m_pKNNData->pdKNNIndices(); uint const& k = m_pKNNData->k(); float4 const* pdBPosition = m_pOtherGroup->pdPosition(); float4 const* pdBDirection = m_pOtherGroup->pdDirection(); uint const& numB = m_pOtherGroup->Size(); uint * pdAppliedKernels = m_pAgentGroupData->pdAppliedKernels(); size_t const shMemSize = k * THREADSPERBLOCK * sizeof(uint); // Bind the textures. SteerForAlignmentKernelBindTextures( pdBPosition, pdBDirection, numB ); hipLaunchKernelGGL(( SteerForAlignmentCUDAKernel), dim3(grid), dim3(block), shMemSize , 0, pdAPosition, pdADirection, pdASteering, numA, pdKNNIndices, k, numB, m_fMinDistance, m_fMaxDistance, m_fCosMaxAngle, m_fWeight, pdAppliedKernels, m_doNotApplyWith ); cutilCheckMsg( "SteerForAlignmentCUDAKernel failed" ); //CUDA_SAFE_CALL( hipDeviceSynchronize() ); // Unbind the textures. SteerForAlignmentKernelUnindTextures(); } void SteerForAlignmentCUDA::close( void ) { // Agent group data may have changed. m_pAgentGroup->SetSyncHost(); }
72947aed4a6634a219519e341acdee87d7625b48.cu
#include "SteerForAlignmentCUDA.cuh" extern "C" { __host__ void SteerForAlignmentKernelBindTextures( float4 const* pdBPosition, float4 const* pdBDirection, uint const numB ); __host__ void SteerForAlignmentKernelUnindTextures( void ); __global__ void SteerForAlignmentCUDAKernel( float4 const* pdPosition, float4 const* pdDirection, float4 * pdSteering, size_t const numA, uint const* pdKNNIndices, size_t const k, uint const numB, float const minDistance, float const maxDistance, float const cosMaxAngle, float const fWeight, uint * pdAppliedKernels, uint const doNotApplyWith ); } using namespace OpenSteer; SteerForAlignmentCUDA::SteerForAlignmentCUDA( AgentGroup * pAgentGroup, KNNData * pKNNData, AgentGroup * pOtherGroup, float const minDistance, float const maxDistance, float const cosMaxAngle, float const fWeight, uint const doNotApplyWith ) : AbstractCUDAKernel( pAgentGroup, fWeight, doNotApplyWith ), m_pKNNData( pKNNData ), m_pOtherGroup( pOtherGroup ), m_fMinDistance( minDistance ), m_fMaxDistance( maxDistance ), m_fCosMaxAngle( cosMaxAngle ) { // Nothing to do. } void SteerForAlignmentCUDA::init( void ) { // Nothing to do. } void SteerForAlignmentCUDA::run( void ) { dim3 grid = gridDim(); dim3 block = blockDim(); float4 const* pdAPosition = m_pAgentGroupData->pdPosition(); float4 const* pdADirection = m_pAgentGroupData->pdDirection(); float4 * pdASteering = m_pAgentGroupData->pdSteering(); uint const& numA = getNumAgents(); uint const* pdKNNIndices = m_pKNNData->pdKNNIndices(); uint const& k = m_pKNNData->k(); float4 const* pdBPosition = m_pOtherGroup->pdPosition(); float4 const* pdBDirection = m_pOtherGroup->pdDirection(); uint const& numB = m_pOtherGroup->Size(); uint * pdAppliedKernels = m_pAgentGroupData->pdAppliedKernels(); size_t const shMemSize = k * THREADSPERBLOCK * sizeof(uint); // Bind the textures. SteerForAlignmentKernelBindTextures( pdBPosition, pdBDirection, numB ); SteerForAlignmentCUDAKernel<<< grid, block, shMemSize >>>( pdAPosition, pdADirection, pdASteering, numA, pdKNNIndices, k, numB, m_fMinDistance, m_fMaxDistance, m_fCosMaxAngle, m_fWeight, pdAppliedKernels, m_doNotApplyWith ); cutilCheckMsg( "SteerForAlignmentCUDAKernel failed" ); //CUDA_SAFE_CALL( cudaThreadSynchronize() ); // Unbind the textures. SteerForAlignmentKernelUnindTextures(); } void SteerForAlignmentCUDA::close( void ) { // Agent group data may have changed. m_pAgentGroup->SetSyncHost(); }
b2759faa0f919d2666ca977312717505aceb1531.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include "stencil.cuh" using namespace std; __global__ void conv_kernel(const float* image, const float* mask, float* output, unsigned int r, unsigned int c) { int tidx = threadIdx.x, tidy = threadIdx.y; int bidx = blockIdx.x, bidy = blockIdx.y; int bdy = blockDim.y, bdx = blockDim.x; float avg_intensity = 0.5; extern __shared__ float arr[]; float* img = &arr[0]; float* msk = &arr[(bdx + 2) * (bdy + 2)]; float* out = &arr[(bdx + 2) * (bdy + 2) + 3*3]; long x_idx = tidx + (long)bdx * (long)bidx; // long since can be > 2^31 -1 long y_idx = tidy + (long)bdy * (long)bidy; // load image elements in-lace if (x_idx < c && y_idx < r) img[(tidy+1)*bdy + tidx+1] = image[y_idx * c + x_idx]; else img[(tidy+1)*bdy + tidx+1] = avg_intensity; if (tidx < 3 && tidy < 3) msk[tidy*bdy + tidx] = mask[tidy*bdy + tidx]; if (tidx == 0 && tidy == 0) { // leftmost top corner if (x_idx >= 1 && y_idx >= 1) img[tidy*bdy + tidx] = image[(y_idx-1) * c + x_idx-1]; else img[tidy*bdy + tidx] = avg_intensity; } else if (tidx == 0 && tidy == bdy - 1) { // leftmost bottom corner if (x_idx >= 1 && y_idx < bdy-1) img[(tidy+2)*bdy + tidx] = image[(y_idx+1) * c + x_idx-1]; else img[(tidy+2)*bdy + tidx] = avg_intensity; } else if (tidx == bdx - 1 && tidy == 0) { // rightmost top corner if (x_idx < bdx -1 && y_idx >= 1) img[tidy*bdy + tidx+2] = image[(y_idx-1) * c + x_idx+1]; else img[tidy*bdy + tidx+2] = avg_intensity; } else if (tidx == bdx - 1 && tidy == bdy -1) { // rightmost bottom corner if (x_idx < bdx -1 && y_idx < bdy-1) img[(tidy+2)*bdy + tidx+2] = image[(y_idx+1) * c + x_idx+1]; else img[(tidy+2)*bdy + tidx+2] = avg_intensity; } if (tidx == 0) { // leftmost col if (x_idx >= 1) img[(tidy+1)*bdy + tidx] = image[y_idx*c + x_idx-1]; else img[(tidy+1)*bdy + tidx] = avg_intensity; } else if (tidx == bdx - 1) { // rightmost col if (x_idx < bdx-1) img[(tidy+1)*bdy + tidx+2] = image[y_idx*c + x_idx+1]; else img[(tidy+1)*bdy + tidx+2] = avg_intensity; } else if (tidy == 0) { // top row if (y_idx >= 1) img[tidy*bdy + tidx+1] = image[(y_idx-1)*c + x_idx]; else img[tidy*bdy + tidx+1] = avg_intensity; } else if (tidy == bdy - 1) { // bottom row if (y_idx < bdy-1) img[(tidy+2)*bdy + tidx+1] = image[(y_idx+1)*c + x_idx]; else img[(tidy+2)*bdy + tidx+1] = avg_intensity; } __syncthreads(); out[tidx] = 0; for (int i = 0; i < 3; i++) { for (int j = 0; j < 3; j++) { out[tidy*bdy+tidx] += img[(tidy+i)*bdy + (tidx+j)] * msk[i*3+j]; } } __syncthreads(); if (x_idx < c && y_idx < r) output[y_idx*c+r] = out[tidy*bdy+tidx]; } __host__ void conv(const float* image, const float* mask, float* output, unsigned int r, unsigned int c, unsigned int bdx, unsigned int bdy) { dim3 block(bdx, bdy); dim3 grid((c + block.x - 1) / block.x, (r + block.y - 1) / block.y); hipLaunchKernelGGL(( conv_kernel), dim3(grid), dim3(block), sizeof(float) * (bdx + 2) * (bdy + 2) + 3 * 3 * sizeof(float) + sizeof(float) * bdx * bdy, 0, image, mask, output, r, c); hipDeviceSynchronize(); }
b2759faa0f919d2666ca977312717505aceb1531.cu
#include <cstdio> #include "stencil.cuh" using namespace std; __global__ void conv_kernel(const float* image, const float* mask, float* output, unsigned int r, unsigned int c) { int tidx = threadIdx.x, tidy = threadIdx.y; int bidx = blockIdx.x, bidy = blockIdx.y; int bdy = blockDim.y, bdx = blockDim.x; float avg_intensity = 0.5; extern __shared__ float arr[]; float* img = &arr[0]; float* msk = &arr[(bdx + 2) * (bdy + 2)]; float* out = &arr[(bdx + 2) * (bdy + 2) + 3*3]; long x_idx = tidx + (long)bdx * (long)bidx; // long since can be > 2^31 -1 long y_idx = tidy + (long)bdy * (long)bidy; // load image elements in-lace if (x_idx < c && y_idx < r) img[(tidy+1)*bdy + tidx+1] = image[y_idx * c + x_idx]; else img[(tidy+1)*bdy + tidx+1] = avg_intensity; if (tidx < 3 && tidy < 3) msk[tidy*bdy + tidx] = mask[tidy*bdy + tidx]; if (tidx == 0 && tidy == 0) { // leftmost top corner if (x_idx >= 1 && y_idx >= 1) img[tidy*bdy + tidx] = image[(y_idx-1) * c + x_idx-1]; else img[tidy*bdy + tidx] = avg_intensity; } else if (tidx == 0 && tidy == bdy - 1) { // leftmost bottom corner if (x_idx >= 1 && y_idx < bdy-1) img[(tidy+2)*bdy + tidx] = image[(y_idx+1) * c + x_idx-1]; else img[(tidy+2)*bdy + tidx] = avg_intensity; } else if (tidx == bdx - 1 && tidy == 0) { // rightmost top corner if (x_idx < bdx -1 && y_idx >= 1) img[tidy*bdy + tidx+2] = image[(y_idx-1) * c + x_idx+1]; else img[tidy*bdy + tidx+2] = avg_intensity; } else if (tidx == bdx - 1 && tidy == bdy -1) { // rightmost bottom corner if (x_idx < bdx -1 && y_idx < bdy-1) img[(tidy+2)*bdy + tidx+2] = image[(y_idx+1) * c + x_idx+1]; else img[(tidy+2)*bdy + tidx+2] = avg_intensity; } if (tidx == 0) { // leftmost col if (x_idx >= 1) img[(tidy+1)*bdy + tidx] = image[y_idx*c + x_idx-1]; else img[(tidy+1)*bdy + tidx] = avg_intensity; } else if (tidx == bdx - 1) { // rightmost col if (x_idx < bdx-1) img[(tidy+1)*bdy + tidx+2] = image[y_idx*c + x_idx+1]; else img[(tidy+1)*bdy + tidx+2] = avg_intensity; } else if (tidy == 0) { // top row if (y_idx >= 1) img[tidy*bdy + tidx+1] = image[(y_idx-1)*c + x_idx]; else img[tidy*bdy + tidx+1] = avg_intensity; } else if (tidy == bdy - 1) { // bottom row if (y_idx < bdy-1) img[(tidy+2)*bdy + tidx+1] = image[(y_idx+1)*c + x_idx]; else img[(tidy+2)*bdy + tidx+1] = avg_intensity; } __syncthreads(); out[tidx] = 0; for (int i = 0; i < 3; i++) { for (int j = 0; j < 3; j++) { out[tidy*bdy+tidx] += img[(tidy+i)*bdy + (tidx+j)] * msk[i*3+j]; } } __syncthreads(); if (x_idx < c && y_idx < r) output[y_idx*c+r] = out[tidy*bdy+tidx]; } __host__ void conv(const float* image, const float* mask, float* output, unsigned int r, unsigned int c, unsigned int bdx, unsigned int bdy) { dim3 block(bdx, bdy); dim3 grid((c + block.x - 1) / block.x, (r + block.y - 1) / block.y); conv_kernel<<<grid, block, sizeof(float) * (bdx + 2) * (bdy + 2) + 3 * 3 * sizeof(float) + sizeof(float) * bdx * bdy>>>(image, mask, output, r, c); cudaDeviceSynchronize(); }
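For comparison against the GPU output, here is a straightforward host reference of the same 3x3 stencil (my own sketch, not part of stencil.cuh), treating out-of-range pixels as avg_intensity = 0.5 exactly as the halo handling in conv_kernel does, and reading the mask row-major.

void conv_reference(const float* image, const float* mask, float* output,
                    unsigned int r, unsigned int c) {
    const float avg_intensity = 0.5f;
    for (unsigned int y = 0; y < r; ++y) {
        for (unsigned int x = 0; x < c; ++x) {
            float acc = 0.0f;
            for (int i = -1; i <= 1; ++i) {        // row offset
                for (int j = -1; j <= 1; ++j) {    // column offset
                    long yy = (long)y + i, xx = (long)x + j;
                    float v = (yy < 0 || xx < 0 || yy >= (long)r || xx >= (long)c)
                                  ? avg_intensity
                                  : image[yy * c + xx];
                    acc += v * mask[(i + 1) * 3 + (j + 1)];
                }
            }
            output[y * c + x] = acc;
        }
    }
}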
070266a7ec06d413ac026338c00adde9a4344b35.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define NO_HIDDEN_NEURONS 5 extern "C" __global__ void deltasBatch(float *inputs, float *outputs, float *weights, float *weightsDeltas, int noInputs, int inputSize){ int gid = blockIdx.x * blockDim.x + threadIdx.x; float sum=0; int offsetDeltas = ((inputSize+1)*NO_HIDDEN_NEURONS+NO_HIDDEN_NEURONS+1)*gid; int offsetInput = noInputs*inputSize*gid; int offsetOutputs = noInputs*gid; float activationHidden[NO_HIDDEN_NEURONS]; float error; for(int hidden=0;hidden<NO_HIDDEN_NEURONS;hidden++){ for(int imageIndex=0;imageIndex<=inputSize;imageIndex++){ weightsDeltas[offsetDeltas+(inputSize+1)*hidden+imageIndex]=0; } } for(int hidden=0;hidden<=NO_HIDDEN_NEURONS;hidden++){ weightsDeltas[offsetDeltas+(inputSize+1)*NO_HIDDEN_NEURONS+hidden]=0; } for (int i=0;i<noInputs;i++){ for(int hidden=0;hidden<NO_HIDDEN_NEURONS;hidden++){ sum=0; for(int imageIndex=0;imageIndex<inputSize;imageIndex++){ sum+=inputs[offsetInput+i*inputSize+imageIndex]*weights[(inputSize+1)*hidden+imageIndex]; } sum+=weights[(inputSize+1)*hidden+inputSize]; if(sum>0) activationHidden[hidden]=1; else activationHidden[hidden]=0; //activationHidden[hidden]=sum/(1+abs(sum)); } sum=0; for(int hidden=0;hidden<NO_HIDDEN_NEURONS;hidden++){ sum+=activationHidden[hidden]*weights[(inputSize+1)*NO_HIDDEN_NEURONS+hidden]; } sum+=weights[(inputSize+1)*NO_HIDDEN_NEURONS+NO_HIDDEN_NEURONS]; if(sum>0)sum=1; else sum=0; sum=outputs[offsetOutputs+i]-sum; if(sum!=0){ for(int hidden=0;hidden<NO_HIDDEN_NEURONS;hidden++){ weightsDeltas[offsetDeltas+(inputSize+1)*NO_HIDDEN_NEURONS+hidden]+=sum*activationHidden[hidden]; } weightsDeltas[offsetDeltas+(inputSize+1)*NO_HIDDEN_NEURONS+NO_HIDDEN_NEURONS]+=sum; for(int hidden=0;hidden<NO_HIDDEN_NEURONS;hidden++){ error=sum*weights[(inputSize+1)*NO_HIDDEN_NEURONS+hidden]; if(error>0)error=1; else error=0; error=error-activationHidden[hidden]; if(error!=0){ for(int imageIndex=0;imageIndex<inputSize;imageIndex++){ weightsDeltas[offsetDeltas+(inputSize+1)*hidden+imageIndex]+=error*inputs[offsetInput+i*inputSize+imageIndex]; } weightsDeltas[offsetDeltas+(inputSize+1)*hidden+inputSize]+=error; } } } } }
070266a7ec06d413ac026338c00adde9a4344b35.cu
#define NO_HIDDEN_NEURONS 5 extern "C" __global__ void deltasBatch(float *inputs, float *outputs, float *weights, float *weightsDeltas, int noInputs, int inputSize){ int gid = blockIdx.x * blockDim.x + threadIdx.x; float sum=0; int offsetDeltas = ((inputSize+1)*NO_HIDDEN_NEURONS+NO_HIDDEN_NEURONS+1)*gid; int offsetInput = noInputs*inputSize*gid; int offsetOutputs = noInputs*gid; float activationHidden[NO_HIDDEN_NEURONS]; float error; for(int hidden=0;hidden<NO_HIDDEN_NEURONS;hidden++){ for(int imageIndex=0;imageIndex<=inputSize;imageIndex++){ weightsDeltas[offsetDeltas+(inputSize+1)*hidden+imageIndex]=0; } } for(int hidden=0;hidden<=NO_HIDDEN_NEURONS;hidden++){ weightsDeltas[offsetDeltas+(inputSize+1)*NO_HIDDEN_NEURONS+hidden]=0; } for (int i=0;i<noInputs;i++){ for(int hidden=0;hidden<NO_HIDDEN_NEURONS;hidden++){ sum=0; for(int imageIndex=0;imageIndex<inputSize;imageIndex++){ sum+=inputs[offsetInput+i*inputSize+imageIndex]*weights[(inputSize+1)*hidden+imageIndex]; } sum+=weights[(inputSize+1)*hidden+inputSize]; if(sum>0) activationHidden[hidden]=1; else activationHidden[hidden]=0; //activationHidden[hidden]=sum/(1+abs(sum)); } sum=0; for(int hidden=0;hidden<NO_HIDDEN_NEURONS;hidden++){ sum+=activationHidden[hidden]*weights[(inputSize+1)*NO_HIDDEN_NEURONS+hidden]; } sum+=weights[(inputSize+1)*NO_HIDDEN_NEURONS+NO_HIDDEN_NEURONS]; if(sum>0)sum=1; else sum=0; sum=outputs[offsetOutputs+i]-sum; if(sum!=0){ for(int hidden=0;hidden<NO_HIDDEN_NEURONS;hidden++){ weightsDeltas[offsetDeltas+(inputSize+1)*NO_HIDDEN_NEURONS+hidden]+=sum*activationHidden[hidden]; } weightsDeltas[offsetDeltas+(inputSize+1)*NO_HIDDEN_NEURONS+NO_HIDDEN_NEURONS]+=sum; for(int hidden=0;hidden<NO_HIDDEN_NEURONS;hidden++){ error=sum*weights[(inputSize+1)*NO_HIDDEN_NEURONS+hidden]; if(error>0)error=1; else error=0; error=error-activationHidden[hidden]; if(error!=0){ for(int imageIndex=0;imageIndex<inputSize;imageIndex++){ weightsDeltas[offsetDeltas+(inputSize+1)*hidden+imageIndex]+=error*inputs[offsetInput+i*inputSize+imageIndex]; } weightsDeltas[offsetDeltas+(inputSize+1)*hidden+inputSize]+=error; } } } } }
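deltasBatch accumulates weight deltas for a small one-hidden-layer network with step activations. Here is a host-side restatement of just the forward pass it assumes (an illustrative helper, not part of the kernel), using the same weight layout: (inputSize + 1) weights per hidden unit with the bias last, followed by NO_HIDDEN_NEURONS + 1 output weights.

// Forward pass for one sample with step activations; noHidden = NO_HIDDEN_NEURONS.
float forward_reference(const float* input, const float* weights,
                        int inputSize, int noHidden) {
    float out = 0.0f;
    for (int h = 0; h < noHidden; ++h) {
        float sum = 0.0f;
        for (int i = 0; i < inputSize; ++i)
            sum += input[i] * weights[(inputSize + 1) * h + i];
        sum += weights[(inputSize + 1) * h + inputSize];       // hidden bias
        float act = (sum > 0.0f) ? 1.0f : 0.0f;                // step activation
        out += act * weights[(inputSize + 1) * noHidden + h];  // output weights
    }
    out += weights[(inputSize + 1) * noHidden + noHidden];     // output bias
    return (out > 0.0f) ? 1.0f : 0.0f;
}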
ExpWalk.hip
// !!! This is a file automatically generated by hipify!!! /* Exp Walk * Written by Owen Welsh * Latest version dated August 27, 2012 * * Simulates Pollard's multiplicative Rho algorithm using a walk on the * exponents, using a GPU to run many simulations at once. Runs about * 6.666 times faster than an equivalent algoritm running on the CPU. */ #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #define LOOPS 965 /* Number of loops each thread will execute */ #define THREADS 256 /* Must be a power of 2 */ #define BLOCKS 13 /* Same as the number of prime orders to be used */ typedef unsigned long long int uint64_t; typedef unsigned int uint; __device__ __constant__ uint seedszD[BLOCKS]; __device__ __constant__ uint seedswD[BLOCKS]; /* An RNG that uses the values of two seeds to generate output */ __device__ uint GetUInt(uint *m_z, uint *m_w) { *m_z = 36969 * ((*m_z) & 65535) + ((*m_z) >> 16); *m_w = 18000 * ((*m_w) & 65535) + ((*m_w) >> 16); return ((*m_z) << 16) + *m_w; } /* Used so that seeds that start out with similar values don't generate similar output for the first few itterations of GetUInt */ __device__ void shuffle(uint *m_z, uint *m_w) { GetUInt(m_z, m_w); GetUInt(m_z, m_w); GetUInt(m_z, m_w); GetUInt(m_z, m_w); GetUInt(m_z, m_w); } /* A hash function that uses a modified version of the Mersenne Twister algorithm to randomly map values to 0, 1, or 2 */ __device__ uint64_t case3(uint64_t x) { x = 1812433253 * (x ^ (x >> 30)) + 1; x = x ^ (x >> 11); x = x ^ ((x << 7) & 2636928640); x = x ^ ((x << 15) & 4022730752); x = x ^ (x >> 18); return x % 3; } /* Runs one step of the exponent walk with x as the currect state. Returns the new state. */ __device__ uint64_t func(uint64_t x, uint64_t *a, uint64_t *b, uint64_t k, uint64_t N) { switch(case3(x)) { case 0: (*a)++; break; case 1: (*b)++; break; case 2: (*a) *= 2; (*b) *= 2; break; } *a = (*a) % N; *b = (*b) % N; return ((*a) + (*b) * k) % N; } /* Uses Brent's cycle detection algorithm to determine how long until a self-intersection with x0 as the starting state, k as the solution, and N as the prime order. Returns the number of steps until self-intersection. */ __device__ uint64_t brent(uint64_t x0, uint64_t k, uint64_t N) { int power = 1; int i; uint64_t lambda = 1; uint64_t mu = 0; uint64_t tortoise = x0; uint64_t hare; uint64_t at, ah, bt, bh; ah = at = x0; bh = bt = 0; hare = func(x0, &ah, &bh, k, N); /* Determines the length of the cycle (lambda, i.e. the head of the rho) */ while (tortoise != hare) { if (power == lambda) { tortoise = hare; power *= 2; lambda = 0; } hare = func(hare, &ah, &bh, k, N); lambda++; } tortoise = hare = x0; ah = at = x0; bh = bt = 0; /* Determines the time until first intersection (mu, the tail of the rho)*/ for (i = 0; i < lambda; i++) hare = func(hare, &ah, &bh, k, N); while (tortoise != hare) { tortoise = func(tortoise, &at, &bt, k, N); hare = func(hare, &ah, &bh, k, N); mu++; } return lambda + mu; } /* Uses brent() LOOPS times in THREADS threads to simulate Pollard's Rho and determine the average required number of steps until self-intersection. Uses random k and x0 values in each run. 
*/ __global__ void simulate(float* stepsD, uint64_t *primesD) { __shared__ float steps[THREADS]; uint64_t N = primesD[blockIdx.x]; uint64_t x0, k; uint64_t stepsI = 0; int i; uint id = threadIdx.x; uint stride, mz, mw; mz = seedszD[blockIdx.x] + id; mw = seedswD[blockIdx.x] + id; shuffle(&mz, &mw); for (i = 0; i < LOOPS; i++) { x0 = (uint64_t) GetUInt(&mz, &mw) % N; k = (((uint64_t) GetUInt(&mz, &mw)) % (N - 2)) + 2; stepsI += brent(x0, k, N); } __syncthreads(); steps[id] = ((float) stepsI) / LOOPS; /* Summates every thread's result to later find the average. Requires that THREADS be a power of 2. */ for (stride = blockDim.x / 2; stride > 1; stride >>= 1) { __syncthreads(); if (id < stride) steps[id] += steps[id + stride]; } __syncthreads(); if (id < stride) steps[id] += steps[id + stride]; if (id == 0) stepsD[blockIdx.x] = steps[0] / blockDim.x; } /* Determines I/O methods and which prime orders will be used during simulation, and handles the CUDA bookkeeping. */ int main(int argc, char *argv[]) { uint64_t primes[BLOCKS] = {251, 503, 1009, 2003, 4001, 8009, 16001, 32003, 64007, 128021, 255989, 511997, 1000003}; uint64_t *primesD; float *stepsD; float steps[BLOCKS]; const size_t size = sizeof(uint64_t) * BLOCKS; const size_t sizeF = sizeof(float) * BLOCKS; const size_t sizeU = sizeof(uint) * BLOCKS; uint seedsz[BLOCKS]; uint seedsw[BLOCKS]; int i; srand(time(NULL)); hipMalloc((void**) &stepsD, sizeF); hipMalloc((void**) &primesD, size); hipMemcpy(primesD, primes, size, hipMemcpyHostToDevice); /* If you wish the loop this process, the body of the loop should begin here */ for(i = 0; i < BLOCKS; i++) { seedsz[i] = (uint) rand(); seedsw[i] = (uint) rand(); } hipMemcpyToSymbol(seedszD, seedsz, sizeU, hipMemcpyHostToDevice); hipMemcpyToSymbol(seedswD, seedsw, sizeU, hipMemcpyHostToDevice); hipLaunchKernelGGL(( simulate), dim3(BLOCKS), dim3(THREADS), 0, 0, stepsD, primesD); hipMemcpy(steps, stepsD, sizeF, hipMemcpyDeviceToHost); /* And end here (with the requisit summing mechanism placed after the last call to hipMemcpy()) */ hipFree(stepsD); hipFree(primesD); for (i = 0; i < BLOCKS; i++) printf("N=%lu, steps: %f\n", primes[i], steps[i]); return 0; }
ExpWalk.cu
/* Exp Walk * Written by Owen Welsh * Latest version dated August 27, 2012 * * Simulates Pollard's multiplicative Rho algorithm using a walk on the * exponents, using a GPU to run many simulations at once. Runs about * 6.666 times faster than an equivalent algoritm running on the CPU. */ #include <cuda.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #define LOOPS 965 /* Number of loops each thread will execute */ #define THREADS 256 /* Must be a power of 2 */ #define BLOCKS 13 /* Same as the number of prime orders to be used */ typedef unsigned long long int uint64_t; typedef unsigned int uint; __device__ __constant__ uint seedszD[BLOCKS]; __device__ __constant__ uint seedswD[BLOCKS]; /* An RNG that uses the values of two seeds to generate output */ __device__ uint GetUInt(uint *m_z, uint *m_w) { *m_z = 36969 * ((*m_z) & 65535) + ((*m_z) >> 16); *m_w = 18000 * ((*m_w) & 65535) + ((*m_w) >> 16); return ((*m_z) << 16) + *m_w; } /* Used so that seeds that start out with similar values don't generate similar output for the first few itterations of GetUInt */ __device__ void shuffle(uint *m_z, uint *m_w) { GetUInt(m_z, m_w); GetUInt(m_z, m_w); GetUInt(m_z, m_w); GetUInt(m_z, m_w); GetUInt(m_z, m_w); } /* A hash function that uses a modified version of the Mersenne Twister algorithm to randomly map values to 0, 1, or 2 */ __device__ uint64_t case3(uint64_t x) { x = 1812433253 * (x ^ (x >> 30)) + 1; x = x ^ (x >> 11); x = x ^ ((x << 7) & 2636928640); x = x ^ ((x << 15) & 4022730752); x = x ^ (x >> 18); return x % 3; } /* Runs one step of the exponent walk with x as the currect state. Returns the new state. */ __device__ uint64_t func(uint64_t x, uint64_t *a, uint64_t *b, uint64_t k, uint64_t N) { switch(case3(x)) { case 0: (*a)++; break; case 1: (*b)++; break; case 2: (*a) *= 2; (*b) *= 2; break; } *a = (*a) % N; *b = (*b) % N; return ((*a) + (*b) * k) % N; } /* Uses Brent's cycle detection algorithm to determine how long until a self-intersection with x0 as the starting state, k as the solution, and N as the prime order. Returns the number of steps until self-intersection. */ __device__ uint64_t brent(uint64_t x0, uint64_t k, uint64_t N) { int power = 1; int i; uint64_t lambda = 1; uint64_t mu = 0; uint64_t tortoise = x0; uint64_t hare; uint64_t at, ah, bt, bh; ah = at = x0; bh = bt = 0; hare = func(x0, &ah, &bh, k, N); /* Determines the length of the cycle (lambda, i.e. the head of the rho) */ while (tortoise != hare) { if (power == lambda) { tortoise = hare; power *= 2; lambda = 0; } hare = func(hare, &ah, &bh, k, N); lambda++; } tortoise = hare = x0; ah = at = x0; bh = bt = 0; /* Determines the time until first intersection (mu, the tail of the rho)*/ for (i = 0; i < lambda; i++) hare = func(hare, &ah, &bh, k, N); while (tortoise != hare) { tortoise = func(tortoise, &at, &bt, k, N); hare = func(hare, &ah, &bh, k, N); mu++; } return lambda + mu; } /* Uses brent() LOOPS times in THREADS threads to simulate Pollard's Rho and determine the average required number of steps until self-intersection. Uses random k and x0 values in each run. 
*/ __global__ void simulate(float* stepsD, uint64_t *primesD) { __shared__ float steps[THREADS]; uint64_t N = primesD[blockIdx.x]; uint64_t x0, k; uint64_t stepsI = 0; int i; uint id = threadIdx.x; uint stride, mz, mw; mz = seedszD[blockIdx.x] + id; mw = seedswD[blockIdx.x] + id; shuffle(&mz, &mw); for (i = 0; i < LOOPS; i++) { x0 = (uint64_t) GetUInt(&mz, &mw) % N; k = (((uint64_t) GetUInt(&mz, &mw)) % (N - 2)) + 2; stepsI += brent(x0, k, N); } __syncthreads(); steps[id] = ((float) stepsI) / LOOPS; /* Summates every thread's result to later find the average. Requires that THREADS be a power of 2. */ for (stride = blockDim.x / 2; stride > 1; stride >>= 1) { __syncthreads(); if (id < stride) steps[id] += steps[id + stride]; } __syncthreads(); if (id < stride) steps[id] += steps[id + stride]; if (id == 0) stepsD[blockIdx.x] = steps[0] / blockDim.x; } /* Determines I/O methods and which prime orders will be used during simulation, and handles the CUDA bookkeeping. */ int main(int argc, char *argv[]) { uint64_t primes[BLOCKS] = {251, 503, 1009, 2003, 4001, 8009, 16001, 32003, 64007, 128021, 255989, 511997, 1000003}; uint64_t *primesD; float *stepsD; float steps[BLOCKS]; const size_t size = sizeof(uint64_t) * BLOCKS; const size_t sizeF = sizeof(float) * BLOCKS; const size_t sizeU = sizeof(uint) * BLOCKS; uint seedsz[BLOCKS]; uint seedsw[BLOCKS]; int i; srand(time(NULL)); cudaMalloc((void**) &stepsD, sizeF); cudaMalloc((void**) &primesD, size); cudaMemcpy(primesD, primes, size, cudaMemcpyHostToDevice); /* If you wish the loop this process, the body of the loop should begin here */ for(i = 0; i < BLOCKS; i++) { seedsz[i] = (uint) rand(); seedsw[i] = (uint) rand(); } cudaMemcpyToSymbol(seedszD, seedsz, sizeU, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(seedswD, seedsw, sizeU, cudaMemcpyHostToDevice); simulate<<<BLOCKS, THREADS>>>(stepsD, primesD); cudaMemcpy(steps, stepsD, sizeF, cudaMemcpyDeviceToHost); /* And end here (with the requisit summing mechanism placed after the last call to cudaMemcpy()) */ cudaFree(stepsD); cudaFree(primesD); for (i = 0; i < BLOCKS; i++) printf("N=%lu, steps: %f\n", primes[i], steps[i]); return 0; }
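The device-side brent() above interleaves the walk state (a, b) with the cycle search, which makes the control flow harder to read. A generic host-side sketch of the same Brent cycle detection (illustrative only; the real walk threads extra exponent state through func):

// Returns lambda + mu: steps from x0 until the iteration of f first revisits a state.
template <typename State, typename F>
unsigned long long brent_reference(State x0, F f) {
    unsigned long long power = 1, lambda = 1;
    State tortoise = x0, hare = f(x0);
    while (tortoise != hare) {                 // find the cycle length lambda
        if (power == lambda) { tortoise = hare; power *= 2; lambda = 0; }
        hare = f(hare);
        ++lambda;
    }
    tortoise = hare = x0;
    for (unsigned long long i = 0; i < lambda; ++i) hare = f(hare);
    unsigned long long mu = 0;
    while (tortoise != hare) {                 // find the tail length mu
        tortoise = f(tortoise);
        hare = f(hare);
        ++mu;
    }
    return lambda + mu;
}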
da6dc43b4b31d5125d767b0a40dda9d5559bbb69.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void f() {} void kernel() {hipLaunchKernelGGL(( f), dim3(1), dim3(1), 0, 0, ); }
da6dc43b4b31d5125d767b0a40dda9d5559bbb69.cu
__global__ void f() {}

void kernel() { f<<<1, 1>>>(); }
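This pair is the smallest possible hipify example: only the launch syntax changes. A slightly extended sketch (g and kernel_checked are invented names) showing the usual error check after a launch; the HIP equivalent is identical with hipGetLastError, hipSuccess, hipGetErrorString and hipDeviceSynchronize.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void g() {}

void kernel_checked() {
    g<<<1, 1>>>();
    cudaError_t err = cudaGetLastError();      // catch launch-configuration errors
    if (err != cudaSuccess)
        printf("launch failed: %s\n", cudaGetErrorString(err));
    cudaDeviceSynchronize();                   // surface asynchronous errors too
}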
26aa869356bf4db0397dc7b29eb1b67a17d80022.hip
// !!! This is a file automatically generated by hipify!!! #include "opencv/cv.h" #include "opencv/highgui.h" #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #define PI 3.14159265358979323846 #define WIDTH 32 // GPU __global__ void Blur_Kernel(unsigned char *d_Blue, unsigned char *d_Green, unsigned char *d_Red, unsigned char *d_Blue_Blur, unsigned char *d_Green_Blur, unsigned char *d_Red_Blur, float *d_weightArr, int blurRadius, int length, int COL, int ROW); void gaussian_blur(unsigned char *Blue, unsigned char *Green, unsigned char *Red, unsigned char *Blue_Blur, unsigned char *Green_Blur, unsigned char *Red_Blur, float *weightArr, int blurRadius, int length, int COL, int ROW); // Weight Matrix void createWeightMatrix(int blurRadius); float getWeight(int blurRadius, float sigma, int x, int y); void calculateWeightMatrix(float *weightArr, int blurRadius, float sigma); void getFinalWeightMatrix(float *weightArr, int blurRadius); void printArray(float *Array, int ROW, int COL); void printArrayChar(unsigned char *Array, int ROW, int COL); int main(int argc, char **argv){ IplImage* sourceImg; IplImage* outputImg; if((sourceImg = cvLoadImage(argv[1], 1)) == NULL){ printf("%s cannot be openned\n", argv[1]); exit(1); } printf("height of sourceImg: %d\n", sourceImg->height); printf("width of sourceImg: %d\n", sourceImg->width); printf("size of sourceImg: %d\n", sourceImg->imageSize); outputImg = cvLoadImage(argv[1], 1); int blurRadius = atoi(argv[2]); float sigma = atof(argv[3]); int length = blurRadius * 2 + 1; float *weightArr = (float *)malloc((blurRadius * 2 + 1) * (blurRadius * 2 + 1) * sizeof(float)); calculateWeightMatrix(weightArr, blurRadius, sigma); getFinalWeightMatrix(weightArr, blurRadius); int COL_Step = sourceImg->widthStep; int COL = sourceImg->width; int ROW = sourceImg->height; // Input BGR Array unsigned char *Blue = (unsigned char *)malloc(ROW * COL * sizeof(unsigned char)); unsigned char *Green = (unsigned char *)malloc(ROW * COL * sizeof(unsigned char)); unsigned char *Red = (unsigned char *)malloc(ROW * COL * sizeof(unsigned char)); // Output BGR Array unsigned char *Blue_Blur = (unsigned char *)malloc(ROW * COL * sizeof(unsigned char)); unsigned char *Green_Blur = (unsigned char *)malloc(ROW * COL * sizeof(unsigned char)); unsigned char *Red_Blur = (unsigned char *)malloc(ROW * COL * sizeof(unsigned char)); // for(int i = 0; i < ROW; i++){ for(int j = 0; j < COL_Step; j = j + 3){ Blue[i * COL + (j/3)] = sourceImg->imageData[i * COL_Step + j]; Green[i * COL + (j/3)] = sourceImg->imageData[i * COL_Step + j + 1]; Red[i * COL + (j/3)] = sourceImg->imageData[i * COL_Step + j + 2]; } } // GPU function gaussian_blur(Blue, Green, Red, Blue_Blur, Green_Blur, Red_Blur, weightArr, blurRadius, length, COL, ROW); // Set BGR To Output Image for(int i = 0; i < ROW; i++){ for(int j = 0; j < COL_Step; j = j + 3){ outputImg->imageData[i * COL_Step + j] = Blue_Blur[i * COL + (j/3)]; outputImg->imageData[i * COL_Step + j + 1] = Green_Blur[i * COL + (j/3)]; outputImg->imageData[i * COL_Step + j + 2] = Red_Blur[i * COL + (j/3)]; } } cvSaveImage("noG_black_cat.jpg", outputImg, 0); // cvSaveImage("output2.jpg", outputImg, 0); cvShowImage("sourceImg", sourceImg); cvShowImage("GPU", outputImg); cvWaitKey(0); cvDestroyWindow("sourceImg"); cvReleaseImage(&sourceImg); cvDestroyWindow("GPU"); cvReleaseImage(&outputImg); free(Blue); free(Green); free(Red); free(Blue_Blur); free(Green_Blur); free(Red_Blur); free(weightArr); return 0; } __global__ 
void Blur_Kernel(unsigned char *d_Blue, unsigned char *d_Green, unsigned char *d_Red, unsigned char *d_Blue_Blur, unsigned char *d_Green_Blur, unsigned char *d_Red_Blur, float *d_weightArr, int blurRadius, int length, int COL, int ROW){ int row = (blockIdx.y * blockDim.y) + threadIdx.y; int col = (blockIdx.x * blockDim.x) + threadIdx.x; int i, j; float BBB = 0; float GGG = 0; float RRR = 0; if(row < ROW && col < COL){ for(j = 0; j < length; j++){ for(i = 0; i < length; i++){ if(((row - blurRadius + j) < 0) || ((col - blurRadius + i) < 0) || ((row - blurRadius + j) >= ROW) || ((col - blurRadius + i) >= COL)){ // do nothing } else { BBB += ((float)d_Blue[(row - blurRadius + j) * COL + (col - blurRadius + i)] * d_weightArr[j * length + i]); GGG += ((float)d_Green[(row - blurRadius + j) * COL + (col - blurRadius + i)] * d_weightArr[j * length + i]); RRR += ((float)d_Red[(row - blurRadius + j) * COL + (col - blurRadius + i)] * d_weightArr[j * length + i]); } // if(((col - blurRadius + j) >= 0) && ((row - blurRadius + i) >= 0) && ((col - blurRadius + j) < COL) && ((row - blurRadius + i) < ROW)){ // BBB += ((float)d_Blue[(row - blurRadius + j) * COL + (col - blurRadius + i)] * d_weightArr[j * length + i]); // GGG += ((float)d_Green[(row - blurRadius + j) * COL + (col - blurRadius + i)] * d_weightArr[j * length + i]); // RRR += ((float)d_Red[(row - blurRadius + j) * COL + (col - blurRadius + i)] * d_weightArr[j * length + i]); // } } } d_Blue_Blur[row * COL + col] = (unsigned char)BBB; d_Green_Blur[row * COL + col] = (unsigned char)GGG; d_Red_Blur[row * COL + col] = (unsigned char)RRR; } } void gaussian_blur(unsigned char *Blue, unsigned char *Green, unsigned char *Red, unsigned char *Blue_Blur, unsigned char *Green_Blur, unsigned char *Red_Blur, float *weightArr, int blurRadius, int length, int COL, int ROW){ size_t size_BGR = ROW * COL * sizeof(unsigned char); size_t size_weight = length * length * sizeof(float); unsigned char *d_Blue, *d_Green, *d_Red, *d_Blue_Blur, *d_Green_Blur, *d_Red_Blur; float *d_weightArr; // Allocate hipMalloc((void **)&d_Blue, size_BGR); hipMemcpy(d_Blue, Blue, size_BGR, hipMemcpyHostToDevice); hipMalloc((void **)&d_Green, size_BGR); hipMemcpy(d_Green, Green, size_BGR, hipMemcpyHostToDevice); hipMalloc((void **)&d_Red, size_BGR); hipMemcpy(d_Red, Red, size_BGR, hipMemcpyHostToDevice); hipMalloc((void **)&d_weightArr, size_weight); hipMemcpy(d_weightArr, weightArr, size_weight, hipMemcpyHostToDevice); hipMalloc((void **)&d_Blue_Blur, size_BGR); hipMalloc((void **)&d_Green_Blur, size_BGR); hipMalloc((void **)&d_Red_Blur, size_BGR); // Setup int gridDim_X = (COL + 31)/32; int gridDim_Y = (ROW + 31)/32; dim3 dimGrid(gridDim_X, gridDim_Y); dim3 dimBlock(WIDTH, WIDTH); // Get start time event hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); // Invoke kernel hipLaunchKernelGGL(( Blur_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_Blue, d_Green, d_Red, d_Blue_Blur, d_Green_Blur, d_Red_Blur, d_weightArr, blurRadius, length, COL, ROW); hipError_t cuda_err = hipGetLastError(); if ( hipSuccess != cuda_err ){ printf("before kernel call: error = %s\n", hipGetErrorString (cuda_err)); exit(1) ; } // Get stop time event hipEventRecord(stop, 0); hipEventSynchronize(stop); // Compute execution time float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); printf("GPU time: %13f msec\n", elapsedTime); hipEventDestroy(start); hipEventDestroy(stop); // Read P from device memory hipMemcpy(Blue_Blur, d_Blue_Blur, size_BGR, 
hipMemcpyDeviceToHost); hipMemcpy(Green_Blur, d_Green_Blur, size_BGR, hipMemcpyDeviceToHost); hipMemcpy(Red_Blur, d_Red_Blur, size_BGR, hipMemcpyDeviceToHost); // Free device memory hipFree(d_Blue); hipFree(d_Green); hipFree(d_Red); hipFree(d_Blue_Blur); hipFree(d_Green_Blur); hipFree(d_Red_Blur); hipFree(d_weightArr); } float getWeight(int blurRadius, float sigma, int x, int y){ //float sigma = 5.0;//(blurRadius * 2 + 1) / 2; float weight = (1 / (2 * PI * sigma * sigma)) * exp(-(x * x + y * y)/(2 * sigma * sigma)); return weight; } void calculateWeightMatrix(float *weightArr, int blurRadius, float sigma){ int length = blurRadius * 2 + 1; for(int i = 0; i < length; i++){ for(int j = 0; j < length; j++){ weightArr[i * length + j] = 0.0;//getWeight(blurRadius, sigma, j - blurRadius, blurRadius - i); } } } void getFinalWeightMatrix(float *weightArr, int blurRadius){ int length = blurRadius * 2 + 1; float weightSum = 0; for(int i = 0; i < length; i++){ for(int j = 0; j < length; j++){ weightSum += weightArr[i * length + j]; } } for(int i = 0; i < length; i++){ for(int j = 0; j < length; j++ ){ weightArr[i * length + j] = (float) 1 / (length * length);//weightArr[i * length + j] / weightSum; } } } void printArrayChar(unsigned char *Array, int ROW, int COL){ int x, y; for(y = 0; y != ROW; ++y) { for(x = 0; x != COL; ++x) printf("%u ", Array[y * COL + x]); printf("\n"); } printf("==============================================================\n"); } void printArray(float *Array, int ROW, int COL){ int x, y; for(y = 0; y != ROW; ++y) { for(x = 0; x != COL; ++x) printf("%lf ", Array[y * COL + x]); printf("\n"); } printf("==============================================================\n"); }
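/*
 * Editor's sketch (not part of the original sources): in the file above,
 * calculateWeightMatrix() fills the kernel with 0.0 (the getWeight() call is
 * commented out) and getFinalWeightMatrix() then overwrites every entry with
 * 1/(length*length), so the "Gaussian" blur actually runs as a box blur and
 * sigma goes unused. The helper below shows one way the disabled Gaussian path
 * could be restored, reusing the file's own getWeight(); the function name
 * calculateGaussianWeightMatrix is illustrative and is an assumption about the
 * intended behaviour, not the author's code.
 */
void calculateGaussianWeightMatrix(float *weightArr, int blurRadius, float sigma) {
    int length = blurRadius * 2 + 1;
    float weightSum = 0.0f;
    for (int i = 0; i < length; i++) {
        for (int j = 0; j < length; j++) {
            /* offsets run from -blurRadius..+blurRadius around the kernel centre */
            float w = getWeight(blurRadius, sigma, j - blurRadius, blurRadius - i);
            weightArr[i * length + j] = w;
            weightSum += w;
        }
    }
    /* normalize so the weights sum to 1 and the blur preserves overall brightness */
    for (int k = 0; k < length * length; k++) {
        weightArr[k] /= weightSum;
    }
}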
26aa869356bf4db0397dc7b29eb1b67a17d80022.cu
#include "opencv/cv.h" #include "opencv/highgui.h" #include <cuda_runtime.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #define PI 3.14159265358979323846 #define WIDTH 32 // GPU __global__ void Blur_Kernel(unsigned char *d_Blue, unsigned char *d_Green, unsigned char *d_Red, unsigned char *d_Blue_Blur, unsigned char *d_Green_Blur, unsigned char *d_Red_Blur, float *d_weightArr, int blurRadius, int length, int COL, int ROW); void gaussian_blur(unsigned char *Blue, unsigned char *Green, unsigned char *Red, unsigned char *Blue_Blur, unsigned char *Green_Blur, unsigned char *Red_Blur, float *weightArr, int blurRadius, int length, int COL, int ROW); // Weight Matrix void createWeightMatrix(int blurRadius); float getWeight(int blurRadius, float sigma, int x, int y); void calculateWeightMatrix(float *weightArr, int blurRadius, float sigma); void getFinalWeightMatrix(float *weightArr, int blurRadius); void printArray(float *Array, int ROW, int COL); void printArrayChar(unsigned char *Array, int ROW, int COL); int main(int argc, char **argv){ IplImage* sourceImg; IplImage* outputImg; if((sourceImg = cvLoadImage(argv[1], 1)) == NULL){ printf("%s cannot be openned\n", argv[1]); exit(1); } printf("height of sourceImg: %d\n", sourceImg->height); printf("width of sourceImg: %d\n", sourceImg->width); printf("size of sourceImg: %d\n", sourceImg->imageSize); outputImg = cvLoadImage(argv[1], 1); int blurRadius = atoi(argv[2]); float sigma = atof(argv[3]); int length = blurRadius * 2 + 1; float *weightArr = (float *)malloc((blurRadius * 2 + 1) * (blurRadius * 2 + 1) * sizeof(float)); calculateWeightMatrix(weightArr, blurRadius, sigma); getFinalWeightMatrix(weightArr, blurRadius); int COL_Step = sourceImg->widthStep; int COL = sourceImg->width; int ROW = sourceImg->height; // Input BGR Array unsigned char *Blue = (unsigned char *)malloc(ROW * COL * sizeof(unsigned char)); unsigned char *Green = (unsigned char *)malloc(ROW * COL * sizeof(unsigned char)); unsigned char *Red = (unsigned char *)malloc(ROW * COL * sizeof(unsigned char)); // Output BGR Array unsigned char *Blue_Blur = (unsigned char *)malloc(ROW * COL * sizeof(unsigned char)); unsigned char *Green_Blur = (unsigned char *)malloc(ROW * COL * sizeof(unsigned char)); unsigned char *Red_Blur = (unsigned char *)malloc(ROW * COL * sizeof(unsigned char)); // for(int i = 0; i < ROW; i++){ for(int j = 0; j < COL_Step; j = j + 3){ Blue[i * COL + (j/3)] = sourceImg->imageData[i * COL_Step + j]; Green[i * COL + (j/3)] = sourceImg->imageData[i * COL_Step + j + 1]; Red[i * COL + (j/3)] = sourceImg->imageData[i * COL_Step + j + 2]; } } // GPU function gaussian_blur(Blue, Green, Red, Blue_Blur, Green_Blur, Red_Blur, weightArr, blurRadius, length, COL, ROW); // Set BGR To Output Image for(int i = 0; i < ROW; i++){ for(int j = 0; j < COL_Step; j = j + 3){ outputImg->imageData[i * COL_Step + j] = Blue_Blur[i * COL + (j/3)]; outputImg->imageData[i * COL_Step + j + 1] = Green_Blur[i * COL + (j/3)]; outputImg->imageData[i * COL_Step + j + 2] = Red_Blur[i * COL + (j/3)]; } } cvSaveImage("noG_black_cat.jpg", outputImg, 0); // cvSaveImage("output2.jpg", outputImg, 0); cvShowImage("sourceImg", sourceImg); cvShowImage("GPU", outputImg); cvWaitKey(0); cvDestroyWindow("sourceImg"); cvReleaseImage(&sourceImg); cvDestroyWindow("GPU"); cvReleaseImage(&outputImg); free(Blue); free(Green); free(Red); free(Blue_Blur); free(Green_Blur); free(Red_Blur); free(weightArr); return 0; } __global__ void Blur_Kernel(unsigned char *d_Blue, unsigned char *d_Green, 
unsigned char *d_Red, unsigned char *d_Blue_Blur, unsigned char *d_Green_Blur, unsigned char *d_Red_Blur, float *d_weightArr, int blurRadius, int length, int COL, int ROW){ int row = (blockIdx.y * blockDim.y) + threadIdx.y; int col = (blockIdx.x * blockDim.x) + threadIdx.x; int i, j; float BBB = 0; float GGG = 0; float RRR = 0; if(row < ROW && col < COL){ for(j = 0; j < length; j++){ for(i = 0; i < length; i++){ if(((row - blurRadius + j) < 0) || ((col - blurRadius + i) < 0) || ((row - blurRadius + j) >= ROW) || ((col - blurRadius + i) >= COL)){ // do nothing } else { BBB += ((float)d_Blue[(row - blurRadius + j) * COL + (col - blurRadius + i)] * d_weightArr[j * length + i]); GGG += ((float)d_Green[(row - blurRadius + j) * COL + (col - blurRadius + i)] * d_weightArr[j * length + i]); RRR += ((float)d_Red[(row - blurRadius + j) * COL + (col - blurRadius + i)] * d_weightArr[j * length + i]); } // if(((col - blurRadius + j) >= 0) && ((row - blurRadius + i) >= 0) && ((col - blurRadius + j) < COL) && ((row - blurRadius + i) < ROW)){ // BBB += ((float)d_Blue[(row - blurRadius + j) * COL + (col - blurRadius + i)] * d_weightArr[j * length + i]); // GGG += ((float)d_Green[(row - blurRadius + j) * COL + (col - blurRadius + i)] * d_weightArr[j * length + i]); // RRR += ((float)d_Red[(row - blurRadius + j) * COL + (col - blurRadius + i)] * d_weightArr[j * length + i]); // } } } d_Blue_Blur[row * COL + col] = (unsigned char)BBB; d_Green_Blur[row * COL + col] = (unsigned char)GGG; d_Red_Blur[row * COL + col] = (unsigned char)RRR; } } void gaussian_blur(unsigned char *Blue, unsigned char *Green, unsigned char *Red, unsigned char *Blue_Blur, unsigned char *Green_Blur, unsigned char *Red_Blur, float *weightArr, int blurRadius, int length, int COL, int ROW){ size_t size_BGR = ROW * COL * sizeof(unsigned char); size_t size_weight = length * length * sizeof(float); unsigned char *d_Blue, *d_Green, *d_Red, *d_Blue_Blur, *d_Green_Blur, *d_Red_Blur; float *d_weightArr; // Allocate cudaMalloc((void **)&d_Blue, size_BGR); cudaMemcpy(d_Blue, Blue, size_BGR, cudaMemcpyHostToDevice); cudaMalloc((void **)&d_Green, size_BGR); cudaMemcpy(d_Green, Green, size_BGR, cudaMemcpyHostToDevice); cudaMalloc((void **)&d_Red, size_BGR); cudaMemcpy(d_Red, Red, size_BGR, cudaMemcpyHostToDevice); cudaMalloc((void **)&d_weightArr, size_weight); cudaMemcpy(d_weightArr, weightArr, size_weight, cudaMemcpyHostToDevice); cudaMalloc((void **)&d_Blue_Blur, size_BGR); cudaMalloc((void **)&d_Green_Blur, size_BGR); cudaMalloc((void **)&d_Red_Blur, size_BGR); // Setup int gridDim_X = (COL + 31)/32; int gridDim_Y = (ROW + 31)/32; dim3 dimGrid(gridDim_X, gridDim_Y); dim3 dimBlock(WIDTH, WIDTH); // Get start time event cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); // Invoke kernel Blur_Kernel<<<dimGrid, dimBlock>>>(d_Blue, d_Green, d_Red, d_Blue_Blur, d_Green_Blur, d_Red_Blur, d_weightArr, blurRadius, length, COL, ROW); cudaError_t cuda_err = cudaGetLastError(); if ( cudaSuccess != cuda_err ){ printf("before kernel call: error = %s\n", cudaGetErrorString (cuda_err)); exit(1) ; } // Get stop time event cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Compute execution time float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); printf("GPU time: %13f msec\n", elapsedTime); cudaEventDestroy(start); cudaEventDestroy(stop); // Read P from device memory cudaMemcpy(Blue_Blur, d_Blue_Blur, size_BGR, cudaMemcpyDeviceToHost); cudaMemcpy(Green_Blur, d_Green_Blur, size_BGR, 
cudaMemcpyDeviceToHost); cudaMemcpy(Red_Blur, d_Red_Blur, size_BGR, cudaMemcpyDeviceToHost); // Free device memory cudaFree(d_Blue); cudaFree(d_Green); cudaFree(d_Red); cudaFree(d_Blue_Blur); cudaFree(d_Green_Blur); cudaFree(d_Red_Blur); cudaFree(d_weightArr); } float getWeight(int blurRadius, float sigma, int x, int y){ //float sigma = 5.0;//(blurRadius * 2 + 1) / 2; float weight = (1 / (2 * PI * sigma * sigma)) * exp(-(x * x + y * y)/(2 * sigma * sigma)); return weight; } void calculateWeightMatrix(float *weightArr, int blurRadius, float sigma){ int length = blurRadius * 2 + 1; for(int i = 0; i < length; i++){ for(int j = 0; j < length; j++){ weightArr[i * length + j] = 0.0;//getWeight(blurRadius, sigma, j - blurRadius, blurRadius - i); } } } void getFinalWeightMatrix(float *weightArr, int blurRadius){ int length = blurRadius * 2 + 1; float weightSum = 0; for(int i = 0; i < length; i++){ for(int j = 0; j < length; j++){ weightSum += weightArr[i * length + j]; } } for(int i = 0; i < length; i++){ for(int j = 0; j < length; j++ ){ weightArr[i * length + j] = (float) 1 / (length * length);//weightArr[i * length + j] / weightSum; } } } void printArrayChar(unsigned char *Array, int ROW, int COL){ int x, y; for(y = 0; y != ROW; ++y) { for(x = 0; x != COL; ++x) printf("%u ", Array[y * COL + x]); printf("\n"); } printf("==============================================================\n"); } void printArray(float *Array, int ROW, int COL){ int x, y; for(y = 0; y != ROW; ++y) { for(x = 0; x != COL; ++x) printf("%lf ", Array[y * COL + x]); printf("\n"); } printf("==============================================================\n"); }
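/*
 * Editor's sketch (not part of the original sources): gaussian_blur() above
 * issues its cudaMalloc/cudaMemcpy calls unchecked, and its only error check
 * prints "before kernel call" even though it runs after the launch. A small
 * checking macro such as the one below is a conventional way to surface those
 * failures; the name CUDA_CHECK is illustrative and does not appear in the file.
 */
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                       \
                    cudaGetErrorString(err_), __FILE__, __LINE__);            \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)

/* Possible usage inside gaussian_blur():
 *   CUDA_CHECK(cudaMalloc((void **)&d_Blue, size_BGR));
 *   CUDA_CHECK(cudaMemcpy(d_Blue, Blue, size_BGR, cudaMemcpyHostToDevice));
 *   Blur_Kernel<<<dimGrid, dimBlock>>>(...);
 *   CUDA_CHECK(cudaGetLastError());       // launch-time errors
 *   CUDA_CHECK(cudaDeviceSynchronize());  // asynchronous kernel errors
 */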
36b4f74dd0a0ad220826fd5923aec86ca6a75705.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <hip/hip_runtime.h> #include <cusparse_v2.h> #include "rocblas.h" #include <hiprand/hiprand.h> #include <helper_functions.h> #include <helper_cuda.h> #include "mex.h" #include "kcDefs.h" //see for info on anything starting with KC_ #include "kcArrayFunctions.h" /* * log_p_y * log likelihood for a poisson * y = observed count * dt = length of observation */ __device__ KC_FP_TYPE log_p_y( KC_FP_TYPE y, KC_FP_TYPE rate, KC_FP_TYPE dt) { return y*(KC_LOG(rate)+KC_LOG(dt)) - dt*rate;// - KC_GAMMALN(y+1) } __global__ void kcComputeLogPY(KC_FP_TYPE * lp_y, const KC_FP_TYPE * y, const KC_FP_TYPE * alphas, const KC_FP_TYPE dt, const int TT) { int tt = blockIdx.x*blockDim.x+threadIdx.x; if(tt < TT) { for(int ii = 0; ii < 3; ii++) { lp_y[tt+ii*TT] = log_p_y(y[tt],alphas[ii],dt); } } } /* kcSampleSMStates * kernel runs on each trial (not timebin) * outputs: * z = jump times per each trial * s = which state jumped to * sampleStats = (3,2,NT) array, spike counts observed in each hidden state (divided up by trial) * inputs * y = spike counts * trialIndex = index for y ( first spike count for trial i is y[trialIndex[i]] and the last spike count is y[trialIndex[i+1]-1] * y is indexed at 0. This array includes final value that should be length of y) * trialCoh = coherence level for each trial (coherence controls prior jump time distribution and jump to state probability) * coherence labels/indices begin at 0 instead of 1 to be consistent with C, unlike MATLAB * NT = number of trials * alpha = (3,1) array, spike rates * phi = (numCoherences,1) jump probabilities (p(s=3) = phi, p(s=2) = 1-phi), trial coherence dependent * delta_t = length of each timebin * maxJump = the longest to calculate out possible jump time values for * randU = (NT,1) array a set of uniform random numbers on [0,1] * * nbPDF = (maxJump,numberOfCoherences) array, negative binomial pdf values (up to some limit) for each of the parameters of coherences * * jumpToProbs = (maxJump*NT,2) preallocated space to do calculations over */ __global__ void kcSampleSMStates(KC_FP_TYPE * z, KC_FP_TYPE * s, KC_FP_TYPE * sampleStats, KC_FP_TYPE * y, KC_FP_TYPE * lp_y, int * trialIndex, int * trialCoh, int NT, KC_FP_TYPE * alphas, KC_FP_TYPE * phi, KC_FP_TYPE delta_t, int maxJump, KC_FP_TYPE * randU, KC_FP_TYPE * nbPDF, KC_FP_TYPE * jumpToProbs, const int TT) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if(idx < NT) { int T1 = trialIndex[idx]; int T = trialIndex[idx+1]-T1; //index in jumpToProbs for jumping to state 2 int jumpT1_2 = idx*(maxJump*2); //index in jumpToProbs for jumping to state 3 int jumpT1_3 = idx*(maxJump*2) + maxJump; int cohIndex = trialCoh[idx]*maxJump; KC_FP_TYPE p2 = (phi[trialCoh[idx]] < 1)?KC_LOG(1-phi[trialCoh[idx]]):0; KC_FP_TYPE p3 = KC_LOG(phi[trialCoh[idx]]); //calculate jump time probabilities for jump time happening within observed window (else model says jump happens after trial observations end) for(int ii = T-1; ii >= 0; ii--) { //taking a cumulative sum over p(y_{ii:end}|z=ii,s=2 or 3) jumpToProbs[jumpT1_2+ii] = ((ii < T-1)?(jumpToProbs[jumpT1_2+ii+1]):(0)) + lp_y[T1+ii+TT];//log_p_y(y[T1+ii],alphas[1],delta_t) ; jumpToProbs[jumpT1_3+ii] = ((ii < T-1)?(jumpToProbs[jumpT1_3+ii+1]):(0)) + lp_y[T1+ii+TT*2];//log_p_y(y[T1+ii],alphas[2],delta_t) ; } KC_FP_TYPE initStateCumsum = 0; KC_FP_TYPE maxLog = 0; for(int ii = 0; ii < maxJump; ii++) { // p (y_{1:t}|z==ii<=T), my comments are 
starting indexes at 1 while the code starts at 0 if(ii < T) { KC_FP_TYPE p_y_init = lp_y[T1+ii]; initStateCumsum += p_y_init; if(ii < T-1) { jumpToProbs[jumpT1_2+ii+1] += initStateCumsum; jumpToProbs[jumpT1_3+ii+1] += initStateCumsum; } } else { jumpToProbs[jumpT1_2+ii] = initStateCumsum; jumpToProbs[jumpT1_3+ii] = initStateCumsum; } jumpToProbs[jumpT1_2+ii] = jumpToProbs[jumpT1_2+ii] + nbPDF[cohIndex+ii] + p2; jumpToProbs[jumpT1_3+ii] = jumpToProbs[jumpT1_3+ii] + nbPDF[cohIndex+ii] + p3; maxLog = KC_MAX(KC_MAX(maxLog,jumpToProbs[jumpT1_2+ii]),jumpToProbs[jumpT1_3+ii]); //maxLog = jumpToProbs[jumpT1_2+ii]+jumpToProbs[jumpT1_3+ii]; } //maxLog /= (maxJump*2.0); KC_FP_TYPE maxNumToExp = 8; KC_FP_TYPE minNumToExp = 2; KC_FP_TYPE extraConst = 0; //this helps numerical stability when going from log p to p (quick and dirty method) if(maxLog > maxNumToExp) { extraConst = maxLog-maxNumToExp; } else if(maxLog < minNumToExp) { extraConst = minNumToExp-maxLog; } KC_FP_TYPE totalProbCumsum = 0; for(int ii = 0; ii < maxJump; ii++) { jumpToProbs[jumpT1_3+ii] = KC_EXP(jumpToProbs[jumpT1_3+ii] + extraConst); if(phi[trialCoh[idx]] < 1.0) { jumpToProbs[jumpT1_2+ii] = KC_EXP(jumpToProbs[jumpT1_2+ii] + extraConst); totalProbCumsum += jumpToProbs[jumpT1_3+ii] + jumpToProbs[jumpT1_2+ii]; } else { totalProbCumsum += jumpToProbs[jumpT1_3+ii]; jumpToProbs[jumpT1_2+ii] = 0.0; } } //goes back through and finds a sampling time + sample to state KC_FP_TYPE post_cdf = 0; int switchFound = -1; int switchTime = 0; KC_FP_TYPE randn = randU[idx] * totalProbCumsum; for(int ii = 0; ii < maxJump && switchFound < 1; ii++) { post_cdf += jumpToProbs[jumpT1_2+ii]; if(post_cdf > randn && phi[trialCoh[idx]] < 1) { switchFound = 2; switchTime = ii; } else { post_cdf += jumpToProbs[jumpT1_3+ii]; if(post_cdf > randn) { switchFound = 3; switchTime = ii; } } } if(switchFound <= 0) { //just to make sure it doesn't crash switchFound = (KC_LOG(randU[idx])>p3)?2:3; switchTime = 101; } s[idx] = switchFound; z[idx] = switchTime; //sum up observed spike count info sampleStats[idx*6] = KC_MIN((KC_FP_TYPE)switchTime,(KC_FP_TYPE)T); sampleStats[idx*6+3] = 0; sampleStats[idx*6+4] = 0; sampleStats[idx*6+5] = 0; if(switchFound == 2) { sampleStats[idx*6+1] = ((KC_FP_TYPE)T)-sampleStats[idx*6] ; sampleStats[idx*6+2] = 0.0; for(int ii = 0; ii < T;ii++) { if(ii<switchTime) { sampleStats[idx*6+3] += y[T1+ii]; } else { sampleStats[idx*6+4] += y[T1+ii]; } } } else { sampleStats[idx*6+2] = ((KC_FP_TYPE)T)-sampleStats[idx*6] ; sampleStats[idx*6+1] = 0.0; for(int ii = 0; ii < T;ii++) { if(ii<switchTime) { sampleStats[idx*6+3] += y[T1+ii]; } else { sampleStats[idx*6+5] += y[T1+ii]; } } } } } /* * [SMSamples.z(:,ss) SMSamples.s(:,ss) SMSamples.spikeStats(:,:,ss)] = kcStepTimeSampler(gpu_y,gpu_trIndex,gpu_trCoh,SMSamples.alpha(:,ss-1),SMSamples.phi(:,ss-1),nbPDF,nbCDF); * Inputs: * 0 = y (spikes) - one long vector of all the spike times for all trials (GPU array) * 1 = trial index - 0:end-1 are the trial start times (GPU array) * 2 = trial coherence - on GPU, coherence levels per each trial (GPU array) * 3 = alpha, firing rates per each state (MATLAB array) * 4 = phi, probability of switiching to state 3 for each coherence (MATLAB array) * 5 = nbPDF, negative binomial pdf values (up to some limit) for each of the parameters of coherences nbPDF(k,c) = P(z=k| p_c,r) (MATLAB array) * 6 = delta_t, length of each timebins * * Outputs (all in MATLAB array form) * 0 = z, switching times per each trial, size (NT,1) * 1 = s, which state was switched to per each trial 
(either 2 or 3), size (NT,1) * 2 = spikeStats, summary statistics on how many spikes were fired per each state of the semi-markov model and how many observations per state, size (3,2) */ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { //load up the GPU array inputs unsigned int TT = kcGetArrayNumEl(prhs[0]); KC_FP_TYPE * y = kcGetArrayData(prhs[0]); unsigned int NT = kcGetArrayNumEl(prhs[1])-1; int * trIndex = kcGetArrayDataInt(prhs[1]); int * cohIndex = kcGetArrayDataInt(prhs[2],NT); //put the precalculated negative binomial PDF, CDF values onto the GPU const mwSize * precalcSize = mxGetDimensions(prhs[5]); int maxJump = precalcSize[0]; int NC = precalcSize[1]; //mexPrintf("Sampling SM states. Max jump = %d, NC = %d, TT = %d, NT = %d\n",maxJump,NC,TT,NT); KC_FP_TYPE * nbPDF; checkCudaErrors(hipMalloc((void**)&nbPDF,sizeof(KC_FP_TYPE)*NC*maxJump)); checkCudaErrors(hipMemcpy(nbPDF,(KC_FP_TYPE*)mxGetPr(prhs[5]),sizeof(KC_FP_TYPE)*NC*maxJump,hipMemcpyHostToDevice)); KC_FP_TYPE dt = mxGetScalar(prhs[6]); //put model parameters onto the GPU KC_FP_TYPE * alphas; checkCudaErrors(hipMalloc((void**)&alphas,sizeof(KC_FP_TYPE)*3)); checkCudaErrors(hipMemcpy(alphas,(KC_FP_TYPE*)mxGetPr(prhs[3]),sizeof(KC_FP_TYPE)*3,hipMemcpyHostToDevice)); KC_FP_TYPE * phi; checkCudaErrors(hipMalloc((void**)&phi,sizeof(KC_FP_TYPE)*NC)); checkCudaErrors(hipMemcpy(phi,(KC_FP_TYPE*)mxGetPr(prhs[4]),sizeof(KC_FP_TYPE)*NC,hipMemcpyHostToDevice)); //setup space on GPU for sampling // z,s,sampleStats // log_post2 - size(TT,1) // log_post3 - size(TT,1) KC_FP_TYPE * log_post2; KC_FP_TYPE * log_post3; checkCudaErrors(hipMalloc((void**)&log_post2,sizeof(KC_FP_TYPE)*TT)); checkCudaErrors(hipMalloc((void**)&log_post3,sizeof(KC_FP_TYPE)*TT)); KC_FP_TYPE * lp_y; checkCudaErrors(hipMalloc((void**)&lp_y,sizeof(KC_FP_TYPE)*TT*3)); KC_FP_TYPE * z; checkCudaErrors(hipMalloc((void**)&z,sizeof(KC_FP_TYPE)*NT)); KC_FP_TYPE * s; checkCudaErrors(hipMalloc((void**)&s,sizeof(KC_FP_TYPE)*NT)); KC_FP_TYPE * sampleStats; checkCudaErrors(hipMalloc((void**)&sampleStats,sizeof(KC_FP_TYPE)*6*NT)); KC_FP_TYPE * calculationSpace; checkCudaErrors(hipMalloc((void**)&calculationSpace,sizeof(KC_FP_TYPE)*maxJump*NT*2)); //setup random number generator hiprandGenerator_t curandGen = 0; hiprandStatus_t hiprandStatus_t; hiprandStatus_t = hiprandCreateGenerator(&curandGen, HIPRAND_RNG_PSEUDO_DEFAULT); if(hiprandStatus_t != HIPRAND_STATUS_SUCCESS ) { mexPrintf("CURAND-1 error %d\n",(int)hiprandStatus_t); mexErrMsgTxt("CUDA errors sampling semi markov "); } struct timeval now; gettimeofday(&now,NULL); unsigned long long mySeed = (unsigned long long)now.tv_usec+(unsigned long long)(1e7*(unsigned long long)now.tv_sec); hiprandStatus_t = hiprandSetPseudoRandomGeneratorSeed(curandGen, mySeed); if(hiprandStatus_t != HIPRAND_STATUS_SUCCESS ) { mexPrintf("CURAND-2 error %d\n",(int)hiprandStatus_t); mexErrMsgTxt("CUDA errors sampling semi markov"); } //generate a uniform random number set (size NT*2) KC_FP_TYPE * randU; int randSize = NT+((NT%2==0)?0:1); checkCudaErrors(hipMalloc((void**)&randU,sizeof(KC_FP_TYPE)*randSize)); hiprandStatus_t = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randU,randSize); hipDeviceSynchronize(); //sample the states int nBlocks; int blockSize; int nBlocksN; int blockSizeN; blockSize = 128; nBlocks = TT/blockSize + ((TT%blockSize==0)?0:1); blockSizeN = 4; nBlocksN = NT/blockSizeN + ((NT%blockSizeN==0)?0:1); hipLaunchKernelGGL(( kcComputeLogPY), dim3(nBlocks),dim3(blockSize), 0, 0, lp_y, y, alphas, dt, TT); 
hipLaunchKernelGGL(( kcSampleSMStates), dim3(nBlocksN),dim3(blockSizeN), 0, 0, z, s, sampleStats, y, lp_y, trIndex, cohIndex, NT, alphas, phi, dt, maxJump, randU, nbPDF, calculationSpace,TT); hipDeviceSynchronize(); //combine the sample stats KC_FP_TYPE * sampleStats_local; sampleStats_local = (KC_FP_TYPE*)malloc(sizeof(KC_FP_TYPE)*6*NT); checkCudaErrors(hipMemcpy((KC_FP_TYPE*)sampleStats_local,sampleStats,sizeof(KC_FP_TYPE)*6*NT,hipMemcpyDeviceToHost)); hipDeviceSynchronize(); plhs[2] = mxCreateNumericMatrix(3,2,KC_FP_TYPE_MATLAB,mxREAL); KC_FP_TYPE * sampleStats_sum = (KC_FP_TYPE*)mxGetPr(plhs[2]); for(int jj = 0; jj < 6; jj++) { sampleStats_sum[jj] = 0; for(int ii = 0; ii < NT; ii++) { sampleStats_sum[jj] += sampleStats_local[ii*6 + jj]; } } //move sampled values to MATLAB plhs[0] = mxCreateNumericMatrix(NT,1,KC_FP_TYPE_MATLAB,mxREAL); plhs[1] = mxCreateNumericMatrix(NT,1,KC_FP_TYPE_MATLAB,mxREAL); checkCudaErrors(hipMemcpy((KC_FP_TYPE*)mxGetPr(plhs[0]),z,sizeof(KC_FP_TYPE)*NT,hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy((KC_FP_TYPE*)mxGetPr(plhs[1]),s,sizeof(KC_FP_TYPE)*NT,hipMemcpyDeviceToHost)); //clear out random number generator checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hiprandDestroyGenerator(curandGen)); //clear GPU values // negative binomial distribution items checkCudaErrors(hipFree(nbPDF)); // model params checkCudaErrors(hipFree(alphas)); checkCudaErrors(hipFree(phi)); // sampler stuff checkCudaErrors(hipFree(log_post2)); checkCudaErrors(hipFree(log_post3)); checkCudaErrors(hipFree(lp_y)); checkCudaErrors(hipFree(z)); checkCudaErrors(hipFree(s)); checkCudaErrors(hipFree(sampleStats)); free(sampleStats_local); checkCudaErrors(hipFree(calculationSpace)); // random nums checkCudaErrors(hipFree(randU)); }
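/*
 * Editor's sketch (not part of the original sources): kcSampleSMStates above
 * stabilizes its log posteriors with an ad-hoc "extraConst" shift before calling
 * KC_EXP, then samples by walking the unnormalized cumulative sum against
 * randU * totalProbCumsum. The device helper below shows the standard max-shift
 * form of that idea for a single array of log weights, reusing the KC_* macros
 * this file already uses; it is illustrative only and is not called anywhere in
 * the original sources.
 */
__device__ KC_FP_TYPE normalizeLogWeights(KC_FP_TYPE *logw, int n) {
    KC_FP_TYPE m = logw[0];
    for (int ii = 1; ii < n; ii++) {          /* find the largest log weight */
        m = KC_MAX(m, logw[ii]);
    }
    KC_FP_TYPE total = 0;
    for (int ii = 0; ii < n; ii++) {          /* exp of shifted values cannot overflow */
        logw[ii] = KC_EXP(logw[ii] - m);
        total += logw[ii];
    }
    return total;                             /* caller samples with randU * total */
}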
36b4f74dd0a0ad220826fd5923aec86ca6a75705.cu
#include <math.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <cuda_runtime.h> #include <cusparse_v2.h> #include "cublas_v2.h" #include <curand.h> #include <helper_functions.h> #include <helper_cuda.h> #include "mex.h" #include "kcDefs.h" //see for info on anything starting with KC_ #include "kcArrayFunctions.h" /* * log_p_y * log likelihood for a poisson * y = observed count * dt = length of observation */ __device__ KC_FP_TYPE log_p_y( KC_FP_TYPE y, KC_FP_TYPE rate, KC_FP_TYPE dt) { return y*(KC_LOG(rate)+KC_LOG(dt)) - dt*rate;// - KC_GAMMALN(y+1) } __global__ void kcComputeLogPY(KC_FP_TYPE * lp_y, const KC_FP_TYPE * y, const KC_FP_TYPE * alphas, const KC_FP_TYPE dt, const int TT) { int tt = blockIdx.x*blockDim.x+threadIdx.x; if(tt < TT) { for(int ii = 0; ii < 3; ii++) { lp_y[tt+ii*TT] = log_p_y(y[tt],alphas[ii],dt); } } } /* kcSampleSMStates * kernel runs on each trial (not timebin) * outputs: * z = jump times per each trial * s = which state jumped to * sampleStats = (3,2,NT) array, spike counts observed in each hidden state (divided up by trial) * inputs * y = spike counts * trialIndex = index for y ( first spike count for trial i is y[trialIndex[i]] and the last spike count is y[trialIndex[i+1]-1] * y is indexed at 0. This array includes final value that should be length of y) * trialCoh = coherence level for each trial (coherence controls prior jump time distribution and jump to state probability) * coherence labels/indices begin at 0 instead of 1 to be consistent with C, unlike MATLAB * NT = number of trials * alpha = (3,1) array, spike rates * phi = (numCoherences,1) jump probabilities (p(s=3) = phi, p(s=2) = 1-phi), trial coherence dependent * delta_t = length of each timebin * maxJump = the longest to calculate out possible jump time values for * randU = (NT,1) array a set of uniform random numbers on [0,1] * * nbPDF = (maxJump,numberOfCoherences) array, negative binomial pdf values (up to some limit) for each of the parameters of coherences * * jumpToProbs = (maxJump*NT,2) preallocated space to do calculations over */ __global__ void kcSampleSMStates(KC_FP_TYPE * z, KC_FP_TYPE * s, KC_FP_TYPE * sampleStats, KC_FP_TYPE * y, KC_FP_TYPE * lp_y, int * trialIndex, int * trialCoh, int NT, KC_FP_TYPE * alphas, KC_FP_TYPE * phi, KC_FP_TYPE delta_t, int maxJump, KC_FP_TYPE * randU, KC_FP_TYPE * nbPDF, KC_FP_TYPE * jumpToProbs, const int TT) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if(idx < NT) { int T1 = trialIndex[idx]; int T = trialIndex[idx+1]-T1; //index in jumpToProbs for jumping to state 2 int jumpT1_2 = idx*(maxJump*2); //index in jumpToProbs for jumping to state 3 int jumpT1_3 = idx*(maxJump*2) + maxJump; int cohIndex = trialCoh[idx]*maxJump; KC_FP_TYPE p2 = (phi[trialCoh[idx]] < 1)?KC_LOG(1-phi[trialCoh[idx]]):0; KC_FP_TYPE p3 = KC_LOG(phi[trialCoh[idx]]); //calculate jump time probabilities for jump time happening within observed window (else model says jump happens after trial observations end) for(int ii = T-1; ii >= 0; ii--) { //taking a cumulative sum over p(y_{ii:end}|z=ii,s=2 or 3) jumpToProbs[jumpT1_2+ii] = ((ii < T-1)?(jumpToProbs[jumpT1_2+ii+1]):(0)) + lp_y[T1+ii+TT];//log_p_y(y[T1+ii],alphas[1],delta_t) ; jumpToProbs[jumpT1_3+ii] = ((ii < T-1)?(jumpToProbs[jumpT1_3+ii+1]):(0)) + lp_y[T1+ii+TT*2];//log_p_y(y[T1+ii],alphas[2],delta_t) ; } KC_FP_TYPE initStateCumsum = 0; KC_FP_TYPE maxLog = 0; for(int ii = 0; ii < maxJump; ii++) { // p (y_{1:t}|z==ii<=T), my comments are starting indexes at 1 while the code starts at 0 if(ii < T) { 
KC_FP_TYPE p_y_init = lp_y[T1+ii]; initStateCumsum += p_y_init; if(ii < T-1) { jumpToProbs[jumpT1_2+ii+1] += initStateCumsum; jumpToProbs[jumpT1_3+ii+1] += initStateCumsum; } } else { jumpToProbs[jumpT1_2+ii] = initStateCumsum; jumpToProbs[jumpT1_3+ii] = initStateCumsum; } jumpToProbs[jumpT1_2+ii] = jumpToProbs[jumpT1_2+ii] + nbPDF[cohIndex+ii] + p2; jumpToProbs[jumpT1_3+ii] = jumpToProbs[jumpT1_3+ii] + nbPDF[cohIndex+ii] + p3; maxLog = KC_MAX(KC_MAX(maxLog,jumpToProbs[jumpT1_2+ii]),jumpToProbs[jumpT1_3+ii]); //maxLog = jumpToProbs[jumpT1_2+ii]+jumpToProbs[jumpT1_3+ii]; } //maxLog /= (maxJump*2.0); KC_FP_TYPE maxNumToExp = 8; KC_FP_TYPE minNumToExp = 2; KC_FP_TYPE extraConst = 0; //this helps numerical stability when going from log p to p (quick and dirty method) if(maxLog > maxNumToExp) { extraConst = maxLog-maxNumToExp; } else if(maxLog < minNumToExp) { extraConst = minNumToExp-maxLog; } KC_FP_TYPE totalProbCumsum = 0; for(int ii = 0; ii < maxJump; ii++) { jumpToProbs[jumpT1_3+ii] = KC_EXP(jumpToProbs[jumpT1_3+ii] + extraConst); if(phi[trialCoh[idx]] < 1.0) { jumpToProbs[jumpT1_2+ii] = KC_EXP(jumpToProbs[jumpT1_2+ii] + extraConst); totalProbCumsum += jumpToProbs[jumpT1_3+ii] + jumpToProbs[jumpT1_2+ii]; } else { totalProbCumsum += jumpToProbs[jumpT1_3+ii]; jumpToProbs[jumpT1_2+ii] = 0.0; } } //goes back through and finds a sampling time + sample to state KC_FP_TYPE post_cdf = 0; int switchFound = -1; int switchTime = 0; KC_FP_TYPE randn = randU[idx] * totalProbCumsum; for(int ii = 0; ii < maxJump && switchFound < 1; ii++) { post_cdf += jumpToProbs[jumpT1_2+ii]; if(post_cdf > randn && phi[trialCoh[idx]] < 1) { switchFound = 2; switchTime = ii; } else { post_cdf += jumpToProbs[jumpT1_3+ii]; if(post_cdf > randn) { switchFound = 3; switchTime = ii; } } } if(switchFound <= 0) { //just to make sure it doesn't crash switchFound = (KC_LOG(randU[idx])>p3)?2:3; switchTime = 101; } s[idx] = switchFound; z[idx] = switchTime; //sum up observed spike count info sampleStats[idx*6] = KC_MIN((KC_FP_TYPE)switchTime,(KC_FP_TYPE)T); sampleStats[idx*6+3] = 0; sampleStats[idx*6+4] = 0; sampleStats[idx*6+5] = 0; if(switchFound == 2) { sampleStats[idx*6+1] = ((KC_FP_TYPE)T)-sampleStats[idx*6] ; sampleStats[idx*6+2] = 0.0; for(int ii = 0; ii < T;ii++) { if(ii<switchTime) { sampleStats[idx*6+3] += y[T1+ii]; } else { sampleStats[idx*6+4] += y[T1+ii]; } } } else { sampleStats[idx*6+2] = ((KC_FP_TYPE)T)-sampleStats[idx*6] ; sampleStats[idx*6+1] = 0.0; for(int ii = 0; ii < T;ii++) { if(ii<switchTime) { sampleStats[idx*6+3] += y[T1+ii]; } else { sampleStats[idx*6+5] += y[T1+ii]; } } } } } /* * [SMSamples.z(:,ss) SMSamples.s(:,ss) SMSamples.spikeStats(:,:,ss)] = kcStepTimeSampler(gpu_y,gpu_trIndex,gpu_trCoh,SMSamples.alpha(:,ss-1),SMSamples.phi(:,ss-1),nbPDF,nbCDF); * Inputs: * 0 = y (spikes) - one long vector of all the spike times for all trials (GPU array) * 1 = trial index - 0:end-1 are the trial start times (GPU array) * 2 = trial coherence - on GPU, coherence levels per each trial (GPU array) * 3 = alpha, firing rates per each state (MATLAB array) * 4 = phi, probability of switiching to state 3 for each coherence (MATLAB array) * 5 = nbPDF, negative binomial pdf values (up to some limit) for each of the parameters of coherences nbPDF(k,c) = P(z=k| p_c,r) (MATLAB array) * 6 = delta_t, length of each timebins * * Outputs (all in MATLAB array form) * 0 = z, switching times per each trial, size (NT,1) * 1 = s, which state was switched to per each trial (either 2 or 3), size (NT,1) * 2 = spikeStats, summary statistics 
on how many spikes were fired per each state of the semi-markov model and how many observations per state, size (3,2) */ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { //load up the GPU array inputs unsigned int TT = kcGetArrayNumEl(prhs[0]); KC_FP_TYPE * y = kcGetArrayData(prhs[0]); unsigned int NT = kcGetArrayNumEl(prhs[1])-1; int * trIndex = kcGetArrayDataInt(prhs[1]); int * cohIndex = kcGetArrayDataInt(prhs[2],NT); //put the precalculated negative binomial PDF, CDF values onto the GPU const mwSize * precalcSize = mxGetDimensions(prhs[5]); int maxJump = precalcSize[0]; int NC = precalcSize[1]; //mexPrintf("Sampling SM states. Max jump = %d, NC = %d, TT = %d, NT = %d\n",maxJump,NC,TT,NT); KC_FP_TYPE * nbPDF; checkCudaErrors(cudaMalloc((void**)&nbPDF,sizeof(KC_FP_TYPE)*NC*maxJump)); checkCudaErrors(cudaMemcpy(nbPDF,(KC_FP_TYPE*)mxGetPr(prhs[5]),sizeof(KC_FP_TYPE)*NC*maxJump,cudaMemcpyHostToDevice)); KC_FP_TYPE dt = mxGetScalar(prhs[6]); //put model parameters onto the GPU KC_FP_TYPE * alphas; checkCudaErrors(cudaMalloc((void**)&alphas,sizeof(KC_FP_TYPE)*3)); checkCudaErrors(cudaMemcpy(alphas,(KC_FP_TYPE*)mxGetPr(prhs[3]),sizeof(KC_FP_TYPE)*3,cudaMemcpyHostToDevice)); KC_FP_TYPE * phi; checkCudaErrors(cudaMalloc((void**)&phi,sizeof(KC_FP_TYPE)*NC)); checkCudaErrors(cudaMemcpy(phi,(KC_FP_TYPE*)mxGetPr(prhs[4]),sizeof(KC_FP_TYPE)*NC,cudaMemcpyHostToDevice)); //setup space on GPU for sampling // z,s,sampleStats // log_post2 - size(TT,1) // log_post3 - size(TT,1) KC_FP_TYPE * log_post2; KC_FP_TYPE * log_post3; checkCudaErrors(cudaMalloc((void**)&log_post2,sizeof(KC_FP_TYPE)*TT)); checkCudaErrors(cudaMalloc((void**)&log_post3,sizeof(KC_FP_TYPE)*TT)); KC_FP_TYPE * lp_y; checkCudaErrors(cudaMalloc((void**)&lp_y,sizeof(KC_FP_TYPE)*TT*3)); KC_FP_TYPE * z; checkCudaErrors(cudaMalloc((void**)&z,sizeof(KC_FP_TYPE)*NT)); KC_FP_TYPE * s; checkCudaErrors(cudaMalloc((void**)&s,sizeof(KC_FP_TYPE)*NT)); KC_FP_TYPE * sampleStats; checkCudaErrors(cudaMalloc((void**)&sampleStats,sizeof(KC_FP_TYPE)*6*NT)); KC_FP_TYPE * calculationSpace; checkCudaErrors(cudaMalloc((void**)&calculationSpace,sizeof(KC_FP_TYPE)*maxJump*NT*2)); //setup random number generator curandGenerator_t curandGen = 0; curandStatus_t curandStatus; curandStatus = curandCreateGenerator(&curandGen, CURAND_RNG_PSEUDO_DEFAULT); if(curandStatus != CURAND_STATUS_SUCCESS ) { mexPrintf("CURAND-1 error %d\n",(int)curandStatus); mexErrMsgTxt("CUDA errors sampling semi markov "); } struct timeval now; gettimeofday(&now,NULL); unsigned long long mySeed = (unsigned long long)now.tv_usec+(unsigned long long)(1e7*(unsigned long long)now.tv_sec); curandStatus = curandSetPseudoRandomGeneratorSeed(curandGen, mySeed); if(curandStatus != CURAND_STATUS_SUCCESS ) { mexPrintf("CURAND-2 error %d\n",(int)curandStatus); mexErrMsgTxt("CUDA errors sampling semi markov"); } //generate a uniform random number set (size NT*2) KC_FP_TYPE * randU; int randSize = NT+((NT%2==0)?0:1); checkCudaErrors(cudaMalloc((void**)&randU,sizeof(KC_FP_TYPE)*randSize)); curandStatus = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randU,randSize); cudaDeviceSynchronize(); //sample the states int nBlocks; int blockSize; int nBlocksN; int blockSizeN; blockSize = 128; nBlocks = TT/blockSize + ((TT%blockSize==0)?0:1); blockSizeN = 4; nBlocksN = NT/blockSizeN + ((NT%blockSizeN==0)?0:1); kcComputeLogPY<<<nBlocks,blockSize>>>(lp_y, y, alphas, dt, TT); kcSampleSMStates<<<nBlocksN,blockSizeN>>>(z, s, sampleStats, y, lp_y, trIndex, cohIndex, NT, alphas, phi, dt, maxJump, randU, 
nbPDF, calculationSpace,TT); cudaDeviceSynchronize(); //combine the sample stats KC_FP_TYPE * sampleStats_local; sampleStats_local = (KC_FP_TYPE*)malloc(sizeof(KC_FP_TYPE)*6*NT); checkCudaErrors(cudaMemcpy((KC_FP_TYPE*)sampleStats_local,sampleStats,sizeof(KC_FP_TYPE)*6*NT,cudaMemcpyDeviceToHost)); cudaDeviceSynchronize(); plhs[2] = mxCreateNumericMatrix(3,2,KC_FP_TYPE_MATLAB,mxREAL); KC_FP_TYPE * sampleStats_sum = (KC_FP_TYPE*)mxGetPr(plhs[2]); for(int jj = 0; jj < 6; jj++) { sampleStats_sum[jj] = 0; for(int ii = 0; ii < NT; ii++) { sampleStats_sum[jj] += sampleStats_local[ii*6 + jj]; } } //move sampled values to MATLAB plhs[0] = mxCreateNumericMatrix(NT,1,KC_FP_TYPE_MATLAB,mxREAL); plhs[1] = mxCreateNumericMatrix(NT,1,KC_FP_TYPE_MATLAB,mxREAL); checkCudaErrors(cudaMemcpy((KC_FP_TYPE*)mxGetPr(plhs[0]),z,sizeof(KC_FP_TYPE)*NT,cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy((KC_FP_TYPE*)mxGetPr(plhs[1]),s,sizeof(KC_FP_TYPE)*NT,cudaMemcpyDeviceToHost)); //clear out random number generator checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(curandDestroyGenerator(curandGen)); //clear GPU values // negative binomial distribution items checkCudaErrors(cudaFree(nbPDF)); // model params checkCudaErrors(cudaFree(alphas)); checkCudaErrors(cudaFree(phi)); // sampler stuff checkCudaErrors(cudaFree(log_post2)); checkCudaErrors(cudaFree(log_post3)); checkCudaErrors(cudaFree(lp_y)); checkCudaErrors(cudaFree(z)); checkCudaErrors(cudaFree(s)); checkCudaErrors(cudaFree(sampleStats)); free(sampleStats_local); checkCudaErrors(cudaFree(calculationSpace)); // random nums checkCudaErrors(cudaFree(randU)); }
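/*
 * Editor's sketch (not part of the original sources): both versions of this MEX
 * file size their launch grids with "N/blockSize + ((N % blockSize == 0) ? 0 : 1)".
 * The helper below is the equivalent ceiling-division idiom, included only to
 * clarify that launch arithmetic; the name divUp is illustrative and does not
 * appear in the sources.
 */
static inline int divUp(int n, int blockSize) {
    return (n + blockSize - 1) / blockSize;   /* smallest number of blocks covering n threads */
}

/* Example: kcComputeLogPY<<<divUp(TT, 128), 128>>>(lp_y, y, alphas, dt, TT); */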
59ee042c73eca9f8314343340066475f08699e77.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2011, Alex Krizhevsky ([email protected]) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <cutil_inline.h> #include <iostream> #include <layer_kernels.cuh> #include <layer.cuh> #include <data.cuh> #include <util.cuh> #include <cudaconv2.cuh> #include <matrix.h> #include <GPUmonitor.h> #include <cmath> #include <algorithm> #include <vector> #include <cstring> using namespace std; extern ConvNet* model; extern GPUmonitor *gpuMonitor; float thres = 1.0e5; bool verbose = 0; #define checkNVMatrixNan(dmat,msg) _checkNVMatrixNan(dmat,msg,__FILE__,__LINE__) void _checkNVMatrixNan(NVMatrix &dMat, string msg, const char* filenm, const int linenum) { //int leadingDim = dMat.getLeadingDim(); //int stride = dMat.getStride(); //int followingDim = dMat.getFollowingDim(); //printf("ldDim:%d stride:%d followingDim:%d\n",leadingDim,stride,followingDim); if (isnan(dMat.sum())) { printf("_checkNVMatrixNan File:%s line:%d\n", filenm, linenum); dMat.printShape(msg.c_str()); dMat.fprint(msg.c_str(), dMat.getNumRows(), dMat.getNumCols()); dMat.print(2, 2); printf("min:%f max:%f mean:%f\n", dMat.min(), dMat.max(), dMat.mean()); exit(1); } } /* * ======================= * Layer * ======================= */ Layer::Layer(ConvNet* convNet, PyObject* paramsDict, bool trans) : _convNet(convNet), _trans(trans) { _name = pyDictGetString(paramsDict, "name"); _type = pyDictGetString(paramsDict, "type"); _numGradProducersNext = 0; _foundGradConsumers = false; _gradConsumer = pyDictGetInt(paramsDict, "gradConsumer"); _actsTarget = pyDictGetInt(paramsDict, "actsTarget"); _actsGradTarget = pyDictGetInt(paramsDict, "actsGradTarget"); _conserveMem = pyDictGetInt(paramsDict, "conserveMem"); _outputs = _actsTarget < 0 ? new NVMatrix() : NULL; _actsGrad = _actsGradTarget < 0 ? 
new NVMatrix() : NULL; // can be commented out since it's done in postInit() _GPU = pyDictGetInt(paramsDict, "GPU"); #ifdef MULTIGPU hipSetDevice(_GPU); NVMatrix::checkCUDAError(hipEventCreate(&_fpropEvent), "Layer::Layer Layer::Layer"); #endif printf(" init layer: %s\n", _name.c_str()); printf("_actsTarget:%d _actsGradTarget:%d _GPU:%d\n", _actsTarget, _actsGradTarget, _GPU); } void Layer::fpropNext(PASS_TYPE passType) { for (int i = 0; i < _next.size(); i++) { // printf("_next[%d] name:%s \n",i,_name.c_str()); // if(&_next[i]->getActs()) // printf("_next[%d] name:%s rows:%d cols:%d\n",i,_name.c_str(), // _next[i]->getActs().getNumRows(),_next[i]->getActs().getNumCols()); _next[i]->fprop(passType); } } void Layer::truncBwdActs() { // Only truncate actsGrad if I own it if (_conserveMem && _actsGradTarget < 0) { getActsGrad().truncate(); } if (_conserveMem) { getActs().truncate(); } } void Layer::fpropPreCommon(NVMatrixV& v, PASS_TYPE passType) { // Do nothing by default } void Layer::fpropPostCommon(NVMatrixV& v, PASS_TYPE passType) { } void Layer::bpropPreCommon(NVMatrix& v, PASS_TYPE passType) { #ifdef MULTIGPU NVMatrix::checkCUDAError(hipSetDevice(_GPU), "Layer::bpropPreCommon hipSetDevice"); #endif // Do nothing by default } void Layer::bpropPostCommon(NVMatrix& v, PASS_TYPE passType) { #ifdef MULTIGPU NVMatrix::checkCUDAError(hipSetDevice(_GPU), "Layer::bpropPostCommon hipSetDevice"); #endif //float gradMin=getActsGrad().min(); //float gradMax=getActsGrad().max(); //float gradMean=getActsGrad().mean(); //if(abs(gradMin)>thres || abs(gradMax)>thres){ // printf("layer:%s gradMin:%f gradMax:%f gradMean:%f\n",_name.c_str(), // gradMin,gradMax,gradMean); //} } void Layer::fprop(PASS_TYPE passType) { _rcvdFInputs += 1; if (_rcvdFInputs == _prev.size()) { #ifdef MULTIGPU //reset _bpropEventID = 0; NVMatrix::checkCUDAError(hipSetDevice(_GPU), "Layer::fprop(PASS_TYPE passType) hipSetDevice"); for (int i = 0; i < _prev.size(); ++i) { NVMatrix::checkCUDAError( hipEventSynchronize(_prev[i]->getFpropEvent()), "Layer::fprop(PASS_TYPE passType) (hipEventSynchronize"); } #endif NVMatrixV v; for (int i = 0; i < _prev.size(); i++) { v.push_back(&_prev[i]->getActs()); } fprop(v, passType); } } void Layer::fprop(NVMatrix& v, PASS_TYPE passType) { NVMatrixV vl; vl.push_back(&v); fprop(vl, passType); } void Layer::fprop(NVMatrixV& v, PASS_TYPE passType) { // printf("Layer %s\n",_name.c_str()); // if(_actsTarget<0) // printf("Layer %s fprop. acts rows:%d cols:%d\n",_name.c_str(), // getActs().getNumRows(),getActs().getNumCols()); assert(v.size() == _prev.size()); _inputs.clear(); _inputs.insert(_inputs.begin(), v.begin(), v.end()); _outputs = _actsTarget < 0 ? _outputs : _inputs[_actsTarget]; _rcvdFInputs = _prev.size(); for (NVMatrixV::iterator it = v.begin(); it != v.end(); ++it) { (*it)->transpose(_trans); } getActs().transpose(_trans); // printf("Layer %s fprop. acts rows:%d cols:%d\n",_name.c_str(), // getActs().getNumRows(),getActs().getNumCols()); fpropPreCommon(v, passType); // First do fprop on the input whose acts matrix I'm sharing, if any if (_actsTarget >= 0) { fpropActs(_actsTarget, 0, passType); } // Then add the rest of the inputs to that for (int i = 0; i < _prev.size(); i++) { if (i != _actsTarget) { fpropActs(i, _actsTarget >= 0 || i > 0, passType); } } if (verbose) { NVMatrix tmp(getActs()); getActs().apply(NVMatrixOps::Abs(), tmp); float mean_abs_act = tmp.sum() / tmp.getNumElements(); printf("Layer::fprop %s. 
mean_abs_act:%f\n", _name.c_str(), mean_abs_act); } //float actMax=getActs().max(),actMin=getActs().min(); //if(abs(actMax)>thres || abs(actMin)>thres){ // printf("\nlayer:%s actMax:%f actMin:%f\n",_name.c_str(),actMax,actMin); // for(int i=0;i<_inputs.size();++i){ // float inputMax=(*_inputs[i]).max(); // float inputMin=(*_inputs[i]).min(); // printf("input:%d inputMax:%f inputMin:%f\n",i,inputMax,inputMin); // } //} fpropPostCommon(v, passType); #ifdef MULTIGPU NVMatrix::checkCUDAError(hipEventRecord(_fpropEvent), "Layer::fprop(NVMatrixV& v, PASS_TYPE passType) hipEventRecord"); #endif fpropNext(passType); } void Layer::bprop(PASS_TYPE passType) { if (_rcvdBInputs == _numGradProducersNext) { _rcvdBInputs++; // avoid doing bprop computation twice #ifdef MULTIGPU for (int i = 0; i < _next.size(); ++i) { NVMatrix::checkCUDAError(hipEventSynchronize(_bpropEvent[i]), "Layer::bprop(PASS_TYPE passType) hipEventSynchronize (_bpropEvent[i])"); } #endif bprop(getActsGrad(), passType); } } void Layer::bprop(NVMatrix& v, PASS_TYPE passType) { if (verbose & v.getNumElements() > 0) { NVMatrix tmp(v); v.apply(NVMatrixOps::Abs(), tmp); float meanAbs = tmp.sum() / tmp.getNumElements(); printf("Layer::bprop %s v rows,cols,%d %d mean Abs:%f\n", _name.c_str(), v.getNumRows(), v.getNumCols(), meanAbs); } v.transpose(_trans); for (int i = 0; i < _prev.size(); i++) { _prev[i]->getActs().transpose(_trans); _prev[i]->getActsGrad().transpose(_trans); } getActs().transpose(_trans); bpropPreCommon(v, passType); if (isGradProducer()) { // First propagate activity gradient to all layers whose activity // gradient matrix I'm definitely not sharing. for (int i = 0; i < _prev.size(); i++) { if (_prev[i]->isGradConsumer() && _actsGradTarget != i) { #ifdef MULTIGPU // do bprop on previous layer's device. // in the case where previous layer connects to multiple subsequent layers, // there is no need to synchronize the bprop for a previous layer NVMatrix::checkCUDAError(hipSetDevice(_prev[i]->getGPU()), "Layer::bprop(NVMatrix& v, PASS_TYPE passType) hipSetDevice(_prev[i]->getGPU())"); #endif bpropActs(v, i, _prev[i]->getRcvdBInputs() > 0 ? 1 : 0, passType); _prev[i]->incRcvdBInputs(); #ifdef MULTIGPU NVMatrix::checkCUDAError( hipEventRecord(_prev[i]->getNextBpropEvent()), "Layer::bprop(NVMatrix& v, PASS_TYPE passType) hipEventRecord"); #endif } } // Then propagate activity gradient to the layer whose activity gradient // matrix I'm sharing, if any. if (_actsGradTarget >= 0 && _prev[_actsGradTarget]->isGradConsumer()) { #ifdef MULTIGPU NVMatrix::checkCUDAError( hipSetDevice(_prev[_actsGradTarget]->getGPU()), "Layer::bprop(NVMatrix& v, PASS_TYPE passType) hipSetDevice(_prev[_actsGradTarget]->getGPU())"); #endif bpropActs(v, _actsGradTarget, _prev[_actsGradTarget]->getRcvdBInputs() > 0 ? 
1 : 0, passType); _prev[_actsGradTarget]->incRcvdBInputs(); #ifdef MULTIGPU NVMatrix::checkCUDAError( hipEventRecord( _prev[_actsGradTarget]->getNextBpropEvent()), "Layer::bprop(NVMatrix& v, PASS_TYPE passType) hipEventRecord(_prev[_actsGradTarget]->getNextBpropEvent())"); #endif } } truncBwdActs(); bpropPostCommon(v, passType); if (isGradProducer()) { for (int i = 0; i < _prev.size(); i++) { if (_prev[i]->isGradConsumer()) { _prev[i]->bprop(passType); } } } } void Layer::reset() { _rcvdFInputs = 0; _rcvdBInputs = 0; } string& Layer::getName() { return _name; } string& Layer::getType() { return _type; } int Layer::getRcvdFInputs() { return _rcvdFInputs; } int Layer::getRcvdBInputs() { return _rcvdBInputs; } int Layer::incRcvdBInputs() { return ++_rcvdBInputs; } void Layer::addNext(Layer* l) { _next.push_back(l); _numGradProducersNext += l->isGradProducer(); } void Layer::addPrev(Layer* l) { _prev.push_back(l); } void Layer::postInit() { // _outputs = _actsTarget < 0 ? new NVMatrix() : &_prev[_actsTarget]->getActs(); _actsGrad = _actsGradTarget < 0 ? new NVMatrix() : &_prev[_actsGradTarget]->getActsGrad(); } #ifdef MULTIGPU int Layer::getGPU() { return _GPU; } hipEvent_t Layer::getFpropEvent() { return _fpropEvent; } void Layer::initBpropEvent() { NVMatrix::checkCUDAError(hipSetDevice(_GPU), "Layer::initBpropEvent hipSetDevice"); for (int i = 0; i < _next.size(); ++i) { NVMatrix::checkCUDAError(hipEventCreate(&_bpropEvent[i]), "Layer::initBpropEvent() hipEventCreate(&_bpropEvent[i])"); } } hipEvent_t& Layer::getNextBpropEvent() { return _bpropEvent[_bpropEventID++]; } #endif // Does this layer, or some layer below it, need the gradient // for parameter updates? // Only weight layers should be grad consumers themselves. bool Layer::isGradConsumer() { if (!_foundGradConsumers) { for (int i = 0; i < _prev.size(); i++) { _gradConsumer |= _prev[i]->isGradConsumer(); } _foundGradConsumers = true; } return _gradConsumer; } // Does this layer produce gradient for layers below? 
bool Layer::isGradProducer() { return true; } vector<Layer*>& Layer::getPrev() { return _prev; } vector<Layer*>& Layer::getNext() { return _next; } NVMatrix& Layer::getActs() { assert(_outputs != NULL); return *_outputs; } NVMatrix& Layer::getActsGrad() { assert(_actsGrad != NULL); return *_actsGrad; } /* * ======================= * NeuronLayer * ======================= */ NeuronLayer::NeuronLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, true) { _neuron = &Neuron::makeNeuron(PyDict_GetItemString(paramsDict, "neuron")); } void NeuronLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { _neuron->computeInputGrad(v, _prev[0]->getActsGrad(), scaleTargets > 0); } void NeuronLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { _neuron->activate(*_inputs[0], getActs()); } /* * ======================= * WeightLayer * ======================= */ WeightLayer::WeightLayer(ConvNet* convNet, PyObject* paramsDict, bool trans, bool useGrad) : Layer(convNet, paramsDict, trans) { MatrixV& hWeights = *pyDictGetMatrixV(paramsDict, "weights"); MatrixV& hWeightsInc = *pyDictGetMatrixV(paramsDict, "weightsInc"); Matrix& hBiases = *pyDictGetMatrix(paramsDict, "biases"); Matrix& hBiasesInc = *pyDictGetMatrix(paramsDict, "biasesInc"); floatv& momW = *pyDictGetFloatV(paramsDict, "momW"); float momB = pyDictGetFloat(paramsDict, "momB"); floatv& epsW = *pyDictGetFloatV(paramsDict, "epsW"); float epsB = pyDictGetFloat(paramsDict, "epsB"); floatv& wc = *pyDictGetFloatV(paramsDict, "wc"); float wcBias = pyDictGetFloat(paramsDict, "wcB"); printf("(epsW,momW,wc) for %d inputs\n", epsW.size()); for (int i = 0; i < momW.size(); ++i) { printf("(%.12f,%.12f,%.12f) ", epsW[i], momW[i], wc[i]); } printf("\n"); printf("momB:%.12f epsB:%.12f wcBias:%.12f\n", momB, epsB, wcBias); // Source layers for shared weights intv& weightSourceLayerIndices = *pyDictGetIntV(paramsDict, "weightSourceLayerIndices"); // Weight matrix indices (inside the above source layers) for shared weights intv& weightSourceMatrixIndices = *pyDictGetIntV(paramsDict, "weightSourceMatrixIndices"); for (int i = 0; i < weightSourceLayerIndices.size(); i++) { int srcLayerIdx = weightSourceLayerIndices[i]; int matrixIdx = weightSourceMatrixIndices[i]; if (srcLayerIdx == convNet->getNumLayers()) { // Current layer _weights.addWeights(*new Weights(_weights[matrixIdx], epsW[i])); } else if (srcLayerIdx >= 0) { WeightLayer& srcLayer = *static_cast<WeightLayer*>(&convNet->getLayer(srcLayerIdx)); Weights* srcWeights = &srcLayer.getWeights(matrixIdx); _weights.addWeights(*new Weights(*srcWeights, epsW[i])); } else { _weights.addWeights( *new Weights(*hWeights[i], *hWeightsInc[i], epsW[i], wc[i], momW[i], useGrad)); } } _biases = new Weights(hBiases, hBiasesInc, epsB, wcBias, momB, true); // Epsilons for finite-difference gradient checking operation _wStep = 0.001; _bStep = 0.002; delete &weightSourceLayerIndices; delete &weightSourceMatrixIndices; delete &hWeights; delete &hWeightsInc; delete &momW; delete &epsW; delete &wc; } void WeightLayer::setWeightsEpsScale(float eps_scale) { Layer::setWeightsEpsScale(eps_scale); _weights.setEpsScale(eps_scale); _biases->setEpsScale(eps_scale); } void WeightLayer::multiplyWeightsEpsScale(float multiplier) { Layer::multiplyWeightsEpsScale(multiplier); _weights.multiplyEpsScale(multiplier); _biases->multiplyEpsScale(multiplier); } void WeightLayer::bpropPreCommon(NVMatrix& v, PASS_TYPE passType) { Layer::bpropPreCommon(v, passType); // 
printf("WeightLayer::bpropPreCommo %s\n",_name.c_str()); if (_biases->getEps() > 0) { bpropBiases(v, passType); } for (int i = 0; i < _weights.getSize(); i++) { if (_weights[i].getEps() > 0) { bpropWeights(v, i, passType); // Increment its number of updates _weights[i].incNumUpdates(); } } } void WeightLayer::updateWeights() { Layer::updateWeights(); if (verbose) printf("WeightLayer::updateWeights %s\n", _name.c_str()); _weights.update(); _biases->update(); } void WeightLayer::copyToCPU() { Layer::copyToCPU(); _weights.copyToCPU(); _biases->copyToCPU(); } void WeightLayer::copyToGPU() { Layer::copyToGPU(); _weights.copyToGPU(); _biases->copyToGPU(); } void WeightLayer::checkGradients() { for (int i = 0; i < _weights.getSize(); i++) { _convNet->checkGradient(_name + " weights[" + tostr(i) + "]", _wStep, _weights[i]); } _convNet->checkGradient(_name + " biases", _bStep, *_biases); } Weights& WeightLayer::getWeights(int idx) { return _weights[idx]; } /* * ============ * ScalingLayer Layer * ============ * */ ScalingLayer::ScalingLayer(ConvNet* convNet, PyObject* paramsDict) : WeightLayer(convNet, paramsDict, false, true) { } void ScalingLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { Weights& scaleWeight = _weights[inpIdx]; scaleWeight.copyToCPU(); Matrix& hScale = scaleWeight.getCPUW(); float *scale = hScale.getData(); getActs().add(*_inputs[inpIdx], scaleTargets, *scale); if (scaleTargets == 0) { _biases->copyToCPU(); Matrix& hBias = _biases->getCPUW(); float *bias = hBias.getData(); printf("ScalingLayer %s. scale: %f bias:%f scaleTargets:%f\n", _name.c_str(), *scale, *bias, scaleTargets); getActs().addScalar(*bias); } if (verbose) { NVMatrix tmp(getActs()); getActs().apply(NVMatrixOps::Abs(), tmp); float mean_abs_act = tmp.sum() / tmp.getNumElements(); printf("ScalingLayer %s. mean_abs_act:%f\n", _name.c_str(), mean_abs_act); } } void ScalingLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { Weights& scaleWeight = _weights[inpIdx]; scaleWeight.copyToCPU(); Matrix& hScale = scaleWeight.getCPUW(); float *scale = hScale.getData(); // printf("ScalingLayer::bpropAct scale:%f\n",*scale); // _prev[inpIdx]->getActsGrad().resize(v); _prev[inpIdx]->getActsGrad().add(v, scaleTargets, *scale); } void ScalingLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { int numCases = v.getNumCols(); float scaleBGrad = passType == PASS_GC ? 1 : _biases->getEps() / numCases; // printf("ScalingLayer::bpropBiases _biases->getGrad() rows:%d cols:%d\n", // _biases->getGrad().getNumRows(),_biases->getGrad().getNumCols()); _biases->getGrad().resize(1, 1); _biases->getGrad().scale(0); _biases->getGrad().addScalar(scaleBGrad * v.sum()); } void ScalingLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) { int numCases = v.getNumCols(); NVMatrix a; // printf("ScalingLayer::bpropWeights v rows:%d cols:%d getActs() rows:%d cols:%d\n", // v.getNumRows(),v.getNumCols(),getActs().getNumRows(),getActs().getNumCols()); _prev[inpIdx]->getActs().eltwiseMult(v, a); float scaleGrad = passType == PASS_GC ? 
1 : _weights[inpIdx].getEps() / numCases; // printf("ScalingLayer::bpropWeights _weights[inpIdx].getGrad() rows:%d cols:%d \n", // _weights[inpIdx].getGrad().getNumRows(),_weights[inpIdx].getGrad().getNumCols()); _weights[inpIdx].getGrad().resize(1, 1); _weights[inpIdx].getGrad().setZero(); //Matrix tmp; //_weights[inpIdx].getGrad().copyToHost(tmp,true); //float *data= tmp.getData(); //printf("tmp rows,cols:%d %d data [0]:%f\n",tmp.getNumRows(),tmp.getNumCols(),data[0]); printf( "ScalingLayer::bpropWeights %s. _weights[inpIdx].getGrad().sum():%f scaleGrad:%f weight grad:%f\n", _name.c_str(), _weights[inpIdx].getGrad().sum(), scaleGrad, scaleGrad * a.sum()); _weights[inpIdx].getGrad().addScalar(scaleGrad * a.sum()); } /* * ============ * NormalizeLayer * ============ */ NormalizeLayer::NormalizeLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { } void NormalizeLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); NVMatrix squared; _inputs[inpIdx]->apply(NVMatrixOps::Square(), squared); squared.sum(0, _norm); _norm.apply(NVMatrixOps::Sqrt()); if (verbose) { printf( "NormalizeLayer::fpropActs squared size:%d %d norm size:%d %d norm mean:%f\n", squared.getNumRows(), squared.getNumCols(), _norm.getNumRows(), _norm.getNumCols(), _norm.sum() / _norm.getNumElements()); //_norm.print(_norm.getNumRows(), _norm.getNumCols()); } if (_norm.min() == 0) { printf("zero norm\n"); exit(1); } _inputs[inpIdx]->eltwiseDivideByVector(_norm, getActs()); } void NormalizeLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); computeL2NormalizeGrad(_prev[inpIdx]->getActs(), getActs(), _norm, v, _prev[inpIdx]->getActsGrad(), scaleTargets == 1); // // NVMatrix prev_v; // v.eltwiseMultByVector(_norm,prev_v); // _prev[inpIdx]->getActsGrad().add(prev_v,scaleTargets,1); } /* * ======================= * FCLayer * ======================= */ FCLayer::FCLayer(ConvNet* convNet, PyObject* paramsDict) : WeightLayer(convNet, paramsDict, true, false) { //printf("set dropout to false\n"); //_dropout=0; _dropout = pyDictGetFloat(paramsDict, "dropout"); _wStep = 0.1; _bStep = 0.01; } void FCLayer::fpropPreCommon(NVMatrixV& v, PASS_TYPE passType) { if (passType == PASS_TRAIN && _dropout > 0) //printf("FCLayer::fpropPreCommon dropout in training is enabled\n"); initDropoutMap(); } void FCLayer::fpropPostCommon(NVMatrixV& v, PASS_TYPE passType) { Layer::fpropPostCommon(v, passType); //float actMax=(getActs().max()); //float actMin=(getActs().min()); //if(abs(actMax)>thres || abs(actMin)>thres){ // float wMin,wMax,wMean; // wMin=(*_weights[0]).min(); // wMax=(*_weights[0]).max(); // wMean=(*_weights[0]).mean(); // printf("layer:%s wMin,wMax,wMean:%f %f %f\n", // _name.c_str(),wMin,wMax,wMean); //} if (passType == PASS_TRAIN && _dropout > 0) { //printf("FCLayer::fpropPostCommon dropout in training is enabled\n"); getActs().eltwiseMultByVector(_devDoMap); //printf("dropout binary map\n"); //_hDoMap.print(_hDoMap.getNumRows(),_hDoMap.getNumCols()); //printf("FCLayer::fpropPostCommon activation after dropout\n"); //getActs().print(getActs().getNumRows(),getActs().getNumCols()); } else if (passType == PASS_TEST && _dropout > 0) { //printf("FCLayer::fpropPostCommon dropout in testing is enabled\n"); getActs().scale(1 - _dropout); } } void FCLayer::initDropoutMap() { assert(_weights.getSize() > 0); int numOut = _weights[0].getNumCols(); //int numIn=_weights[0].getNumRows(); //printf("initDropoutMap numIN:%d 
numOut:%d\n",numIn,numOut); _hDoMap.resize(1, numOut); vector<int> nums(numOut); for (int i = 0; i < numOut; ++i) nums[i] = i; std::random_shuffle(nums.begin(), nums.end()); float *hDoMapData = _hDoMap.getData(); memset(hDoMapData, 0, sizeof(float) * numOut); for (int i = 0; i < floor(numOut * (1 - _dropout)); ++i) hDoMapData[nums[i]] = 1.0f; _devDoMap.resize(_hDoMap); _devDoMap.copyFromHost(_hDoMap); } void FCLayer::bpropPreCommon(NVMatrix& v, PASS_TYPE passType) { WeightLayer::bpropPreCommon(v, passType); if (passType == PASS_TRAIN && _dropout > 0) { assert(_weights.getSize() > 0); //int numOut=_weights[0].getNumCols(); //assert(numOut==v.getNumCols()); v.eltwiseMultByVector(_devDoMap); //v.print(v.getNumRows(),v.getNumCols()); } } void FCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { checkNVMatrixNan(*_inputs[inpIdx], _name); checkNVMatrixNan(*_weights[inpIdx], _name); //if(_name==string("fc4096_1")) //{ // printf("(*_inputs[inpIdx]) min,max,mean:%f %f %f\n",(*_inputs[inpIdx]).min(),(*_inputs[inpIdx]).max(),(*_inputs[inpIdx]).mean()); // printf("(*_weights[inpIdx]) min,max,mean:%f %f %f\n",(*_weights[inpIdx]).min(),(*_weights[inpIdx]).max(),(*_weights[inpIdx]).mean()); // /* // (*_inputs[inpIdx]).fprint((_name+string("FCLayer fpropActs _inputs[inpIdx]")).c_str(),(*_inputs[inpIdx]).getNumRows(), // (*_inputs[inpIdx]).getNumCols()); //(*_weights[inpIdx]).fprint((_name+string("FCLayer fpropActs _weights[inpIdx]")).c_str(),(*_weights[inpIdx]).getNumRows(), // (*_weights[inpIdx]).getNumCols());*/ //} getActs().addProduct(*_inputs[inpIdx], *_weights[inpIdx], scaleTargets, 1); if (scaleTargets == 0) { getActs().addVector(_biases->getW()); } // if(_name == string("reglayer")){ // printf("FCLayer %s. print getActs()\n", _name.c_str()); // getActs().print(1,getActs().getNumCols()); // } if (verbose) { NVMatrix tmp(getActs()); getActs().apply(NVMatrixOps::Abs(), tmp); float mean_abs_act = tmp.sum() / tmp.getNumElements(); printf("FCLayer %s. 
mean_abs_act:%f\n", _name.c_str(), mean_abs_act); } // if (isnan(getActs().sum())) { // printf("FCLayer::fpropActs isnan(getActs().sum())\n"); // printf("inputs min:%f max:%f mean:%f\n", (*_inputs[inpIdx]).min(), // (*_inputs[inpIdx]).max(), (*_inputs[inpIdx]).mean()); // printf("(*_weights[inpIdx]) min:%f max:%f mean:%f\n", // (*_weights[inpIdx]).min(), (*_weights[inpIdx]).max(), // (*_weights[inpIdx]).mean()); // printf("_biases min:%f max:%f mean:%f\n", (_biases->getW()).min(), // (_biases->getW()).max(), (_biases->getW()).mean()); // (*_inputs[inpIdx]).fprint( // (_name + string("FCLayer fpropActs _inputs")).c_str(), // (*_inputs[inpIdx]).getNumRows(), // (*_inputs[inpIdx]).getNumCols()); // (*_weights[inpIdx]).fprint( // (_name + string("FCLayer fpropActs _weights")).c_str(), // (*_weights[inpIdx]).getNumRows(), // (*_weights[inpIdx]).getNumCols()); // (_biases->getW()).fprint( // (_name + string("FCLayer fpropActs _biases")).c_str(), // (_biases->getW()).getNumRows(), (_biases->getW()).getNumCols()); // // } // checkNVMatrixNan(getActs(), _name); } void FCLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& weights_T = _weights[inpIdx].getW().getTranspose(); _prev[inpIdx]->getActsGrad().addProduct(v, weights_T, scaleTargets, 1); //checkNVMatrixNan(_prev[inpIdx]->getActsGrad(),_name); delete &weights_T; } void FCLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { //if(verbose) // printf("FCLayer::bpropBiases v rows(numCases) cols:%d %d\n", // v.getNumRows(),v.getNumCols()); int numCases = v.getNumRows(); float scaleBGrad = passType == PASS_GC ? 1 : _biases->getEps() / numCases; _biases->getGrad().addSum(v, 0, 0, scaleBGrad); if (0) { Matrix biasGrad; _biases->getGrad().copyToHost(biasGrad, true); float *biasGradData = biasGrad.getData(); int posNum = 0, negNum = 0, zeroNum = 0; for (int i = 0; i < biasGrad.getNumElements(); ++i) { if (biasGradData[i] < 0) negNum++; else if (biasGradData[i] == 0) zeroNum++; else posNum++; } float biasGradSum = biasGrad.sum(); printf("FCLayer: %s biasGradSum:%.8f negNum:%d zeroNum:%d posNum:%d\n", _name.c_str(), biasGradSum, negNum, zeroNum, posNum); Matrix bias; _biases->getW().copyToHost(bias, true); float *biasData = bias.getData(); posNum = 0; negNum = 0; zeroNum = 0; for (int i = 0; i < bias.getNumElements(); ++i) { if (biasData[i] < 0) negNum++; else if (biasData[i] == 0) zeroNum++; else posNum++; } float biasSum = bias.sum(); printf("FCLayer: %s biasSum:%.8f negNum:%d zeroNum:%d posNum:%d\n", _name.c_str(), biasSum, negNum, zeroNum, posNum); NVMatrix dbgMat(_biases->getGrad(), true); dbgMat.eltwiseDivide(_biases->getW()); dbgMat.apply(NVMatrixOps::Abs()); printf("FCLayer: %s dbgMat.getNumElements():%d\n", _name.c_str(), dbgMat.getNumElements()); float meanOptimWC = 0.5f * dbgMat.sum() / (float) (dbgMat.getNumElements()); printf("FCLayer: %s bias meanOptimWC:%.8f\n", _name.c_str(), meanOptimWC); } } void FCLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) { int numCases = v.getNumRows(); NVMatrix& prevActs_T = _prev[inpIdx]->getActs().getTranspose(); float scaleInc = (_weights[inpIdx].getNumUpdates() == 0 && passType != PASS_GC) * _weights[inpIdx].getMom(); float scaleGrad = passType == PASS_GC ? 
1 : _weights[inpIdx].getEps() / numCases; _weights[inpIdx].getInc().addProduct(prevActs_T, v, scaleInc, scaleGrad); if (0) { // debugging: print gradient wrt weights NVMatrix dbgMat(_weights[inpIdx].getInc()); dbgMat.addProduct(prevActs_T, v, 0, scaleGrad); dbgMat.eltwiseDivide(_weights[inpIdx].getW()); dbgMat.apply(NVMatrixOps::Abs()); float meanOptimWC = 0.5f * dbgMat.sum() / (float) (dbgMat.getNumElements()); printf("FCLayer: %s weights meanOptimWC:%.8f\n", _name.c_str(), meanOptimWC); } delete &prevActs_T; } /* * ======================= * LocalLayer * ======================= */ LocalLayer::LocalLayer(ConvNet* convNet, PyObject* paramsDict, bool useGrad) : WeightLayer(convNet, paramsDict, false, useGrad) { _padding = pyDictGetIntV(paramsDict, "padding"); _stride = pyDictGetIntV(paramsDict, "stride"); _filterSize = pyDictGetIntV(paramsDict, "filterSize"); _channels = pyDictGetIntV(paramsDict, "channels"); _imgSize = pyDictGetIntV(paramsDict, "imgSize"); _numFilters = pyDictGetInt(paramsDict, "filters"); _groups = pyDictGetIntV(paramsDict, "groups"); _filterChannels = pyDictGetIntV(paramsDict, "filterChannels"); _randSparse = pyDictGetIntV(paramsDict, "randSparse"); _overSample = pyDictGetIntV(paramsDict, "overSample"); _filterPixels = pyDictGetIntV(paramsDict, "filterPixels"); _imgPixels = pyDictGetIntV(paramsDict, "imgPixels"); _modulesX = pyDictGetInt(paramsDict, "modulesX"); _modules = pyDictGetInt(paramsDict, "modules"); // It's a vector on the heap to be consistent with all the others... _filterConns = new vector<FilterConns>(); PyObject* pyFilterConns = PyDict_GetItemString(paramsDict, "filterConns"); for (int i = 0; i < _randSparse->size(); i++) { FilterConns fc; if (_randSparse->at(i)) { fc.hFilterConns = getIntA(PyList_GET_ITEM(pyFilterConns, i)); } _filterConns->push_back(fc); } } void LocalLayer::copyToGPU() { WeightLayer::copyToGPU(); for (int i = 0; i < _prev.size(); i++) { if (_randSparse->at(i)) { // Copy to GPU vector that describes sparse random connectivity hipMalloc(&_filterConns->at(i).dFilterConns, sizeof(int) * _groups->at(i) * _filterChannels->at(i)); gpuMonitor->addUsedMemory( sizeof(int) * _groups->at(i) * _filterChannels->at(i)); hipMemcpy(_filterConns->at(i).dFilterConns, _filterConns->at(i).hFilterConns, sizeof(int) * _groups->at(i) * _filterChannels->at(i), hipMemcpyHostToDevice); cutilCheckMsg("hipMemcpy: failed"); } } } /* * ======================= * ConvLayer * ======================= */ ConvLayer::ConvLayer(ConvNet* convNet, PyObject* paramsDict) : LocalLayer(convNet, paramsDict, true) { _partialSum = pyDictGetInt(paramsDict, "partialSum"); _sharedBiases = pyDictGetInt(paramsDict, "sharedBiases"); } void ConvLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { // printf("ConvLayer %s. 
fpropActs\n",_name.c_str()); checkNVMatrixNan(*_weights[inpIdx], _name + string("weights")); checkNVMatrixNan(*_inputs[inpIdx], _name + string("inputs")); if (_randSparse->at(inpIdx)) { convFilterActsSparse(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } else { convFilterActs(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } if (scaleTargets == 0) { if (_sharedBiases) { getActs().reshape(_numFilters, getActs().getNumElements() / _numFilters); getActs().addVector(_biases->getW()); getActs().reshape(_numFilters * _modules, getActs().getNumElements() / (_numFilters * _modules)); } else { getActs().addVector(_biases->getW()); } } //if(verbose){ // NVMatrix tmp(getActs()); // getActs().apply(NVMatrixOps::Abs(),tmp); // float mean_abs_act = tmp.sum()/tmp.getNumElements(); // printf("ConvLayer %s. mean_abs_act:%f\n",_name.c_str(),mean_abs_act); //} // if (isnan(getActs().sum())) { // printf("ConvLayer::fpropActs\n"); // printf("inputs min:%f max:%f mean:%f\n", (*_inputs[inpIdx]).min(), // (*_inputs[inpIdx]).max(), (*_inputs[inpIdx]).mean()); // printf("(*_weights[inpIdx]) min:%f max:%f mean:%f\n", // (*_weights[inpIdx]).min(), (*_weights[inpIdx]).max(), // (*_weights[inpIdx]).mean()); // printf("_biases min:%f max:%f mean:%f\n", (_biases->getW()).min(), // (_biases->getW()).max(), (_biases->getW()).mean()); // (*_inputs[inpIdx]).fprint( // (_name + string("ConvLayer fpropActs _inputs")).c_str(), // (*_inputs[inpIdx]).getNumRows(), // (*_inputs[inpIdx]).getNumCols()); // (*_weights[inpIdx]).fprint( // (_name + string("ConvLayer fpropActs _weights")).c_str(), // (*_weights[inpIdx]).getNumRows(), // (*_weights[inpIdx]).getNumCols()); // (_biases->getW()).fprint( // (_name + string("ConvLayer fpropActs _biases")).c_str(), // (_biases->getW()).getNumRows(), (_biases->getW()).getNumCols()); // } // // checkNVMatrixNan(getActs(), _name); } void ConvLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { int numCases = v.getNumCols(); float scaleBGrad = passType == PASS_GC ? 1 : _biases->getEps() / numCases; if (_sharedBiases) { v.reshape(_numFilters, v.getNumElements() / _numFilters); _biases->getGrad().addSum(v, 1, 0, scaleBGrad); v.reshape(_numFilters * _modules, v.getNumElements() / (_numFilters * _modules)); if (0) { NVMatrix dbgMat(_biases->getGrad(), true); dbgMat.eltwiseDivide(_biases->getW()); dbgMat.apply(NVMatrixOps::Abs()); float meanOptimWC = 0.5f * dbgMat.sum() / (float) (dbgMat.getNumElements()); printf("ConvLayer: %s biases meanOptimWC:%.8f\n", _name.c_str(), meanOptimWC); } } else { _biases->getGrad().addSum(v, 1, 0, scaleBGrad); } } void ConvLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) { int numCases = v.getNumCols(); NVMatrix& tgt = _partialSum > 0 ? _weightGradTmp : _weights[inpIdx].getGrad(); float scaleWGrad = passType == PASS_GC ? 1 : _weights[inpIdx].getEps() / numCases; float scaleTargets = _weights[inpIdx].getNumUpdates() > 0 && _partialSum == 0; // ? 
1 : 0; if (_randSparse->at(inpIdx)) { convWeightActsSparse(_prev[inpIdx]->getActs(), v, tgt, _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), _partialSum, scaleTargets, scaleWGrad); } else { convWeightActs(_prev[inpIdx]->getActs(), v, tgt, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), _partialSum, scaleTargets, scaleWGrad); } if (_partialSum > 0) { scaleTargets = _weights[inpIdx].getNumUpdates() > 0; _weightGradTmp.reshape(_modules / _partialSum, _filterChannels->at(inpIdx) * _filterPixels->at(inpIdx) * _numFilters); _weights[inpIdx].getGrad().addSum(_weightGradTmp, 0, scaleTargets, 1); _weights[inpIdx].getGrad().reshape( _filterChannels->at(inpIdx) * _filterPixels->at(inpIdx), _numFilters); if (0) { NVMatrix dbgMat(_weights[inpIdx].getGrad(), true); dbgMat.eltwiseDivide(_weights[inpIdx].getW()); dbgMat.apply(NVMatrixOps::Abs()); float meanOptimWC = 0.5f * dbgMat.sum() / (float) (dbgMat.getNumElements()); printf("ConvLayer: %s weights meanOptimWC:%.8f\n", _name.c_str(), meanOptimWC); } } } void ConvLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { if (_randSparse->at(inpIdx)) { NVMatrix& tgt = _overSample->at(inpIdx) > 1 ? _actGradTmp : _prev[inpIdx]->getActsGrad(); convImgActsSparse(v, *_weights[inpIdx], tgt, _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); if (_overSample->at(inpIdx) > 1) { _actGradTmp.reshape(_overSample->at(inpIdx), _actGradTmp.getNumElements() / _overSample->at(inpIdx)); _actGradTmp.sum(0, _prev[inpIdx]->getActsGrad()); _prev[inpIdx]->getActsGrad().reshape( _prev[inpIdx]->getActsGrad().getNumElements() / v.getNumCols(), v.getNumCols()); } } else { convImgActs(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(), _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); //checkNVMatrixNan(_prev[inpIdx]->getActsGrad(), _name); } } void ConvLayer::truncBwdActs() { LocalLayer::truncBwdActs(); if (_conserveMem) { _weightGradTmp.truncate(); _actGradTmp.truncate(); } } void ConvLayer::fpropPostCommon(NVMatrixV& v, PASS_TYPE passType) { Layer::fpropPostCommon(v, passType); //float actMax=getActs().max(),actMin=getActs().min(); //if(abs(actMax)>thres || abs(actMin)>thres){ // float wMin,wMax,wMean; // wMin=(*_weights[0]).min(); // wMax=(*_weights[0]).max(); // wMean=(*_weights[0]).mean(); // printf("layer:%s wMin,wMax,wMean:%f %f %f\n", // _name.c_str(),wMin,wMax,wMean); //} } /* * ======================= * LocalUnsharedLayer * ======================= */ LocalUnsharedLayer::LocalUnsharedLayer(ConvNet* convNet, PyObject* paramsDict) : LocalLayer(convNet, paramsDict, false) { } void LocalUnsharedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { if (_randSparse->at(inpIdx)) { localFilterActsSparse(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } else { 
localFilterActs(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } if (scaleTargets == 0) { getActs().addVector(_biases->getW()); } } void LocalUnsharedLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { int numCases = v.getNumCols(); float scaleBGrad = passType == PASS_GC ? 1 : _biases->getEps() / numCases; _biases->getGrad().addSum(v, 1, 0, scaleBGrad); } void LocalUnsharedLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) { int numCases = v.getNumCols(); float scaleInc = (passType != PASS_GC && _weights[inpIdx].getNumUpdates() == 0) * _weights[inpIdx].getMom(); // momentum float scaleWGrad = passType == PASS_GC ? 1 : _weights[inpIdx].getEps() / numCases; // eps / numCases if (_randSparse->at(inpIdx)) { localWeightActsSparse(_prev[inpIdx]->getActs(), v, _weights[inpIdx].getInc(), _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad); } else { localWeightActs(_prev[inpIdx]->getActs(), v, _weights[inpIdx].getInc(), _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad); } } void LocalUnsharedLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { if (_randSparse->at(inpIdx)) { localImgActsSparse(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(), _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } else { localImgActs(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(), _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } } /* * ======================= * SoftmaxLayer * ======================= */ SoftmaxLayer::SoftmaxLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, true) { } void SoftmaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& input = *_inputs[0]; NVMatrix& max = input.max(1); input.addVector(max, -1, getActs()); //Matrix hMax; //max.copyToHost(hMax,true); //float maxmax=hMax.max(); //Matrix hActMat; //getActs().copyToHost(hActMat,true); //float actmin=hActMat.min(); getActs().apply(NVMatrixOps::Exp_a()); //getActs().copyToHost(hActMat,true); //if(hActMat.min()<=0) //{ // printf("act min before exp:%10.9f\n",actmin); // printf("after exp hActMat.min()<=0 %10.9f\n",hActMat.min()); //} assert(getActs().isContiguous()); // Matrix hMat; // getActs().copyToHost(hMat,true); //printf("SoftmaxLayer::fpropActs: check _outputs after do exponentiation\n"); // for(int p=0,i=0;i<hMat.getNumCols();++i){ // for(int j=0;j<hMat.getNumRows();++j,++p){ // if(hMat.getCell(j,i)<=0){ // // printf("nonpositive element is found. 
%6.5f (%d,%d)\n",hMat.getCell(j,i),j,i); // exit(1); // } // } // } NVMatrix& sum = getActs().sum(1); getActs().eltwiseDivideByVector(sum); //getActs().copyToHost(hMat,true); //for(int p=0,i=0;i<hMat.getNumCols();++i){ // for(int j=0;j<hMat.getNumRows();++j,++p){ // if(isnan(hMat.getCell(j,i)) || hMat.getCell(j,i)<=0){ // printf("SoftmaxLayer::fpropActs: check _outputs after getting probability\n"); // printf("nan/nonpositive element is found. %7.6f (%d,%d)\n",hMat.getCell(j,i),j,i); // } // } //} delete &max; delete &sum; } void SoftmaxLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); bool doLogregGrad = _next.size() == 1 && _next[0]->getType() == "cost.logreg"; if (doLogregGrad) { NVMatrix& labels = _next[0]->getPrev()[0]->getActs(); float gradCoeff = dynamic_cast<CostLayer*>(_next[0])->getCoeff(); computeLogregSoftmaxGrad(labels, getActs(), _prev[0]->getActsGrad(), scaleTargets == 1, gradCoeff); } else { computeSoftmaxGrad(getActs(), v, _prev[0]->getActsGrad(), scaleTargets == 1); } } /* * ======================= * LogSoftmaxLayer * ======================= */ LogSoftmaxLayer::LogSoftmaxLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, true) { } void LogSoftmaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& input = *_inputs[0]; NVMatrix& max = input.max(1); // checkNVMatrixNan(input, _name); // checkNVMatrixNan(max, _name); input.addVector(max, -1, getActs()); // checkNVMatrixNan(getActs(), _name); NVMatrix sumExp(getActs(), true); sumExp.apply(NVMatrixOps::Exp_a()); assert(sumExp.isContiguous()); NVMatrix& sum = sumExp.sum(1); sum.apply(NVMatrixOps::Log()); // checkNVMatrixNan(sum, _name); getActs().addVector(sum, -1); // checkNVMatrixNan(getActs(), _name); delete &max; delete &sum; } void LogSoftmaxLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); bool doLogregGrad = _next.size() == 1 && _next[0]->getType() == "cost.reg"; if (doLogregGrad) { //printf("LogSoftmaxLayer::bpropActs doLogregGrad\n"); NVMatrix& labels = _next[0]->getPrev()[0]->getActs(); float gradCoeff = dynamic_cast<CostLayer*>(_next[0])->getCoeff(); computeLogregSoftmaxGrad(labels, getActs(), _prev[0]->getActsGrad(), scaleTargets == 1, gradCoeff, true); } else { // to do computeSoftmaxGrad(getActs(), v, _prev[0]->getActsGrad(), scaleTargets == 1); } checkNVMatrixNan(_prev[0]->getActsGrad(), _name); } /* * ======================= * EltwiseSumLayer * ======================= */ EltwiseSumLayer::EltwiseSumLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _coeffs = pyDictGetFloatV(paramsDict, "coeffs"); } void EltwiseSumLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { if (scaleTargets == 0) { _inputs[inpIdx]->scale(_coeffs->at(inpIdx), getActs()); // printf("EltwiseSumLayer::fpropActs scaleTargets==0 coeff:%f prev layer name:%s\n", // _coeffs->at(inpIdx),_prev[inpIdx]->getName().c_str()); // getActs().print(getActs().getNumRows(),2); } else { getActs().add(*_inputs[inpIdx], _coeffs->at(inpIdx)); // printf("EltwiseSumLayer::fpropActs scaleTargets!=0 coeff:%f prev layer name:%s\n", // _coeffs->at(inpIdx),_prev[inpIdx]->getName().c_str()); // printf("inputs\n"); // _inputs[inpIdx]->print(_inputs[inpIdx]->getNumRows(),2); // printf("getActs()\n"); // getActs().print(getActs().getNumRows(),2); // exit(0); } } void EltwiseSumLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { if 
(scaleTargets == 0) { v.scale(_coeffs->at(inpIdx), _prev[inpIdx]->getActsGrad()); } else { assert(&_prev[inpIdx]->getActsGrad() != &v); _prev[inpIdx]->getActsGrad().add(v, scaleTargets, _coeffs->at(inpIdx)); } //printf("EltwiseSumLayer::bpropActs %s. _prev[inpIdx]->getActsGrad() rows,cols:%d %d inpIdx:%d coeff:%.3f\n", // _name.c_str(),_prev[inpIdx]->getActsGrad().getNumRows(), // _prev[inpIdx]->getActsGrad().getNumCols(),inpIdx,_coeffs->at(inpIdx)); } /* * ======================= * EltwiseMaxLayer * ======================= */ EltwiseMaxLayer::EltwiseMaxLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { } void EltwiseMaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { if (inpIdx == 1) { // First input, do nothing _inputs[inpIdx]->applyBinary(NVMatrixAggs::Max(), *_inputs[0], getActs()); } else if (inpIdx > 1) { getActs().applyBinary(NVMatrixAggs::Max(), *_inputs[inpIdx]); } } void EltwiseMaxLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { computeEltwiseMaxGrad(v, *_inputs[inpIdx], getActs(), _prev[inpIdx]->getActsGrad(), scaleTargets != 0); } /* * ======================= * DataLayer * ======================= */ DataLayer::DataLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _dataIdx = pyDictGetInt(paramsDict, "dataIdx"); } void DataLayer::fprop(PASS_TYPE passType) { throw string("No dava given!"); } void DataLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { } void DataLayer::fprop(NVMatrixV& data, PASS_TYPE passType) { _outputs = data[_dataIdx]; checkNVMatrixNan(*_outputs, _name); #ifdef MULTIGPU NVMatrix::checkCUDAError(hipEventRecord(_fpropEvent)); #endif fpropNext(passType); } bool DataLayer::isGradProducer() { return false; } /* * ===================== * PoolLayer * ===================== */ PoolLayer::PoolLayer(ConvNet* convNet, PyObject* paramsDict, bool trans) : Layer(convNet, paramsDict, trans) { _channels = pyDictGetInt(paramsDict, "channels"); _sizeX = pyDictGetInt(paramsDict, "sizeX"); _start = pyDictGetInt(paramsDict, "start"); _stride = pyDictGetInt(paramsDict, "stride"); _outputsX = pyDictGetInt(paramsDict, "outputsX"); _imgSize = pyDictGetInt(paramsDict, "imgSize"); _pool = pyDictGetString(paramsDict, "pool"); } PoolLayer& PoolLayer::makePoolLayer(ConvNet* convNet, PyObject* paramsDict) { string _pool = pyDictGetString(paramsDict, "pool"); if (_pool == "max") { return *new MaxPoolLayer(convNet, paramsDict); } else if (_pool == "avg") { return *new AvgPoolLayer(convNet, paramsDict); } throw string("Unknown pooling layer type ") + _pool; } /* * ===================== * AvgPoolLayer * ===================== */ AvgPoolLayer::AvgPoolLayer(ConvNet* convNet, PyObject* paramsDict) : PoolLayer(convNet, paramsDict, false) { } void AvgPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, AvgPooler()); } void AvgPoolLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convLocalAvgUndo(v, _prev[0]->getActsGrad(), _sizeX, _start, _stride, _outputsX, _imgSize, scaleTargets, 1); } /* * ===================== * MaxPoolLayer * ===================== */ MaxPoolLayer::MaxPoolLayer(ConvNet* convNet, PyObject* paramsDict) : PoolLayer(convNet, paramsDict, false) { } void MaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, 
_outputsX, MaxPooler()); checkNVMatrixNan(getActs(), _name); } void MaxPoolLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convLocalMaxUndo(_prev[0]->getActs(), v, getActs(), _prev[inpIdx]->getActsGrad(), _sizeX, _start, _stride, _outputsX, scaleTargets, 1); checkNVMatrixNan(_prev[inpIdx]->getActsGrad(), _name); } /* * ===================== * NailbedLayer * ===================== */ NailbedLayer::NailbedLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _channels = pyDictGetInt(paramsDict, "channels"); _start = pyDictGetInt(paramsDict, "start"); _stride = pyDictGetInt(paramsDict, "stride"); _outputsX = pyDictGetInt(paramsDict, "outputsX"); _imgSize = pyDictGetInt(paramsDict, "imgSize"); } void NailbedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convBedOfNails(*_inputs[0], getActs(), _channels, _imgSize, _start, _stride, 0, 1); } void NailbedLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convBedOfNailsUndo(v, _prev[0]->getActsGrad(), _channels, _imgSize, _start, _stride, scaleTargets, 1); } /* * ===================== * GaussianBlurLayer * ===================== */ GaussianBlurLayer::GaussianBlurLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _channels = pyDictGetInt(paramsDict, "channels"); _hFilter = pyDictGetMatrix(paramsDict, "filter"); } void GaussianBlurLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convGaussianBlur(*_inputs[0], _filter, getActs(), true, _channels, 0, 1); convGaussianBlur(getActs(), _filter, getActs(), false, _channels, 0, 1); } // This is here just for completeness' sake. Why would you backpropagate // through a blur filter? void GaussianBlurLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& tgt1 = _prev[0]->getRcvdBInputs() > 0 ? 
_actGradsTmp : _prev[0]->getActsGrad(); convGaussianBlur(v, _filter, tgt1, true, _channels, 0, 1); convGaussianBlur(tgt1, _filter, _prev[0]->getActsGrad(), false, _channels, scaleTargets, 1); } void GaussianBlurLayer::copyToGPU() { Layer::copyToGPU(); _filter.copyFromHost(*_hFilter, true); } /* * ===================== * ResizeLayer * ===================== */ ResizeLayer::ResizeLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _channels = pyDictGetInt(paramsDict, "channels"); _imgSize = pyDictGetInt(paramsDict, "imgSize"); _tgtSize = pyDictGetInt(paramsDict, "tgtSize"); _scale = pyDictGetFloat(paramsDict, "scale"); } void ResizeLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _scale); } // Can't do this void ResizeLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * RGBToYUVLayer * ===================== */ RGBToYUVLayer::RGBToYUVLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { } void RGBToYUVLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convRGBToYUV(*_inputs[0], getActs()); } // Can't do this void RGBToYUVLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * RGBToLABLayer * ===================== */ RGBToLABLayer::RGBToLABLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _center = pyDictGetInt(paramsDict, "center"); } void RGBToLABLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convRGBToLAB(*_inputs[0], getActs(), _center); } // Can't do this void RGBToLABLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * ResponseNormLayer * ===================== */ ResponseNormLayer::ResponseNormLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _channels = pyDictGetInt(paramsDict, "channels"); _size = pyDictGetInt(paramsDict, "size"); _bias = pyDictGetFloat(paramsDict, "bias"); _scale = pyDictGetFloat(paramsDict, "scale"); _pow = pyDictGetFloat(paramsDict, "pow"); printf("(bias,scale,pow)=(%f,%f,%f)\n", _bias, _scale, _pow); } void ResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNorm(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow); } void ResponseNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNormUndo(v, _denoms, _prev[0]->getActs(), getActs(), _prev[0]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1); } void ResponseNormLayer::truncBwdActs() { Layer::truncBwdActs(); if (_conserveMem) { _denoms.truncate(); } } /* * ===================== * CrossMapResponseNormLayer * ===================== */ CrossMapResponseNormLayer::CrossMapResponseNormLayer(ConvNet* convNet, PyObject* paramsDict) : ResponseNormLayer(convNet, paramsDict) { _blocked = pyDictGetInt(paramsDict, "blocked"); } void CrossMapResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNormCrossMap(*_inputs[0], _denoms, getActs(), _channels, _size, _bias, _scale, _pow, _blocked); checkNVMatrixNan(getActs(), _name); } void CrossMapResponseNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNormCrossMapUndo(v, _denoms, _prev[0]->getActs(), 
getActs(), _prev[0]->getActsGrad(), _channels, _size, _bias, _scale, _pow, _blocked, scaleTargets, 1); checkNVMatrixNan(_prev[0]->getActsGrad(), _name); } /* * ===================== * ContrastNormLayer * ===================== */ ContrastNormLayer::ContrastNormLayer(ConvNet* convNet, PyObject* paramsDict) : ResponseNormLayer(convNet, paramsDict) { _imgSize = pyDictGetInt(paramsDict, "imgSize"); } void ContrastNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& images = *_inputs[0]; convLocalPool(images, _meanDiffs, _channels, _size, -_size / 2, 1, _imgSize, AvgPooler()); _meanDiffs.add(images, -1, 1); convContrastNorm(images, _meanDiffs, _denoms, getActs(), _channels, _size, _scale, _pow); } void ContrastNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convContrastNormUndo(v, _denoms, _meanDiffs, getActs(), _prev[inpIdx]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1); } void ContrastNormLayer::truncBwdActs() { ResponseNormLayer::truncBwdActs(); if (_conserveMem) { _meanDiffs.truncate(); } } /* * ===================== * CostLayer * ===================== */ CostLayer::CostLayer(ConvNet* convNet, PyObject* paramsDict, bool trans) : Layer(convNet, paramsDict, trans) { _coeff = pyDictGetFloat(paramsDict, "coeff"); } float CostLayer::getCoeff() { return _coeff; } void CostLayer::setCoeff(float coeff) { assert(coeff > 0); _coeff = coeff; } void CostLayer::scaleCoeff(float scale) { assert(scale > 0); float oldCoeff = _coeff; _coeff *= scale; printf( "===========Cost Layer %s. Coeffient is scaled by %3.2f. %6.5f->%6.5f=====\n", _name.c_str(), scale, oldCoeff, _coeff); } void CostLayer::bprop(PASS_TYPE passType) { if (_coeff != 0) { Layer::bprop(passType); } } bool CostLayer::isGradProducer() { return _coeff != 0; } doublev& CostLayer::getCost() { doublev& v = *new doublev(); v.insert(v.begin(), _costv.begin(), _costv.end()); return v; } CostLayer& CostLayer::makeCostLayer(ConvNet* convNet, string& type, PyObject* paramsDict) { if (type == "cost.logreg") { return *new LogregCostLayer(convNet, paramsDict); } else if (type == "cost.sum2") { return *new SumOfSquaresCostLayer(convNet, paramsDict); } else if (type == "cost.reg") { return *new RegCostLayer(convNet, paramsDict); } else if (type == "cost.sumsquaresdiff") { return *new SumOfSquaresOfDiffCostLayer(convNet, paramsDict); } else if (type == "cost.logsumsquaresdiff") { return *new LogSumOfSquaresOfDiffCostLayer(convNet, paramsDict); } else if (type == "cost.color_enhance") { return *new ColorEnhanceCostLayer(convNet, paramsDict); } else if (type == "cost.color_enhance_separate") { return *new ColorEnhanceSeparateCostLayer(convNet, paramsDict); } else if (type == "cost.gradmag_enhance") { return *new GradMagEnhanceCostLayer(convNet, paramsDict); } throw string("Unknown cost layer type ") + type; } /* * ===================== * LogregCostLayer * ===================== */ LogregCostLayer::LogregCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) { } void LogregCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { // This layer uses its two inputs together if (inpIdx == 0) { NVMatrix& labels = *_inputs[0]; NVMatrix& probs = *_inputs[1]; int numCases = labels.getNumElements(); //int numOut=probs.getNumRows(); //assert(numCases==probs.getNumCols()); //printf("numCases:%d numOut:%d\n",numCases,numOut); //Matrix *hLabels=new Matrix(); //labels.copyToHost(*hLabels,true); //float maxLabel=-1; //for(int 
i=0;i<numCases;++i){ // if(maxLabel<hLabels->getCell(0,i)) // maxLabel=hLabels->getCell(0,i); //} //if(maxLabel>=999) // printf("----------Error: maxlabel>999-----------\n"); //Matrix *hProbMat=new Matrix(); //probs.copyToHost(*hProbMat,true); //float probmin=hProbMat->min(); //float probmax=hProbMat->max(); //if(probmin<=0) // printf("probmin:%10.9f\n",probmin); //for(int i=0;i<numCases;++i){ // for(int j=0;j<numOut;++j){ // if(hProbMat->getCell(j,i)<0){ // printf("enter cs 4\n"); // printf("find non-positive prob: %6.5f\n (i,j)=(%d,%d)\n", // hProbMat->getCell(j,i),i,j); // } // } //} NVMatrix& trueLabelLogProbs = getActs(), correctProbs; computeLogregCost(labels, probs, trueLabelLogProbs, correctProbs); _costv.clear(); _costv.push_back(-trueLabelLogProbs.sum()); _costv.push_back(numCases - correctProbs.sum()); //printf("\nLogregCostLayer::fpropActs: logreg cost : %6.5f testError:%6.5f\n", // -trueLabelLogProbs.sum(),(numCases - correctProbs.sum())); checkNVMatrixNan(trueLabelLogProbs, _name); } } void LogregCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 1); NVMatrix& labels = _prev[0]->getActs(); NVMatrix& probs = _prev[1]->getActs(); NVMatrix& target = _prev[1]->getActsGrad(); // Numerical stability optimization: if the layer below me is a softmax layer, let it handle // the entire gradient computation to avoid multiplying and dividing by a near-zero quantity. bool doWork = _prev[1]->getNext().size() > 1 || _prev[1]->getType() != "softmax"; if (doWork) { computeLogregGrad(labels, probs, target, scaleTargets == 1, _coeff); } } /* * ===================== * RegCostLayer * ===================== */ RegCostLayer::RegCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) { } void RegCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { // This layer uses its two inputs together if (inpIdx == 0) { NVMatrix& labels = *_inputs[0]; NVMatrix& logprobs = *_inputs[1]; // NVMatrix& probs = *_inputs[1]; int numCases = labels.getNumElements(); checkNVMatrixNan(logprobs, _name); NVMatrix& trueLabelLogProbs = getActs(), correctProbs; computeLogregCost(labels, logprobs, trueLabelLogProbs, correctProbs, true); _costv.clear(); _costv.push_back(-trueLabelLogProbs.sum()); _costv.push_back(numCases - correctProbs.sum()); //printf("\nLogregCostLayer::fpropActs: logreg cost : %6.5f testError:%6.5f\n", // -trueLabelLogProbs.sum(),(numCases - correctProbs.sum())); checkNVMatrixNan(trueLabelLogProbs, _name); } } void RegCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 1); NVMatrix& labels = _prev[0]->getActs(); NVMatrix& probs = _prev[1]->getActs(); NVMatrix& target = _prev[1]->getActsGrad(); // Numerical stability optimization: if the layer below me is a softmax layer, let it handle // the entire gradient computation to avoid multiplying and dividing by a near-zero quantity. 
bool doWork = _prev[1]->getNext().size() > 1 || _prev[1]->getType() != "logsoftmax"; if (doWork) { printf("RegCostLayer::bpropActs doWork\n"); // to do computeLogregGrad(labels, probs, target, scaleTargets == 1, _coeff); } } /* * ===================== * SumOfSquaresCostLayer * ===================== */ SumOfSquaresCostLayer::SumOfSquaresCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) { } void SumOfSquaresCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { int numCases = _inputs[0]->getNumCols(); _inputs[0]->apply(NVMatrixOps::Square(), getActs()); _costv.clear(); // will be divided by numCases in Python _costv.push_back(0.5 * getActs().sum()); // _costv.push_back(0.5 * getActs().sum() / numCases); } void SumOfSquaresCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { _prev[inpIdx]->getActsGrad().add(*_inputs[0], scaleTargets, -_coeff); } SumOfSquaresOfDiffCostLayer::SumOfSquaresOfDiffCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) { _relativeDiff = pyDictGetInt(paramsDict, "relativeDiff"); printf("SumOfSquaresOfDiffCostLayer %s init. _relativeDiff : %d\n", _name.c_str(), _relativeDiff); } void SumOfSquaresOfDiffCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { // This layer uses two inputs if (inpIdx == 0) { NVMatrix& labels = *_inputs[0]; // dim * numImg. _trans=0 NVMatrix& predLabels = *_inputs[1]; // dim * numImg. _trans=0 predLabels.subtract(labels, getActs()); printf("SumOfSquaresOfDiffCostLayer::fpropActs\n"); labels.print(labels.getNumRows(), 2); printf("\n"); predLabels.print(predLabels.getNumRows(), 2); printf("\n"); getActs().print(getActs().getNumRows(), 2); printf("\n"); exit(0); if (_relativeDiff) { getActs().eltwiseDivide(labels); } // printf("SumOfSquaresOfDiffCostLayer::fpropActs print labels\n"); // labels.print(labels.getFollowingDim(),10); // printf("SumOfSquaresOfDiffCostLayer::fpropActs print predLabels\n"); // predLabels.print(predLabels.getFollowingDim(),10); // printf("SumOfSquaresOfDiffCostLayer::fpropActs print getActs()\n"); // getActs().print(labels.getFollowingDim(),10); int numCases = labels.getLeadingDim(); _costv.clear(); _costv.push_back(0.5 * getActs().norm2()); // printf("averaged cost:%f\n",0.5 * getActs().norm2()/numCases); } } void SumOfSquaresOfDiffCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 1); NVMatrix& labels = *_inputs[0]; // dim * numImg. _trans=0 NVMatrix& predLabels = *_inputs[1]; // dim * numImg. _trans=0 NVMatrix diffLabels; //printf("SumOfSquaresOfDiffCostLayer::bpropActs\n"); // printf("print labels\n"); // labels.print(labels.getFollowingDim(),labels.getLeadingDim()); // printf("print predLabels\n"); // predLabels.print(predLabels.getFollowingDim(),predLabels.getLeadingDim()); // exit(0); //printf("scaleTargets: %f\n",scaleTargets); if (!_relativeDiff) _prev[inpIdx]->getActsGrad().add(getActs(), scaleTargets, -_coeff); else { NVMatrix& absDiff = getActs().copy(); absDiff.eltwiseDivide(labels); _prev[inpIdx]->getActsGrad().add(absDiff, scaleTargets, -_coeff); } } LogSumOfSquaresOfDiffCostLayer::LogSumOfSquaresOfDiffCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) { _scale = pyDictGetFloat(paramsDict, "scale"); printf("layer :%s. 
coeff:%f scale:%f\n", _name.c_str(), _coeff, _scale); } // to be verified void LogSumOfSquaresOfDiffCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { // This layer uses two inputs if (inpIdx == 0) { NVMatrix& labels = *_inputs[0]; // dim * numImg. _trans=0 NVMatrix& predLabels = *_inputs[1]; // dim * numImg. _trans=0 int numCases = labels.getLeadingDim(); NVMatrix squaredDiff; labels.applyBinary(NVMatrixBinaryOps::SquaredDiff(), predLabels, squaredDiff); squaredDiff.sum(0, getActs()); getActs().apply(NVMatrixOps::MultByScalar(0.5)); printf("LogSumOfSquaresOfDiffCostLayer size: %d %d\n", getActs().getLeadingDim(), getActs().getFollowingDim()); NVMatrix costs; getActs().apply(NVMatrixOps::MultByScalar(_scale), costs); costs.apply(NVMatrixOps::Logistic()); costs.apply(NVMatrixOps::AddScalar(-0.5)); _costv.clear(); _costv.push_back(costs.sum() / numCases); } } void LogSumOfSquaresOfDiffCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 1); NVMatrix& labels = *_inputs[0]; // dim * numImg. _trans=0 NVMatrix& predLabels = *_inputs[1]; // dim * numImg. _trans=0 NVMatrix diffLabels; predLabels.subtract(labels, diffLabels); NVMatrix a, b; getActs().apply(NVMatrixOps::WeightedAddScalar(_scale, 0.5 * _scale), a); getActs().apply(NVMatrixOps::WeightedAddScalar(-1, 0.5), b); a.applyBinary(NVMatrixBinaryOps::Multiply(), b); diffLabels.eltwiseMultByVector(a); printf("print out diffLabels\n"); diffLabels.print(diffLabels.getFollowingDim(), diffLabels.getLeadingDim()); _prev[inpIdx]->getActsGrad().add(diffLabels, scaleTargets, -_coeff); } CroppingLayer::CroppingLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _channels = pyDictGetInt(paramsDict, "channels"); _start = pyDictGetInt(paramsDict, "start"); _end = pyDictGetInt(paramsDict, "end"); } void CroppingLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { // printf("CroppingLayer %s: fpropActs\n",_name.c_str()); cropping(*_inputs[0], getActs(), _channels, _start, _end); } void CroppingLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { // printf("CroppingLayer %s: bpropActs\n",_name.c_str()); croppingUndo(_prev[0]->getActs(), v, _prev[0]->getActsGrad(), _start, _end, scaleTargets, 1); } ColorEnhanceCostLayer::ColorEnhanceCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) { } /* * input 0: prediction of 10/30 D transform for L-channel. 
(ch*10,n) * input 1: original 10D pixel basis (segment_random_sample_num*10,n) * input 2: groundtruth color (segment_random_sample_num*ch,n) * */ void ColorEnhanceCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { // this layer uses 3 inputs if (inpIdx == 0) { NVMatrix& predMapping = *_inputs[0]; //shape: (ch*10,num_imgs) //NVMatrix& gtMapping = *_inputs[1]; // shape: (ch*10, num_imgs) NVMatrix& colorBasis = *_inputs[1]; // shape: (segment_random_sample_num*10,num_imgs) NVMatrix& gtColor = *_inputs[2]; // shape: (segment_random_sample_num*ch,num_imgs) int basis_dim = 10; int ch = predMapping.getNumRows() / basis_dim; int segment_random_sample_num = gtColor.getNumRows() / ch; if (verbose) printf("basis_dim %d segment_random_sample_num %d ch:%d\n", basis_dim, segment_random_sample_num, ch); //assert(predMapping.getNumCols()==gtMapping.getNumCols()); //assert(predMapping.getNumRows() == gtMapping.getNumRows()); assert(colorBasis.getNumCols()==predMapping.getNumCols()); assert(gtColor.getNumCols()==predMapping.getNumCols()); assert(basis_dim*segment_random_sample_num==colorBasis.getNumRows()); int costOption = 1; if (costOption == 0) { // measure difference between predicted mapping and estimated mapping from segment // TO DO: update code //NVMatrix diffMapping; //predMapping.subtract(gtMapping, diffMapping); //diffMapping.eltwiseMult(colorBasis); //diffMapping.sum(0, getActs()); //NVMatrix dotProd, predColor, diffColor; //predMapping.eltwiseMult(colorBasis, dotProd); //dotProd.sum(0, predColor); //predColor.subtract(gtColor, diffColor); //_costv.clear(); //_costv.push_back(0.5 * getActs().norm2()); //_costv.push_back(0.5 * diffColor.norm2()); } else { NVMatrix *dotProd = new NVMatrix(); NVMatrix *predColor = new NVMatrix(gtColor); predColor->setZero(); for (int i = 0; i < segment_random_sample_num; ++i) { NVMatrix& l_color_basis = colorBasis.sliceRows(i * basis_dim, (i + 1) * basis_dim); for (int j = 0; j < ch; ++j) { NVMatrix& l_pred_color = predColor->sliceRows(ch * i + j, ch * i + j + 1); NVMatrix& l_pred_mapping = predMapping.sliceRows( j * basis_dim, (j + 1) * basis_dim); l_pred_mapping.eltwiseMult(l_color_basis, *dotProd); dotProd->sum(0, l_pred_color); delete &l_pred_color; delete &l_pred_mapping; } delete &l_color_basis; } predColor->subtract(gtColor, getActs()); if (verbose) printf( "ColorEnhanceCostLayer::fpropActs gtColor rows:%d cols:%d\n", predColor->getNumRows(), predColor->getNumCols()); _costv.clear(); _costv.push_back( 0.5 * getActs().norm2() / segment_random_sample_num); delete predColor; delete dotProd; } } } void ColorEnhanceCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); NVMatrix& predMapping = *_inputs[0]; //shape: (30,num_imgs) //NVMatrix& gtMapping = *_inputs[1]; // shape: (30, num_imgs) NVMatrix& colorBasis = *_inputs[1]; // shape: (segment_random_sample_num*10,num_imgs) NVMatrix& gtColor = *_inputs[2]; // shape: (segment_random_sample_num*3,num_imgs) // NVMatrix& predMapping = *_inputs[0]; // NVMatrix& gtMapping = *_inputs[1]; // NVMatrix& colorBasis = *_inputs[2]; // NVMatrix& gtColor = *_inputs[3]; // shape: (segment_random_sample_num,num_imgs) int basis_dim = 10; int ch = predMapping.getNumRows() / basis_dim; int segment_random_sample_num = gtColor.getNumRows() / ch; int num_imgs = predMapping.getNumCols(); NVMatrix *grad1 = new NVMatrix(basis_dim * ch, num_imgs, false); grad1->setZero(); NVMatrix *l_grad = new NVMatrix(); for (int i = 0; i < segment_random_sample_num; ++i) { 
NVMatrix& l_color_basis = colorBasis.sliceRows(i * basis_dim, (i + 1) * basis_dim); for (int j = 0; j < ch; ++j) { NVMatrix& l_grad1 = grad1->sliceRows(j * basis_dim, (j + 1) * basis_dim); NVMatrix& l_color_diff = getActs().sliceRows(ch * i + j, ch * i + j + 1); l_color_basis.eltwiseMultByVector(l_color_diff, *l_grad); l_grad1.add(*l_grad, 1); delete &l_grad1; delete &l_color_diff; } if (verbose) printf("l_grad rows:%d cols:%d l_grad min,max:%f %f\n", l_grad->getNumRows(), l_grad->getNumCols(), l_grad->min(), l_grad->max()); delete &l_color_basis; } float scale = 1.0f / (float) segment_random_sample_num; grad1->scale(scale); if (verbose) { printf("grad1 rows,cols:%d %d min,max:%f %f\n", grad1->getNumRows(), grad1->getNumCols(), grad1->min(), grad1->max()); printf("basis_dim %d segment_random_sample_num:%d scale:%f\n", basis_dim, segment_random_sample_num, scale); } _prev[inpIdx]->getActsGrad().add(*grad1, scaleTargets, -_coeff); delete l_grad; delete grad1; } ColorEnhanceSeparateCostLayer::ColorEnhanceSeparateCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) { } /* * input 0: predicted 10D transform for L channel. (10,n) * input 1: predicted 10D transform for a channel. (10,n) * input 2: predicted 10D transform for b channel. (10,n) * input 3: groundtruth of 10D transform. (3*10,n) * input 4: original 10D pixel basis (segment_random_sample_num*10,n) * input 5: groundtruth color (segment_random_sample_num*3,n) * */ void ColorEnhanceSeparateCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { // this layer uses 6 inputs if (inpIdx == 0) { NVMatrix& predMapping_L = *_inputs[0]; //shape: (10,num_imgs) NVMatrix& predMapping_a = *_inputs[1]; //shape: (10,num_imgs) NVMatrix& predMapping_b = *_inputs[2]; //shape: (10,num_imgs) NVMatrix& gtMapping = *_inputs[3]; // shape: (3*10, num_imgs) NVMatrix& colorBasis = *_inputs[4]; // shape: (segment_random_sample_num*10,num_imgs) NVMatrix& gtColor = *_inputs[5]; // shape: (segment_random_sample_num*3,num_imgs) assert(predMapping_L.getNumRows()==predMapping_a.getNumRows()); assert(predMapping_L.getNumRows()==predMapping_b.getNumRows()); assert(predMapping_L.getNumCols()==predMapping_a.getNumCols()); assert(predMapping_L.getNumCols()==predMapping_b.getNumCols()); assert(predMapping_L.getNumCols()==gtMapping.getNumCols()); assert(predMapping_L.getNumCols()==colorBasis.getNumCols()); assert(predMapping_L.getNumCols()==gtColor.getNumCols()); assert(gtMapping.getNumRows()==30); assert(gtColor.getNumRows()%3==0); int basis_dim = 10; int segment_random_sample_num = colorBasis.getNumRows() / basis_dim; assert(gtColor.getNumRows()==(segment_random_sample_num*3)); if (verbose) printf("basis_dim %d segment_random_sample_num %d\n", basis_dim, segment_random_sample_num); NVMatrix dotProd; NVMatrix predColor(gtColor); for (int i = 0; i < segment_random_sample_num; ++i) { NVMatrix& l_color_basis = colorBasis.sliceRows(i * basis_dim, (i + 1) * basis_dim); NVMatrix& l_pred_L = predColor.sliceRows(3 * i, 3 * i + 1); NVMatrix& l_pred_a = predColor.sliceRows(3 * i + 1, 3 * i + 2); NVMatrix& l_pred_b = predColor.sliceRows(3 * i + 2, 3 * i + 3); predMapping_L.eltwiseMult(l_color_basis, dotProd); dotProd.sum(0, l_pred_L); predMapping_a.eltwiseMult(l_color_basis, dotProd); dotProd.sum(0, l_pred_a); predMapping_b.eltwiseMult(l_color_basis, dotProd); dotProd.sum(0, l_pred_b); delete &l_color_basis; delete &l_pred_L; delete &l_pred_a; delete &l_pred_b; } predColor.subtract(gtColor, getActs()); _costv.clear(); 
_costv.push_back(0.5 * getActs().norm2() / segment_random_sample_num); } } void ColorEnhanceSeparateCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0 || inpIdx == 1 || inpIdx ==2); // printf("inpIdx: %d\n",inpIdx); NVMatrix& predMapping = *_inputs[inpIdx]; //shape: (10,num_imgs) NVMatrix& gtMapping = *_inputs[3]; // shape: (3*10, num_imgs) NVMatrix& colorBasis = *_inputs[4]; // shape: (segment_random_sample_num*10,num_imgs) NVMatrix& gtColor = *_inputs[5]; // shape: (segment_random_sample_num*3,num_imgs) int basis_dim = 10; int segment_random_sample_num = colorBasis.getNumRows() / basis_dim; assert(gtColor.getNumRows()==(segment_random_sample_num*3)); int num_imgs = predMapping.getNumCols(); NVMatrix grad1, grad2; grad1.resize(basis_dim, num_imgs); grad1.setZero(); grad2.resize(basis_dim, num_imgs); for (int i = 0; i < segment_random_sample_num; ++i) { NVMatrix l_grad; NVMatrix& l_color_basis = colorBasis.sliceRows(i * basis_dim, (i + 1) * basis_dim); NVMatrix& l_color_diff = getActs().sliceRows(3 * i + inpIdx, 3 * i + inpIdx + 1); l_color_basis.eltwiseMultByVector(l_color_diff, l_grad); l_grad.add(grad1, 1, 1, grad2); grad2.copy(grad1); delete &l_color_basis; delete &l_color_diff; } grad1.scale(1.0 / (float) segment_random_sample_num); _prev[inpIdx]->getActsGrad().add(grad1, scaleTargets, -_coeff); } ConcatenateLayer::ConcatenateLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { } void ConcatenateLayer::fpropPreCommon(NVMatrixV& v, PASS_TYPE passType) { assert(v.size()>0); int outputs = 0; int num_imgs = v[0]->getNumCols(); for (int i = 0; i < v.size(); ++i) { assert(num_imgs==v[i]->getNumCols()); outputs += v[i]->getNumRows(); } if (verbose) printf("ConcatenateLayer::fpropPreCommon outputs:%d\n", outputs); getActs().resize(outputs, num_imgs); _outputs_p = 0; } void ConcatenateLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { int input_dim = _inputs[inpIdx]->getNumRows(); // printf("ConcatenateLayer::fpropActs inpIdx:%d input_dim:%d\n", // inpIdx,input_dim); NVMatrix& dest = getActs().sliceRows(_outputs_p, _outputs_p + input_dim); _inputs[inpIdx]->copy(dest); _outputs_p += input_dim; delete &dest; } void ConcatenateLayer::bpropPreCommon(NVMatrix& v, PASS_TYPE passType) { _outputs_p = 0; } void ConcatenateLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { int input_dim = _inputs[inpIdx]->getNumRows(); // printf("ConcatenateLayer::bpropActs inpIdx:%d input_dim:%d\n", // inpIdx,input_dim); NVMatrix& l_grad = v.sliceRows(_outputs_p, _outputs_p + input_dim); l_grad.copy(_prev[inpIdx]->getActsGrad()); _outputs_p += input_dim; delete &l_grad; } GradMagEnhanceCostLayer::GradMagEnhanceCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) { } /* * input 0: prediction of 2 D transform for gradient magnitude. 
(2,n) * input 1: pixel L channel in input image (1,n) * input 2: pixel L channel gradient magnitude in input image (1,n) * input 3: pixel L channel gradient magnitude in enhanced image (1,n) * */ void GradMagEnhanceCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { if (inpIdx == 0) { NVMatrix& predMapping = *_inputs[0]; //shape: (2,num_imgs) NVMatrix& inL = *_inputs[1]; // shape: (1,num_imgs) NVMatrix& inGradMag = *_inputs[2]; // shape: (1,num_imgs) NVMatrix& enhGradMag = *_inputs[3]; // shape: (1,num_imgs) assert(predMapping.getNumRows()==2); assert(inL.getNumRows()==1); assert(inGradMag.getNumRows()==1); assert(enhGradMag.getNumRows()==1); // printf("GradMagEnhanceCostLayer::fpropActs\n"); // log(enhGradMag/inGradMag)=a*logInL+b // cost function: 0.5 * (squared difference in enhGradMag) // printf("inL min:%f \n",inL.min()); inL.apply(NVMatrixOps::Log(), logInL); NVMatrix predLogRatio; NVMatrix& coeff_a = predMapping.sliceRows(0, 1); NVMatrix& coeff_b = predMapping.sliceRows(1, 2); // printf("coeff_a mean %f \n", coeff_a.mean()); // printf("coeff_b mean %f \n", coeff_b.mean()); logInL.eltwiseMult(coeff_a, predLogRatio); predLogRatio.applyBinary(NVMatrixBinaryOps::Add(), coeff_b); predLogRatio.apply(NVMatrixOps::Exp(), predEnhGradMag); // printf("inGradMag min:%f max:%f\n",inGradMag.min(),inGradMag.max()); predEnhGradMag.applyBinary(NVMatrixBinaryOps::Multiply(), inGradMag); // printf("enhGradMag min:%f max:%f\n",enhGradMag.min(),enhGradMag.max()); predEnhGradMag.subtract(enhGradMag, getActs()); _costv.clear(); _costv.push_back(0.5 * getActs().norm2()); delete &coeff_a; delete &coeff_b; // printf("exit GradMagEnhanceCostLayer::fpropActs\n"); } } void GradMagEnhanceCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { // printf("GradMagEnhanceCostLayer::bpropActs tag1\n"); assert(inpIdx==0); NVMatrix& predMapping = *_inputs[0]; //shape: (2,num_imgs) NVMatrix& inL = *_inputs[1]; // shape: (1,num_imgs) NVMatrix& inGradMag = *_inputs[2]; // shape: (1,num_imgs) NVMatrix& enhGradMag = *_inputs[3]; // shape: (1,num_imgs) int num_imgs=predMapping.getNumCols(); //if(_prev[inpIdx]->getActsGrad().getNumRows()==0){ _prev[inpIdx]->getActsGrad().resize(2,num_imgs); _prev[inpIdx]->getActsGrad().setZero(); //} NVMatrix& prev_grad_coeff_a = _prev[inpIdx]->getActsGrad().sliceRows(0,1); NVMatrix& prev_grad_coeff_b = _prev[inpIdx]->getActsGrad().sliceRows(1,2); // printf("GradMagEnhanceCostLayer::bpropActs tag2\n"); NVMatrix grad_coeff_a,grad_coeff_b; getActs().eltwiseMult(predEnhGradMag,grad_coeff_b); grad_coeff_b.eltwiseMult(logInL,grad_coeff_a); // // printf("grad_coeff_a rows,cols:%d %d\n",grad_coeff_a.getNumRows(), // grad_coeff_a.getNumCols()); // printf("grad_coeff_b rows,cols:%d %d\n",grad_coeff_b.getNumRows(), // grad_coeff_b.getNumCols()); prev_grad_coeff_a.add(grad_coeff_a,scaleTargets,-_coeff); prev_grad_coeff_b.add(grad_coeff_b,scaleTargets,-_coeff); delete &prev_grad_coeff_a; delete &prev_grad_coeff_b; // printf("GradMagEnhanceCostLayer::bpropActs tag3\n"); }
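/*
 * Editor's sketch (not part of the original cuda-convnet sources): the
 * GradMagEnhanceCostLayer above models gradient-magnitude enhancement as
 *     log(enhGradMag / inGradMag) = a * log(inL) + b,
 * i.e. predEnhGradMag = exp(a*log(inL) + b) * inGradMag, with cost
 * 0.5 * (predEnhGradMag - enhGradMag)^2 per pixel. The scalar helpers below
 * only restate that arithmetic on the host for clarity; the function names
 * are hypothetical (they do not exist in the library) and they assume a
 * math header (<cmath>/math.h) is available, as it is in this file.
 */
static inline float gradMagEnhancePredict(float a, float b, float inL, float inGradMag) {
    // Predicted enhanced gradient magnitude; assumes inL > 0 (the layer takes log(inL)).
    return expf(a * logf(inL) + b) * inGradMag;
}

static inline void gradMagEnhanceCostGrad(float a, float b, float inL, float inGradMag,
                                          float enhGradMag, float* dCost_da, float* dCost_db) {
    // For cost = 0.5 * (pred - enh)^2:
    //   dCost/db = (pred - enh) * pred             (matches grad_coeff_b above)
    //   dCost/da = (pred - enh) * pred * log(inL)  (matches grad_coeff_a above)
    // The layer then accumulates these into the previous layer's acts grad
    // scaled by -_coeff, following the codebase's sign convention.
    float pred = gradMagEnhancePredict(a, b, inL, inGradMag);
    float diff = pred - enhGradMag;
    *dCost_db = diff * pred;
    *dCost_da = diff * pred * logf(inL);
}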
59ee042c73eca9f8314343340066475f08699e77.cu
/* * Copyright (c) 2011, Alex Krizhevsky ([email protected]) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <cutil_inline.h> #include <iostream> #include <layer_kernels.cuh> #include <layer.cuh> #include <data.cuh> #include <util.cuh> #include <cudaconv2.cuh> #include <matrix.h> #include <GPUmonitor.h> #include <cmath> #include <algorithm> #include <vector> #include <cstring> using namespace std; extern ConvNet* model; extern GPUmonitor *gpuMonitor; float thres = 1.0e5; bool verbose = 0; #define checkNVMatrixNan(dmat,msg) _checkNVMatrixNan(dmat,msg,__FILE__,__LINE__) void _checkNVMatrixNan(NVMatrix &dMat, string msg, const char* filenm, const int linenum) { //int leadingDim = dMat.getLeadingDim(); //int stride = dMat.getStride(); //int followingDim = dMat.getFollowingDim(); //printf("ldDim:%d stride:%d followingDim:%d\n",leadingDim,stride,followingDim); if (isnan(dMat.sum())) { printf("_checkNVMatrixNan File:%s line:%d\n", filenm, linenum); dMat.printShape(msg.c_str()); dMat.fprint(msg.c_str(), dMat.getNumRows(), dMat.getNumCols()); dMat.print(2, 2); printf("min:%f max:%f mean:%f\n", dMat.min(), dMat.max(), dMat.mean()); exit(1); } } /* * ======================= * Layer * ======================= */ Layer::Layer(ConvNet* convNet, PyObject* paramsDict, bool trans) : _convNet(convNet), _trans(trans) { _name = pyDictGetString(paramsDict, "name"); _type = pyDictGetString(paramsDict, "type"); _numGradProducersNext = 0; _foundGradConsumers = false; _gradConsumer = pyDictGetInt(paramsDict, "gradConsumer"); _actsTarget = pyDictGetInt(paramsDict, "actsTarget"); _actsGradTarget = pyDictGetInt(paramsDict, "actsGradTarget"); _conserveMem = pyDictGetInt(paramsDict, "conserveMem"); _outputs = _actsTarget < 0 ? new NVMatrix() : NULL; _actsGrad = _actsGradTarget < 0 ? 
new NVMatrix() : NULL; // can be commented out since it's done in postInit() _GPU = pyDictGetInt(paramsDict, "GPU"); #ifdef MULTIGPU cudaSetDevice(_GPU); NVMatrix::checkCUDAError(cudaEventCreate(&_fpropEvent), "Layer::Layer Layer::Layer"); #endif printf(" init layer: %s\n", _name.c_str()); printf("_actsTarget:%d _actsGradTarget:%d _GPU:%d\n", _actsTarget, _actsGradTarget, _GPU); } void Layer::fpropNext(PASS_TYPE passType) { for (int i = 0; i < _next.size(); i++) { // printf("_next[%d] name:%s \n",i,_name.c_str()); // if(&_next[i]->getActs()) // printf("_next[%d] name:%s rows:%d cols:%d\n",i,_name.c_str(), // _next[i]->getActs().getNumRows(),_next[i]->getActs().getNumCols()); _next[i]->fprop(passType); } } void Layer::truncBwdActs() { // Only truncate actsGrad if I own it if (_conserveMem && _actsGradTarget < 0) { getActsGrad().truncate(); } if (_conserveMem) { getActs().truncate(); } } void Layer::fpropPreCommon(NVMatrixV& v, PASS_TYPE passType) { // Do nothing by default } void Layer::fpropPostCommon(NVMatrixV& v, PASS_TYPE passType) { } void Layer::bpropPreCommon(NVMatrix& v, PASS_TYPE passType) { #ifdef MULTIGPU NVMatrix::checkCUDAError(cudaSetDevice(_GPU), "Layer::bpropPreCommon cudaSetDevice"); #endif // Do nothing by default } void Layer::bpropPostCommon(NVMatrix& v, PASS_TYPE passType) { #ifdef MULTIGPU NVMatrix::checkCUDAError(cudaSetDevice(_GPU), "Layer::bpropPostCommon cudaSetDevice"); #endif //float gradMin=getActsGrad().min(); //float gradMax=getActsGrad().max(); //float gradMean=getActsGrad().mean(); //if(abs(gradMin)>thres || abs(gradMax)>thres){ // printf("layer:%s gradMin:%f gradMax:%f gradMean:%f\n",_name.c_str(), // gradMin,gradMax,gradMean); //} } void Layer::fprop(PASS_TYPE passType) { _rcvdFInputs += 1; if (_rcvdFInputs == _prev.size()) { #ifdef MULTIGPU //reset _bpropEventID = 0; NVMatrix::checkCUDAError(cudaSetDevice(_GPU), "Layer::fprop(PASS_TYPE passType) cudaSetDevice"); for (int i = 0; i < _prev.size(); ++i) { NVMatrix::checkCUDAError( cudaEventSynchronize(_prev[i]->getFpropEvent()), "Layer::fprop(PASS_TYPE passType) (cudaEventSynchronize"); } #endif NVMatrixV v; for (int i = 0; i < _prev.size(); i++) { v.push_back(&_prev[i]->getActs()); } fprop(v, passType); } } void Layer::fprop(NVMatrix& v, PASS_TYPE passType) { NVMatrixV vl; vl.push_back(&v); fprop(vl, passType); } void Layer::fprop(NVMatrixV& v, PASS_TYPE passType) { // printf("Layer %s\n",_name.c_str()); // if(_actsTarget<0) // printf("Layer %s fprop. acts rows:%d cols:%d\n",_name.c_str(), // getActs().getNumRows(),getActs().getNumCols()); assert(v.size() == _prev.size()); _inputs.clear(); _inputs.insert(_inputs.begin(), v.begin(), v.end()); _outputs = _actsTarget < 0 ? _outputs : _inputs[_actsTarget]; _rcvdFInputs = _prev.size(); for (NVMatrixV::iterator it = v.begin(); it != v.end(); ++it) { (*it)->transpose(_trans); } getActs().transpose(_trans); // printf("Layer %s fprop. acts rows:%d cols:%d\n",_name.c_str(), // getActs().getNumRows(),getActs().getNumCols()); fpropPreCommon(v, passType); // First do fprop on the input whose acts matrix I'm sharing, if any if (_actsTarget >= 0) { fpropActs(_actsTarget, 0, passType); } // Then add the rest of the inputs to that for (int i = 0; i < _prev.size(); i++) { if (i != _actsTarget) { fpropActs(i, _actsTarget >= 0 || i > 0, passType); } } if (verbose) { NVMatrix tmp(getActs()); getActs().apply(NVMatrixOps::Abs(), tmp); float mean_abs_act = tmp.sum() / tmp.getNumElements(); printf("Layer::fprop %s. 
mean_abs_act:%f\n", _name.c_str(), mean_abs_act); } //float actMax=getActs().max(),actMin=getActs().min(); //if(abs(actMax)>thres || abs(actMin)>thres){ // printf("\nlayer:%s actMax:%f actMin:%f\n",_name.c_str(),actMax,actMin); // for(int i=0;i<_inputs.size();++i){ // float inputMax=(*_inputs[i]).max(); // float inputMin=(*_inputs[i]).min(); // printf("input:%d inputMax:%f inputMin:%f\n",i,inputMax,inputMin); // } //} fpropPostCommon(v, passType); #ifdef MULTIGPU NVMatrix::checkCUDAError(cudaEventRecord(_fpropEvent), "Layer::fprop(NVMatrixV& v, PASS_TYPE passType) cudaEventRecord"); #endif fpropNext(passType); } void Layer::bprop(PASS_TYPE passType) { if (_rcvdBInputs == _numGradProducersNext) { _rcvdBInputs++; // avoid doing bprop computation twice #ifdef MULTIGPU for (int i = 0; i < _next.size(); ++i) { NVMatrix::checkCUDAError(cudaEventSynchronize(_bpropEvent[i]), "Layer::bprop(PASS_TYPE passType) cudaEventSynchronize (_bpropEvent[i])"); } #endif bprop(getActsGrad(), passType); } } void Layer::bprop(NVMatrix& v, PASS_TYPE passType) { if (verbose & v.getNumElements() > 0) { NVMatrix tmp(v); v.apply(NVMatrixOps::Abs(), tmp); float meanAbs = tmp.sum() / tmp.getNumElements(); printf("Layer::bprop %s v rows,cols,%d %d mean Abs:%f\n", _name.c_str(), v.getNumRows(), v.getNumCols(), meanAbs); } v.transpose(_trans); for (int i = 0; i < _prev.size(); i++) { _prev[i]->getActs().transpose(_trans); _prev[i]->getActsGrad().transpose(_trans); } getActs().transpose(_trans); bpropPreCommon(v, passType); if (isGradProducer()) { // First propagate activity gradient to all layers whose activity // gradient matrix I'm definitely not sharing. for (int i = 0; i < _prev.size(); i++) { if (_prev[i]->isGradConsumer() && _actsGradTarget != i) { #ifdef MULTIGPU // do bprop on previous layer's device. // in the case where previous layer connects to multiple subsequent layers, // there is no need to synchronize the bprop for a previous layer NVMatrix::checkCUDAError(cudaSetDevice(_prev[i]->getGPU()), "Layer::bprop(NVMatrix& v, PASS_TYPE passType) cudaSetDevice(_prev[i]->getGPU())"); #endif bpropActs(v, i, _prev[i]->getRcvdBInputs() > 0 ? 1 : 0, passType); _prev[i]->incRcvdBInputs(); #ifdef MULTIGPU NVMatrix::checkCUDAError( cudaEventRecord(_prev[i]->getNextBpropEvent()), "Layer::bprop(NVMatrix& v, PASS_TYPE passType) cudaEventRecord"); #endif } } // Then propagate activity gradient to the layer whose activity gradient // matrix I'm sharing, if any. if (_actsGradTarget >= 0 && _prev[_actsGradTarget]->isGradConsumer()) { #ifdef MULTIGPU NVMatrix::checkCUDAError( cudaSetDevice(_prev[_actsGradTarget]->getGPU()), "Layer::bprop(NVMatrix& v, PASS_TYPE passType) cudaSetDevice(_prev[_actsGradTarget]->getGPU())"); #endif bpropActs(v, _actsGradTarget, _prev[_actsGradTarget]->getRcvdBInputs() > 0 ? 
1 : 0, passType); _prev[_actsGradTarget]->incRcvdBInputs(); #ifdef MULTIGPU NVMatrix::checkCUDAError( cudaEventRecord( _prev[_actsGradTarget]->getNextBpropEvent()), "Layer::bprop(NVMatrix& v, PASS_TYPE passType) cudaEventRecord(_prev[_actsGradTarget]->getNextBpropEvent())"); #endif } } truncBwdActs(); bpropPostCommon(v, passType); if (isGradProducer()) { for (int i = 0; i < _prev.size(); i++) { if (_prev[i]->isGradConsumer()) { _prev[i]->bprop(passType); } } } } void Layer::reset() { _rcvdFInputs = 0; _rcvdBInputs = 0; } string& Layer::getName() { return _name; } string& Layer::getType() { return _type; } int Layer::getRcvdFInputs() { return _rcvdFInputs; } int Layer::getRcvdBInputs() { return _rcvdBInputs; } int Layer::incRcvdBInputs() { return ++_rcvdBInputs; } void Layer::addNext(Layer* l) { _next.push_back(l); _numGradProducersNext += l->isGradProducer(); } void Layer::addPrev(Layer* l) { _prev.push_back(l); } void Layer::postInit() { // _outputs = _actsTarget < 0 ? new NVMatrix() : &_prev[_actsTarget]->getActs(); _actsGrad = _actsGradTarget < 0 ? new NVMatrix() : &_prev[_actsGradTarget]->getActsGrad(); } #ifdef MULTIGPU int Layer::getGPU() { return _GPU; } cudaEvent_t Layer::getFpropEvent() { return _fpropEvent; } void Layer::initBpropEvent() { NVMatrix::checkCUDAError(cudaSetDevice(_GPU), "Layer::initBpropEvent cudaSetDevice"); for (int i = 0; i < _next.size(); ++i) { NVMatrix::checkCUDAError(cudaEventCreate(&_bpropEvent[i]), "Layer::initBpropEvent() cudaEventCreate(&_bpropEvent[i])"); } } cudaEvent_t& Layer::getNextBpropEvent() { return _bpropEvent[_bpropEventID++]; } #endif // Does this layer, or some layer below it, need the gradient // for parameter updates? // Only weight layers should be grad consumers themselves. bool Layer::isGradConsumer() { if (!_foundGradConsumers) { for (int i = 0; i < _prev.size(); i++) { _gradConsumer |= _prev[i]->isGradConsumer(); } _foundGradConsumers = true; } return _gradConsumer; } // Does this layer produce gradient for layers below? 
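// Added note (not in the original source, a reading of the surrounding code): the base class
// answers "yes" below; DataLayer overrides this to false and CostLayer returns (_coeff != 0).
// Combined with the lazily cached OR in isGradConsumer() above, bprop() can skip entire
// branches of the network that neither produce nor consume gradients.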
bool Layer::isGradProducer() { return true; } vector<Layer*>& Layer::getPrev() { return _prev; } vector<Layer*>& Layer::getNext() { return _next; } NVMatrix& Layer::getActs() { assert(_outputs != NULL); return *_outputs; } NVMatrix& Layer::getActsGrad() { assert(_actsGrad != NULL); return *_actsGrad; } /* * ======================= * NeuronLayer * ======================= */ NeuronLayer::NeuronLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, true) { _neuron = &Neuron::makeNeuron(PyDict_GetItemString(paramsDict, "neuron")); } void NeuronLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { _neuron->computeInputGrad(v, _prev[0]->getActsGrad(), scaleTargets > 0); } void NeuronLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { _neuron->activate(*_inputs[0], getActs()); } /* * ======================= * WeightLayer * ======================= */ WeightLayer::WeightLayer(ConvNet* convNet, PyObject* paramsDict, bool trans, bool useGrad) : Layer(convNet, paramsDict, trans) { MatrixV& hWeights = *pyDictGetMatrixV(paramsDict, "weights"); MatrixV& hWeightsInc = *pyDictGetMatrixV(paramsDict, "weightsInc"); Matrix& hBiases = *pyDictGetMatrix(paramsDict, "biases"); Matrix& hBiasesInc = *pyDictGetMatrix(paramsDict, "biasesInc"); floatv& momW = *pyDictGetFloatV(paramsDict, "momW"); float momB = pyDictGetFloat(paramsDict, "momB"); floatv& epsW = *pyDictGetFloatV(paramsDict, "epsW"); float epsB = pyDictGetFloat(paramsDict, "epsB"); floatv& wc = *pyDictGetFloatV(paramsDict, "wc"); float wcBias = pyDictGetFloat(paramsDict, "wcB"); printf("(epsW,momW,wc) for %d inputs\n", epsW.size()); for (int i = 0; i < momW.size(); ++i) { printf("(%.12f,%.12f,%.12f) ", epsW[i], momW[i], wc[i]); } printf("\n"); printf("momB:%.12f epsB:%.12f wcBias:%.12f\n", momB, epsB, wcBias); // Source layers for shared weights intv& weightSourceLayerIndices = *pyDictGetIntV(paramsDict, "weightSourceLayerIndices"); // Weight matrix indices (inside the above source layers) for shared weights intv& weightSourceMatrixIndices = *pyDictGetIntV(paramsDict, "weightSourceMatrixIndices"); for (int i = 0; i < weightSourceLayerIndices.size(); i++) { int srcLayerIdx = weightSourceLayerIndices[i]; int matrixIdx = weightSourceMatrixIndices[i]; if (srcLayerIdx == convNet->getNumLayers()) { // Current layer _weights.addWeights(*new Weights(_weights[matrixIdx], epsW[i])); } else if (srcLayerIdx >= 0) { WeightLayer& srcLayer = *static_cast<WeightLayer*>(&convNet->getLayer(srcLayerIdx)); Weights* srcWeights = &srcLayer.getWeights(matrixIdx); _weights.addWeights(*new Weights(*srcWeights, epsW[i])); } else { _weights.addWeights( *new Weights(*hWeights[i], *hWeightsInc[i], epsW[i], wc[i], momW[i], useGrad)); } } _biases = new Weights(hBiases, hBiasesInc, epsB, wcBias, momB, true); // Epsilons for finite-difference gradient checking operation _wStep = 0.001; _bStep = 0.002; delete &weightSourceLayerIndices; delete &weightSourceMatrixIndices; delete &hWeights; delete &hWeightsInc; delete &momW; delete &epsW; delete &wc; } void WeightLayer::setWeightsEpsScale(float eps_scale) { Layer::setWeightsEpsScale(eps_scale); _weights.setEpsScale(eps_scale); _biases->setEpsScale(eps_scale); } void WeightLayer::multiplyWeightsEpsScale(float multiplier) { Layer::multiplyWeightsEpsScale(multiplier); _weights.multiplyEpsScale(multiplier); _biases->multiplyEpsScale(multiplier); } void WeightLayer::bpropPreCommon(NVMatrix& v, PASS_TYPE passType) { Layer::bpropPreCommon(v, passType); // 
printf("WeightLayer::bpropPreCommo %s\n",_name.c_str()); if (_biases->getEps() > 0) { bpropBiases(v, passType); } for (int i = 0; i < _weights.getSize(); i++) { if (_weights[i].getEps() > 0) { bpropWeights(v, i, passType); // Increment its number of updates _weights[i].incNumUpdates(); } } } void WeightLayer::updateWeights() { Layer::updateWeights(); if (verbose) printf("WeightLayer::updateWeights %s\n", _name.c_str()); _weights.update(); _biases->update(); } void WeightLayer::copyToCPU() { Layer::copyToCPU(); _weights.copyToCPU(); _biases->copyToCPU(); } void WeightLayer::copyToGPU() { Layer::copyToGPU(); _weights.copyToGPU(); _biases->copyToGPU(); } void WeightLayer::checkGradients() { for (int i = 0; i < _weights.getSize(); i++) { _convNet->checkGradient(_name + " weights[" + tostr(i) + "]", _wStep, _weights[i]); } _convNet->checkGradient(_name + " biases", _bStep, *_biases); } Weights& WeightLayer::getWeights(int idx) { return _weights[idx]; } /* * ============ * ScalingLayer Layer * ============ * */ ScalingLayer::ScalingLayer(ConvNet* convNet, PyObject* paramsDict) : WeightLayer(convNet, paramsDict, false, true) { } void ScalingLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { Weights& scaleWeight = _weights[inpIdx]; scaleWeight.copyToCPU(); Matrix& hScale = scaleWeight.getCPUW(); float *scale = hScale.getData(); getActs().add(*_inputs[inpIdx], scaleTargets, *scale); if (scaleTargets == 0) { _biases->copyToCPU(); Matrix& hBias = _biases->getCPUW(); float *bias = hBias.getData(); printf("ScalingLayer %s. scale: %f bias:%f scaleTargets:%f\n", _name.c_str(), *scale, *bias, scaleTargets); getActs().addScalar(*bias); } if (verbose) { NVMatrix tmp(getActs()); getActs().apply(NVMatrixOps::Abs(), tmp); float mean_abs_act = tmp.sum() / tmp.getNumElements(); printf("ScalingLayer %s. mean_abs_act:%f\n", _name.c_str(), mean_abs_act); } } void ScalingLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { Weights& scaleWeight = _weights[inpIdx]; scaleWeight.copyToCPU(); Matrix& hScale = scaleWeight.getCPUW(); float *scale = hScale.getData(); // printf("ScalingLayer::bpropAct scale:%f\n",*scale); // _prev[inpIdx]->getActsGrad().resize(v); _prev[inpIdx]->getActsGrad().add(v, scaleTargets, *scale); } void ScalingLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { int numCases = v.getNumCols(); float scaleBGrad = passType == PASS_GC ? 1 : _biases->getEps() / numCases; // printf("ScalingLayer::bpropBiases _biases->getGrad() rows:%d cols:%d\n", // _biases->getGrad().getNumRows(),_biases->getGrad().getNumCols()); _biases->getGrad().resize(1, 1); _biases->getGrad().scale(0); _biases->getGrad().addScalar(scaleBGrad * v.sum()); } void ScalingLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) { int numCases = v.getNumCols(); NVMatrix a; // printf("ScalingLayer::bpropWeights v rows:%d cols:%d getActs() rows:%d cols:%d\n", // v.getNumRows(),v.getNumCols(),getActs().getNumRows(),getActs().getNumCols()); _prev[inpIdx]->getActs().eltwiseMult(v, a); float scaleGrad = passType == PASS_GC ? 
1 : _weights[inpIdx].getEps() / numCases; // printf("ScalingLayer::bpropWeights _weights[inpIdx].getGrad() rows:%d cols:%d \n", // _weights[inpIdx].getGrad().getNumRows(),_weights[inpIdx].getGrad().getNumCols()); _weights[inpIdx].getGrad().resize(1, 1); _weights[inpIdx].getGrad().setZero(); //Matrix tmp; //_weights[inpIdx].getGrad().copyToHost(tmp,true); //float *data= tmp.getData(); //printf("tmp rows,cols:%d %d data [0]:%f\n",tmp.getNumRows(),tmp.getNumCols(),data[0]); printf( "ScalingLayer::bpropWeights %s. _weights[inpIdx].getGrad().sum():%f scaleGrad:%f weight grad:%f\n", _name.c_str(), _weights[inpIdx].getGrad().sum(), scaleGrad, scaleGrad * a.sum()); _weights[inpIdx].getGrad().addScalar(scaleGrad * a.sum()); } /* * ============ * NormalizeLayer * ============ */ NormalizeLayer::NormalizeLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { } void NormalizeLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); NVMatrix squared; _inputs[inpIdx]->apply(NVMatrixOps::Square(), squared); squared.sum(0, _norm); _norm.apply(NVMatrixOps::Sqrt()); if (verbose) { printf( "NormalizeLayer::fpropActs squared size:%d %d norm size:%d %d norm mean:%f\n", squared.getNumRows(), squared.getNumCols(), _norm.getNumRows(), _norm.getNumCols(), _norm.sum() / _norm.getNumElements()); //_norm.print(_norm.getNumRows(), _norm.getNumCols()); } if (_norm.min() == 0) { printf("zero norm\n"); exit(1); } _inputs[inpIdx]->eltwiseDivideByVector(_norm, getActs()); } void NormalizeLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); computeL2NormalizeGrad(_prev[inpIdx]->getActs(), getActs(), _norm, v, _prev[inpIdx]->getActsGrad(), scaleTargets == 1); // // NVMatrix prev_v; // v.eltwiseMultByVector(_norm,prev_v); // _prev[inpIdx]->getActsGrad().add(prev_v,scaleTargets,1); } /* * ======================= * FCLayer * ======================= */ FCLayer::FCLayer(ConvNet* convNet, PyObject* paramsDict) : WeightLayer(convNet, paramsDict, true, false) { //printf("set dropout to false\n"); //_dropout=0; _dropout = pyDictGetFloat(paramsDict, "dropout"); _wStep = 0.1; _bStep = 0.01; } void FCLayer::fpropPreCommon(NVMatrixV& v, PASS_TYPE passType) { if (passType == PASS_TRAIN && _dropout > 0) //printf("FCLayer::fpropPreCommon dropout in training is enabled\n"); initDropoutMap(); } void FCLayer::fpropPostCommon(NVMatrixV& v, PASS_TYPE passType) { Layer::fpropPostCommon(v, passType); //float actMax=(getActs().max()); //float actMin=(getActs().min()); //if(abs(actMax)>thres || abs(actMin)>thres){ // float wMin,wMax,wMean; // wMin=(*_weights[0]).min(); // wMax=(*_weights[0]).max(); // wMean=(*_weights[0]).mean(); // printf("layer:%s wMin,wMax,wMean:%f %f %f\n", // _name.c_str(),wMin,wMax,wMean); //} if (passType == PASS_TRAIN && _dropout > 0) { //printf("FCLayer::fpropPostCommon dropout in training is enabled\n"); getActs().eltwiseMultByVector(_devDoMap); //printf("dropout binary map\n"); //_hDoMap.print(_hDoMap.getNumRows(),_hDoMap.getNumCols()); //printf("FCLayer::fpropPostCommon activation after dropout\n"); //getActs().print(getActs().getNumRows(),getActs().getNumCols()); } else if (passType == PASS_TEST && _dropout > 0) { //printf("FCLayer::fpropPostCommon dropout in testing is enabled\n"); getActs().scale(1 - _dropout); } } void FCLayer::initDropoutMap() { assert(_weights.getSize() > 0); int numOut = _weights[0].getNumCols(); //int numIn=_weights[0].getNumRows(); //printf("initDropoutMap numIN:%d 
numOut:%d\n",numIn,numOut); _hDoMap.resize(1, numOut); vector<int> nums(numOut); for (int i = 0; i < numOut; ++i) nums[i] = i; std::random_shuffle(nums.begin(), nums.end()); float *hDoMapData = _hDoMap.getData(); memset(hDoMapData, 0, sizeof(float) * numOut); for (int i = 0; i < floor(numOut * (1 - _dropout)); ++i) hDoMapData[nums[i]] = 1.0f; _devDoMap.resize(_hDoMap); _devDoMap.copyFromHost(_hDoMap); } void FCLayer::bpropPreCommon(NVMatrix& v, PASS_TYPE passType) { WeightLayer::bpropPreCommon(v, passType); if (passType == PASS_TRAIN && _dropout > 0) { assert(_weights.getSize() > 0); //int numOut=_weights[0].getNumCols(); //assert(numOut==v.getNumCols()); v.eltwiseMultByVector(_devDoMap); //v.print(v.getNumRows(),v.getNumCols()); } } void FCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { checkNVMatrixNan(*_inputs[inpIdx], _name); checkNVMatrixNan(*_weights[inpIdx], _name); //if(_name==string("fc4096_1")) //{ // printf("(*_inputs[inpIdx]) min,max,mean:%f %f %f\n",(*_inputs[inpIdx]).min(),(*_inputs[inpIdx]).max(),(*_inputs[inpIdx]).mean()); // printf("(*_weights[inpIdx]) min,max,mean:%f %f %f\n",(*_weights[inpIdx]).min(),(*_weights[inpIdx]).max(),(*_weights[inpIdx]).mean()); // /* // (*_inputs[inpIdx]).fprint((_name+string("FCLayer fpropActs _inputs[inpIdx]")).c_str(),(*_inputs[inpIdx]).getNumRows(), // (*_inputs[inpIdx]).getNumCols()); //(*_weights[inpIdx]).fprint((_name+string("FCLayer fpropActs _weights[inpIdx]")).c_str(),(*_weights[inpIdx]).getNumRows(), // (*_weights[inpIdx]).getNumCols());*/ //} getActs().addProduct(*_inputs[inpIdx], *_weights[inpIdx], scaleTargets, 1); if (scaleTargets == 0) { getActs().addVector(_biases->getW()); } // if(_name == string("reglayer")){ // printf("FCLayer %s. print getActs()\n", _name.c_str()); // getActs().print(1,getActs().getNumCols()); // } if (verbose) { NVMatrix tmp(getActs()); getActs().apply(NVMatrixOps::Abs(), tmp); float mean_abs_act = tmp.sum() / tmp.getNumElements(); printf("FCLayer %s. 
mean_abs_act:%f\n", _name.c_str(), mean_abs_act); } // if (isnan(getActs().sum())) { // printf("FCLayer::fpropActs isnan(getActs().sum())\n"); // printf("inputs min:%f max:%f mean:%f\n", (*_inputs[inpIdx]).min(), // (*_inputs[inpIdx]).max(), (*_inputs[inpIdx]).mean()); // printf("(*_weights[inpIdx]) min:%f max:%f mean:%f\n", // (*_weights[inpIdx]).min(), (*_weights[inpIdx]).max(), // (*_weights[inpIdx]).mean()); // printf("_biases min:%f max:%f mean:%f\n", (_biases->getW()).min(), // (_biases->getW()).max(), (_biases->getW()).mean()); // (*_inputs[inpIdx]).fprint( // (_name + string("FCLayer fpropActs _inputs")).c_str(), // (*_inputs[inpIdx]).getNumRows(), // (*_inputs[inpIdx]).getNumCols()); // (*_weights[inpIdx]).fprint( // (_name + string("FCLayer fpropActs _weights")).c_str(), // (*_weights[inpIdx]).getNumRows(), // (*_weights[inpIdx]).getNumCols()); // (_biases->getW()).fprint( // (_name + string("FCLayer fpropActs _biases")).c_str(), // (_biases->getW()).getNumRows(), (_biases->getW()).getNumCols()); // // } // checkNVMatrixNan(getActs(), _name); } void FCLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& weights_T = _weights[inpIdx].getW().getTranspose(); _prev[inpIdx]->getActsGrad().addProduct(v, weights_T, scaleTargets, 1); //checkNVMatrixNan(_prev[inpIdx]->getActsGrad(),_name); delete &weights_T; } void FCLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { //if(verbose) // printf("FCLayer::bpropBiases v rows(numCases) cols:%d %d\n", // v.getNumRows(),v.getNumCols()); int numCases = v.getNumRows(); float scaleBGrad = passType == PASS_GC ? 1 : _biases->getEps() / numCases; _biases->getGrad().addSum(v, 0, 0, scaleBGrad); if (0) { Matrix biasGrad; _biases->getGrad().copyToHost(biasGrad, true); float *biasGradData = biasGrad.getData(); int posNum = 0, negNum = 0, zeroNum = 0; for (int i = 0; i < biasGrad.getNumElements(); ++i) { if (biasGradData[i] < 0) negNum++; else if (biasGradData[i] == 0) zeroNum++; else posNum++; } float biasGradSum = biasGrad.sum(); printf("FCLayer: %s biasGradSum:%.8f negNum:%d zeroNum:%d posNum:%d\n", _name.c_str(), biasGradSum, negNum, zeroNum, posNum); Matrix bias; _biases->getW().copyToHost(bias, true); float *biasData = bias.getData(); posNum = 0; negNum = 0; zeroNum = 0; for (int i = 0; i < bias.getNumElements(); ++i) { if (biasData[i] < 0) negNum++; else if (biasData[i] == 0) zeroNum++; else posNum++; } float biasSum = bias.sum(); printf("FCLayer: %s biasSum:%.8f negNum:%d zeroNum:%d posNum:%d\n", _name.c_str(), biasSum, negNum, zeroNum, posNum); NVMatrix dbgMat(_biases->getGrad(), true); dbgMat.eltwiseDivide(_biases->getW()); dbgMat.apply(NVMatrixOps::Abs()); printf("FCLayer: %s dbgMat.getNumElements():%d\n", _name.c_str(), dbgMat.getNumElements()); float meanOptimWC = 0.5f * dbgMat.sum() / (float) (dbgMat.getNumElements()); printf("FCLayer: %s bias meanOptimWC:%.8f\n", _name.c_str(), meanOptimWC); } } void FCLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) { int numCases = v.getNumRows(); NVMatrix& prevActs_T = _prev[inpIdx]->getActs().getTranspose(); float scaleInc = (_weights[inpIdx].getNumUpdates() == 0 && passType != PASS_GC) * _weights[inpIdx].getMom(); float scaleGrad = passType == PASS_GC ? 
1 : _weights[inpIdx].getEps() / numCases; _weights[inpIdx].getInc().addProduct(prevActs_T, v, scaleInc, scaleGrad); if (0) { // debugging: print gradient wrt weights NVMatrix dbgMat(_weights[inpIdx].getInc()); dbgMat.addProduct(prevActs_T, v, 0, scaleGrad); dbgMat.eltwiseDivide(_weights[inpIdx].getW()); dbgMat.apply(NVMatrixOps::Abs()); float meanOptimWC = 0.5f * dbgMat.sum() / (float) (dbgMat.getNumElements()); printf("FCLayer: %s weights meanOptimWC:%.8f\n", _name.c_str(), meanOptimWC); } delete &prevActs_T; } /* * ======================= * LocalLayer * ======================= */ LocalLayer::LocalLayer(ConvNet* convNet, PyObject* paramsDict, bool useGrad) : WeightLayer(convNet, paramsDict, false, useGrad) { _padding = pyDictGetIntV(paramsDict, "padding"); _stride = pyDictGetIntV(paramsDict, "stride"); _filterSize = pyDictGetIntV(paramsDict, "filterSize"); _channels = pyDictGetIntV(paramsDict, "channels"); _imgSize = pyDictGetIntV(paramsDict, "imgSize"); _numFilters = pyDictGetInt(paramsDict, "filters"); _groups = pyDictGetIntV(paramsDict, "groups"); _filterChannels = pyDictGetIntV(paramsDict, "filterChannels"); _randSparse = pyDictGetIntV(paramsDict, "randSparse"); _overSample = pyDictGetIntV(paramsDict, "overSample"); _filterPixels = pyDictGetIntV(paramsDict, "filterPixels"); _imgPixels = pyDictGetIntV(paramsDict, "imgPixels"); _modulesX = pyDictGetInt(paramsDict, "modulesX"); _modules = pyDictGetInt(paramsDict, "modules"); // It's a vector on the heap to be consistent with all the others... _filterConns = new vector<FilterConns>(); PyObject* pyFilterConns = PyDict_GetItemString(paramsDict, "filterConns"); for (int i = 0; i < _randSparse->size(); i++) { FilterConns fc; if (_randSparse->at(i)) { fc.hFilterConns = getIntA(PyList_GET_ITEM(pyFilterConns, i)); } _filterConns->push_back(fc); } } void LocalLayer::copyToGPU() { WeightLayer::copyToGPU(); for (int i = 0; i < _prev.size(); i++) { if (_randSparse->at(i)) { // Copy to GPU vector that describes sparse random connectivity cudaMalloc(&_filterConns->at(i).dFilterConns, sizeof(int) * _groups->at(i) * _filterChannels->at(i)); gpuMonitor->addUsedMemory( sizeof(int) * _groups->at(i) * _filterChannels->at(i)); cudaMemcpy(_filterConns->at(i).dFilterConns, _filterConns->at(i).hFilterConns, sizeof(int) * _groups->at(i) * _filterChannels->at(i), cudaMemcpyHostToDevice); cutilCheckMsg("cudaMemcpy: failed"); } } } /* * ======================= * ConvLayer * ======================= */ ConvLayer::ConvLayer(ConvNet* convNet, PyObject* paramsDict) : LocalLayer(convNet, paramsDict, true) { _partialSum = pyDictGetInt(paramsDict, "partialSum"); _sharedBiases = pyDictGetInt(paramsDict, "sharedBiases"); } void ConvLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { // printf("ConvLayer %s. 
fpropActs\n",_name.c_str()); checkNVMatrixNan(*_weights[inpIdx], _name + string("weights")); checkNVMatrixNan(*_inputs[inpIdx], _name + string("inputs")); if (_randSparse->at(inpIdx)) { convFilterActsSparse(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } else { convFilterActs(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } if (scaleTargets == 0) { if (_sharedBiases) { getActs().reshape(_numFilters, getActs().getNumElements() / _numFilters); getActs().addVector(_biases->getW()); getActs().reshape(_numFilters * _modules, getActs().getNumElements() / (_numFilters * _modules)); } else { getActs().addVector(_biases->getW()); } } //if(verbose){ // NVMatrix tmp(getActs()); // getActs().apply(NVMatrixOps::Abs(),tmp); // float mean_abs_act = tmp.sum()/tmp.getNumElements(); // printf("ConvLayer %s. mean_abs_act:%f\n",_name.c_str(),mean_abs_act); //} // if (isnan(getActs().sum())) { // printf("ConvLayer::fpropActs\n"); // printf("inputs min:%f max:%f mean:%f\n", (*_inputs[inpIdx]).min(), // (*_inputs[inpIdx]).max(), (*_inputs[inpIdx]).mean()); // printf("(*_weights[inpIdx]) min:%f max:%f mean:%f\n", // (*_weights[inpIdx]).min(), (*_weights[inpIdx]).max(), // (*_weights[inpIdx]).mean()); // printf("_biases min:%f max:%f mean:%f\n", (_biases->getW()).min(), // (_biases->getW()).max(), (_biases->getW()).mean()); // (*_inputs[inpIdx]).fprint( // (_name + string("ConvLayer fpropActs _inputs")).c_str(), // (*_inputs[inpIdx]).getNumRows(), // (*_inputs[inpIdx]).getNumCols()); // (*_weights[inpIdx]).fprint( // (_name + string("ConvLayer fpropActs _weights")).c_str(), // (*_weights[inpIdx]).getNumRows(), // (*_weights[inpIdx]).getNumCols()); // (_biases->getW()).fprint( // (_name + string("ConvLayer fpropActs _biases")).c_str(), // (_biases->getW()).getNumRows(), (_biases->getW()).getNumCols()); // } // // checkNVMatrixNan(getActs(), _name); } void ConvLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { int numCases = v.getNumCols(); float scaleBGrad = passType == PASS_GC ? 1 : _biases->getEps() / numCases; if (_sharedBiases) { v.reshape(_numFilters, v.getNumElements() / _numFilters); _biases->getGrad().addSum(v, 1, 0, scaleBGrad); v.reshape(_numFilters * _modules, v.getNumElements() / (_numFilters * _modules)); if (0) { NVMatrix dbgMat(_biases->getGrad(), true); dbgMat.eltwiseDivide(_biases->getW()); dbgMat.apply(NVMatrixOps::Abs()); float meanOptimWC = 0.5f * dbgMat.sum() / (float) (dbgMat.getNumElements()); printf("ConvLayer: %s biases meanOptimWC:%.8f\n", _name.c_str(), meanOptimWC); } } else { _biases->getGrad().addSum(v, 1, 0, scaleBGrad); } } void ConvLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) { int numCases = v.getNumCols(); NVMatrix& tgt = _partialSum > 0 ? _weightGradTmp : _weights[inpIdx].getGrad(); float scaleWGrad = passType == PASS_GC ? 1 : _weights[inpIdx].getEps() / numCases; float scaleTargets = _weights[inpIdx].getNumUpdates() > 0 && _partialSum == 0; // ? 
1 : 0; if (_randSparse->at(inpIdx)) { convWeightActsSparse(_prev[inpIdx]->getActs(), v, tgt, _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), _partialSum, scaleTargets, scaleWGrad); } else { convWeightActs(_prev[inpIdx]->getActs(), v, tgt, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), _partialSum, scaleTargets, scaleWGrad); } if (_partialSum > 0) { scaleTargets = _weights[inpIdx].getNumUpdates() > 0; _weightGradTmp.reshape(_modules / _partialSum, _filterChannels->at(inpIdx) * _filterPixels->at(inpIdx) * _numFilters); _weights[inpIdx].getGrad().addSum(_weightGradTmp, 0, scaleTargets, 1); _weights[inpIdx].getGrad().reshape( _filterChannels->at(inpIdx) * _filterPixels->at(inpIdx), _numFilters); if (0) { NVMatrix dbgMat(_weights[inpIdx].getGrad(), true); dbgMat.eltwiseDivide(_weights[inpIdx].getW()); dbgMat.apply(NVMatrixOps::Abs()); float meanOptimWC = 0.5f * dbgMat.sum() / (float) (dbgMat.getNumElements()); printf("ConvLayer: %s weights meanOptimWC:%.8f\n", _name.c_str(), meanOptimWC); } } } void ConvLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { if (_randSparse->at(inpIdx)) { NVMatrix& tgt = _overSample->at(inpIdx) > 1 ? _actGradTmp : _prev[inpIdx]->getActsGrad(); convImgActsSparse(v, *_weights[inpIdx], tgt, _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); if (_overSample->at(inpIdx) > 1) { _actGradTmp.reshape(_overSample->at(inpIdx), _actGradTmp.getNumElements() / _overSample->at(inpIdx)); _actGradTmp.sum(0, _prev[inpIdx]->getActsGrad()); _prev[inpIdx]->getActsGrad().reshape( _prev[inpIdx]->getActsGrad().getNumElements() / v.getNumCols(), v.getNumCols()); } } else { convImgActs(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(), _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); //checkNVMatrixNan(_prev[inpIdx]->getActsGrad(), _name); } } void ConvLayer::truncBwdActs() { LocalLayer::truncBwdActs(); if (_conserveMem) { _weightGradTmp.truncate(); _actGradTmp.truncate(); } } void ConvLayer::fpropPostCommon(NVMatrixV& v, PASS_TYPE passType) { Layer::fpropPostCommon(v, passType); //float actMax=getActs().max(),actMin=getActs().min(); //if(abs(actMax)>thres || abs(actMin)>thres){ // float wMin,wMax,wMean; // wMin=(*_weights[0]).min(); // wMax=(*_weights[0]).max(); // wMean=(*_weights[0]).mean(); // printf("layer:%s wMin,wMax,wMean:%f %f %f\n", // _name.c_str(),wMin,wMax,wMean); //} } /* * ======================= * LocalUnsharedLayer * ======================= */ LocalUnsharedLayer::LocalUnsharedLayer(ConvNet* convNet, PyObject* paramsDict) : LocalLayer(convNet, paramsDict, false) { } void LocalUnsharedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { if (_randSparse->at(inpIdx)) { localFilterActsSparse(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } else { 
localFilterActs(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } if (scaleTargets == 0) { getActs().addVector(_biases->getW()); } } void LocalUnsharedLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { int numCases = v.getNumCols(); float scaleBGrad = passType == PASS_GC ? 1 : _biases->getEps() / numCases; _biases->getGrad().addSum(v, 1, 0, scaleBGrad); } void LocalUnsharedLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) { int numCases = v.getNumCols(); float scaleInc = (passType != PASS_GC && _weights[inpIdx].getNumUpdates() == 0) * _weights[inpIdx].getMom(); // momentum float scaleWGrad = passType == PASS_GC ? 1 : _weights[inpIdx].getEps() / numCases; // eps / numCases if (_randSparse->at(inpIdx)) { localWeightActsSparse(_prev[inpIdx]->getActs(), v, _weights[inpIdx].getInc(), _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad); } else { localWeightActs(_prev[inpIdx]->getActs(), v, _weights[inpIdx].getInc(), _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad); } } void LocalUnsharedLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { if (_randSparse->at(inpIdx)) { localImgActsSparse(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(), _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } else { localImgActs(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(), _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } } /* * ======================= * SoftmaxLayer * ======================= */ SoftmaxLayer::SoftmaxLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, true) { } void SoftmaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& input = *_inputs[0]; NVMatrix& max = input.max(1); input.addVector(max, -1, getActs()); //Matrix hMax; //max.copyToHost(hMax,true); //float maxmax=hMax.max(); //Matrix hActMat; //getActs().copyToHost(hActMat,true); //float actmin=hActMat.min(); getActs().apply(NVMatrixOps::Exp_a()); //getActs().copyToHost(hActMat,true); //if(hActMat.min()<=0) //{ // printf("act min before exp:%10.9f\n",actmin); // printf("after exp hActMat.min()<=0 %10.9f\n",hActMat.min()); //} assert(getActs().isContiguous()); // Matrix hMat; // getActs().copyToHost(hMat,true); //printf("SoftmaxLayer::fpropActs: check _outputs after do exponentiation\n"); // for(int p=0,i=0;i<hMat.getNumCols();++i){ // for(int j=0;j<hMat.getNumRows();++j,++p){ // if(hMat.getCell(j,i)<=0){ // // printf("nonpositive element is found. 
%6.5f (%d,%d)\n",hMat.getCell(j,i),j,i); // exit(1); // } // } // } NVMatrix& sum = getActs().sum(1); getActs().eltwiseDivideByVector(sum); //getActs().copyToHost(hMat,true); //for(int p=0,i=0;i<hMat.getNumCols();++i){ // for(int j=0;j<hMat.getNumRows();++j,++p){ // if(isnan(hMat.getCell(j,i)) || hMat.getCell(j,i)<=0){ // printf("SoftmaxLayer::fpropActs: check _outputs after getting probability\n"); // printf("nan/nonpositive element is found. %7.6f (%d,%d)\n",hMat.getCell(j,i),j,i); // } // } //} delete &max; delete &sum; } void SoftmaxLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); bool doLogregGrad = _next.size() == 1 && _next[0]->getType() == "cost.logreg"; if (doLogregGrad) { NVMatrix& labels = _next[0]->getPrev()[0]->getActs(); float gradCoeff = dynamic_cast<CostLayer*>(_next[0])->getCoeff(); computeLogregSoftmaxGrad(labels, getActs(), _prev[0]->getActsGrad(), scaleTargets == 1, gradCoeff); } else { computeSoftmaxGrad(getActs(), v, _prev[0]->getActsGrad(), scaleTargets == 1); } } /* * ======================= * LogSoftmaxLayer * ======================= */ LogSoftmaxLayer::LogSoftmaxLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, true) { } void LogSoftmaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& input = *_inputs[0]; NVMatrix& max = input.max(1); // checkNVMatrixNan(input, _name); // checkNVMatrixNan(max, _name); input.addVector(max, -1, getActs()); // checkNVMatrixNan(getActs(), _name); NVMatrix sumExp(getActs(), true); sumExp.apply(NVMatrixOps::Exp_a()); assert(sumExp.isContiguous()); NVMatrix& sum = sumExp.sum(1); sum.apply(NVMatrixOps::Log()); // checkNVMatrixNan(sum, _name); getActs().addVector(sum, -1); // checkNVMatrixNan(getActs(), _name); delete &max; delete &sum; } void LogSoftmaxLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); bool doLogregGrad = _next.size() == 1 && _next[0]->getType() == "cost.reg"; if (doLogregGrad) { //printf("LogSoftmaxLayer::bpropActs doLogregGrad\n"); NVMatrix& labels = _next[0]->getPrev()[0]->getActs(); float gradCoeff = dynamic_cast<CostLayer*>(_next[0])->getCoeff(); computeLogregSoftmaxGrad(labels, getActs(), _prev[0]->getActsGrad(), scaleTargets == 1, gradCoeff, true); } else { // to do computeSoftmaxGrad(getActs(), v, _prev[0]->getActsGrad(), scaleTargets == 1); } checkNVMatrixNan(_prev[0]->getActsGrad(), _name); } /* * ======================= * EltwiseSumLayer * ======================= */ EltwiseSumLayer::EltwiseSumLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _coeffs = pyDictGetFloatV(paramsDict, "coeffs"); } void EltwiseSumLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { if (scaleTargets == 0) { _inputs[inpIdx]->scale(_coeffs->at(inpIdx), getActs()); // printf("EltwiseSumLayer::fpropActs scaleTargets==0 coeff:%f prev layer name:%s\n", // _coeffs->at(inpIdx),_prev[inpIdx]->getName().c_str()); // getActs().print(getActs().getNumRows(),2); } else { getActs().add(*_inputs[inpIdx], _coeffs->at(inpIdx)); // printf("EltwiseSumLayer::fpropActs scaleTargets!=0 coeff:%f prev layer name:%s\n", // _coeffs->at(inpIdx),_prev[inpIdx]->getName().c_str()); // printf("inputs\n"); // _inputs[inpIdx]->print(_inputs[inpIdx]->getNumRows(),2); // printf("getActs()\n"); // getActs().print(getActs().getNumRows(),2); // exit(0); } } void EltwiseSumLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { if 
(scaleTargets == 0) { v.scale(_coeffs->at(inpIdx), _prev[inpIdx]->getActsGrad()); } else { assert(&_prev[inpIdx]->getActsGrad() != &v); _prev[inpIdx]->getActsGrad().add(v, scaleTargets, _coeffs->at(inpIdx)); } //printf("EltwiseSumLayer::bpropActs %s. _prev[inpIdx]->getActsGrad() rows,cols:%d %d inpIdx:%d coeff:%.3f\n", // _name.c_str(),_prev[inpIdx]->getActsGrad().getNumRows(), // _prev[inpIdx]->getActsGrad().getNumCols(),inpIdx,_coeffs->at(inpIdx)); } /* * ======================= * EltwiseMaxLayer * ======================= */ EltwiseMaxLayer::EltwiseMaxLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { } void EltwiseMaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { if (inpIdx == 1) { // First input, do nothing _inputs[inpIdx]->applyBinary(NVMatrixAggs::Max(), *_inputs[0], getActs()); } else if (inpIdx > 1) { getActs().applyBinary(NVMatrixAggs::Max(), *_inputs[inpIdx]); } } void EltwiseMaxLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { computeEltwiseMaxGrad(v, *_inputs[inpIdx], getActs(), _prev[inpIdx]->getActsGrad(), scaleTargets != 0); } /* * ======================= * DataLayer * ======================= */ DataLayer::DataLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _dataIdx = pyDictGetInt(paramsDict, "dataIdx"); } void DataLayer::fprop(PASS_TYPE passType) { throw string("No dava given!"); } void DataLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { } void DataLayer::fprop(NVMatrixV& data, PASS_TYPE passType) { _outputs = data[_dataIdx]; checkNVMatrixNan(*_outputs, _name); #ifdef MULTIGPU NVMatrix::checkCUDAError(cudaEventRecord(_fpropEvent)); #endif fpropNext(passType); } bool DataLayer::isGradProducer() { return false; } /* * ===================== * PoolLayer * ===================== */ PoolLayer::PoolLayer(ConvNet* convNet, PyObject* paramsDict, bool trans) : Layer(convNet, paramsDict, trans) { _channels = pyDictGetInt(paramsDict, "channels"); _sizeX = pyDictGetInt(paramsDict, "sizeX"); _start = pyDictGetInt(paramsDict, "start"); _stride = pyDictGetInt(paramsDict, "stride"); _outputsX = pyDictGetInt(paramsDict, "outputsX"); _imgSize = pyDictGetInt(paramsDict, "imgSize"); _pool = pyDictGetString(paramsDict, "pool"); } PoolLayer& PoolLayer::makePoolLayer(ConvNet* convNet, PyObject* paramsDict) { string _pool = pyDictGetString(paramsDict, "pool"); if (_pool == "max") { return *new MaxPoolLayer(convNet, paramsDict); } else if (_pool == "avg") { return *new AvgPoolLayer(convNet, paramsDict); } throw string("Unknown pooling layer type ") + _pool; } /* * ===================== * AvgPoolLayer * ===================== */ AvgPoolLayer::AvgPoolLayer(ConvNet* convNet, PyObject* paramsDict) : PoolLayer(convNet, paramsDict, false) { } void AvgPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, AvgPooler()); } void AvgPoolLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convLocalAvgUndo(v, _prev[0]->getActsGrad(), _sizeX, _start, _stride, _outputsX, _imgSize, scaleTargets, 1); } /* * ===================== * MaxPoolLayer * ===================== */ MaxPoolLayer::MaxPoolLayer(ConvNet* convNet, PyObject* paramsDict) : PoolLayer(convNet, paramsDict, false) { } void MaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, 
_outputsX, MaxPooler()); checkNVMatrixNan(getActs(), _name); } void MaxPoolLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convLocalMaxUndo(_prev[0]->getActs(), v, getActs(), _prev[inpIdx]->getActsGrad(), _sizeX, _start, _stride, _outputsX, scaleTargets, 1); checkNVMatrixNan(_prev[inpIdx]->getActsGrad(), _name); } /* * ===================== * NailbedLayer * ===================== */ NailbedLayer::NailbedLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _channels = pyDictGetInt(paramsDict, "channels"); _start = pyDictGetInt(paramsDict, "start"); _stride = pyDictGetInt(paramsDict, "stride"); _outputsX = pyDictGetInt(paramsDict, "outputsX"); _imgSize = pyDictGetInt(paramsDict, "imgSize"); } void NailbedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convBedOfNails(*_inputs[0], getActs(), _channels, _imgSize, _start, _stride, 0, 1); } void NailbedLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convBedOfNailsUndo(v, _prev[0]->getActsGrad(), _channels, _imgSize, _start, _stride, scaleTargets, 1); } /* * ===================== * GaussianBlurLayer * ===================== */ GaussianBlurLayer::GaussianBlurLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _channels = pyDictGetInt(paramsDict, "channels"); _hFilter = pyDictGetMatrix(paramsDict, "filter"); } void GaussianBlurLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convGaussianBlur(*_inputs[0], _filter, getActs(), true, _channels, 0, 1); convGaussianBlur(getActs(), _filter, getActs(), false, _channels, 0, 1); } // This is here just for completeness' sake. Why would you backpropagate // through a blur filter? void GaussianBlurLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& tgt1 = _prev[0]->getRcvdBInputs() > 0 ? 
_actGradsTmp : _prev[0]->getActsGrad(); convGaussianBlur(v, _filter, tgt1, true, _channels, 0, 1); convGaussianBlur(tgt1, _filter, _prev[0]->getActsGrad(), false, _channels, scaleTargets, 1); } void GaussianBlurLayer::copyToGPU() { Layer::copyToGPU(); _filter.copyFromHost(*_hFilter, true); } /* * ===================== * ResizeLayer * ===================== */ ResizeLayer::ResizeLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _channels = pyDictGetInt(paramsDict, "channels"); _imgSize = pyDictGetInt(paramsDict, "imgSize"); _tgtSize = pyDictGetInt(paramsDict, "tgtSize"); _scale = pyDictGetFloat(paramsDict, "scale"); } void ResizeLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _scale); } // Can't do this void ResizeLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * RGBToYUVLayer * ===================== */ RGBToYUVLayer::RGBToYUVLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { } void RGBToYUVLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convRGBToYUV(*_inputs[0], getActs()); } // Can't do this void RGBToYUVLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * RGBToLABLayer * ===================== */ RGBToLABLayer::RGBToLABLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _center = pyDictGetInt(paramsDict, "center"); } void RGBToLABLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convRGBToLAB(*_inputs[0], getActs(), _center); } // Can't do this void RGBToLABLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * ResponseNormLayer * ===================== */ ResponseNormLayer::ResponseNormLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _channels = pyDictGetInt(paramsDict, "channels"); _size = pyDictGetInt(paramsDict, "size"); _bias = pyDictGetFloat(paramsDict, "bias"); _scale = pyDictGetFloat(paramsDict, "scale"); _pow = pyDictGetFloat(paramsDict, "pow"); printf("(bias,scale,pow)=(%f,%f,%f)\n", _bias, _scale, _pow); } void ResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNorm(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow); } void ResponseNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNormUndo(v, _denoms, _prev[0]->getActs(), getActs(), _prev[0]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1); } void ResponseNormLayer::truncBwdActs() { Layer::truncBwdActs(); if (_conserveMem) { _denoms.truncate(); } } /* * ===================== * CrossMapResponseNormLayer * ===================== */ CrossMapResponseNormLayer::CrossMapResponseNormLayer(ConvNet* convNet, PyObject* paramsDict) : ResponseNormLayer(convNet, paramsDict) { _blocked = pyDictGetInt(paramsDict, "blocked"); } void CrossMapResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNormCrossMap(*_inputs[0], _denoms, getActs(), _channels, _size, _bias, _scale, _pow, _blocked); checkNVMatrixNan(getActs(), _name); } void CrossMapResponseNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNormCrossMapUndo(v, _denoms, _prev[0]->getActs(), 
getActs(), _prev[0]->getActsGrad(), _channels, _size, _bias, _scale, _pow, _blocked, scaleTargets, 1); checkNVMatrixNan(_prev[0]->getActsGrad(), _name); } /* * ===================== * ContrastNormLayer * ===================== */ ContrastNormLayer::ContrastNormLayer(ConvNet* convNet, PyObject* paramsDict) : ResponseNormLayer(convNet, paramsDict) { _imgSize = pyDictGetInt(paramsDict, "imgSize"); } void ContrastNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& images = *_inputs[0]; convLocalPool(images, _meanDiffs, _channels, _size, -_size / 2, 1, _imgSize, AvgPooler()); _meanDiffs.add(images, -1, 1); convContrastNorm(images, _meanDiffs, _denoms, getActs(), _channels, _size, _scale, _pow); } void ContrastNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convContrastNormUndo(v, _denoms, _meanDiffs, getActs(), _prev[inpIdx]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1); } void ContrastNormLayer::truncBwdActs() { ResponseNormLayer::truncBwdActs(); if (_conserveMem) { _meanDiffs.truncate(); } } /* * ===================== * CostLayer * ===================== */ CostLayer::CostLayer(ConvNet* convNet, PyObject* paramsDict, bool trans) : Layer(convNet, paramsDict, trans) { _coeff = pyDictGetFloat(paramsDict, "coeff"); } float CostLayer::getCoeff() { return _coeff; } void CostLayer::setCoeff(float coeff) { assert(coeff > 0); _coeff = coeff; } void CostLayer::scaleCoeff(float scale) { assert(scale > 0); float oldCoeff = _coeff; _coeff *= scale; printf( "===========Cost Layer %s. Coeffient is scaled by %3.2f. %6.5f->%6.5f=====\n", _name.c_str(), scale, oldCoeff, _coeff); } void CostLayer::bprop(PASS_TYPE passType) { if (_coeff != 0) { Layer::bprop(passType); } } bool CostLayer::isGradProducer() { return _coeff != 0; } doublev& CostLayer::getCost() { doublev& v = *new doublev(); v.insert(v.begin(), _costv.begin(), _costv.end()); return v; } CostLayer& CostLayer::makeCostLayer(ConvNet* convNet, string& type, PyObject* paramsDict) { if (type == "cost.logreg") { return *new LogregCostLayer(convNet, paramsDict); } else if (type == "cost.sum2") { return *new SumOfSquaresCostLayer(convNet, paramsDict); } else if (type == "cost.reg") { return *new RegCostLayer(convNet, paramsDict); } else if (type == "cost.sumsquaresdiff") { return *new SumOfSquaresOfDiffCostLayer(convNet, paramsDict); } else if (type == "cost.logsumsquaresdiff") { return *new LogSumOfSquaresOfDiffCostLayer(convNet, paramsDict); } else if (type == "cost.color_enhance") { return *new ColorEnhanceCostLayer(convNet, paramsDict); } else if (type == "cost.color_enhance_separate") { return *new ColorEnhanceSeparateCostLayer(convNet, paramsDict); } else if (type == "cost.gradmag_enhance") { return *new GradMagEnhanceCostLayer(convNet, paramsDict); } throw string("Unknown cost layer type ") + type; } /* * ===================== * LogregCostLayer * ===================== */ LogregCostLayer::LogregCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) { } void LogregCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { // This layer uses its two inputs together if (inpIdx == 0) { NVMatrix& labels = *_inputs[0]; NVMatrix& probs = *_inputs[1]; int numCases = labels.getNumElements(); //int numOut=probs.getNumRows(); //assert(numCases==probs.getNumCols()); //printf("numCases:%d numOut:%d\n",numCases,numOut); //Matrix *hLabels=new Matrix(); //labels.copyToHost(*hLabels,true); //float maxLabel=-1; //for(int 
i=0;i<numCases;++i){ // if(maxLabel<hLabels->getCell(0,i)) // maxLabel=hLabels->getCell(0,i); //} //if(maxLabel>=999) // printf("----------Error: maxlabel>999-----------\n"); //Matrix *hProbMat=new Matrix(); //probs.copyToHost(*hProbMat,true); //float probmin=hProbMat->min(); //float probmax=hProbMat->max(); //if(probmin<=0) // printf("probmin:%10.9f\n",probmin); //for(int i=0;i<numCases;++i){ // for(int j=0;j<numOut;++j){ // if(hProbMat->getCell(j,i)<0){ // printf("enter cs 4\n"); // printf("find non-positive prob: %6.5f\n (i,j)=(%d,%d)\n", // hProbMat->getCell(j,i),i,j); // } // } //} NVMatrix& trueLabelLogProbs = getActs(), correctProbs; computeLogregCost(labels, probs, trueLabelLogProbs, correctProbs); _costv.clear(); _costv.push_back(-trueLabelLogProbs.sum()); _costv.push_back(numCases - correctProbs.sum()); //printf("\nLogregCostLayer::fpropActs: logreg cost : %6.5f testError:%6.5f\n", // -trueLabelLogProbs.sum(),(numCases - correctProbs.sum())); checkNVMatrixNan(trueLabelLogProbs, _name); } } void LogregCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 1); NVMatrix& labels = _prev[0]->getActs(); NVMatrix& probs = _prev[1]->getActs(); NVMatrix& target = _prev[1]->getActsGrad(); // Numerical stability optimization: if the layer below me is a softmax layer, let it handle // the entire gradient computation to avoid multiplying and dividing by a near-zero quantity. bool doWork = _prev[1]->getNext().size() > 1 || _prev[1]->getType() != "softmax"; if (doWork) { computeLogregGrad(labels, probs, target, scaleTargets == 1, _coeff); } } /* * ===================== * RegCostLayer * ===================== */ RegCostLayer::RegCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) { } void RegCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { // This layer uses its two inputs together if (inpIdx == 0) { NVMatrix& labels = *_inputs[0]; NVMatrix& logprobs = *_inputs[1]; // NVMatrix& probs = *_inputs[1]; int numCases = labels.getNumElements(); checkNVMatrixNan(logprobs, _name); NVMatrix& trueLabelLogProbs = getActs(), correctProbs; computeLogregCost(labels, logprobs, trueLabelLogProbs, correctProbs, true); _costv.clear(); _costv.push_back(-trueLabelLogProbs.sum()); _costv.push_back(numCases - correctProbs.sum()); //printf("\nLogregCostLayer::fpropActs: logreg cost : %6.5f testError:%6.5f\n", // -trueLabelLogProbs.sum(),(numCases - correctProbs.sum())); checkNVMatrixNan(trueLabelLogProbs, _name); } } void RegCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 1); NVMatrix& labels = _prev[0]->getActs(); NVMatrix& probs = _prev[1]->getActs(); NVMatrix& target = _prev[1]->getActsGrad(); // Numerical stability optimization: if the layer below me is a softmax layer, let it handle // the entire gradient computation to avoid multiplying and dividing by a near-zero quantity. 
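    // Added note (not in the original source): with softmax probabilities
    //   p_k = exp(z_k) / sum_j exp(z_j)   and log-loss  L = -log p_y,
    // the chain rule collapses to dL/dz_k = p_k - 1{k == y}. Computing it in two stages would
    // first form dL/dp_y = -1/p_y, which blows up when p_y is near zero; letting the preceding
    // softmax/logsoftmax layer fuse both steps in its bpropActs avoids that division.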
bool doWork = _prev[1]->getNext().size() > 1 || _prev[1]->getType() != "logsoftmax"; if (doWork) { printf("RegCostLayer::bpropActs doWork\n"); // to do computeLogregGrad(labels, probs, target, scaleTargets == 1, _coeff); } } /* * ===================== * SumOfSquaresCostLayer * ===================== */ SumOfSquaresCostLayer::SumOfSquaresCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) { } void SumOfSquaresCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { int numCases = _inputs[0]->getNumCols(); _inputs[0]->apply(NVMatrixOps::Square(), getActs()); _costv.clear(); // will be divided by numCases in Python _costv.push_back(0.5 * getActs().sum()); // _costv.push_back(0.5 * getActs().sum() / numCases); } void SumOfSquaresCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { _prev[inpIdx]->getActsGrad().add(*_inputs[0], scaleTargets, -_coeff); } SumOfSquaresOfDiffCostLayer::SumOfSquaresOfDiffCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) { _relativeDiff = pyDictGetInt(paramsDict, "relativeDiff"); printf("SumOfSquaresOfDiffCostLayer %s init. _relativeDiff : %d\n", _name.c_str(), _relativeDiff); } void SumOfSquaresOfDiffCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { // This layer uses two inputs if (inpIdx == 0) { NVMatrix& labels = *_inputs[0]; // dim * numImg. _trans=0 NVMatrix& predLabels = *_inputs[1]; // dim * numImg. _trans=0 predLabels.subtract(labels, getActs()); printf("SumOfSquaresOfDiffCostLayer::fpropActs\n"); labels.print(labels.getNumRows(), 2); printf("\n"); predLabels.print(predLabels.getNumRows(), 2); printf("\n"); getActs().print(getActs().getNumRows(), 2); printf("\n"); exit(0); if (_relativeDiff) { getActs().eltwiseDivide(labels); } // printf("SumOfSquaresOfDiffCostLayer::fpropActs print labels\n"); // labels.print(labels.getFollowingDim(),10); // printf("SumOfSquaresOfDiffCostLayer::fpropActs print predLabels\n"); // predLabels.print(predLabels.getFollowingDim(),10); // printf("SumOfSquaresOfDiffCostLayer::fpropActs print getActs()\n"); // getActs().print(labels.getFollowingDim(),10); int numCases = labels.getLeadingDim(); _costv.clear(); _costv.push_back(0.5 * getActs().norm2()); // printf("averaged cost:%f\n",0.5 * getActs().norm2()/numCases); } } void SumOfSquaresOfDiffCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 1); NVMatrix& labels = *_inputs[0]; // dim * numImg. _trans=0 NVMatrix& predLabels = *_inputs[1]; // dim * numImg. _trans=0 NVMatrix diffLabels; //printf("SumOfSquaresOfDiffCostLayer::bpropActs\n"); // printf("print labels\n"); // labels.print(labels.getFollowingDim(),labels.getLeadingDim()); // printf("print predLabels\n"); // predLabels.print(predLabels.getFollowingDim(),predLabels.getLeadingDim()); // exit(0); //printf("scaleTargets: %f\n",scaleTargets); if (!_relativeDiff) _prev[inpIdx]->getActsGrad().add(getActs(), scaleTargets, -_coeff); else { NVMatrix& absDiff = getActs().copy(); absDiff.eltwiseDivide(labels); _prev[inpIdx]->getActsGrad().add(absDiff, scaleTargets, -_coeff); } } LogSumOfSquaresOfDiffCostLayer::LogSumOfSquaresOfDiffCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) { _scale = pyDictGetFloat(paramsDict, "scale"); printf("layer :%s. 
coeff:%f scale:%f\n", _name.c_str(), _coeff, _scale); } // to be verified void LogSumOfSquaresOfDiffCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { // This layer uses two inputs if (inpIdx == 0) { NVMatrix& labels = *_inputs[0]; // dim * numImg. _trans=0 NVMatrix& predLabels = *_inputs[1]; // dim * numImg. _trans=0 int numCases = labels.getLeadingDim(); NVMatrix squaredDiff; labels.applyBinary(NVMatrixBinaryOps::SquaredDiff(), predLabels, squaredDiff); squaredDiff.sum(0, getActs()); getActs().apply(NVMatrixOps::MultByScalar(0.5)); printf("LogSumOfSquaresOfDiffCostLayer size: %d %d\n", getActs().getLeadingDim(), getActs().getFollowingDim()); NVMatrix costs; getActs().apply(NVMatrixOps::MultByScalar(_scale), costs); costs.apply(NVMatrixOps::Logistic()); costs.apply(NVMatrixOps::AddScalar(-0.5)); _costv.clear(); _costv.push_back(costs.sum() / numCases); } } void LogSumOfSquaresOfDiffCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 1); NVMatrix& labels = *_inputs[0]; // dim * numImg. _trans=0 NVMatrix& predLabels = *_inputs[1]; // dim * numImg. _trans=0 NVMatrix diffLabels; predLabels.subtract(labels, diffLabels); NVMatrix a, b; getActs().apply(NVMatrixOps::WeightedAddScalar(_scale, 0.5 * _scale), a); getActs().apply(NVMatrixOps::WeightedAddScalar(-1, 0.5), b); a.applyBinary(NVMatrixBinaryOps::Multiply(), b); diffLabels.eltwiseMultByVector(a); printf("print out diffLabels\n"); diffLabels.print(diffLabels.getFollowingDim(), diffLabels.getLeadingDim()); _prev[inpIdx]->getActsGrad().add(diffLabels, scaleTargets, -_coeff); } CroppingLayer::CroppingLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _channels = pyDictGetInt(paramsDict, "channels"); _start = pyDictGetInt(paramsDict, "start"); _end = pyDictGetInt(paramsDict, "end"); } void CroppingLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { // printf("CroppingLayer %s: fpropActs\n",_name.c_str()); cropping(*_inputs[0], getActs(), _channels, _start, _end); } void CroppingLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { // printf("CroppingLayer %s: bpropActs\n",_name.c_str()); croppingUndo(_prev[0]->getActs(), v, _prev[0]->getActsGrad(), _start, _end, scaleTargets, 1); } ColorEnhanceCostLayer::ColorEnhanceCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) { } /* * input 0: prediction of 10/30 D transform for L-channel. 
(ch*10,n) * input 1: original 10D pixel basis (segment_random_sample_num*10,n) * input 2: groundtruth color (segment_random_sample_num*ch,n) * */ void ColorEnhanceCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { // this layer uses 3 inputs if (inpIdx == 0) { NVMatrix& predMapping = *_inputs[0]; //shape: (ch*10,num_imgs) //NVMatrix& gtMapping = *_inputs[1]; // shape: (ch*10, num_imgs) NVMatrix& colorBasis = *_inputs[1]; // shape: (segment_random_sample_num*10,num_imgs) NVMatrix& gtColor = *_inputs[2]; // shape: (segment_random_sample_num*ch,num_imgs) int basis_dim = 10; int ch = predMapping.getNumRows() / basis_dim; int segment_random_sample_num = gtColor.getNumRows() / ch; if (verbose) printf("basis_dim %d segment_random_sample_num %d ch:%d\n", basis_dim, segment_random_sample_num, ch); //assert(predMapping.getNumCols()==gtMapping.getNumCols()); //assert(predMapping.getNumRows() == gtMapping.getNumRows()); assert(colorBasis.getNumCols()==predMapping.getNumCols()); assert(gtColor.getNumCols()==predMapping.getNumCols()); assert(basis_dim*segment_random_sample_num==colorBasis.getNumRows()); int costOption = 1; if (costOption == 0) { // measure difference between predicted mapping and estimated mapping from segment // TO DO: update code //NVMatrix diffMapping; //predMapping.subtract(gtMapping, diffMapping); //diffMapping.eltwiseMult(colorBasis); //diffMapping.sum(0, getActs()); //NVMatrix dotProd, predColor, diffColor; //predMapping.eltwiseMult(colorBasis, dotProd); //dotProd.sum(0, predColor); //predColor.subtract(gtColor, diffColor); //_costv.clear(); //_costv.push_back(0.5 * getActs().norm2()); //_costv.push_back(0.5 * diffColor.norm2()); } else { NVMatrix *dotProd = new NVMatrix(); NVMatrix *predColor = new NVMatrix(gtColor); predColor->setZero(); for (int i = 0; i < segment_random_sample_num; ++i) { NVMatrix& l_color_basis = colorBasis.sliceRows(i * basis_dim, (i + 1) * basis_dim); for (int j = 0; j < ch; ++j) { NVMatrix& l_pred_color = predColor->sliceRows(ch * i + j, ch * i + j + 1); NVMatrix& l_pred_mapping = predMapping.sliceRows( j * basis_dim, (j + 1) * basis_dim); l_pred_mapping.eltwiseMult(l_color_basis, *dotProd); dotProd->sum(0, l_pred_color); delete &l_pred_color; delete &l_pred_mapping; } delete &l_color_basis; } predColor->subtract(gtColor, getActs()); if (verbose) printf( "ColorEnhanceCostLayer::fpropActs gtColor rows:%d cols:%d\n", predColor->getNumRows(), predColor->getNumCols()); _costv.clear(); _costv.push_back( 0.5 * getActs().norm2() / segment_random_sample_num); delete predColor; delete dotProd; } } } void ColorEnhanceCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); NVMatrix& predMapping = *_inputs[0]; //shape: (30,num_imgs) //NVMatrix& gtMapping = *_inputs[1]; // shape: (30, num_imgs) NVMatrix& colorBasis = *_inputs[1]; // shape: (segment_random_sample_num*10,num_imgs) NVMatrix& gtColor = *_inputs[2]; // shape: (segment_random_sample_num*3,num_imgs) // NVMatrix& predMapping = *_inputs[0]; // NVMatrix& gtMapping = *_inputs[1]; // NVMatrix& colorBasis = *_inputs[2]; // NVMatrix& gtColor = *_inputs[3]; // shape: (segment_random_sample_num,num_imgs) int basis_dim = 10; int ch = predMapping.getNumRows() / basis_dim; int segment_random_sample_num = gtColor.getNumRows() / ch; int num_imgs = predMapping.getNumCols(); NVMatrix *grad1 = new NVMatrix(basis_dim * ch, num_imgs, false); grad1->setZero(); NVMatrix *l_grad = new NVMatrix(); for (int i = 0; i < segment_random_sample_num; ++i) { 
NVMatrix& l_color_basis = colorBasis.sliceRows(i * basis_dim, (i + 1) * basis_dim); for (int j = 0; j < ch; ++j) { NVMatrix& l_grad1 = grad1->sliceRows(j * basis_dim, (j + 1) * basis_dim); NVMatrix& l_color_diff = getActs().sliceRows(ch * i + j, ch * i + j + 1); l_color_basis.eltwiseMultByVector(l_color_diff, *l_grad); l_grad1.add(*l_grad, 1); delete &l_grad1; delete &l_color_diff; } if (verbose) printf("l_grad rows:%d cols:%d l_grad min,max:%f %f\n", l_grad->getNumRows(), l_grad->getNumCols(), l_grad->min(), l_grad->max()); delete &l_color_basis; } float scale = 1.0f / (float) segment_random_sample_num; grad1->scale(scale); if (verbose) { printf("grad1 rows,cols:%d %d min,max:%f %f\n", grad1->getNumRows(), grad1->getNumCols(), grad1->min(), grad1->max()); printf("basis_dim %d segment_random_sample_num:%d scale:%f\n", basis_dim, segment_random_sample_num, scale); } _prev[inpIdx]->getActsGrad().add(*grad1, scaleTargets, -_coeff); delete l_grad; delete grad1; } ColorEnhanceSeparateCostLayer::ColorEnhanceSeparateCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) { } /* * input 0: predicted 10D transform for L channel. (10,n) * input 1: predicted 10D transform for a channel. (10,n) * input 2: predicted 10D transform for b channel. (10,n) * input 3: groundtruth of 10D transform. (3*10,n) * input 4: original 10D pixel basis (segment_random_sample_num*10,n) * input 5: groundtruth color (segment_random_sample_num*3,n) * */ void ColorEnhanceSeparateCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { // this layer uses 6 inputs if (inpIdx == 0) { NVMatrix& predMapping_L = *_inputs[0]; //shape: (10,num_imgs) NVMatrix& predMapping_a = *_inputs[1]; //shape: (10,num_imgs) NVMatrix& predMapping_b = *_inputs[2]; //shape: (10,num_imgs) NVMatrix& gtMapping = *_inputs[3]; // shape: (3*10, num_imgs) NVMatrix& colorBasis = *_inputs[4]; // shape: (segment_random_sample_num*10,num_imgs) NVMatrix& gtColor = *_inputs[5]; // shape: (segment_random_sample_num*3,num_imgs) assert(predMapping_L.getNumRows()==predMapping_a.getNumRows()); assert(predMapping_L.getNumRows()==predMapping_b.getNumRows()); assert(predMapping_L.getNumCols()==predMapping_a.getNumCols()); assert(predMapping_L.getNumCols()==predMapping_b.getNumCols()); assert(predMapping_L.getNumCols()==gtMapping.getNumCols()); assert(predMapping_L.getNumCols()==colorBasis.getNumCols()); assert(predMapping_L.getNumCols()==gtColor.getNumCols()); assert(gtMapping.getNumRows()==30); assert(gtColor.getNumRows()%3==0); int basis_dim = 10; int segment_random_sample_num = colorBasis.getNumRows() / basis_dim; assert(gtColor.getNumRows()==(segment_random_sample_num*3)); if (verbose) printf("basis_dim %d segment_random_sample_num %d\n", basis_dim, segment_random_sample_num); NVMatrix dotProd; NVMatrix predColor(gtColor); for (int i = 0; i < segment_random_sample_num; ++i) { NVMatrix& l_color_basis = colorBasis.sliceRows(i * basis_dim, (i + 1) * basis_dim); NVMatrix& l_pred_L = predColor.sliceRows(3 * i, 3 * i + 1); NVMatrix& l_pred_a = predColor.sliceRows(3 * i + 1, 3 * i + 2); NVMatrix& l_pred_b = predColor.sliceRows(3 * i + 2, 3 * i + 3); predMapping_L.eltwiseMult(l_color_basis, dotProd); dotProd.sum(0, l_pred_L); predMapping_a.eltwiseMult(l_color_basis, dotProd); dotProd.sum(0, l_pred_a); predMapping_b.eltwiseMult(l_color_basis, dotProd); dotProd.sum(0, l_pred_b); delete &l_color_basis; delete &l_pred_L; delete &l_pred_a; delete &l_pred_b; } predColor.subtract(gtColor, getActs()); _costv.clear(); 
_costv.push_back(0.5 * getActs().norm2() / segment_random_sample_num); } } void ColorEnhanceSeparateCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0 || inpIdx == 1 || inpIdx ==2); // printf("inpIdx: %d\n",inpIdx); NVMatrix& predMapping = *_inputs[inpIdx]; //shape: (10,num_imgs) NVMatrix& gtMapping = *_inputs[3]; // shape: (3*10, num_imgs) NVMatrix& colorBasis = *_inputs[4]; // shape: (segment_random_sample_num*10,num_imgs) NVMatrix& gtColor = *_inputs[5]; // shape: (segment_random_sample_num*3,num_imgs) int basis_dim = 10; int segment_random_sample_num = colorBasis.getNumRows() / basis_dim; assert(gtColor.getNumRows()==(segment_random_sample_num*3)); int num_imgs = predMapping.getNumCols(); NVMatrix grad1, grad2; grad1.resize(basis_dim, num_imgs); grad1.setZero(); grad2.resize(basis_dim, num_imgs); for (int i = 0; i < segment_random_sample_num; ++i) { NVMatrix l_grad; NVMatrix& l_color_basis = colorBasis.sliceRows(i * basis_dim, (i + 1) * basis_dim); NVMatrix& l_color_diff = getActs().sliceRows(3 * i + inpIdx, 3 * i + inpIdx + 1); l_color_basis.eltwiseMultByVector(l_color_diff, l_grad); l_grad.add(grad1, 1, 1, grad2); grad2.copy(grad1); delete &l_color_basis; delete &l_color_diff; } grad1.scale(1.0 / (float) segment_random_sample_num); _prev[inpIdx]->getActsGrad().add(grad1, scaleTargets, -_coeff); } ConcatenateLayer::ConcatenateLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { } void ConcatenateLayer::fpropPreCommon(NVMatrixV& v, PASS_TYPE passType) { assert(v.size()>0); int outputs = 0; int num_imgs = v[0]->getNumCols(); for (int i = 0; i < v.size(); ++i) { assert(num_imgs==v[i]->getNumCols()); outputs += v[i]->getNumRows(); } if (verbose) printf("ConcatenateLayer::fpropPreCommon outputs:%d\n", outputs); getActs().resize(outputs, num_imgs); _outputs_p = 0; } void ConcatenateLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { int input_dim = _inputs[inpIdx]->getNumRows(); // printf("ConcatenateLayer::fpropActs inpIdx:%d input_dim:%d\n", // inpIdx,input_dim); NVMatrix& dest = getActs().sliceRows(_outputs_p, _outputs_p + input_dim); _inputs[inpIdx]->copy(dest); _outputs_p += input_dim; delete &dest; } void ConcatenateLayer::bpropPreCommon(NVMatrix& v, PASS_TYPE passType) { _outputs_p = 0; } void ConcatenateLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { int input_dim = _inputs[inpIdx]->getNumRows(); // printf("ConcatenateLayer::bpropActs inpIdx:%d input_dim:%d\n", // inpIdx,input_dim); NVMatrix& l_grad = v.sliceRows(_outputs_p, _outputs_p + input_dim); l_grad.copy(_prev[inpIdx]->getActsGrad()); _outputs_p += input_dim; delete &l_grad; } GradMagEnhanceCostLayer::GradMagEnhanceCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) { } /* * input 0: prediction of 2 D transform for gradient magnitude. 
(2,n) * input 1: pixel L channel in input image (1,n) * input 2: pixel L channel gradient magnitude in input image (1,n) * input 3: pixel L channel gradient magnitude in enhanced image (1,n) * */ void GradMagEnhanceCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { if (inpIdx == 0) { NVMatrix& predMapping = *_inputs[0]; //shape: (2,num_imgs) NVMatrix& inL = *_inputs[1]; // shape: (1,num_imgs) NVMatrix& inGradMag = *_inputs[2]; // shape: (1,num_imgs) NVMatrix& enhGradMag = *_inputs[3]; // shape: (1,num_imgs) assert(predMapping.getNumRows()==2); assert(inL.getNumRows()==1); assert(inGradMag.getNumRows()==1); assert(enhGradMag.getNumRows()==1); // printf("GradMagEnhanceCostLayer::fpropActs\n"); // log(enhGradMag/inGradMag)=a*logInL+b // cost function: 0.5 * (squared difference in enhGradMag) // printf("inL min:%f \n",inL.min()); inL.apply(NVMatrixOps::Log(), logInL); NVMatrix predLogRatio; NVMatrix& coeff_a = predMapping.sliceRows(0, 1); NVMatrix& coeff_b = predMapping.sliceRows(1, 2); // printf("coeff_a mean %f \n", coeff_a.mean()); // printf("coeff_b mean %f \n", coeff_b.mean()); logInL.eltwiseMult(coeff_a, predLogRatio); predLogRatio.applyBinary(NVMatrixBinaryOps::Add(), coeff_b); predLogRatio.apply(NVMatrixOps::Exp(), predEnhGradMag); // printf("inGradMag min:%f max:%f\n",inGradMag.min(),inGradMag.max()); predEnhGradMag.applyBinary(NVMatrixBinaryOps::Multiply(), inGradMag); // printf("enhGradMag min:%f max:%f\n",enhGradMag.min(),enhGradMag.max()); predEnhGradMag.subtract(enhGradMag, getActs()); _costv.clear(); _costv.push_back(0.5 * getActs().norm2()); delete &coeff_a; delete &coeff_b; // printf("exit GradMagEnhanceCostLayer::fpropActs\n"); } } void GradMagEnhanceCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { // printf("GradMagEnhanceCostLayer::bpropActs tag1\n"); assert(inpIdx==0); NVMatrix& predMapping = *_inputs[0]; //shape: (2,num_imgs) NVMatrix& inL = *_inputs[1]; // shape: (1,num_imgs) NVMatrix& inGradMag = *_inputs[2]; // shape: (1,num_imgs) NVMatrix& enhGradMag = *_inputs[3]; // shape: (1,num_imgs) int num_imgs=predMapping.getNumCols(); //if(_prev[inpIdx]->getActsGrad().getNumRows()==0){ _prev[inpIdx]->getActsGrad().resize(2,num_imgs); _prev[inpIdx]->getActsGrad().setZero(); //} NVMatrix& prev_grad_coeff_a = _prev[inpIdx]->getActsGrad().sliceRows(0,1); NVMatrix& prev_grad_coeff_b = _prev[inpIdx]->getActsGrad().sliceRows(1,2); // printf("GradMagEnhanceCostLayer::bpropActs tag2\n"); NVMatrix grad_coeff_a,grad_coeff_b; getActs().eltwiseMult(predEnhGradMag,grad_coeff_b); grad_coeff_b.eltwiseMult(logInL,grad_coeff_a); // // printf("grad_coeff_a rows,cols:%d %d\n",grad_coeff_a.getNumRows(), // grad_coeff_a.getNumCols()); // printf("grad_coeff_b rows,cols:%d %d\n",grad_coeff_b.getNumRows(), // grad_coeff_b.getNumCols()); prev_grad_coeff_a.add(grad_coeff_a,scaleTargets,-_coeff); prev_grad_coeff_b.add(grad_coeff_b,scaleTargets,-_coeff); delete &prev_grad_coeff_a; delete &prev_grad_coeff_b; // printf("GradMagEnhanceCostLayer::bpropActs tag3\n"); }
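For reference, the mapping that GradMagEnhanceCostLayer fits per pixel is log(enhGradMag / inGradMag) = a * log(inL) + b, so predEnhGradMag = inGradMag * exp(a * logInL + b) and the cost is 0.5 * (predEnhGradMag - enhGradMag)^2. A minimal host-side sketch of the per-element gradients that bpropActs accumulates (the function and struct names below are illustrative, not from the source):

// Host-side sketch of the per-pixel model used by GradMagEnhanceCostLayer.
// Names here (gradMagCostGrads, GradMagGrads) are illustrative only.
#include <cmath>

struct GradMagGrads { float dCost_da; float dCost_db; };

// Model: log(enhGradMag / inGradMag) = a * log(inL) + b
// Cost : 0.5 * (predEnhGradMag - enhGradMag)^2
inline GradMagGrads gradMagCostGrads(float a, float b, float inL,
                                     float inGradMag, float enhGradMag) {
    float logInL = std::log(inL);
    float pred   = inGradMag * std::exp(a * logInL + b); // predEnhGradMag
    float diff   = pred - enhGradMag;                    // one element of getActs()
    // d pred / d b = pred and d pred / d a = pred * logInL, hence:
    return { diff * pred * logInL,   // corresponds to grad_coeff_a in bpropActs
             diff * pred };          // corresponds to grad_coeff_b in bpropActs
}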
597afaaae836421064900d9b1eafb2f8a1e6142d.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>

int main() {
    // get the range of stream priorities for this device
    int priority_high, priority_low;
    hipDeviceGetStreamPriorityRange(&priority_low, &priority_high);
    // create streams with highest and lowest available priorities
    hipStream_t st_high, st_low;
    hipStreamCreateWithPriority(&st_high, hipStreamNonBlocking, priority_high);
    hipStreamCreateWithPriority(&st_low, hipStreamNonBlocking, priority_low);
    printf("%d -- %d\n", priority_low, priority_high);
}
597afaaae836421064900d9b1eafb2f8a1e6142d.cu
#include <stdio.h>
#include <cuda.h>

int main() {
    // get the range of stream priorities for this device
    int priority_high, priority_low;
    cudaDeviceGetStreamPriorityRange(&priority_low, &priority_high);
    // create streams with highest and lowest available priorities
    cudaStream_t st_high, st_low;
    cudaStreamCreateWithPriority(&st_high, cudaStreamNonBlocking, priority_high);
    cudaStreamCreateWithPriority(&st_low, cudaStreamNonBlocking, priority_low);
    printf("%d -- %d\n", priority_low, priority_high);
}
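The pair above only queries the priority range and creates the two streams. A minimal sketch of how such priority streams are typically used afterwards, assuming a placeholder kernel named busyKernel and device buffers d_a/d_b (CUDA form, matching the .cu file):

// Hypothetical follow-on usage of the two priority streams created above.
__global__ void busyKernel(float *data, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] = data[i] * 2.0f + 1.0f;
}

void launchOnPriorityStreams(cudaStream_t st_high, cudaStream_t st_low,
                             float *d_a, float *d_b, int n) {
    dim3 block(256), grid((n + 255) / 256);
    // work queued on st_high is scheduled ahead of work queued on st_low
    busyKernel<<<grid, block, 0, st_low>>>(d_b, n);
    busyKernel<<<grid, block, 0, st_high>>>(d_a, n);
    cudaStreamSynchronize(st_high);
    cudaStreamSynchronize(st_low);
    cudaStreamDestroy(st_high);
    cudaStreamDestroy(st_low);
}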
312c75b9a516e5709878b45d60c284ff6f75cb97.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void orcu_kernel14132(const int nrows, const int ndiags, int sbdiag, int ndofs, int* offsets, double* A, double* x, double* y) { const int tid=blockIdx.x*blockDim.x+threadIdx.x; const int gsize=gridDim.x*blockDim.x; double ysum; int j, k, col, row; for (int i=tid; i<=nrows-1; i+=gsize) { { ysum=0.0; for (j=0; j<=ndiags-1; j++ ) { row=i+j*sbdiag; col=(floor((float)i/ndofs)+offsets[j])*ndofs; if (col>=0&&col<nrows) for (k=0; k<=ndofs-1; k++ ) ysum=ysum+A[row+k*nrows]*x[col+k]; } y[i]=ysum; } } } void MatMult_SeqDIA(double* A, double* x, double* y, int M, int N, int P, int NOS, int DOF) { register int i,j,k; int col,row; double ysum; /*@ begin PerfTuning ( def performance_params { param TC[] = range(32,1025,32); param BC[] = range(14,105,14); param PL[] = [16,32,48]; } def input_params { param M[] = [64]; param N[] = [64]; param P[] = [64]; param NOS = 7; param DOF[] = range(1,17); constraint c1 = (M==N); constraint c2 = (N==P); } def input_vars { decl dynamic double A[M*N*P*DOF*DOF*NOS] = random; decl dynamic double x[M*N*P*DOF] = random; decl dynamic double y[M*N*P*DOF] = 0; decl static int offsets[NOS] = {-M*N*DOF,-M*DOF,-DOF,0,DOF,M*DOF,M*N*DOF}; } ) @*/ /**-- (Generated by Orio) Best performance cost: [4.1088300000000002, 4.0860799999999999, 4.0519999999999996, 4.0544599999999997, 4.0455399999999999] Tuned for specific problem sizes: DOF = 7 M = 64 N = 64 NOS = 7 P = 64 Best performance parameters: BC = 84 PL = 48 TC = 896 --**/ int nrows=M*N*P*DOF; int ndiags=NOS; int ndofs=DOF; int sbdiag=M*N*P*DOF*DOF; /*@ begin Loop(transform CUDA(threadCount=TC, blockCount=BC, preferL1Size=PL) for(i=0; i<=nrows-1; i++){ ysum = 0.0; for(j=0; j<=ndiags-1; j++){ row = i+j*sbdiag; col = (floor((float)i/ndofs)+offsets[j])*ndofs; if(col>=0&&col<nrows) for(k=0; k<=ndofs-1; k++) ysum += A[row+k*nrows] * x[col+k]; } y[i] = ysum; } ) @*/ { hipDeviceSynchronize(); /*declare variables*/ double *dev_A, *dev_x, *dev_y; int *dev_offsets; int nthreads=896; /*calculate device dimensions*/ dim3 dimGrid, dimBlock; dimBlock.x=nthreads; dimGrid.x=84; /*allocate device memory*/ hipMalloc(&dev_A,M *N *P *DOF *DOF *NOS*sizeof(double)); hipMalloc(&dev_x,M *N *P *DOF*sizeof(double)); hipMalloc(&dev_y,M *N *P *DOF*sizeof(double)); hipMalloc(&dev_offsets,NOS*sizeof(int)); hipDeviceSetCacheConfig(hipFuncCachePreferL1); /*copy data from host to device*/ hipEventRecord(tstart,0); hipMemcpy(dev_A,A,M *N *P *DOF *DOF *NOS*sizeof(double),hipMemcpyHostToDevice); hipMemcpy(dev_x,x,M *N *P *DOF*sizeof(double),hipMemcpyHostToDevice); hipMemcpy(dev_offsets,offsets,NOS*sizeof(int),hipMemcpyHostToDevice); hipEventRecord(tstop,0); hipEventSynchronize(tstop); hipEventElapsedTime(&orcu_transfer,tstart,tstop); hipEventRecord(start,0); /*invoke device kernel*/ hipLaunchKernelGGL(( orcu_kernel14132), dim3(dimGrid),dim3(dimBlock), 0, 0, nrows,ndiags,sbdiag,ndofs,dev_offsets,dev_A,dev_x,dev_y); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&orcu_elapsed,start,stop); /*copy data from device to host*/ hipMemcpy(y,dev_y,M *N *P *DOF*sizeof(double),hipMemcpyDeviceToHost); hipDeviceSetCacheConfig(hipFuncCachePreferNone); /*free allocated memory*/ hipFree(dev_A); hipFree(dev_x); hipFree(dev_y); hipFree(dev_offsets); hipError_t err=hipGetLastError(); if (hipSuccess!=err) printf("CUDA runtime error: %s@",hipGetErrorString(err)); } /*@ end @*/ /*@ end @*/ }
312c75b9a516e5709878b45d60c284ff6f75cb97.cu
__global__ void orcu_kernel14132(const int nrows, const int ndiags, int sbdiag, int ndofs, int* offsets, double* A, double* x, double* y) { const int tid=blockIdx.x*blockDim.x+threadIdx.x; const int gsize=gridDim.x*blockDim.x; double ysum; int j, k, col, row; for (int i=tid; i<=nrows-1; i+=gsize) { { ysum=0.0; for (j=0; j<=ndiags-1; j++ ) { row=i+j*sbdiag; col=(floor((float)i/ndofs)+offsets[j])*ndofs; if (col>=0&&col<nrows) for (k=0; k<=ndofs-1; k++ ) ysum=ysum+A[row+k*nrows]*x[col+k]; } y[i]=ysum; } } } void MatMult_SeqDIA(double* A, double* x, double* y, int M, int N, int P, int NOS, int DOF) { register int i,j,k; int col,row; double ysum; /*@ begin PerfTuning ( def performance_params { param TC[] = range(32,1025,32); param BC[] = range(14,105,14); param PL[] = [16,32,48]; } def input_params { param M[] = [64]; param N[] = [64]; param P[] = [64]; param NOS = 7; param DOF[] = range(1,17); constraint c1 = (M==N); constraint c2 = (N==P); } def input_vars { decl dynamic double A[M*N*P*DOF*DOF*NOS] = random; decl dynamic double x[M*N*P*DOF] = random; decl dynamic double y[M*N*P*DOF] = 0; decl static int offsets[NOS] = {-M*N*DOF,-M*DOF,-DOF,0,DOF,M*DOF,M*N*DOF}; } ) @*/ /**-- (Generated by Orio) Best performance cost: [4.1088300000000002, 4.0860799999999999, 4.0519999999999996, 4.0544599999999997, 4.0455399999999999] Tuned for specific problem sizes: DOF = 7 M = 64 N = 64 NOS = 7 P = 64 Best performance parameters: BC = 84 PL = 48 TC = 896 --**/ int nrows=M*N*P*DOF; int ndiags=NOS; int ndofs=DOF; int sbdiag=M*N*P*DOF*DOF; /*@ begin Loop(transform CUDA(threadCount=TC, blockCount=BC, preferL1Size=PL) for(i=0; i<=nrows-1; i++){ ysum = 0.0; for(j=0; j<=ndiags-1; j++){ row = i+j*sbdiag; col = (floor((float)i/ndofs)+offsets[j])*ndofs; if(col>=0&&col<nrows) for(k=0; k<=ndofs-1; k++) ysum += A[row+k*nrows] * x[col+k]; } y[i] = ysum; } ) @*/ { cudaDeviceSynchronize(); /*declare variables*/ double *dev_A, *dev_x, *dev_y; int *dev_offsets; int nthreads=896; /*calculate device dimensions*/ dim3 dimGrid, dimBlock; dimBlock.x=nthreads; dimGrid.x=84; /*allocate device memory*/ cudaMalloc(&dev_A,M *N *P *DOF *DOF *NOS*sizeof(double)); cudaMalloc(&dev_x,M *N *P *DOF*sizeof(double)); cudaMalloc(&dev_y,M *N *P *DOF*sizeof(double)); cudaMalloc(&dev_offsets,NOS*sizeof(int)); cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); /*copy data from host to device*/ cudaEventRecord(tstart,0); cudaMemcpy(dev_A,A,M *N *P *DOF *DOF *NOS*sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy(dev_x,x,M *N *P *DOF*sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy(dev_offsets,offsets,NOS*sizeof(int),cudaMemcpyHostToDevice); cudaEventRecord(tstop,0); cudaEventSynchronize(tstop); cudaEventElapsedTime(&orcu_transfer,tstart,tstop); cudaEventRecord(start,0); /*invoke device kernel*/ orcu_kernel14132<<<dimGrid,dimBlock>>>(nrows,ndiags,sbdiag,ndofs,dev_offsets,dev_A,dev_x,dev_y); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&orcu_elapsed,start,stop); /*copy data from device to host*/ cudaMemcpy(y,dev_y,M *N *P *DOF*sizeof(double),cudaMemcpyDeviceToHost); cudaDeviceSetCacheConfig(cudaFuncCachePreferNone); /*free allocated memory*/ cudaFree(dev_A); cudaFree(dev_x); cudaFree(dev_y); cudaFree(dev_offsets); cudaError_t err=cudaGetLastError(); if (cudaSuccess!=err) printf("CUDA runtime error: %s@",cudaGetErrorString(err)); } /*@ end @*/ /*@ end @*/ }
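Because the kernel above is Orio-generated, auto-tuned output, a plain serial reference of the same DIA-format (diagonal storage) product is handy for validating y. The sketch below simply mirrors the annotated loop nest from the pragma region; the function name is an assumption, not part of the generated sources:

// Serial reference for the DIA-format multiply above (validation sketch).
#include <math.h>

void MatMult_SeqDIA_reference(const double *A, const double *x, double *y,
                              int nrows, int ndiags, int sbdiag, int ndofs,
                              const int *offsets) {
    for (int i = 0; i < nrows; i++) {
        double ysum = 0.0;
        for (int j = 0; j < ndiags; j++) {
            int row = i + j * sbdiag;
            int col = ((int)floor((float)i / ndofs) + offsets[j]) * ndofs;
            if (col >= 0 && col < nrows)
                for (int k = 0; k < ndofs; k++)
                    ysum += A[row + k * nrows] * x[col + k];
        }
        y[i] = ysum;
    }
}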
7142eaf4f93b842c5e616b20754e85adaaac5360.hip
// !!! This is a file automatically generated by hipify!!! /** * Generate Uniformly-Distributed Random Numbers via the CUDA cuRAND Library on the NVIDIA GPU. */ #include <stdio.h> #include <math.h> #include <time.h> /** * NOTE that on Ubuntu, the below header files are generally located in: * /usr/local/cuda/include/ */ #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> /** * HOST: Handle the CUDA Errors for GPU. */ #define HCE( cuda_expr ) { assertGpuError( ( cuda_expr ), __FILE__, __LINE__ ); } inline void assertGpuError( const hipError_t error_index, const char *error_file, const unsigned error_line ) { if ( error_index != hipSuccess ) { fprintf( stderr, "\n\n\n***\nCUDA ERROR :: %s [LINE %u] ---> %s.\n***\n\n\n", error_file, error_line, hipGetErrorString( error_index ) ); hipDeviceReset(); exit( EXIT_FAILURE ); } } /** * DEVICE: Set the RNG State for GPU. */ __global__ void devSetRngState( const unsigned total_num_threads, const unsigned rng_seed, hiprandState_t *dev_rgn_state ) { unsigned tidx = threadIdx.x + blockIdx.x * blockDim.x; while ( tidx < total_num_threads ) { hiprand_init( rng_seed, tidx, 0, dev_rgn_state + tidx ); tidx += blockDim.x * gridDim.x; } } /** * DEVICE: Generate Uniformly-Distributed Random Numbers for GPU. */ __global__ void devGenUniformRand( const unsigned total_num_threads, hiprandState_t *dev_rgn_state, float *dev_rand_samples ) { unsigned tidx = threadIdx.x + blockIdx.x * blockDim.x; while ( tidx < total_num_threads ) { *( dev_rand_samples + tidx ) = hiprand_uniform( dev_rgn_state + tidx ); tidx += blockDim.x * gridDim.x; } } /** * HOST: Calculate the Average Value for the One-Dimensional Numerical Array. */ float avg( const float *array, const unsigned array_length ) { float asum = 0.0; for ( unsigned ind_array = 0; ind_array < array_length; ind_array++ ) { asum += *( array + ind_array ); } return asum / array_length; } int main( void ) { printf("\n*******\n* Generate Uniformly-Distributed Random Numbers " "via the CUDA cuRAND Library on the NVIDIA GPU\n*******\n"); srand( ( unsigned ) time( NULL ) ); const unsigned NUM_BLOCKS_PER_GRID = 128; const unsigned NUM_THREADS_PER_BLOCK = 64; printf("\n*** sampling of random numbers on parallel threads ***\n"); hiprandState_t *dev_rgn_state; float *dev_rand_samples; float *rand_samples; for ( unsigned ind_num_threads = 1; ind_num_threads <= 1000000; ind_num_threads *= 10 ) { printf( "ind_num_threads = %u ---> ", ind_num_threads ); HCE( hipMalloc( ( hiprandState_t ** ) &dev_rgn_state, ind_num_threads * sizeof( hiprandState_t ) ) ); HCE( hipMalloc( ( float ** ) &dev_rand_samples, ind_num_threads * sizeof( float ) ) ); hipLaunchKernelGGL(( devSetRngState) , dim3(NUM_BLOCKS_PER_GRID), dim3(NUM_THREADS_PER_BLOCK) , 0, 0, ind_num_threads, ( unsigned ) rand(), dev_rgn_state ); HCE( hipPeekAtLastError() ); hipLaunchKernelGGL(( devGenUniformRand) , dim3(NUM_BLOCKS_PER_GRID), dim3(NUM_THREADS_PER_BLOCK) , 0, 0, ind_num_threads, dev_rgn_state, dev_rand_samples ); HCE( hipPeekAtLastError() ); rand_samples = ( float * ) malloc( ind_num_threads * sizeof( float ) ); HCE( hipMemcpy( rand_samples, dev_rand_samples, ind_num_threads * sizeof( float ), hipMemcpyDeviceToHost ) ); for ( unsigned ind_elem = 0; ind_elem < ind_num_threads; ind_elem++ ) { if ( rand_samples[ ind_elem ] > 1.0 || rand_samples[ ind_elem ] <= 0.0 ) { fprintf( stderr, "\n\n\nERROR >> cannot correctly generate random numbers.\n\n\n" ); } } printf( "rand_samples[%i] = %5.2lf, [%u] = %5.2lf, [%u] = %5.2lf && Avg = %7.5lf\n", 0, rand_samples[ 0 ], 
ind_num_threads / 2, rand_samples[ ind_num_threads / 2 ], ind_num_threads - 1, rand_samples[ ind_num_threads - 1 ], avg( rand_samples, ind_num_threads ) ); HCE( hipFree( dev_rgn_state ) ); HCE( hipFree( dev_rand_samples ) ); free( rand_samples ); } printf("\n*** sampling of random numbers on different iterations ***\n"); unsigned num_threads = 1000; hiprandState_t *dev_rgn_state2; float *dev_rand_samples2; float *rand_samples2; rand_samples2 = ( float * ) malloc( num_threads * sizeof( float ) ); HCE( hipMalloc( ( hiprandState_t ** ) &dev_rgn_state2, num_threads * sizeof( hiprandState_t ) ) ); HCE( hipMalloc( ( float ** ) &dev_rand_samples2, num_threads * sizeof( float ) )); hipLaunchKernelGGL(( devSetRngState) , dim3(NUM_BLOCKS_PER_GRID), dim3(NUM_THREADS_PER_BLOCK) , 0, 0, num_threads, ( unsigned ) rand(), dev_rgn_state2 ); HCE( hipPeekAtLastError() ); float avg_fst = 0.0, avg_half = 0.0, avg_end = 0.0; unsigned num_iter = 5000; for ( unsigned ind_iter = 1; ind_iter <= num_iter; ind_iter++ ) { hipLaunchKernelGGL(( devGenUniformRand) , dim3(NUM_BLOCKS_PER_GRID), dim3(NUM_THREADS_PER_BLOCK) , 0, 0, num_threads, dev_rgn_state2, dev_rand_samples2 ); HCE( hipPeekAtLastError() ); HCE( hipMemcpy( rand_samples2, dev_rand_samples2, num_threads * sizeof( float ), hipMemcpyDeviceToHost ) ); avg_fst += rand_samples2[ 0 ]; avg_half += rand_samples2[ num_threads / 2 ]; avg_end += rand_samples2[ num_threads - 1 ]; for ( unsigned ind_elem = 0; ind_elem < num_threads; ind_elem++ ) { if ( rand_samples2[ ind_elem ] > 1.0 || rand_samples2[ ind_elem ] <= 0.0 ) { fprintf( stderr, "\n\n\nERROR >> cannot correctly generate random numbers.\n\n\n" ); } } if ( ind_iter == 1 || ind_iter % 500 == 0 || ind_iter == num_iter ) { printf( "ind_iter = %u ---> rand_samples[%i] = %5.2lf, [%u] = %5.2lf, [%u] = %5.2lf\n", ind_iter, 0, rand_samples2[ 0 ], num_threads / 2, rand_samples2[ num_threads / 2 ], num_threads - 1, rand_samples2[ num_threads - 1 ]); } } printf("avg_fst = %5.2lf && avg_half = %5.2lf && avg_end = %5.2lf\n", avg_fst / num_iter, avg_half / num_iter, avg_end / num_iter ); HCE( hipFree( dev_rgn_state2 ) ); HCE( hipFree( dev_rand_samples2 ) ); free( rand_samples2 ); }
7142eaf4f93b842c5e616b20754e85adaaac5360.cu
/** * Generate Uniformly-Distributed Random Numbers via the CUDA cuRAND Library on the NVIDIA GPU. */ #include <stdio.h> #include <math.h> #include <time.h> /** * NOTE that on Ubuntu, the below header files are generally located in: * /usr/local/cuda/include/ */ #include <cuda_runtime.h> #include <curand_kernel.h> /** * HOST: Handle the CUDA Errors for GPU. */ #define HCE( cuda_expr ) { assertGpuError( ( cuda_expr ), __FILE__, __LINE__ ); } inline void assertGpuError( const cudaError_t error_index, const char *error_file, const unsigned error_line ) { if ( error_index != cudaSuccess ) { fprintf( stderr, "\n\n\n***\nCUDA ERROR :: %s [LINE %u] ---> %s.\n***\n\n\n", error_file, error_line, cudaGetErrorString( error_index ) ); cudaDeviceReset(); exit( EXIT_FAILURE ); } } /** * DEVICE: Set the RNG State for GPU. */ __global__ void devSetRngState( const unsigned total_num_threads, const unsigned rng_seed, curandState *dev_rgn_state ) { unsigned tidx = threadIdx.x + blockIdx.x * blockDim.x; while ( tidx < total_num_threads ) { curand_init( rng_seed, tidx, 0, dev_rgn_state + tidx ); tidx += blockDim.x * gridDim.x; } } /** * DEVICE: Generate Uniformly-Distributed Random Numbers for GPU. */ __global__ void devGenUniformRand( const unsigned total_num_threads, curandState *dev_rgn_state, float *dev_rand_samples ) { unsigned tidx = threadIdx.x + blockIdx.x * blockDim.x; while ( tidx < total_num_threads ) { *( dev_rand_samples + tidx ) = curand_uniform( dev_rgn_state + tidx ); tidx += blockDim.x * gridDim.x; } } /** * HOST: Calculate the Average Value for the One-Dimensional Numerical Array. */ float avg( const float *array, const unsigned array_length ) { float asum = 0.0; for ( unsigned ind_array = 0; ind_array < array_length; ind_array++ ) { asum += *( array + ind_array ); } return asum / array_length; } int main( void ) { printf("\n*******\n* Generate Uniformly-Distributed Random Numbers " "via the CUDA cuRAND Library on the NVIDIA GPU\n*******\n"); srand( ( unsigned ) time( NULL ) ); const unsigned NUM_BLOCKS_PER_GRID = 128; const unsigned NUM_THREADS_PER_BLOCK = 64; printf("\n*** sampling of random numbers on parallel threads ***\n"); curandState *dev_rgn_state; float *dev_rand_samples; float *rand_samples; for ( unsigned ind_num_threads = 1; ind_num_threads <= 1000000; ind_num_threads *= 10 ) { printf( "ind_num_threads = %u ---> ", ind_num_threads ); HCE( cudaMalloc( ( curandState ** ) &dev_rgn_state, ind_num_threads * sizeof( curandState ) ) ); HCE( cudaMalloc( ( float ** ) &dev_rand_samples, ind_num_threads * sizeof( float ) ) ); devSetRngState <<< NUM_BLOCKS_PER_GRID, NUM_THREADS_PER_BLOCK >>> ( ind_num_threads, ( unsigned ) rand(), dev_rgn_state ); HCE( cudaPeekAtLastError() ); devGenUniformRand <<< NUM_BLOCKS_PER_GRID, NUM_THREADS_PER_BLOCK >>> ( ind_num_threads, dev_rgn_state, dev_rand_samples ); HCE( cudaPeekAtLastError() ); rand_samples = ( float * ) malloc( ind_num_threads * sizeof( float ) ); HCE( cudaMemcpy( rand_samples, dev_rand_samples, ind_num_threads * sizeof( float ), cudaMemcpyDeviceToHost ) ); for ( unsigned ind_elem = 0; ind_elem < ind_num_threads; ind_elem++ ) { if ( rand_samples[ ind_elem ] > 1.0 || rand_samples[ ind_elem ] <= 0.0 ) { fprintf( stderr, "\n\n\nERROR >> cannot correctly generate random numbers.\n\n\n" ); } } printf( "rand_samples[%i] = %5.2lf, [%u] = %5.2lf, [%u] = %5.2lf && Avg = %7.5lf\n", 0, rand_samples[ 0 ], ind_num_threads / 2, rand_samples[ ind_num_threads / 2 ], ind_num_threads - 1, rand_samples[ ind_num_threads - 1 ], avg( rand_samples, ind_num_threads 
) ); HCE( cudaFree( dev_rgn_state ) ); HCE( cudaFree( dev_rand_samples ) ); free( rand_samples ); } printf("\n*** sampling of random numbers on different iterations ***\n"); unsigned num_threads = 1000; curandState *dev_rgn_state2; float *dev_rand_samples2; float *rand_samples2; rand_samples2 = ( float * ) malloc( num_threads * sizeof( float ) ); HCE( cudaMalloc( ( curandState ** ) &dev_rgn_state2, num_threads * sizeof( curandState ) ) ); HCE( cudaMalloc( ( float ** ) &dev_rand_samples2, num_threads * sizeof( float ) )); devSetRngState <<< NUM_BLOCKS_PER_GRID, NUM_THREADS_PER_BLOCK >>> ( num_threads, ( unsigned ) rand(), dev_rgn_state2 ); HCE( cudaPeekAtLastError() ); float avg_fst = 0.0, avg_half = 0.0, avg_end = 0.0; unsigned num_iter = 5000; for ( unsigned ind_iter = 1; ind_iter <= num_iter; ind_iter++ ) { devGenUniformRand <<< NUM_BLOCKS_PER_GRID, NUM_THREADS_PER_BLOCK >>> ( num_threads, dev_rgn_state2, dev_rand_samples2 ); HCE( cudaPeekAtLastError() ); HCE( cudaMemcpy( rand_samples2, dev_rand_samples2, num_threads * sizeof( float ), cudaMemcpyDeviceToHost ) ); avg_fst += rand_samples2[ 0 ]; avg_half += rand_samples2[ num_threads / 2 ]; avg_end += rand_samples2[ num_threads - 1 ]; for ( unsigned ind_elem = 0; ind_elem < num_threads; ind_elem++ ) { if ( rand_samples2[ ind_elem ] > 1.0 || rand_samples2[ ind_elem ] <= 0.0 ) { fprintf( stderr, "\n\n\nERROR >> cannot correctly generate random numbers.\n\n\n" ); } } if ( ind_iter == 1 || ind_iter % 500 == 0 || ind_iter == num_iter ) { printf( "ind_iter = %u ---> rand_samples[%i] = %5.2lf, [%u] = %5.2lf, [%u] = %5.2lf\n", ind_iter, 0, rand_samples2[ 0 ], num_threads / 2, rand_samples2[ num_threads / 2 ], num_threads - 1, rand_samples2[ num_threads - 1 ]); } } printf("avg_fst = %5.2lf && avg_half = %5.2lf && avg_end = %5.2lf\n", avg_fst / num_iter, avg_half / num_iter, avg_end / num_iter ); HCE( cudaFree( dev_rgn_state2 ) ); HCE( cudaFree( dev_rand_samples2 ) ); free( rand_samples2 ); }
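The example above draws per-thread samples with the cuRAND device API (curand_kernel.h). An alternative way to fill a device buffer with uniform floats in (0, 1] — the same range the checks above assume — is the cuRAND host API. A minimal sketch, with error checking omitted and the function name chosen here for illustration:

// Sketch: generator-based cuRAND host API instead of per-thread device states.
#include <curand.h>

void fillUniformHostApi(float *dev_buf, size_t n, unsigned long long seed) {
    curandGenerator_t gen;
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
    curandSetPseudoRandomGeneratorSeed(gen, seed);
    // curandGenerateUniform produces floats in (0, 1]
    curandGenerateUniform(gen, dev_buf, n);
    curandDestroyGenerator(gen);
}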
ca3cbf5f1f567c807fad28c12e44a9cad1d6838c.hip
// !!! This is a file automatically generated by hipify!!!
#include "defs.h"

// Define your kernels in this file you may use more than one kernel if you
// need to

// INSERT KERNEL(S) HERE
__global__ void exclusiveScan(unsigned int *out, unsigned int* in, unsigned int*sum, unsigned int inputSize) {
    __shared__ unsigned int temp[2 * BLOCK_SIZE];
    int start = 2 * blockIdx.x * blockDim.x;
    int tx = threadIdx.x;
    int index = 0;
    if (start + tx < inputSize) {
        temp[tx] = in[start + tx];
    } else {
        temp[tx] = 0;
    }
    if (start + tx + blockDim.x < inputSize) {
        temp[tx + blockDim.x] = in[start + tx + blockDim.x];
    } else {
        temp[tx + blockDim.x] = 0;
    }
    __syncthreads();

    // reduction step
    int stride = 1;
    while (stride <= blockDim.x) {
        index = (tx + 1) * 2 * stride - 1;
        if (index < (2 * blockDim.x)) {
            temp[index] += temp[index - stride];
        }
        stride *= 2;
        __syncthreads();
    }

    // first store the reduction sum in the sum array,
    // then zero it since this is an exclusive scan
    if (tx == 0) {
        // sum array contains the total of each 2*blockDim-element block
        if (sum != NULL) {
            sum[blockIdx.x] = temp[2*blockDim.x - 1];
        }
        temp[2*blockDim.x - 1] = 0;
    }
    // wait for thread zero to write
    __syncthreads();

    // post scan step
    stride = blockDim.x;
    index = 0;
    unsigned int var = 0;
    while (stride > 0) {
        index = (2 * stride * (tx + 1)) - 1;
        if (index < 2 * blockDim.x) {
            var = temp[index];
            temp[index] += temp[index - stride];
            temp[index - stride] = var;
        }
        stride >>= 1;
        __syncthreads();
    }

    // now write the temp array to output
    if (start + tx < inputSize) {
        out[start + tx] = temp[tx];
    }
    if (start + tx + blockDim.x < inputSize) {
        out[start + tx + blockDim.x] = temp[tx + blockDim.x];
    }
}

__global__ void mergeScanBlocks(unsigned int *sum, unsigned int* output, int opSize) {
    int index = 2 * blockDim.x * blockIdx.x + threadIdx.x;
    if (index < opSize) {
        output[index] += sum[blockIdx.x];
    }
    if (index + blockDim.x < opSize) {
        output[index + blockDim.x] += sum[blockIdx.x];
    }
}

/******************************************************************************
Setup and invoke your kernel(s) in this function. You may also allocate more
GPU memory if you need to
*******************************************************************************/
void preScan(unsigned int *out, unsigned int *in, unsigned int in_size) {
    // INSERT CODE HERE
    hipError_t ret;
    unsigned int numBlocks1 = in_size / BLOCK_SIZE;
    if (in_size % BLOCK_SIZE) numBlocks1++;
    int numBlocks2 = numBlocks1 / 2;
    if (numBlocks1 % 2) numBlocks2++;
    dim3 dimThreadBlock;
    dimThreadBlock.x = BLOCK_SIZE;
    dimThreadBlock.y = 1;
    dimThreadBlock.z = 1;
    dim3 dimGrid;
    dimGrid.x = numBlocks2;
    dimGrid.y = 1;
    dimGrid.z = 1;
    unsigned int *sumArr_d = NULL;
    if (in_size > (2*BLOCK_SIZE)) {
        // we need the auxiliary sum array only if numBlocks2 > 1
        ret = hipMalloc((void**)&sumArr_d, numBlocks2 * sizeof(unsigned int));
        if (ret != hipSuccess) FATAL("unable to create sum array");
    }
    hipLaunchKernelGGL(( exclusiveScan), dim3(dimGrid), dim3(dimThreadBlock), 0, 0, out, in, sumArr_d, in_size);
    ret = hipDeviceSynchronize();
    if (ret != hipSuccess) FATAL("unable to launch scan kernel");
    if (in_size <= (2*BLOCK_SIZE)) {
        // out already holds the proper exclusive scan; just return
        return;
    } else {
        // now we need to perform an exclusive scan on the auxiliary sum array
        unsigned int *sumArr_scan_d;
        ret = hipMalloc((void**)&sumArr_scan_d, numBlocks2 * sizeof(unsigned int));
        if (ret != hipSuccess) FATAL("unable to create sum scan array");
        preScan(sumArr_scan_d, sumArr_d, numBlocks2);
        // sumArr_scan_d now contains the exclusive scan of the individual block sums;
        // now just do a one-to-one addition of blocks
        hipLaunchKernelGGL(( mergeScanBlocks), dim3(dimGrid),dim3(dimThreadBlock), 0, 0, sumArr_scan_d, out, in_size);
        ret = hipDeviceSynchronize();
        if (ret != hipSuccess) FATAL("unable to launch merge scan kernel");
        hipFree(sumArr_d);
        hipFree(sumArr_scan_d);
    }
}
ca3cbf5f1f567c807fad28c12e44a9cad1d6838c.cu
#include "defs.h"

// Define your kernels in this file you may use more than one kernel if you
// need to

// INSERT KERNEL(S) HERE
__global__ void exclusiveScan(unsigned int *out, unsigned int* in, unsigned int*sum, unsigned int inputSize) {
    __shared__ unsigned int temp[2 * BLOCK_SIZE];
    int start = 2 * blockIdx.x * blockDim.x;
    int tx = threadIdx.x;
    int index = 0;
    if (start + tx < inputSize) {
        temp[tx] = in[start + tx];
    } else {
        temp[tx] = 0;
    }
    if (start + tx + blockDim.x < inputSize) {
        temp[tx + blockDim.x] = in[start + tx + blockDim.x];
    } else {
        temp[tx + blockDim.x] = 0;
    }
    __syncthreads();

    // reduction step
    int stride = 1;
    while (stride <= blockDim.x) {
        index = (tx + 1) * 2 * stride - 1;
        if (index < (2 * blockDim.x)) {
            temp[index] += temp[index - stride];
        }
        stride *= 2;
        __syncthreads();
    }

    // first store the reduction sum in the sum array,
    // then zero it since this is an exclusive scan
    if (tx == 0) {
        // sum array contains the total of each 2*blockDim-element block
        if (sum != NULL) {
            sum[blockIdx.x] = temp[2*blockDim.x - 1];
        }
        temp[2*blockDim.x - 1] = 0;
    }
    // wait for thread zero to write
    __syncthreads();

    // post scan step
    stride = blockDim.x;
    index = 0;
    unsigned int var = 0;
    while (stride > 0) {
        index = (2 * stride * (tx + 1)) - 1;
        if (index < 2 * blockDim.x) {
            var = temp[index];
            temp[index] += temp[index - stride];
            temp[index - stride] = var;
        }
        stride >>= 1;
        __syncthreads();
    }

    // now write the temp array to output
    if (start + tx < inputSize) {
        out[start + tx] = temp[tx];
    }
    if (start + tx + blockDim.x < inputSize) {
        out[start + tx + blockDim.x] = temp[tx + blockDim.x];
    }
}

__global__ void mergeScanBlocks(unsigned int *sum, unsigned int* output, int opSize) {
    int index = 2 * blockDim.x * blockIdx.x + threadIdx.x;
    if (index < opSize) {
        output[index] += sum[blockIdx.x];
    }
    if (index + blockDim.x < opSize) {
        output[index + blockDim.x] += sum[blockIdx.x];
    }
}

/******************************************************************************
Setup and invoke your kernel(s) in this function. You may also allocate more
GPU memory if you need to
*******************************************************************************/
void preScan(unsigned int *out, unsigned int *in, unsigned int in_size) {
    // INSERT CODE HERE
    cudaError_t ret;
    unsigned int numBlocks1 = in_size / BLOCK_SIZE;
    if (in_size % BLOCK_SIZE) numBlocks1++;
    int numBlocks2 = numBlocks1 / 2;
    if (numBlocks1 % 2) numBlocks2++;
    dim3 dimThreadBlock;
    dimThreadBlock.x = BLOCK_SIZE;
    dimThreadBlock.y = 1;
    dimThreadBlock.z = 1;
    dim3 dimGrid;
    dimGrid.x = numBlocks2;
    dimGrid.y = 1;
    dimGrid.z = 1;
    unsigned int *sumArr_d = NULL;
    if (in_size > (2*BLOCK_SIZE)) {
        // we need the auxiliary sum array only if numBlocks2 > 1
        ret = cudaMalloc((void**)&sumArr_d, numBlocks2 * sizeof(unsigned int));
        if (ret != cudaSuccess) FATAL("unable to create sum array");
    }
    exclusiveScan<<<dimGrid, dimThreadBlock>>>(out, in, sumArr_d, in_size);
    ret = cudaDeviceSynchronize();
    if (ret != cudaSuccess) FATAL("unable to launch scan kernel");
    if (in_size <= (2*BLOCK_SIZE)) {
        // out already holds the proper exclusive scan; just return
        return;
    } else {
        // now we need to perform an exclusive scan on the auxiliary sum array
        unsigned int *sumArr_scan_d;
        ret = cudaMalloc((void**)&sumArr_scan_d, numBlocks2 * sizeof(unsigned int));
        if (ret != cudaSuccess) FATAL("unable to create sum scan array");
        preScan(sumArr_scan_d, sumArr_d, numBlocks2);
        // sumArr_scan_d now contains the exclusive scan of the individual block sums;
        // now just do a one-to-one addition of blocks
        mergeScanBlocks<<<dimGrid,dimThreadBlock>>>(sumArr_scan_d, out, in_size);
        ret = cudaDeviceSynchronize();
        if (ret != cudaSuccess) FATAL("unable to launch merge scan kernel");
        cudaFree(sumArr_d);
        cudaFree(sumArr_scan_d);
    }
}
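A serial exclusive scan is a convenient check for the output of preScan above; a minimal validation sketch (the function name is illustrative):

// Serial exclusive scan: out[i] holds the sum of in[0..i-1].
void exclusiveScanReference(unsigned int *out, const unsigned int *in,
                            unsigned int n) {
    unsigned int running = 0;
    for (unsigned int i = 0; i < n; ++i) {
        out[i] = running;
        running += in[i];
    }
}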
a9f8122e002d1b3b436c4db3a4a3db85346245d7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <ops/declarable/helpers/top_k.h> #include <helpers/MmulHelper.h> #include <array/NDArrayFactory.h> #include <graph/Status.h> #include <helpers/ConstantTadHelper.h> #include <helpers/ShapeUtils.h> //#include <ops/declarable/generic/helpers/BroadcastHelper.h> #include <cusolverDn.h> #include <exceptions/cuda_exception.h> namespace sd { namespace ops { namespace helpers { // ------------------------------------------------------------------------------------------------------------------ // // invert the second diagonal for lower diagonal matrix template<typename T> static __global__ void invertKernelLow(void *invertedBuf, Nd4jLong *invertedShape, void *inputBuf, Nd4jLong *inputShape, Nd4jLong n) { T* inverted = reinterpret_cast<T *>(invertedBuf); T* input = reinterpret_cast<T*>(inputBuf); auto start = threadIdx.x + blockIdx.x * blockDim.x; auto step = blockDim.x * gridDim.x; for (int i = start + 1; i < n; i += step) { Nd4jLong pos[] = {i, i - 1}; Nd4jLong posX[] = {i, i}; Nd4jLong posY[] = {i - 1, i - 1}; auto xIndex = shape::getOffset(inputShape, pos); auto dxIndex = shape::getOffset(inputShape, posX); auto dyIndex = shape::getOffset(inputShape, posY); auto zIndex = shape::getOffset(invertedShape, pos); // invert lower triangular matrix inverted[zIndex] = -input[xIndex] / (input[dxIndex] * input[dyIndex]); // math::atomics::nd4j_atomicAdd(&inverted[zIndex], - input[xIndex] * inverted[iIndex] / input[dIndex]); } } // ------------------------------------------------------------------------------------------------------------------ // // invert diagonal vals to upper diagonal matrix template<typename T> static __global__ void upvertKernel(void *invertedBuf, Nd4jLong *invertedShape, void *inputBuf, Nd4jLong *inputShape, Nd4jLong n) { T *inverted = reinterpret_cast<T *>(invertedBuf); T *input = reinterpret_cast<T *>(inputBuf); auto start = threadIdx.x + blockIdx.x * blockDim.x; auto step = blockDim.x * gridDim.x; for (int i = start; i < n; i += step) { Nd4jLong pos[] = {i, i}; auto xIndex = shape::getOffset(inputShape, pos); auto zIndex = shape::getOffset(invertedShape, pos); // math::atomics::nd4j_atomicDiv(&inverted[zIndex], input[xIndex]); // invert diagonal elements inverted[zIndex] /= input[xIndex]; } } // ------------------------------------------------------------------------------------------------------------------ // // invert upper second diagonal template<typename T> static __global__ void upvertKernelUp(void *invertedBuf, Nd4jLong *invertedShape, void *inputBuf, Nd4jLong *inputShape, Nd4jLong n) { __shared__ T* inverted; __shared__ T* input; if (threadIdx.x == 0) { inverted = reinterpret_cast<T *>(invertedBuf); input = 
reinterpret_cast<T *>(inputBuf); } __syncthreads(); auto start = threadIdx.x + blockIdx.x * blockDim.x; auto step = blockDim.x * gridDim.x; for (int i = start; i < n - 1; i += step) { Nd4jLong pos[] = {i, i + 1}; Nd4jLong posX[] = {i + 1, i + 1}; auto xIndex = shape::getOffset(inputShape, pos); auto iIndex = shape::getOffset(invertedShape, posX); auto zIndex = shape::getOffset(invertedShape, pos); // invert upper matrix math::atomics::nd4j_atomicAdd(&inverted[zIndex], -input[xIndex] * inverted[iIndex]); // / input[yIndex]); //inputMatrix->t<T>(i, i + 1) * invertedMatrix->t<T>(i + 1, i + 1) / inputMatrix->t<T>(i, i) } } // ------------------------------------------------------------------------------------------------------------------ // template<typename T> static __global__ void invertLowKernel(void *invertedBuf, Nd4jLong *invertedShape, void *inputBuf, Nd4jLong *inputShape, Nd4jLong n) { T *inverted = reinterpret_cast<T *>(invertedBuf); T *input = reinterpret_cast<T *>(inputBuf); if (threadIdx.x == 0) { inverted = reinterpret_cast<T *>(invertedBuf); input = reinterpret_cast<T *>(inputBuf); } __syncthreads(); auto tid = blockIdx.x * blockDim.x + threadIdx.x; auto step = gridDim.x * blockDim.x; for (int i = tid + 2; i < n; i += step) { for (int j = i - 2; j >= 0; --j) for (int k = 0; k < i; k++) { Nd4jLong posZ[] = {i, j}; Nd4jLong posY[] = {k, j}; Nd4jLong posX[] = {i, k}; Nd4jLong posD[] = {i, i}; auto xIndex = shape::getOffset(inputShape, posX); auto yIndex = shape::getOffset(invertedShape, posY); auto dIndex = shape::getOffset(inputShape, posD); auto zIndex = shape::getOffset(invertedShape, posZ); // invert non-diagonal elements math::atomics::nd4j_atomicAdd(&inverted[zIndex], -inverted[yIndex] * input[xIndex] / input[dIndex]); } } } // ------------------------------------------------------------------------------------------------------------------ // // Invertion of upper triangular matrix non-diagonal elements when main and second diagonals already processed template<typename T> static __global__ void invertUpKernel(void *invertedBuf, Nd4jLong *invertedShape, void *inputBuf, Nd4jLong *inputShape, Nd4jLong n) { __shared__ T* inverted; __shared__ T* input; if (threadIdx.x == 0) { inverted = reinterpret_cast<T *>(invertedBuf);; input = reinterpret_cast<T *>(inputBuf); } __syncthreads(); auto tid = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; for (int i = (int)n - tid - 2; i >= 0; i -= step) { for (int j = i + 2; j < (int)n; j++) for (int k = i; k < (int)n; k++) { Nd4jLong posZ[] = {i, j}; Nd4jLong posY[] = {k, j}; Nd4jLong posX[] = {i, k}; // inversion with Joardan Gauss transformation auto xIndex = shape::getOffset(inputShape, posX); auto yIndex = shape::getOffset(invertedShape, posY); auto zIndex = shape::getOffset(invertedShape, posZ); // invert upper non-diagonal elements math::atomics::nd4j_atomicAdd(&inverted[zIndex], -inverted[yIndex] * input[xIndex]); } } } // ------------------------------------------------------------------------------------------------------------------ // // procedure to invert lower-triangular matrix. 
// In current case lower triangular matrix has main diagonal with general values // template<typename T> static void invertLowerMatrix_(LaunchContext *context, NDArray *inputMatrix, NDArray *invertedMatrix) { int n = inputMatrix->rows(); invertedMatrix->setIdentity(); if (inputMatrix->isIdentityMatrix()) return; auto stream = context->getCudaStream(); // invert lower matrix // invert main diagonal hipLaunchKernelGGL(( upvertKernel<T>), dim3(1), dim3(n), 512, *stream, invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(), inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n); // invert the second diagonal hipLaunchKernelGGL(( invertKernelLow<T>), dim3(1), dim3(n), 512, *stream, invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(), inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n); // invert non-diagonal elements hipLaunchKernelGGL(( invertLowKernel<T>), dim3(n), dim3(n), 512, *stream, invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(), inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n); } // ------------------------------------------------------------------------------------------------------------------ // // caller for invert lower matrix routine void invertLowerMatrix(LaunchContext *context, NDArray *inputMatrix, NDArray *invertedMatrix) { NDArray::prepareSpecialUse({invertedMatrix}, {inputMatrix}); BUILD_SINGLE_SELECTOR(inputMatrix->dataType(), invertLowerMatrix_, (context, inputMatrix, invertedMatrix), FLOAT_NATIVE); NDArray::registerSpecialUse({invertedMatrix}, {inputMatrix}); } // ------------------------------------------------------------------------------------------------------------------ // // procedure to invert upper-triangular matrix. // In current case upper triangular matrix has main diagonal with all ones on it. 
template<typename T> static void invertUpperMatrix_(LaunchContext *context, NDArray* inputMatrix, NDArray* invertedMatrix) { int n = inputMatrix->rows(); invertedMatrix->setIdentity(); auto stream = context->getCudaStream(); if (inputMatrix->isIdentityMatrix()) { // the inverse for I is I return; } // invert upper matrix // invert the second diagonal hipLaunchKernelGGL(( upvertKernelUp<T>), dim3(1), dim3(n), 512, *stream , invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(), inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n); // invert other elements hipLaunchKernelGGL(( invertUpKernel<T>), dim3(n), dim3(n), 512, *stream , invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(),inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n); } // ------------------------------------------------------------------------------------------------------------------ // // invertion of upper triangular matrix - runner routine void invertUpperMatrix(LaunchContext *context, NDArray *inputMatrix, NDArray *invertedMatrix) { NDArray::prepareSpecialUse({invertedMatrix}, {inputMatrix}); BUILD_SINGLE_SELECTOR(invertedMatrix->dataType(), invertUpperMatrix_, (context, inputMatrix, invertedMatrix), FLOAT_NATIVE); NDArray::prepareSpecialUse({invertedMatrix}, {inputMatrix}); } // ------------------------------------------------------------------------------------------------------------------ // // determinant kernel - accumulation product of all values on the main diagonal template<typename T> static __global__ void determinantKernel(T *compound, T *result, Nd4jLong len) { auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; for (auto i = start; i < len; i += step) { auto pos = i * len + i; //shape::getOffset(0, shape::shapeOf(shape), shape::stride(shape), di, 2); // multiply all diagonal elements math::atomics::nd4j_atomicMul(&result[0], compound[pos]); } } // ------------------------------------------------------------------------------------------------------------------ // // determinant logarithm - accumulation sum of all logarithm values on the main diagonal. 
All in logarithic values // should be positive template<typename T> static __global__ void determinantLogKernel(T *compound, T *result, Nd4jLong len) { auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; for (auto i = start; i < len; i += step) { auto pos = i * len + i; //shape::getOffset(0, shape::shapeOf(shape), shape::stride(shape), di, 2); // sum logs of all diagonal elements math::atomics::nd4j_atomicAdd(result, math::nd4j_log<T,T>(math::nd4j_abs(compound[pos]))); } } // ------------------------------------------------------------------------------------------------------------------ // // kernel to copy matrix with given shape to compound tensor with given pos // output - a N-D tensor buffer with rank not less than 2, input - 2D square n x n matrix with n = rowLen template<typename T, typename F> static __global__ void fillMatrix(void *output, Nd4jLong *outShape, void *input, Nd4jLong *inputShape, Nd4jLong pos, Nd4jLong rowLen) { __shared__ F *matrix; __shared__ T *inputBuf; __shared__ Nd4jLong inputLen; __shared__ Nd4jLong n2; if (threadIdx.x == 0) { matrix = reinterpret_cast<F*>(output); inputBuf = reinterpret_cast<T*>(input); inputLen = shape::length(inputShape); n2 = rowLen * rowLen; } __syncthreads(); auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; for (int k = pos + start, j = start; j < n2; k += step, j += step) { auto xIndex = shape::getIndexOffset(k, inputShape); matrix[j] = (F) inputBuf[xIndex]; } } // ------------------------------------------------------------------------------------------------------------------ // // same as above, but without type conversion template<typename T> static __global__ void returnMatrix(void *output, Nd4jLong *outputShape, void *input, Nd4jLong *inputShape, Nd4jLong pos, Nd4jLong rowLen) { __shared__ T* matrix; __shared__ T* outputBuf; __shared__ Nd4jLong outputLen; __shared__ Nd4jLong n2; if (threadIdx.x == 0) { matrix = reinterpret_cast<T *>(input); outputBuf = reinterpret_cast<T *>(output); outputLen = shape::length(inputShape); n2 = rowLen * rowLen; } __syncthreads(); auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; for (int k = pos + start, j = start; j < n2; k += step, j += step) { auto zIndex = shape::getIndexOffset(k, outputShape); outputBuf[zIndex] = matrix[j]; } } // ------------------------------------------------------------------------------------------------------------------ // // fill up permutaion matrix kernel. 
Permutation matrix filled with zeros and ones template<typename F> static __global__ void fillUpPermutation(void *output, Nd4jLong *shape, int *source, int rowNum) { F *permutation = reinterpret_cast<F *>(output); auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; for (auto i = start; i < rowNum; i += step) { int val = source[i] - 1; Nd4jLong posF[] = {i, val}; auto pos = shape::getOffset(shape, posF); permutation[pos] = F(1.f); } } // ------------------------------------------------------------------------------------------------------------------ // // LUP decomposition runner - using CUBLAS SOLVER // if permutation is given, then using LUP decomposition, LU decomposition otherwise // L - lower triangular, U - upper triangular, P - permutation matricies // PA = LU // // input - A matrix nxn // compound - C matrix L + U - I, or main diagonal and lower - L matrix, from the 2nd diagonal - U matrix template<typename T, typename I> static void lup_(LaunchContext *context, NDArray *input, NDArray *compound, NDArray *permutation) { auto stream = context->getCudaStream(); auto n = input->rows(); hipsolverDnHandle_t cusolverH = nullptr; // create solver handle cusolverStatus_t status = hipsolverDnCreate(&cusolverH); if (CUSOLVER_STATUS_SUCCESS != status) { throw cuda_exception::build("Cannot create cuSolver handle", status); } // set solver stream status = hipsolverDnSetStream(cusolverH, *stream); if (CUSOLVER_STATUS_SUCCESS != status) { throw cuda_exception::build("Cannot set up stream for cuda solver", status); } int lwork = 0; int *d_info = nullptr; // allocate memory for permutation vector auto err = hipMalloc((void **) &d_info, sizeof(int)); if (err) { throw cuda_exception::build("helpers::lup_: Cannot allocate memory for solver info buffer", err); } DataType dtype = input->dataType(); switch (dtype) { // there are two implementations with cublas for LUP decomposition - double and float case DataType::DOUBLE: { double *d_work = nullptr; // compute internal buffer size double *matrix = reinterpret_cast<double *>(input->specialBuffer()); status = hipsolverDnDgetrf_bufferSize( cusolverH, n, n, matrix, n, &lwork); if (CUSOLVER_STATUS_SUCCESS != status) { throw cuda_exception::build("helpers::lup_: Cannot create cuSolver handle", status); } err = hipMalloc((void **) &d_work, sizeof(float) * lwork); if (err) { throw cuda_exception::build("helpers::lup_: Cannot allocate memory for solver data buffer", err); } if (permutation == nullptr) { status = hipsolverDnDgetrf( cusolverH, n, n, matrix, n, d_work, nullptr, d_info); if (status != CUSOLVER_STATUS_SUCCESS) { throw cuda_exception::build("helpers::lup_: LU factorization is failed due ", status); } } else { NDArray permutVector('c', {n}, sd::DataType::INT32, context); int* permutationBuf = permutVector.dataBuffer()->specialAsT<int>(); status = hipsolverDnDgetrf( cusolverH, n, n, matrix, n, d_work, permutationBuf, d_info); if (status != CUSOLVER_STATUS_SUCCESS) { throw cuda_exception::build("helpers::lup_: LU factorization is failed due ", status); } if (permutation->rankOf() == 2) { hipLaunchKernelGGL(( fillUpPermutation<double>) , dim3(n), dim3(n), 1024, *stream , permutation->specialBuffer(), permutation->specialShapeInfo(), permutationBuf, n); } else { permutVector.tickWriteDevice(); input->tickWriteDevice(); compound->assign(input); permutation->assign(permutVector); } } err = hipFree(d_work); if (err) { throw cuda_exception::build("helpers::lup_: Cannot deallocate memory for solver data buffer", err); } } 
break; case DataType::FLOAT32: { float *matrix = reinterpret_cast<float*>(input->specialBuffer()); float *d_work = nullptr; status = hipsolverDnSgetrf_bufferSize( cusolverH, n, n, matrix, n, &lwork); if (CUSOLVER_STATUS_SUCCESS != status) { throw cuda_exception::build("helpers::lup_: Cannot create cuSolver handle", status); } err = hipMalloc((void **) &d_work, sizeof(float) * lwork); if (err) { throw cuda_exception::build("helpers::lup_: Cannot allocate memory for solver data buffer", err); } if (permutation == nullptr) status = hipsolverDnSgetrf( cusolverH, n, n, matrix, n, d_work, nullptr, d_info); else { NDArray permutVector('c', {n}, DataType::INT32, context); int *permutationBuf = reinterpret_cast<int *>(permutVector.specialBuffer()); status = hipsolverDnSgetrf( cusolverH, n, n, matrix, n, d_work, permutationBuf, d_info); if (permutation->rankOf() == 2) { hipLaunchKernelGGL(( fillUpPermutation<I>) , dim3(n), dim3(n), 128, *stream , permutation->specialBuffer(), permutation->specialShapeInfo(), permutationBuf, n); permutation->tickWriteDevice(); } else { input->tickWriteDevice(); compound->assign(input); permutation->assign(permutVector); } } err = hipFree(d_work); if (err) { throw cuda_exception::build("helpers::lup_: Cannot deallocate memory for solver data buffer", err); } } } if (CUSOLVER_STATUS_SUCCESS != status) { throw cuda_exception::build("helpers::lup_: Cannot make LU decomposition", status); } err = hipFree(d_info); if (err) { throw cuda_exception::build("helpers::lup_: Cannot deallocate memory for solver info buffer", err); } hipsolverDnDestroy(cusolverH); // NDArray::registerSpecialUse({input}, {input}); input->tickWriteDevice(); } // ------------------------------------------------------------------------------------------------------------------ // BUILD_DOUBLE_TEMPLATE(template void lup_,(LaunchContext * context, NDArray * input, NDArray * output, NDArray * permutation), FLOAT_NATIVE, INDEXING_TYPES); template <typename T> static __device__ void swapRows(T* matrix, Nd4jLong* shape, Nd4jLong theFirst, Nd4jLong theSecond, Nd4jLong n) { if (theFirst != theSecond) { for (auto i = 0; i < n; i++) { Nd4jLong theFirstPos[] = {theFirst, i}; Nd4jLong theSecondPos[] = {theSecond, i}; auto theFirstIndex = shape::getOffset(shape, theFirstPos, 0); auto theSecondIndex = shape::getOffset(shape, theSecondPos, 0); math::nd4j_swap(matrix[theFirstIndex], matrix[theSecondIndex]); } } } template <typename T> static __device__ void processColumns(Nd4jLong currentRow, Nd4jLong rowNum, T* compoundBuf, Nd4jLong* compoundShape) { Nd4jLong xDiag[] = {currentRow, currentRow}; auto diagIndex = shape::getOffset(compoundShape, xDiag, 0); for (auto j = currentRow + 1; j < rowNum; j++) { Nd4jLong xRow[] = {j, currentRow}; auto rowIndex = shape::getOffset(compoundShape, xRow, 0); compoundBuf[rowIndex] /= compoundBuf[diagIndex]; //output->t<T>(i, i); for (auto k = currentRow + 1; k < rowNum; k++) { Nd4jLong yRow[] = {j, k}; Nd4jLong yCol[] = {currentRow, k}; auto rowIndexY = shape::getOffset(compoundShape, yRow, 0); auto colIndex = shape::getOffset(compoundShape, yCol, 0); compoundBuf[rowIndexY] -= compoundBuf[rowIndex] * compoundBuf[colIndex]; } } } template <typename T> __device__ Nd4jLong argmaxCol(Nd4jLong column, T* compoundBuffer, Nd4jLong* compoundShape) { auto rowNum = shape::sizeAt(compoundShape, 0); Nd4jLong xInitial[] = {column, column}; auto xInitialIndex = shape::getOffset(compoundShape, xInitial, 0); auto maxValue = T(0); //sd::math::nd4j_abs(compoundBuffer[xInitialIndex]); auto result = 
-1LL; for (auto rowCounter = column; rowCounter < rowNum; rowCounter++) { Nd4jLong xPos[] = {rowCounter, column}; auto xIndex = shape::getOffset(compoundShape, xPos, 0); if (sd::math::nd4j_abs(compoundBuffer[xIndex]) > maxValue) { maxValue = sd::math::nd4j_max(maxValue, sd::math::nd4j_abs(compoundBuffer[xIndex])); result = rowCounter; } } return result; } template <typename T, typename I> static __device__ int luNN(T* matrix, Nd4jLong* shape, I* permutation, Nd4jLong* permuShape, Nd4jLong n) { for (auto i = 0; i < n - 1; i++) { auto pivotIndex = argmaxCol(i, matrix, shape); if (pivotIndex < 0) { return -1;//throw std::runtime_error("helpers::luNN_: input matrix is singular."); } math::nd4j_swap(permutation[shape::getIndexOffset(i, permuShape)], permutation[shape::getIndexOffset(pivotIndex, permuShape)]); swapRows(matrix, shape, (Nd4jLong)i, pivotIndex, n); processColumns(i, n, matrix, shape); } return 0; } template <typename T, typename I> static __global__ void luBatchedKernel(T* outputBuf, Nd4jLong* outputShape, I* permutations, Nd4jLong* permuShape, Nd4jLong* outputTadShape, Nd4jLong* outputTadOffsets, Nd4jLong* permuTadShape, Nd4jLong* permuTadOffsets, Nd4jLong batchNum) { auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; for (auto b = start; b < batchNum; b += step) { T* matrix = outputBuf + outputTadOffsets[b]; I* permutation = permutations + permuTadOffsets[b]; if (0 != luNN(matrix, outputTadShape, permutation, permuTadShape, shape::length(permuTadShape))) break; } } template <typename T, typename I> static void lu_(LaunchContext * context, NDArray* input, NDArray* output, NDArray* permutationVectors) { auto n = input->sizeAt(-1); auto stream = context->getCudaStream(); NDArray iota('c', {n}, permutationVectors->dataType(), context);// = NDArrayFactory::create(); // <int>('c', {n}); iota.linspace(0); iota.syncToDevice(); output->assign(input); // fill up output tensor with zeros // output->tickWriteDevice(); permutationVectors->applyTrueBroadcast(sd::BroadcastOpsTuple::Assign(), iota, *permutationVectors, true, nullptr); // permutationVectors->tickWriteDevice(); auto tads = ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), {-2, -1}); auto permutaionTads = ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), {-1}); auto batchNum = tads.numberOfTads(); hipLaunchKernelGGL(( luBatchedKernel<T,I>), dim3(batchNum), dim3(256), 1024, *stream, reinterpret_cast<T*>(output->platformBuffer()), output->specialShapeInfo(), reinterpret_cast<I*>(permutationVectors->platformBuffer()), permutationVectors->specialShapeInfo(), tads.specialShapeInfo(), tads.specialOffsets(), permutaionTads.specialShapeInfo(), permutaionTads.specialOffsets(), batchNum); } void lu(LaunchContext* context, NDArray* input, NDArray* output, NDArray* permutations) { NDArray::prepareSpecialUse({output, permutations}, {input}); BUILD_DOUBLE_SELECTOR(input->dataType(), permutations->dataType(), lu_, (context, input, output, permutations), FLOAT_NATIVE, INDEXING_TYPES); NDArray::registerSpecialUse({output, permutations}, {input}); } // ------------------------------------------------------------------------------------------------------------------ // template<typename T> static int determinant_(sd::LaunchContext *context, NDArray *input, NDArray *output) { Nd4jLong n = input->sizeAt(-1); Nd4jLong n2 = n * n; std::vector<int> dims(); auto packX = ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), {input->rankOf() - 2, 
input->rankOf() - 1}); //auto packZ = ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), {output->rankOf() - 1}); // DataType dtype = input->dataType(); // if (dtype != DataType::DOUBLE) // dtype = DataType::FLOAT32; auto matrix = NDArrayFactory::create(input->ordering(), {n, n}, DataTypeUtils::fromT<T>(), context); //, block.getWorkspace()); auto det = NDArrayFactory::create<T>(1, context); auto stream = context->getCudaStream(); NDArray::prepareSpecialUse({output}, {input}); dim3 launchDims(256, 256, 1024); output->assign(1.f); for (int e = 0; e < output->lengthOf(); e++) { Nd4jLong pos = e * n2; // if (matrix.dataType() == input->dataType()) hipLaunchKernelGGL(( fillMatrix<T, T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, matrix.specialBuffer(), matrix.specialShapeInfo(), input->specialBuffer(), input->specialShapeInfo(), pos, n); // else // fillMatrix<T, float><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(matrix.specialBuffer(), matrix.specialShapeInfo(), input->specialBuffer(), input->specialShapeInfo(), pos, n); lup_<T, int>(context, &matrix, nullptr, nullptr); // else // lup_<float>(context, &matrix, nullptr, nullptr); auto offset = shape::getIndexOffset(e, output->shapeInfo()); auto inputBuf = reinterpret_cast<T *>(matrix.specialBuffer()); auto outputBuf = reinterpret_cast<T *>(output->specialBuffer()) + offset; // if (matrix.dataType() == input->dataType()) determinantKernel<T> << < launchDims.x, launchDims.y, launchDims.z, *stream >> > (inputBuf, outputBuf, n); // else // determinantKernel<T, float><<<launchDims.x, launchDims.y, launchDims.z, *stream >>> (inputBuf, outputBuf, n); } NDArray::registerSpecialUse({output}, {input}); return Status::OK(); } int determinant(sd::LaunchContext *context, NDArray *input, NDArray *output) { NDArray::prepareSpecialUse({output}, {input}); BUILD_SINGLE_SELECTOR(input->dataType(), return determinant_, (context, input, output), FLOAT_NATIVE); NDArray::registerSpecialUse({output}, {input}); } template<typename T> int logAbsDeterminant_(LaunchContext *context, NDArray *input, NDArray *output) { Nd4jLong n = input->sizeAt(-1); Nd4jLong n2 = n * n; std::vector<int> dims(); auto packX = ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), {input->rankOf() - 2, input->rankOf() - 1}); //auto packZ = ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), {output->rankOf() - 1}); DataType dtype = input->dataType(); if (dtype != DataType::DOUBLE) dtype = DataType::FLOAT32; auto matrix = NDArrayFactory::create(input->ordering(), {n, n}, dtype, context); //, block.getWorkspace()); auto det = NDArrayFactory::create<T>(1, context); auto stream = context->getCudaStream(); NDArray::prepareSpecialUse({output}, {input}); dim3 launchDims(256, 256, 1024); output->assign(0.f); for (int e = 0; e < output->lengthOf(); e++) { Nd4jLong pos = e * n2; // if (matrix.dataType() == input->dataType()) hipLaunchKernelGGL(( fillMatrix<T, T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, matrix.specialBuffer(), matrix.specialShapeInfo(), input->specialBuffer(), input->specialShapeInfo(), pos, n); // else // fillMatrix<T, float><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(matrix.specialBuffer(), matrix.specialShapeInfo(), input->specialBuffer(), input->specialShapeInfo(), pos, n); // if (matrix.dataType() == input->dataType()) lup_<T, int>(context, &matrix, nullptr, nullptr); // else // lup_<float>(context, &matrix, nullptr, nullptr); auto offset = 
shape::getIndexOffset(e, output->shapeInfo()); auto inputBuf = reinterpret_cast<T *>(matrix.specialBuffer()); auto outputBuf = reinterpret_cast<T *>(output->specialBuffer()) + offset; // if (matrix.dataType() == input->dataType()) hipLaunchKernelGGL(( determinantLogKernel<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, inputBuf, outputBuf, n); // else // determinantLogKernel<T, float><<<launchDims.x, launchDims.y, launchDims.z, *stream >>> (inputBuf, outputBuf, n); } NDArray::registerSpecialUse({output}, {input}); return Status::OK(); return ND4J_STATUS_OK; } int logAbsDeterminant(sd::LaunchContext *context, NDArray *input, NDArray *output) { NDArray::prepareSpecialUse({output}, {input}); BUILD_SINGLE_SELECTOR(input->dataType(), return logAbsDeterminant_, (context, input, output), FLOAT_NATIVE); NDArray::registerSpecialUse({output}, {input}); } template<typename T> static __global__ void fillLowerUpperKernel(void *lowerBuf, Nd4jLong *lowerShape, void *upperBuf, Nd4jLong *upperShape, void *matrixBuf, Nd4jLong *matrixShape, Nd4jLong n) { __shared__ T *lowerMatrix; __shared__ T *upperMatrix; __shared__ T *matrix; if (threadIdx.x == 0) { lowerMatrix = reinterpret_cast<T *>(lowerBuf); upperMatrix = reinterpret_cast<T *>(upperBuf); matrix = reinterpret_cast<T *>(matrixBuf); } __syncthreads(); for (int k = blockIdx.x; k < n; k += gridDim.x) { // and then put all values under main diagonal on to it for (int j = threadIdx.x; j < n; j += blockDim.x) { Nd4jLong posX[] = {k, j}; Nd4jLong posD[] = {j, j}; auto xPos = shape::getOffset(lowerShape, posX); auto yPos = shape::getOffset(upperShape, posX); auto iPos = shape::getOffset(matrixShape, posX); auto dPos = shape::getOffset(matrixShape, posD); if (k >= j) lowerMatrix[xPos] = matrix[iPos];//(k, j); else upperMatrix[yPos] = matrix[iPos]; //k, j); } } } template<typename T> static int inverse_(sd::LaunchContext *context, NDArray *input, NDArray *output) { auto n = input->sizeAt(-1); auto n2 = n * n; auto dtype = DataTypeUtils::fromT<T>(); //input->dataType(); // if (dtype != DataType::DOUBLE) // dtype = DataType::FLOAT32; NDArray matrix = NDArrayFactory::create('c', {n, n}, dtype, context); NDArray upper = NDArrayFactory::create('c', {n, n}, dtype, context); NDArray lower = NDArrayFactory::create('c', {n, n}, dtype, context); NDArray compound = NDArrayFactory::create('c', {n, n}, dtype, context); NDArray permutation = NDArrayFactory::create('c', {n, n}, dtype, context); auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), {input->rankOf() - 2, input->rankOf() - 1}); auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), {output->rankOf() - 2, output->rankOf() - 1}); auto stream = context->getCudaStream(); for (auto i = 0LL; i < packX.numberOfTads(); i++) { hipLaunchKernelGGL(( fillMatrix<T, T>), dim3(1), dim3(n2), 1024, *stream, matrix.specialBuffer(), matrix.specialShapeInfo(), input->specialBuffer(), input->specialShapeInfo(), i * n2, n); matrix.tickWriteDevice(); //compound.assign(matrix); // if (matrix.dataType() == input->dataType()) lup_<T, int>(context, &matrix, nullptr, nullptr); hipLaunchKernelGGL(( fillLowerUpperKernel<T>), dim3(n), dim3(n), 1024, *stream, lower.specialBuffer(), lower.specialShapeInfo(), upper.specialBuffer(), upper.specialShapeInfo(), matrix.specialBuffer(), matrix.specialShapeInfo(), n); lower.tickWriteDevice(); upper.tickWriteDevice(); // lower.printIndexedBuffer("LOWER"); // upper.printIndexedBuffer("UPPER"); matrix.assign(0); 
invertUpperMatrix(context, &upper, &matrix); // U^{-1} matrix.tickWriteDevice(); // matrix.printIndexedBuffer("Upper Inverted"); compound.assign(0); invertLowerMatrix(context, &lower, &compound); // L{-1} compound.tickWriteDevice(); // compound.printIndexedBuffer("Lower Inverted"); // matrix.tickWriteDevice(); // compound.tickWriteDevice(); sd::MmulHelper::mmul(&matrix, &compound, &upper, 1.0, 0.0); upper.tickWriteDevice(); // upper.printIndexedBuffer("Full inverted"); hipLaunchKernelGGL(( returnMatrix<T>) , dim3(1), dim3(n2), 1024, *stream, output->specialBuffer(), output->specialShapeInfo(), upper.specialBuffer(), upper.specialShapeInfo(), i * n2, n); } return Status::OK(); } int inverse(sd::LaunchContext *context, NDArray *input, NDArray *output) { NDArray::prepareSpecialUse({output}, {input}); BUILD_SINGLE_SELECTOR(input->dataType(), return inverse_, (context, input, output), FLOAT_NATIVE); NDArray::registerSpecialUse({output}, {input}); } bool checkCholeskyInput(sd::LaunchContext *context, NDArray const *input) { return true; } template<typename F> __global__ void fillBatchKernel(F **dArrayBatch, F *buf, Nd4jLong *offsets, Nd4jLong batchSize) { auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; for (auto i = start; i < batchSize; i += step) { dArrayBatch[i] = buf + offsets[i]; } } template<typename F> __global__ void adjustResultsKernel(F *dArray, Nd4jLong *shape, Nd4jLong *offsets, Nd4jLong batchSize, Nd4jLong n) { //auto i = blockIdx.x * blockDim.x + threadIdx.x; Nd4jLong *shapeOf = shape::shapeOf(shape); Nd4jLong *strideOf = shape::stride(shape); for (auto i = blockIdx.x; i < batchSize; i += gridDim.x) { auto current = dArray + offsets[i]; for (auto r = threadIdx.x; r < n; r += blockDim.x) { for (auto c = r + 1; c < n; c++) { Nd4jLong posRC[] = {r, c}; auto pos = r * n + c; //shape::getOffset(0, shapeOf, strideOf, posRC, 2); current[pos] = 0.; } } } } template<typename F> int cholesky__(LaunchContext *context, NDArray *input, NDArray *output, bool inplace) { if (!inplace) output->assign(input); auto tempOutput =output->dup(); hipsolverDnHandle_t handle = nullptr; auto n = input->sizeAt(-1); auto n2 = n * n; NDArray::prepareSpecialUse({output}, {input}); auto status = hipsolverDnCreate(&handle); if (CUSOLVER_STATUS_SUCCESS != status) { throw cuda_exception::build("helpers::cholesky_: Cannot create solver handle", status); } F **dArrayBatch = nullptr; auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(tempOutput.getShapeInfo(), {tempOutput.rankOf() - 2, tempOutput.rankOf() - 1}); const Nd4jLong batchSize = packX.numberOfTads(); int *dInfoArray = nullptr; auto err = hipMalloc((void **) &dArrayBatch, sizeof(F *) * batchSize); if (err) { throw cuda_exception::build("helpers::cholesky_: Cannot allocate memory for solver batch data buffer", err); } err = hipMalloc((void **) &dInfoArray, sizeof(int) * batchSize); if (err) { throw cuda_exception::build("helpers::cholesky_: Cannot allocate memory for solver errors buffer", err); } auto stream = context->getCudaStream(); fillBatchKernel<F> << < 1, batchSize, 128, *stream >> > (dArrayBatch, reinterpret_cast<F *>(tempOutput.specialBuffer()), packX.specialOffsets(), batchSize); status = hipsolverDnSetStream(handle, *stream); if (CUSOLVER_STATUS_SUCCESS != status) { throw cuda_exception::build("helpers::cholesky_: Cannot set stream to solver handle", status); } const hipblasFillMode_t uplo = HIPBLAS_FILL_MODE_UPPER; if (input->dataType() == DataType::DOUBLE) status = hipsolverDnDpotrfBatched( 
handle, uplo, n, (double **) dArrayBatch, n, dInfoArray, batchSize); else status = hipsolverDnSpotrfBatched( handle, uplo, n, (float **) dArrayBatch, n, dInfoArray, batchSize); if (CUSOLVER_STATUS_SUCCESS != status) { throw cuda_exception::build("helpers::cholesky_: Cholesky factorization failed for batch", status); } adjustResultsKernel<F> << < batchSize, n2, 128, *stream >> > (reinterpret_cast<F *>(tempOutput.specialBuffer()), packX.specialShapeInfo(), packX.specialOffsets(), batchSize, n); err = hipFree(dArrayBatch); if (err) { throw cuda_exception::build("helpers::cholesky_: Cannot deallocate memory for solver batch data buffer", err); } err = hipFree(dInfoArray); if (err) { throw cuda_exception::build("helpers::cholesky_: Cannot allocate memory for solver errors buffer", err); } if (!inplace) output->assign(tempOutput); else input->assign(tempOutput); NDArray::registerSpecialUse({output}, {input}); return Status::OK(); } // template <typename T> int cholesky_(LaunchContext *context, NDArray *input, NDArray *output, bool inplace) { NDArray::prepareSpecialUse({output}, {input}); if (input->dataType() == DataType::DOUBLE) cholesky__<double>(context, input, output, inplace); else if (input->dataType() == DataType::FLOAT32) cholesky__<float>(context, input, output, inplace); else { std::unique_ptr<NDArray> tempOutput( NDArrayFactory::create_('c', input->getShapeAsVector(), DataType::FLOAT32, context)); tempOutput->assign(input); cholesky__<float>(context, tempOutput.get(), tempOutput.get(), true); output->assign(tempOutput.get()); } NDArray::registerSpecialUse({output}, {input}); return Status::OK(); } int cholesky(sd::LaunchContext *context, NDArray *input, NDArray *output, bool inplace) { // BUILD_SINGLE_SELECTOR(input->dataType(), return cholesky_, (context, input, output, inplace), FLOAT_TYPES); return cholesky_(context, input, output, inplace); } // BUILD_SINGLE_TEMPLATE(template int cholesky_, (LaunchContext* context, NDArray* input, NDArray* output, bool inplace), FLOAT_TYPES); BUILD_SINGLE_TEMPLATE(template int inverse_, (sd::LaunchContext * context, NDArray * input, NDArray * output), FLOAT_NATIVE); template<typename T> __global__ void logDetKernel(T *inputBuf, Nd4jLong *inputShape, Nd4jLong batchNum, Nd4jLong *tadShape, Nd4jLong *tadOffsets, T *outputBuf, Nd4jLong *outputShape) { __shared__ int n; if (threadIdx.x == 0) { n = shape::sizeAt(inputShape, -1); // * shape::sizeAt(inputShape, -1); } __syncthreads(); T *output = outputBuf; T *input = inputBuf; for (auto i = blockIdx.x; i < batchNum; i += gridDim.x) { T *current = input + tadOffsets[i]; auto zIndex = shape::getIndexOffset(i, outputShape); for (auto e = threadIdx.x; e < n; e += blockDim.x) { Nd4jLong diag[] = {e, e}; auto xIndex = shape::getOffset(tadShape, diag); math::atomics::nd4j_atomicAdd(&output[zIndex],math::nd4j_log<T, T>(current[xIndex] * current[xIndex])); } } } template<typename T> int logdetFunctor_(sd::LaunchContext *context, NDArray *input, NDArray *output) { NDArray::prepareSpecialUse({output}, {input}); auto n2 = input->sizeAt(-1) * input->sizeAt(-2); auto stream = context->getCudaStream(); NDArray tempOutput(*input); cholesky(context, input, &tempOutput, false); auto outputBuf = output->dataBuffer()->specialAsT<T>(); //reinterpret_cast<T*>(output->specialBuffer()); // + e * n2; // + e * n2; auto inputBuf = tempOutput.dataBuffer()->specialAsT<T>(); //reinterpret_cast<T*>(tempOutput.specialBuffer()); output->nullify(); auto packX = 
sd::ConstantTadHelper::getInstance()->tadForDimensions(tempOutput.getShapeInfo(), {tempOutput.rankOf() - 2, tempOutput.rankOf() - 1}); hipLaunchKernelGGL(( logDetKernel<T>) , dim3(128), dim3(512), 256, *stream, inputBuf, tempOutput.specialShapeInfo(), packX.numberOfTads(), packX.specialShapeInfo(), packX.specialOffsets(), outputBuf, output->specialShapeInfo()); output->tickWriteDevice(); NDArray::registerSpecialUse({output}, {input}); return Status::OK(); } int logdetFunctor(sd::LaunchContext *context, NDArray *input, NDArray *output) { BUILD_SINGLE_SELECTOR(output->dataType(), return logdetFunctor_, (context, input, output), FLOAT_NATIVE); } /* * lup - batched input, batched outputs * */ int lup(LaunchContext *context, NDArray *input, NDArray *compound, NDArray *permutation) { BUILD_DOUBLE_SELECTOR(input->dataType(), permutation->dataType(), lup_,(context, input, compound, permutation), FLOAT_NATIVE, INDEXING_TYPES); return Status::OK(); } // BUILD_SINGLE_TEMPLATE(template int logdetFunctor_, // (sd::LaunchContext * context, NDArray * input, NDArray * output), FLOAT_NATIVE); } } }
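A minimal standalone sketch of the determinant path used above (getrf to obtain the packed LU factor, then a fold over its diagonal). Everything in the snippet is an illustration-only assumption: the name detViaGetrf, the column-major layout, the missing status/devInfo checks, and the explicit pivot-sign correction are choices made for this example and are not taken from the library code.

#include <cuda_runtime.h>
#include <cusolverDn.h>
#include <vector>

// det(A) for a dense n x n matrix via LU with partial pivoting:
// det(A) = sign(P) * prod(diag(U)), where getrf returns L\U packed in A and 1-based pivots.
static double detViaGetrf(const std::vector<double>& hostA, int n) {
    cusolverDnHandle_t handle;
    cusolverDnCreate(&handle);

    double *dA = nullptr, *dWork = nullptr;
    int *dPiv = nullptr, *dInfo = nullptr, lwork = 0;
    cudaMalloc(&dA, sizeof(double) * n * n);
    cudaMalloc(&dPiv, sizeof(int) * n);
    cudaMalloc(&dInfo, sizeof(int));
    cudaMemcpy(dA, hostA.data(), sizeof(double) * n * n, cudaMemcpyHostToDevice);

    cusolverDnDgetrf_bufferSize(handle, n, n, dA, n, &lwork);
    cudaMalloc(&dWork, sizeof(double) * lwork);
    cusolverDnDgetrf(handle, n, n, dA, n, dWork, dPiv, dInfo);   // dInfo not inspected in this sketch

    std::vector<double> lu(n * n);
    std::vector<int> piv(n);
    cudaMemcpy(lu.data(), dA, sizeof(double) * n * n, cudaMemcpyDeviceToHost);
    cudaMemcpy(piv.data(), dPiv, sizeof(int) * n, cudaMemcpyDeviceToHost);

    double det = 1.0;
    for (int i = 0; i < n; i++) {
        det *= lu[i * n + i];              // diagonal of U inside the packed LU factor
        if (piv[i] != i + 1) det = -det;   // every recorded row swap flips the sign
    }

    cudaFree(dA); cudaFree(dPiv); cudaFree(dInfo); cudaFree(dWork);
    cusolverDnDestroy(handle);
    return det;
}

The sign bookkeeping is what ties the diagonal product back to det(A) when pivoting is enabled; the helpers above track the permutation separately (when one is requested) rather than folding its sign into the scalar result.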
a9f8122e002d1b3b436c4db3a4a3db85346245d7.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <ops/declarable/helpers/top_k.h> #include <helpers/MmulHelper.h> #include <array/NDArrayFactory.h> #include <graph/Status.h> #include <helpers/ConstantTadHelper.h> #include <helpers/ShapeUtils.h> //#include <ops/declarable/generic/helpers/BroadcastHelper.h> #include <cusolverDn.h> #include <exceptions/cuda_exception.h> namespace sd { namespace ops { namespace helpers { // ------------------------------------------------------------------------------------------------------------------ // // invert the second diagonal for lower diagonal matrix template<typename T> static __global__ void invertKernelLow(void *invertedBuf, Nd4jLong *invertedShape, void *inputBuf, Nd4jLong *inputShape, Nd4jLong n) { T* inverted = reinterpret_cast<T *>(invertedBuf); T* input = reinterpret_cast<T*>(inputBuf); auto start = threadIdx.x + blockIdx.x * blockDim.x; auto step = blockDim.x * gridDim.x; for (int i = start + 1; i < n; i += step) { Nd4jLong pos[] = {i, i - 1}; Nd4jLong posX[] = {i, i}; Nd4jLong posY[] = {i - 1, i - 1}; auto xIndex = shape::getOffset(inputShape, pos); auto dxIndex = shape::getOffset(inputShape, posX); auto dyIndex = shape::getOffset(inputShape, posY); auto zIndex = shape::getOffset(invertedShape, pos); // invert lower triangular matrix inverted[zIndex] = -input[xIndex] / (input[dxIndex] * input[dyIndex]); // math::atomics::nd4j_atomicAdd(&inverted[zIndex], - input[xIndex] * inverted[iIndex] / input[dIndex]); } } // ------------------------------------------------------------------------------------------------------------------ // // invert diagonal vals to upper diagonal matrix template<typename T> static __global__ void upvertKernel(void *invertedBuf, Nd4jLong *invertedShape, void *inputBuf, Nd4jLong *inputShape, Nd4jLong n) { T *inverted = reinterpret_cast<T *>(invertedBuf); T *input = reinterpret_cast<T *>(inputBuf); auto start = threadIdx.x + blockIdx.x * blockDim.x; auto step = blockDim.x * gridDim.x; for (int i = start; i < n; i += step) { Nd4jLong pos[] = {i, i}; auto xIndex = shape::getOffset(inputShape, pos); auto zIndex = shape::getOffset(invertedShape, pos); // math::atomics::nd4j_atomicDiv(&inverted[zIndex], input[xIndex]); // invert diagonal elements inverted[zIndex] /= input[xIndex]; } } // ------------------------------------------------------------------------------------------------------------------ // // invert upper second diagonal template<typename T> static __global__ void upvertKernelUp(void *invertedBuf, Nd4jLong *invertedShape, void *inputBuf, Nd4jLong *inputShape, Nd4jLong n) { __shared__ T* inverted; __shared__ T* input; if (threadIdx.x == 0) { inverted = reinterpret_cast<T *>(invertedBuf); input = reinterpret_cast<T *>(inputBuf); } __syncthreads(); auto start = threadIdx.x + 
blockIdx.x * blockDim.x; auto step = blockDim.x * gridDim.x; for (int i = start; i < n - 1; i += step) { Nd4jLong pos[] = {i, i + 1}; Nd4jLong posX[] = {i + 1, i + 1}; auto xIndex = shape::getOffset(inputShape, pos); auto iIndex = shape::getOffset(invertedShape, posX); auto zIndex = shape::getOffset(invertedShape, pos); // invert upper matrix math::atomics::nd4j_atomicAdd(&inverted[zIndex], -input[xIndex] * inverted[iIndex]); // / input[yIndex]); //inputMatrix->t<T>(i, i + 1) * invertedMatrix->t<T>(i + 1, i + 1) / inputMatrix->t<T>(i, i) } } // ------------------------------------------------------------------------------------------------------------------ // template<typename T> static __global__ void invertLowKernel(void *invertedBuf, Nd4jLong *invertedShape, void *inputBuf, Nd4jLong *inputShape, Nd4jLong n) { T *inverted = reinterpret_cast<T *>(invertedBuf); T *input = reinterpret_cast<T *>(inputBuf); if (threadIdx.x == 0) { inverted = reinterpret_cast<T *>(invertedBuf); input = reinterpret_cast<T *>(inputBuf); } __syncthreads(); auto tid = blockIdx.x * blockDim.x + threadIdx.x; auto step = gridDim.x * blockDim.x; for (int i = tid + 2; i < n; i += step) { for (int j = i - 2; j >= 0; --j) for (int k = 0; k < i; k++) { Nd4jLong posZ[] = {i, j}; Nd4jLong posY[] = {k, j}; Nd4jLong posX[] = {i, k}; Nd4jLong posD[] = {i, i}; auto xIndex = shape::getOffset(inputShape, posX); auto yIndex = shape::getOffset(invertedShape, posY); auto dIndex = shape::getOffset(inputShape, posD); auto zIndex = shape::getOffset(invertedShape, posZ); // invert non-diagonal elements math::atomics::nd4j_atomicAdd(&inverted[zIndex], -inverted[yIndex] * input[xIndex] / input[dIndex]); } } } // ------------------------------------------------------------------------------------------------------------------ // // Invertion of upper triangular matrix non-diagonal elements when main and second diagonals already processed template<typename T> static __global__ void invertUpKernel(void *invertedBuf, Nd4jLong *invertedShape, void *inputBuf, Nd4jLong *inputShape, Nd4jLong n) { __shared__ T* inverted; __shared__ T* input; if (threadIdx.x == 0) { inverted = reinterpret_cast<T *>(invertedBuf);; input = reinterpret_cast<T *>(inputBuf); } __syncthreads(); auto tid = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; for (int i = (int)n - tid - 2; i >= 0; i -= step) { for (int j = i + 2; j < (int)n; j++) for (int k = i; k < (int)n; k++) { Nd4jLong posZ[] = {i, j}; Nd4jLong posY[] = {k, j}; Nd4jLong posX[] = {i, k}; // inversion with Joardan Gauss transformation auto xIndex = shape::getOffset(inputShape, posX); auto yIndex = shape::getOffset(invertedShape, posY); auto zIndex = shape::getOffset(invertedShape, posZ); // invert upper non-diagonal elements math::atomics::nd4j_atomicAdd(&inverted[zIndex], -inverted[yIndex] * input[xIndex]); } } } // ------------------------------------------------------------------------------------------------------------------ // // procedure to invert lower-triangular matrix. 
// In current case lower triangular matrix has main diagonal with general values // template<typename T> static void invertLowerMatrix_(LaunchContext *context, NDArray *inputMatrix, NDArray *invertedMatrix) { int n = inputMatrix->rows(); invertedMatrix->setIdentity(); if (inputMatrix->isIdentityMatrix()) return; auto stream = context->getCudaStream(); // invert lower matrix // invert main diagonal upvertKernel<T><<<1, n, 512, *stream>>>(invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(), inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n); // invert the second diagonal invertKernelLow<T><<<1, n, 512, *stream>>>(invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(), inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n); // invert non-diagonal elements invertLowKernel<T><<<n, n, 512, *stream>>>(invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(), inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n); } // ------------------------------------------------------------------------------------------------------------------ // // caller for invert lower matrix routine void invertLowerMatrix(LaunchContext *context, NDArray *inputMatrix, NDArray *invertedMatrix) { NDArray::prepareSpecialUse({invertedMatrix}, {inputMatrix}); BUILD_SINGLE_SELECTOR(inputMatrix->dataType(), invertLowerMatrix_, (context, inputMatrix, invertedMatrix), FLOAT_NATIVE); NDArray::registerSpecialUse({invertedMatrix}, {inputMatrix}); } // ------------------------------------------------------------------------------------------------------------------ // // procedure to invert upper-triangular matrix. // In current case upper triangular matrix has main diagonal with all ones on it. template<typename T> static void invertUpperMatrix_(LaunchContext *context, NDArray* inputMatrix, NDArray* invertedMatrix) { int n = inputMatrix->rows(); invertedMatrix->setIdentity(); auto stream = context->getCudaStream(); if (inputMatrix->isIdentityMatrix()) { // the inverse for I is I return; } // invert upper matrix // invert the second diagonal upvertKernelUp<T><<<1, n, 512, *stream >>>(invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(), inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n); // invert other elements invertUpKernel<T><<<n, n, 512, *stream >>>(invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(),inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n); } // ------------------------------------------------------------------------------------------------------------------ // // invertion of upper triangular matrix - runner routine void invertUpperMatrix(LaunchContext *context, NDArray *inputMatrix, NDArray *invertedMatrix) { NDArray::prepareSpecialUse({invertedMatrix}, {inputMatrix}); BUILD_SINGLE_SELECTOR(invertedMatrix->dataType(), invertUpperMatrix_, (context, inputMatrix, invertedMatrix), FLOAT_NATIVE); NDArray::prepareSpecialUse({invertedMatrix}, {inputMatrix}); } // ------------------------------------------------------------------------------------------------------------------ // // determinant kernel - accumulation product of all values on the main diagonal template<typename T> static __global__ void determinantKernel(T *compound, T *result, Nd4jLong len) { auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; for (auto i = start; i < len; i += step) { auto pos = i * len + i; //shape::getOffset(0, shape::shapeOf(shape), shape::stride(shape), di, 2); // 
multiply all diagonal elements math::atomics::nd4j_atomicMul(&result[0], compound[pos]); } } // ------------------------------------------------------------------------------------------------------------------ // // determinant logarithm - accumulation sum of all logarithm values on the main diagonal. All in logarithic values // should be positive template<typename T> static __global__ void determinantLogKernel(T *compound, T *result, Nd4jLong len) { auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; for (auto i = start; i < len; i += step) { auto pos = i * len + i; //shape::getOffset(0, shape::shapeOf(shape), shape::stride(shape), di, 2); // sum logs of all diagonal elements math::atomics::nd4j_atomicAdd(result, math::nd4j_log<T,T>(math::nd4j_abs(compound[pos]))); } } // ------------------------------------------------------------------------------------------------------------------ // // kernel to copy matrix with given shape to compound tensor with given pos // output - a N-D tensor buffer with rank not less than 2, input - 2D square n x n matrix with n = rowLen template<typename T, typename F> static __global__ void fillMatrix(void *output, Nd4jLong *outShape, void *input, Nd4jLong *inputShape, Nd4jLong pos, Nd4jLong rowLen) { __shared__ F *matrix; __shared__ T *inputBuf; __shared__ Nd4jLong inputLen; __shared__ Nd4jLong n2; if (threadIdx.x == 0) { matrix = reinterpret_cast<F*>(output); inputBuf = reinterpret_cast<T*>(input); inputLen = shape::length(inputShape); n2 = rowLen * rowLen; } __syncthreads(); auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; for (int k = pos + start, j = start; j < n2; k += step, j += step) { auto xIndex = shape::getIndexOffset(k, inputShape); matrix[j] = (F) inputBuf[xIndex]; } } // ------------------------------------------------------------------------------------------------------------------ // // same as above, but without type conversion template<typename T> static __global__ void returnMatrix(void *output, Nd4jLong *outputShape, void *input, Nd4jLong *inputShape, Nd4jLong pos, Nd4jLong rowLen) { __shared__ T* matrix; __shared__ T* outputBuf; __shared__ Nd4jLong outputLen; __shared__ Nd4jLong n2; if (threadIdx.x == 0) { matrix = reinterpret_cast<T *>(input); outputBuf = reinterpret_cast<T *>(output); outputLen = shape::length(inputShape); n2 = rowLen * rowLen; } __syncthreads(); auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; for (int k = pos + start, j = start; j < n2; k += step, j += step) { auto zIndex = shape::getIndexOffset(k, outputShape); outputBuf[zIndex] = matrix[j]; } } // ------------------------------------------------------------------------------------------------------------------ // // fill up permutaion matrix kernel. 
Permutation matrix filled with zeros and ones template<typename F> static __global__ void fillUpPermutation(void *output, Nd4jLong *shape, int *source, int rowNum) { F *permutation = reinterpret_cast<F *>(output); auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; for (auto i = start; i < rowNum; i += step) { int val = source[i] - 1; Nd4jLong posF[] = {i, val}; auto pos = shape::getOffset(shape, posF); permutation[pos] = F(1.f); } } // ------------------------------------------------------------------------------------------------------------------ // // LUP decomposition runner - using CUBLAS SOLVER // if permutation is given, then using LUP decomposition, LU decomposition otherwise // L - lower triangular, U - upper triangular, P - permutation matricies // PA = LU // // input - A matrix nxn // compound - C matrix L + U - I, or main diagonal and lower - L matrix, from the 2nd diagonal - U matrix template<typename T, typename I> static void lup_(LaunchContext *context, NDArray *input, NDArray *compound, NDArray *permutation) { auto stream = context->getCudaStream(); auto n = input->rows(); cusolverDnHandle_t cusolverH = nullptr; // create solver handle cusolverStatus_t status = cusolverDnCreate(&cusolverH); if (CUSOLVER_STATUS_SUCCESS != status) { throw cuda_exception::build("Cannot create cuSolver handle", status); } // set solver stream status = cusolverDnSetStream(cusolverH, *stream); if (CUSOLVER_STATUS_SUCCESS != status) { throw cuda_exception::build("Cannot set up stream for cuda solver", status); } int lwork = 0; int *d_info = nullptr; // allocate memory for permutation vector auto err = cudaMalloc((void **) &d_info, sizeof(int)); if (err) { throw cuda_exception::build("helpers::lup_: Cannot allocate memory for solver info buffer", err); } DataType dtype = input->dataType(); switch (dtype) { // there are two implementations with cublas for LUP decomposition - double and float case DataType::DOUBLE: { double *d_work = nullptr; // compute internal buffer size double *matrix = reinterpret_cast<double *>(input->specialBuffer()); status = cusolverDnDgetrf_bufferSize( cusolverH, n, n, matrix, n, &lwork); if (CUSOLVER_STATUS_SUCCESS != status) { throw cuda_exception::build("helpers::lup_: Cannot create cuSolver handle", status); } err = cudaMalloc((void **) &d_work, sizeof(float) * lwork); if (err) { throw cuda_exception::build("helpers::lup_: Cannot allocate memory for solver data buffer", err); } if (permutation == nullptr) { status = cusolverDnDgetrf( cusolverH, n, n, matrix, n, d_work, nullptr, d_info); if (status != CUSOLVER_STATUS_SUCCESS) { throw cuda_exception::build("helpers::lup_: LU factorization is failed due ", status); } } else { NDArray permutVector('c', {n}, sd::DataType::INT32, context); int* permutationBuf = permutVector.dataBuffer()->specialAsT<int>(); status = cusolverDnDgetrf( cusolverH, n, n, matrix, n, d_work, permutationBuf, d_info); if (status != CUSOLVER_STATUS_SUCCESS) { throw cuda_exception::build("helpers::lup_: LU factorization is failed due ", status); } if (permutation->rankOf() == 2) { fillUpPermutation<double> <<< n, n, 1024, *stream >>> (permutation->specialBuffer(), permutation->specialShapeInfo(), permutationBuf, n); } else { permutVector.tickWriteDevice(); input->tickWriteDevice(); compound->assign(input); permutation->assign(permutVector); } } err = cudaFree(d_work); if (err) { throw cuda_exception::build("helpers::lup_: Cannot deallocate memory for solver data buffer", err); } } break; case DataType::FLOAT32: { 
float *matrix = reinterpret_cast<float*>(input->specialBuffer()); float *d_work = nullptr; status = cusolverDnSgetrf_bufferSize( cusolverH, n, n, matrix, n, &lwork); if (CUSOLVER_STATUS_SUCCESS != status) { throw cuda_exception::build("helpers::lup_: Cannot create cuSolver handle", status); } err = cudaMalloc((void **) &d_work, sizeof(float) * lwork); if (err) { throw cuda_exception::build("helpers::lup_: Cannot allocate memory for solver data buffer", err); } if (permutation == nullptr) status = cusolverDnSgetrf( cusolverH, n, n, matrix, n, d_work, nullptr, d_info); else { NDArray permutVector('c', {n}, DataType::INT32, context); int *permutationBuf = reinterpret_cast<int *>(permutVector.specialBuffer()); status = cusolverDnSgetrf( cusolverH, n, n, matrix, n, d_work, permutationBuf, d_info); if (permutation->rankOf() == 2) { fillUpPermutation<I> <<< n, n, 128, *stream >>> (permutation->specialBuffer(), permutation->specialShapeInfo(), permutationBuf, n); permutation->tickWriteDevice(); } else { input->tickWriteDevice(); compound->assign(input); permutation->assign(permutVector); } } err = cudaFree(d_work); if (err) { throw cuda_exception::build("helpers::lup_: Cannot deallocate memory for solver data buffer", err); } } } if (CUSOLVER_STATUS_SUCCESS != status) { throw cuda_exception::build("helpers::lup_: Cannot make LU decomposition", status); } err = cudaFree(d_info); if (err) { throw cuda_exception::build("helpers::lup_: Cannot deallocate memory for solver info buffer", err); } cusolverDnDestroy(cusolverH); // NDArray::registerSpecialUse({input}, {input}); input->tickWriteDevice(); } // ------------------------------------------------------------------------------------------------------------------ // BUILD_DOUBLE_TEMPLATE(template void lup_,(LaunchContext * context, NDArray * input, NDArray * output, NDArray * permutation), FLOAT_NATIVE, INDEXING_TYPES); template <typename T> static __device__ void swapRows(T* matrix, Nd4jLong* shape, Nd4jLong theFirst, Nd4jLong theSecond, Nd4jLong n) { if (theFirst != theSecond) { for (auto i = 0; i < n; i++) { Nd4jLong theFirstPos[] = {theFirst, i}; Nd4jLong theSecondPos[] = {theSecond, i}; auto theFirstIndex = shape::getOffset(shape, theFirstPos, 0); auto theSecondIndex = shape::getOffset(shape, theSecondPos, 0); math::nd4j_swap(matrix[theFirstIndex], matrix[theSecondIndex]); } } } template <typename T> static __device__ void processColumns(Nd4jLong currentRow, Nd4jLong rowNum, T* compoundBuf, Nd4jLong* compoundShape) { Nd4jLong xDiag[] = {currentRow, currentRow}; auto diagIndex = shape::getOffset(compoundShape, xDiag, 0); for (auto j = currentRow + 1; j < rowNum; j++) { Nd4jLong xRow[] = {j, currentRow}; auto rowIndex = shape::getOffset(compoundShape, xRow, 0); compoundBuf[rowIndex] /= compoundBuf[diagIndex]; //output->t<T>(i, i); for (auto k = currentRow + 1; k < rowNum; k++) { Nd4jLong yRow[] = {j, k}; Nd4jLong yCol[] = {currentRow, k}; auto rowIndexY = shape::getOffset(compoundShape, yRow, 0); auto colIndex = shape::getOffset(compoundShape, yCol, 0); compoundBuf[rowIndexY] -= compoundBuf[rowIndex] * compoundBuf[colIndex]; } } } template <typename T> __device__ Nd4jLong argmaxCol(Nd4jLong column, T* compoundBuffer, Nd4jLong* compoundShape) { auto rowNum = shape::sizeAt(compoundShape, 0); Nd4jLong xInitial[] = {column, column}; auto xInitialIndex = shape::getOffset(compoundShape, xInitial, 0); auto maxValue = T(0); //sd::math::nd4j_abs(compoundBuffer[xInitialIndex]); auto result = -1LL; for (auto rowCounter = column; rowCounter < rowNum; 
rowCounter++) { Nd4jLong xPos[] = {rowCounter, column}; auto xIndex = shape::getOffset(compoundShape, xPos, 0); if (sd::math::nd4j_abs(compoundBuffer[xIndex]) > maxValue) { maxValue = sd::math::nd4j_max(maxValue, sd::math::nd4j_abs(compoundBuffer[xIndex])); result = rowCounter; } } return result; } template <typename T, typename I> static __device__ int luNN(T* matrix, Nd4jLong* shape, I* permutation, Nd4jLong* permuShape, Nd4jLong n) { for (auto i = 0; i < n - 1; i++) { auto pivotIndex = argmaxCol(i, matrix, shape); if (pivotIndex < 0) { return -1;//throw std::runtime_error("helpers::luNN_: input matrix is singular."); } math::nd4j_swap(permutation[shape::getIndexOffset(i, permuShape)], permutation[shape::getIndexOffset(pivotIndex, permuShape)]); swapRows(matrix, shape, (Nd4jLong)i, pivotIndex, n); processColumns(i, n, matrix, shape); } return 0; } template <typename T, typename I> static __global__ void luBatchedKernel(T* outputBuf, Nd4jLong* outputShape, I* permutations, Nd4jLong* permuShape, Nd4jLong* outputTadShape, Nd4jLong* outputTadOffsets, Nd4jLong* permuTadShape, Nd4jLong* permuTadOffsets, Nd4jLong batchNum) { auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; for (auto b = start; b < batchNum; b += step) { T* matrix = outputBuf + outputTadOffsets[b]; I* permutation = permutations + permuTadOffsets[b]; if (0 != luNN(matrix, outputTadShape, permutation, permuTadShape, shape::length(permuTadShape))) break; } } template <typename T, typename I> static void lu_(LaunchContext * context, NDArray* input, NDArray* output, NDArray* permutationVectors) { auto n = input->sizeAt(-1); auto stream = context->getCudaStream(); NDArray iota('c', {n}, permutationVectors->dataType(), context);// = NDArrayFactory::create(); // <int>('c', {n}); iota.linspace(0); iota.syncToDevice(); output->assign(input); // fill up output tensor with zeros // output->tickWriteDevice(); permutationVectors->applyTrueBroadcast(sd::BroadcastOpsTuple::Assign(), iota, *permutationVectors, true, nullptr); // permutationVectors->tickWriteDevice(); auto tads = ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), {-2, -1}); auto permutaionTads = ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), {-1}); auto batchNum = tads.numberOfTads(); luBatchedKernel<T,I><<<batchNum, 256, 1024, *stream>>>(reinterpret_cast<T*>(output->platformBuffer()), output->specialShapeInfo(), reinterpret_cast<I*>(permutationVectors->platformBuffer()), permutationVectors->specialShapeInfo(), tads.specialShapeInfo(), tads.specialOffsets(), permutaionTads.specialShapeInfo(), permutaionTads.specialOffsets(), batchNum); } void lu(LaunchContext* context, NDArray* input, NDArray* output, NDArray* permutations) { NDArray::prepareSpecialUse({output, permutations}, {input}); BUILD_DOUBLE_SELECTOR(input->dataType(), permutations->dataType(), lu_, (context, input, output, permutations), FLOAT_NATIVE, INDEXING_TYPES); NDArray::registerSpecialUse({output, permutations}, {input}); } // ------------------------------------------------------------------------------------------------------------------ // template<typename T> static int determinant_(sd::LaunchContext *context, NDArray *input, NDArray *output) { Nd4jLong n = input->sizeAt(-1); Nd4jLong n2 = n * n; std::vector<int> dims(); auto packX = ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), {input->rankOf() - 2, input->rankOf() - 1}); //auto packZ = 
ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), {output->rankOf() - 1}); // DataType dtype = input->dataType(); // if (dtype != DataType::DOUBLE) // dtype = DataType::FLOAT32; auto matrix = NDArrayFactory::create(input->ordering(), {n, n}, DataTypeUtils::fromT<T>(), context); //, block.getWorkspace()); auto det = NDArrayFactory::create<T>(1, context); auto stream = context->getCudaStream(); NDArray::prepareSpecialUse({output}, {input}); dim3 launchDims(256, 256, 1024); output->assign(1.f); for (int e = 0; e < output->lengthOf(); e++) { Nd4jLong pos = e * n2; // if (matrix.dataType() == input->dataType()) fillMatrix<T, T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(matrix.specialBuffer(), matrix.specialShapeInfo(), input->specialBuffer(), input->specialShapeInfo(), pos, n); // else // fillMatrix<T, float><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(matrix.specialBuffer(), matrix.specialShapeInfo(), input->specialBuffer(), input->specialShapeInfo(), pos, n); lup_<T, int>(context, &matrix, nullptr, nullptr); // else // lup_<float>(context, &matrix, nullptr, nullptr); auto offset = shape::getIndexOffset(e, output->shapeInfo()); auto inputBuf = reinterpret_cast<T *>(matrix.specialBuffer()); auto outputBuf = reinterpret_cast<T *>(output->specialBuffer()) + offset; // if (matrix.dataType() == input->dataType()) determinantKernel<T> << < launchDims.x, launchDims.y, launchDims.z, *stream >> > (inputBuf, outputBuf, n); // else // determinantKernel<T, float><<<launchDims.x, launchDims.y, launchDims.z, *stream >>> (inputBuf, outputBuf, n); } NDArray::registerSpecialUse({output}, {input}); return Status::OK(); } int determinant(sd::LaunchContext *context, NDArray *input, NDArray *output) { NDArray::prepareSpecialUse({output}, {input}); BUILD_SINGLE_SELECTOR(input->dataType(), return determinant_, (context, input, output), FLOAT_NATIVE); NDArray::registerSpecialUse({output}, {input}); } template<typename T> int logAbsDeterminant_(LaunchContext *context, NDArray *input, NDArray *output) { Nd4jLong n = input->sizeAt(-1); Nd4jLong n2 = n * n; std::vector<int> dims(); auto packX = ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), {input->rankOf() - 2, input->rankOf() - 1}); //auto packZ = ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), {output->rankOf() - 1}); DataType dtype = input->dataType(); if (dtype != DataType::DOUBLE) dtype = DataType::FLOAT32; auto matrix = NDArrayFactory::create(input->ordering(), {n, n}, dtype, context); //, block.getWorkspace()); auto det = NDArrayFactory::create<T>(1, context); auto stream = context->getCudaStream(); NDArray::prepareSpecialUse({output}, {input}); dim3 launchDims(256, 256, 1024); output->assign(0.f); for (int e = 0; e < output->lengthOf(); e++) { Nd4jLong pos = e * n2; // if (matrix.dataType() == input->dataType()) fillMatrix<T, T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(matrix.specialBuffer(), matrix.specialShapeInfo(), input->specialBuffer(), input->specialShapeInfo(), pos, n); // else // fillMatrix<T, float><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(matrix.specialBuffer(), matrix.specialShapeInfo(), input->specialBuffer(), input->specialShapeInfo(), pos, n); // if (matrix.dataType() == input->dataType()) lup_<T, int>(context, &matrix, nullptr, nullptr); // else // lup_<float>(context, &matrix, nullptr, nullptr); auto offset = shape::getIndexOffset(e, output->shapeInfo()); auto inputBuf = reinterpret_cast<T *>(matrix.specialBuffer()); auto 
outputBuf = reinterpret_cast<T *>(output->specialBuffer()) + offset; // if (matrix.dataType() == input->dataType()) determinantLogKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(inputBuf, outputBuf, n); // else // determinantLogKernel<T, float><<<launchDims.x, launchDims.y, launchDims.z, *stream >>> (inputBuf, outputBuf, n); } NDArray::registerSpecialUse({output}, {input}); return Status::OK(); return ND4J_STATUS_OK; } int logAbsDeterminant(sd::LaunchContext *context, NDArray *input, NDArray *output) { NDArray::prepareSpecialUse({output}, {input}); BUILD_SINGLE_SELECTOR(input->dataType(), return logAbsDeterminant_, (context, input, output), FLOAT_NATIVE); NDArray::registerSpecialUse({output}, {input}); } template<typename T> static __global__ void fillLowerUpperKernel(void *lowerBuf, Nd4jLong *lowerShape, void *upperBuf, Nd4jLong *upperShape, void *matrixBuf, Nd4jLong *matrixShape, Nd4jLong n) { __shared__ T *lowerMatrix; __shared__ T *upperMatrix; __shared__ T *matrix; if (threadIdx.x == 0) { lowerMatrix = reinterpret_cast<T *>(lowerBuf); upperMatrix = reinterpret_cast<T *>(upperBuf); matrix = reinterpret_cast<T *>(matrixBuf); } __syncthreads(); for (int k = blockIdx.x; k < n; k += gridDim.x) { // and then put all values under main diagonal on to it for (int j = threadIdx.x; j < n; j += blockDim.x) { Nd4jLong posX[] = {k, j}; Nd4jLong posD[] = {j, j}; auto xPos = shape::getOffset(lowerShape, posX); auto yPos = shape::getOffset(upperShape, posX); auto iPos = shape::getOffset(matrixShape, posX); auto dPos = shape::getOffset(matrixShape, posD); if (k >= j) lowerMatrix[xPos] = matrix[iPos];//(k, j); else upperMatrix[yPos] = matrix[iPos]; //k, j); } } } template<typename T> static int inverse_(sd::LaunchContext *context, NDArray *input, NDArray *output) { auto n = input->sizeAt(-1); auto n2 = n * n; auto dtype = DataTypeUtils::fromT<T>(); //input->dataType(); // if (dtype != DataType::DOUBLE) // dtype = DataType::FLOAT32; NDArray matrix = NDArrayFactory::create('c', {n, n}, dtype, context); NDArray upper = NDArrayFactory::create('c', {n, n}, dtype, context); NDArray lower = NDArrayFactory::create('c', {n, n}, dtype, context); NDArray compound = NDArrayFactory::create('c', {n, n}, dtype, context); NDArray permutation = NDArrayFactory::create('c', {n, n}, dtype, context); auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), {input->rankOf() - 2, input->rankOf() - 1}); auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), {output->rankOf() - 2, output->rankOf() - 1}); auto stream = context->getCudaStream(); for (auto i = 0LL; i < packX.numberOfTads(); i++) { fillMatrix<T, T><<<1, n2, 1024, *stream>>>(matrix.specialBuffer(), matrix.specialShapeInfo(), input->specialBuffer(), input->specialShapeInfo(), i * n2, n); matrix.tickWriteDevice(); //compound.assign(matrix); // if (matrix.dataType() == input->dataType()) lup_<T, int>(context, &matrix, nullptr, nullptr); fillLowerUpperKernel<T><<<n, n, 1024, *stream>>>(lower.specialBuffer(), lower.specialShapeInfo(), upper.specialBuffer(), upper.specialShapeInfo(), matrix.specialBuffer(), matrix.specialShapeInfo(), n); lower.tickWriteDevice(); upper.tickWriteDevice(); // lower.printIndexedBuffer("LOWER"); // upper.printIndexedBuffer("UPPER"); matrix.assign(0); invertUpperMatrix(context, &upper, &matrix); // U^{-1} matrix.tickWriteDevice(); // matrix.printIndexedBuffer("Upper Inverted"); compound.assign(0); invertLowerMatrix(context, &lower, &compound); // L{-1} 
compound.tickWriteDevice(); // compound.printIndexedBuffer("Lower Inverted"); // matrix.tickWriteDevice(); // compound.tickWriteDevice(); sd::MmulHelper::mmul(&matrix, &compound, &upper, 1.0, 0.0); upper.tickWriteDevice(); // upper.printIndexedBuffer("Full inverted"); returnMatrix<T> <<<1, n2, 1024, *stream>>>(output->specialBuffer(), output->specialShapeInfo(), upper.specialBuffer(), upper.specialShapeInfo(), i * n2, n); } return Status::OK(); } int inverse(sd::LaunchContext *context, NDArray *input, NDArray *output) { NDArray::prepareSpecialUse({output}, {input}); BUILD_SINGLE_SELECTOR(input->dataType(), return inverse_, (context, input, output), FLOAT_NATIVE); NDArray::registerSpecialUse({output}, {input}); } bool checkCholeskyInput(sd::LaunchContext *context, NDArray const *input) { return true; } template<typename F> __global__ void fillBatchKernel(F **dArrayBatch, F *buf, Nd4jLong *offsets, Nd4jLong batchSize) { auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; for (auto i = start; i < batchSize; i += step) { dArrayBatch[i] = buf + offsets[i]; } } template<typename F> __global__ void adjustResultsKernel(F *dArray, Nd4jLong *shape, Nd4jLong *offsets, Nd4jLong batchSize, Nd4jLong n) { //auto i = blockIdx.x * blockDim.x + threadIdx.x; Nd4jLong *shapeOf = shape::shapeOf(shape); Nd4jLong *strideOf = shape::stride(shape); for (auto i = blockIdx.x; i < batchSize; i += gridDim.x) { auto current = dArray + offsets[i]; for (auto r = threadIdx.x; r < n; r += blockDim.x) { for (auto c = r + 1; c < n; c++) { Nd4jLong posRC[] = {r, c}; auto pos = r * n + c; //shape::getOffset(0, shapeOf, strideOf, posRC, 2); current[pos] = 0.; } } } } template<typename F> int cholesky__(LaunchContext *context, NDArray *input, NDArray *output, bool inplace) { if (!inplace) output->assign(input); auto tempOutput =output->dup(); cusolverDnHandle_t handle = nullptr; auto n = input->sizeAt(-1); auto n2 = n * n; NDArray::prepareSpecialUse({output}, {input}); auto status = cusolverDnCreate(&handle); if (CUSOLVER_STATUS_SUCCESS != status) { throw cuda_exception::build("helpers::cholesky_: Cannot create solver handle", status); } F **dArrayBatch = nullptr; auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(tempOutput.getShapeInfo(), {tempOutput.rankOf() - 2, tempOutput.rankOf() - 1}); const Nd4jLong batchSize = packX.numberOfTads(); int *dInfoArray = nullptr; auto err = cudaMalloc((void **) &dArrayBatch, sizeof(F *) * batchSize); if (err) { throw cuda_exception::build("helpers::cholesky_: Cannot allocate memory for solver batch data buffer", err); } err = cudaMalloc((void **) &dInfoArray, sizeof(int) * batchSize); if (err) { throw cuda_exception::build("helpers::cholesky_: Cannot allocate memory for solver errors buffer", err); } auto stream = context->getCudaStream(); fillBatchKernel<F> << < 1, batchSize, 128, *stream >> > (dArrayBatch, reinterpret_cast<F *>(tempOutput.specialBuffer()), packX.specialOffsets(), batchSize); status = cusolverDnSetStream(handle, *stream); if (CUSOLVER_STATUS_SUCCESS != status) { throw cuda_exception::build("helpers::cholesky_: Cannot set stream to solver handle", status); } const cublasFillMode_t uplo = CUBLAS_FILL_MODE_UPPER; if (input->dataType() == DataType::DOUBLE) status = cusolverDnDpotrfBatched( handle, uplo, n, (double **) dArrayBatch, n, dInfoArray, batchSize); else status = cusolverDnSpotrfBatched( handle, uplo, n, (float **) dArrayBatch, n, dInfoArray, batchSize); if (CUSOLVER_STATUS_SUCCESS != status) { throw 
cuda_exception::build("helpers::cholesky_: Cholesky factorization failed for batch", status); } adjustResultsKernel<F> << < batchSize, n2, 128, *stream >> > (reinterpret_cast<F *>(tempOutput.specialBuffer()), packX.specialShapeInfo(), packX.specialOffsets(), batchSize, n); err = cudaFree(dArrayBatch); if (err) { throw cuda_exception::build("helpers::cholesky_: Cannot deallocate memory for solver batch data buffer", err); } err = cudaFree(dInfoArray); if (err) { throw cuda_exception::build("helpers::cholesky_: Cannot allocate memory for solver errors buffer", err); } if (!inplace) output->assign(tempOutput); else input->assign(tempOutput); NDArray::registerSpecialUse({output}, {input}); return Status::OK(); } // template <typename T> int cholesky_(LaunchContext *context, NDArray *input, NDArray *output, bool inplace) { NDArray::prepareSpecialUse({output}, {input}); if (input->dataType() == DataType::DOUBLE) cholesky__<double>(context, input, output, inplace); else if (input->dataType() == DataType::FLOAT32) cholesky__<float>(context, input, output, inplace); else { std::unique_ptr<NDArray> tempOutput( NDArrayFactory::create_('c', input->getShapeAsVector(), DataType::FLOAT32, context)); tempOutput->assign(input); cholesky__<float>(context, tempOutput.get(), tempOutput.get(), true); output->assign(tempOutput.get()); } NDArray::registerSpecialUse({output}, {input}); return Status::OK(); } int cholesky(sd::LaunchContext *context, NDArray *input, NDArray *output, bool inplace) { // BUILD_SINGLE_SELECTOR(input->dataType(), return cholesky_, (context, input, output, inplace), FLOAT_TYPES); return cholesky_(context, input, output, inplace); } // BUILD_SINGLE_TEMPLATE(template int cholesky_, (LaunchContext* context, NDArray* input, NDArray* output, bool inplace), FLOAT_TYPES); BUILD_SINGLE_TEMPLATE(template int inverse_, (sd::LaunchContext * context, NDArray * input, NDArray * output), FLOAT_NATIVE); template<typename T> __global__ void logDetKernel(T *inputBuf, Nd4jLong *inputShape, Nd4jLong batchNum, Nd4jLong *tadShape, Nd4jLong *tadOffsets, T *outputBuf, Nd4jLong *outputShape) { __shared__ int n; if (threadIdx.x == 0) { n = shape::sizeAt(inputShape, -1); // * shape::sizeAt(inputShape, -1); } __syncthreads(); T *output = outputBuf; T *input = inputBuf; for (auto i = blockIdx.x; i < batchNum; i += gridDim.x) { T *current = input + tadOffsets[i]; auto zIndex = shape::getIndexOffset(i, outputShape); for (auto e = threadIdx.x; e < n; e += blockDim.x) { Nd4jLong diag[] = {e, e}; auto xIndex = shape::getOffset(tadShape, diag); math::atomics::nd4j_atomicAdd(&output[zIndex],math::nd4j_log<T, T>(current[xIndex] * current[xIndex])); } } } template<typename T> int logdetFunctor_(sd::LaunchContext *context, NDArray *input, NDArray *output) { NDArray::prepareSpecialUse({output}, {input}); auto n2 = input->sizeAt(-1) * input->sizeAt(-2); auto stream = context->getCudaStream(); NDArray tempOutput(*input); cholesky(context, input, &tempOutput, false); auto outputBuf = output->dataBuffer()->specialAsT<T>(); //reinterpret_cast<T*>(output->specialBuffer()); // + e * n2; // + e * n2; auto inputBuf = tempOutput.dataBuffer()->specialAsT<T>(); //reinterpret_cast<T*>(tempOutput.specialBuffer()); output->nullify(); auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(tempOutput.getShapeInfo(), {tempOutput.rankOf() - 2, tempOutput.rankOf() - 1}); logDetKernel<T> <<<128, 512, 256, *stream>>>(inputBuf, tempOutput.specialShapeInfo(), packX.numberOfTads(), packX.specialShapeInfo(), packX.specialOffsets(), 
outputBuf, output->specialShapeInfo()); output->tickWriteDevice(); NDArray::registerSpecialUse({output}, {input}); return Status::OK(); } int logdetFunctor(sd::LaunchContext *context, NDArray *input, NDArray *output) { BUILD_SINGLE_SELECTOR(output->dataType(), return logdetFunctor_, (context, input, output), FLOAT_NATIVE); } /* * lup - batched input, batched outputs * */ int lup(LaunchContext *context, NDArray *input, NDArray *compound, NDArray *permutation) { BUILD_DOUBLE_SELECTOR(input->dataType(), permutation->dataType(), lup_,(context, input, compound, permutation), FLOAT_NATIVE, INDEXING_TYPES); return Status::OK(); } // BUILD_SINGLE_TEMPLATE(template int logdetFunctor_, // (sd::LaunchContext * context, NDArray * input, NDArray * output), FLOAT_NATIVE); } } }
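// --- Illustrative sketch (not part of the original file) ---------------------
// logdetFunctor_ above first runs the Cholesky helper and then, in logDetKernel,
// accumulates log(L_ii * L_ii) over the diagonal of each factor, using the
// identity log|A| = 2 * sum_i log(L_ii) for SPD A = L * L^T. A minimal
// standalone CUDA version of that reduction (kernel and driver names are
// hypothetical, not libnd4j API; requires sm_60+ for double-precision atomicAdd):
#include <cstdio>
#include <cmath>
#include <cuda_runtime.h>

__global__ void logdet_from_cholesky(const double* L, int n, double* out) {
    // Grid-stride loop over the diagonal; log|A| = sum_i log(L_ii^2).
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x)
        atomicAdd(out, log(L[i * n + i] * L[i * n + i]));
}

int main() {
    const int n = 4;
    // Cholesky factor of diag(1, 4, 9, 16) is diag(1, 2, 3, 4).
    double hL[n * n] = {0};
    for (int i = 0; i < n; ++i) hL[i * n + i] = i + 1.0;

    double *dL, *dOut, hOut = 0.0;
    cudaMalloc(&dL, sizeof(hL));
    cudaMalloc(&dOut, sizeof(double));
    cudaMemcpy(dL, hL, sizeof(hL), cudaMemcpyHostToDevice);
    cudaMemcpy(dOut, &hOut, sizeof(double), cudaMemcpyHostToDevice);

    logdet_from_cholesky<<<1, 32>>>(dL, n, dOut);
    cudaMemcpy(&hOut, dOut, sizeof(double), cudaMemcpyDeviceToHost);

    printf("logdet = %f (expected log(576) ~ 6.3561)\n", hOut);
    cudaFree(dL); cudaFree(dOut);
    return 0;
}
// -----------------------------------------------------------------------------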
887bde31f1ea289ec39610f824dccb3e0df4b323.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "proposal_layer.hpp" #include "nms.hpp" namespace caffe { template <typename Dtype> __device__ static int transform_box(Dtype box[], const Dtype dx, const Dtype dy, const Dtype d_log_w, const Dtype d_log_h, const Dtype img_W, const Dtype img_H, const Dtype min_box_W, const Dtype min_box_H) { // width & height of box const Dtype w = box[2] - box[0] + (Dtype)1; const Dtype h = box[3] - box[1] + (Dtype)1; // center location of box const Dtype ctr_x = box[0] + (Dtype)0.5 * w; const Dtype ctr_y = box[1] + (Dtype)0.5 * h; // new center location according to gradient (dx, dy) const Dtype pred_ctr_x = dx * w + ctr_x; const Dtype pred_ctr_y = dy * h + ctr_y; // new width & height according to gradient d(log w), d(log h) const Dtype pred_w = exp(d_log_w) * w; const Dtype pred_h = exp(d_log_h) * h; // update upper-left corner location box[0] = pred_ctr_x - (Dtype)0.5 * pred_w; box[1] = pred_ctr_y - (Dtype)0.5 * pred_h; // update lower-right corner location box[2] = pred_ctr_x + (Dtype)0.5 * pred_w; box[3] = pred_ctr_y + (Dtype)0.5 * pred_h; // adjust new corner locations to be within the image region, box[0] = max((Dtype)0, min(box[0], img_W - (Dtype)1)); box[1] = max((Dtype)0, min(box[1], img_H - (Dtype)1)); box[2] = max((Dtype)0, min(box[2], img_W - (Dtype)1)); box[3] = max((Dtype)0, min(box[3], img_H - (Dtype)1)); // recompute new width & height const Dtype box_w = box[2] - box[0] + (Dtype)1; const Dtype box_h = box[3] - box[1] + (Dtype)1; // check if new box's size >= threshold return (box_w >= min_box_W) * (box_h >= min_box_H); } template <typename Dtype> static void sort_box(Dtype list_cpu[], const int start, const int end, const int num_top) { const Dtype pivot_score = list_cpu[start * 5 + 4]; int left = start + 1, right = end; Dtype temp[5]; while (left <= right) { while (left <= end && list_cpu[left * 5 + 4] >= pivot_score) ++left; while (right > start && list_cpu[right * 5 + 4] <= pivot_score) --right; if (left <= right) { for (int i = 0; i < 5; ++i) { temp[i] = list_cpu[left * 5 + i]; } for (int i = 0; i < 5; ++i) { list_cpu[left * 5 + i] = list_cpu[right * 5 + i]; } for (int i = 0; i < 5; ++i) { list_cpu[right * 5 + i] = temp[i]; } ++left; --right; } } if (right > start) { for (int i = 0; i < 5; ++i) { temp[i] = list_cpu[start * 5 + i]; } for (int i = 0; i < 5; ++i) { list_cpu[start * 5 + i] = list_cpu[right * 5 + i]; } for (int i = 0; i < 5; ++i) { list_cpu[right * 5 + i] = temp[i]; } } if (start < right - 1) { sort_box(list_cpu, start, right - 1, num_top); } if (right + 1 < num_top && right + 1 < end) { sort_box(list_cpu, right + 1, end, num_top); } } template <typename Dtype> __global__ static void enumerate_proposals_gpu(const int nthreads, const Dtype bottom4d[], const Dtype d_anchor4d[], const Dtype anchors[], Dtype proposals[], const int num_anchors, const int bottom_H, const int bottom_W, const Dtype img_H, const Dtype img_W, const Dtype min_box_H, const Dtype min_box_W, const int feat_stride) { CUDA_KERNEL_LOOP(index, nthreads) { const int h = index / num_anchors / bottom_W; const int w = (index / num_anchors) % bottom_W; const int k = index % num_anchors; const Dtype x = w * feat_stride; const Dtype y = h * feat_stride; const Dtype* p_box = d_anchor4d + h * bottom_W + w; const Dtype* p_score = bottom4d + h * bottom_W + w; const int bottom_area = bottom_H * bottom_W; const Dtype dx = p_box[(k * 4 + 0) * bottom_area]; const Dtype dy = p_box[(k * 4 + 1) * bottom_area]; const Dtype 
d_log_w = p_box[(k * 4 + 2) * bottom_area]; const Dtype d_log_h = p_box[(k * 4 + 3) * bottom_area]; Dtype* const p_proposal = proposals + index * 5; p_proposal[0] = x + anchors[k * 4 + 0]; p_proposal[1] = y + anchors[k * 4 + 1]; p_proposal[2] = x + anchors[k * 4 + 2]; p_proposal[3] = y + anchors[k * 4 + 3]; p_proposal[4] = transform_box(p_proposal, dx, dy, d_log_w, d_log_h, img_W, img_H, min_box_W, min_box_H) * p_score[k * bottom_area]; } } template <typename Dtype> __global__ static void retrieve_rois_gpu(const int nthreads, const int item_index, const Dtype proposals[], const int roi_indices[], Dtype rois[], Dtype roi_scores[]) { CUDA_KERNEL_LOOP(index, nthreads) { const Dtype* const proposals_index = proposals + roi_indices[index] * 5; rois[index * 5 + 0] = item_index; rois[index * 5 + 1] = proposals_index[0]; rois[index * 5 + 2] = proposals_index[1]; rois[index * 5 + 3] = proposals_index[2]; rois[index * 5 + 4] = proposals_index[3]; if (roi_scores) { roi_scores[index] = proposals_index[4]; } } } template <typename Dtype> void ProposalLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* p_bottom_item = bottom[0]->gpu_data(); const Dtype* p_d_anchor_item = bottom[1]->gpu_data(); const Dtype* p_img_info_cpu = bottom[2]->cpu_data(); Dtype* p_roi_item = top[0]->mutable_gpu_data(); Dtype* p_score_item = (top.size() > 1) ? top[1]->mutable_gpu_data() : NULL; vector<int> proposals_shape(2); vector<int> top_shape(2); proposals_shape[0] = 0; proposals_shape[1] = 5; top_shape[0] = 0; top_shape[1] = 5; for (int n = 0; n < bottom[0]->shape(0); ++n) { // bottom shape: (2 x num_anchors) x H x W const int bottom_H = bottom[0]->height(); const int bottom_W = bottom[0]->width(); // input image height & width const Dtype img_H = p_img_info_cpu[0]; const Dtype img_W = p_img_info_cpu[1]; // minimum box width & height const Dtype min_box_H = min_size_; const Dtype min_box_W = min_size_; // number of all proposals = num_anchors * H * W const int num_proposals = anchors_.shape(0) * bottom_H * bottom_W; // number of top-n proposals before NMS const int pre_nms_topn = ::min(num_proposals, pre_nms_topn_); // number of final RoIs int num_rois = 0; // enumerate all proposals // num_proposals = num_anchors * H * W // (x1, y1, x2, y2, score) for each proposal // NOTE: for bottom, only foreground scores are passed proposals_shape[0] = num_proposals; proposals_.Reshape(proposals_shape); hipLaunchKernelGGL(( enumerate_proposals_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(num_proposals)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_proposals, p_bottom_item + num_proposals, p_d_anchor_item, anchors_.gpu_data(), proposals_.mutable_gpu_data(), anchors_.shape(0), bottom_H, bottom_W, img_H, img_W, min_box_H, min_box_W, feat_stride_); CUDA_POST_KERNEL_CHECK; sort_box(proposals_.mutable_cpu_data(), 0, num_proposals - 1, pre_nms_topn_); nms_gpu(pre_nms_topn, proposals_.gpu_data(), &nms_mask_, roi_indices_.mutable_cpu_data(), &num_rois, 0, nms_thresh_, post_nms_topn_); hipLaunchKernelGGL(( retrieve_rois_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(num_rois)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_rois, n, proposals_.gpu_data(), roi_indices_.gpu_data(), p_roi_item, p_score_item); CUDA_POST_KERNEL_CHECK; top_shape[0] += num_rois; p_bottom_item += bottom[0]->offset(1); p_d_anchor_item += bottom[1]->offset(1); p_roi_item += num_rois * 5; p_score_item += num_rois * 1; } top[0]->Reshape(top_shape); if (top.size() > 1) { top_shape.pop_back(); top[1]->Reshape(top_shape); } } 
INSTANTIATE_LAYER_GPU_FUNCS(ProposalLayer); } // namespace caffe
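// --- Illustrative sketch (not part of the original file) ---------------------
// sort_box above is a recursive quicksort over 5-float records
// (x1, y1, x2, y2, score), ordered by descending score, that skips any right
// partition lying entirely beyond the first num_top entries: only the top-n
// proposals need to be in order before NMS. The same contract expressed with
// std::partial_sort over a struct layout (host-side helper, not Caffe API):
#include <algorithm>
#include <vector>

struct Proposal { float x1, y1, x2, y2, score; };

static void sort_top_n(std::vector<Proposal>& p, int num_top) {
    // Afterwards p[0..num_top) holds the num_top highest-scoring proposals in
    // descending order; the ordering of the tail is unspecified.
    const int k = std::min<int>(num_top, static_cast<int>(p.size()));
    std::partial_sort(p.begin(), p.begin() + k, p.end(),
                      [](const Proposal& a, const Proposal& b) {
                          return a.score > b.score;
                      });
}
// -----------------------------------------------------------------------------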
887bde31f1ea289ec39610f824dccb3e0df4b323.cu
#include "proposal_layer.hpp" #include "nms.hpp" namespace caffe { template <typename Dtype> __device__ static int transform_box(Dtype box[], const Dtype dx, const Dtype dy, const Dtype d_log_w, const Dtype d_log_h, const Dtype img_W, const Dtype img_H, const Dtype min_box_W, const Dtype min_box_H) { // width & height of box const Dtype w = box[2] - box[0] + (Dtype)1; const Dtype h = box[3] - box[1] + (Dtype)1; // center location of box const Dtype ctr_x = box[0] + (Dtype)0.5 * w; const Dtype ctr_y = box[1] + (Dtype)0.5 * h; // new center location according to gradient (dx, dy) const Dtype pred_ctr_x = dx * w + ctr_x; const Dtype pred_ctr_y = dy * h + ctr_y; // new width & height according to gradient d(log w), d(log h) const Dtype pred_w = exp(d_log_w) * w; const Dtype pred_h = exp(d_log_h) * h; // update upper-left corner location box[0] = pred_ctr_x - (Dtype)0.5 * pred_w; box[1] = pred_ctr_y - (Dtype)0.5 * pred_h; // update lower-right corner location box[2] = pred_ctr_x + (Dtype)0.5 * pred_w; box[3] = pred_ctr_y + (Dtype)0.5 * pred_h; // adjust new corner locations to be within the image region, box[0] = max((Dtype)0, min(box[0], img_W - (Dtype)1)); box[1] = max((Dtype)0, min(box[1], img_H - (Dtype)1)); box[2] = max((Dtype)0, min(box[2], img_W - (Dtype)1)); box[3] = max((Dtype)0, min(box[3], img_H - (Dtype)1)); // recompute new width & height const Dtype box_w = box[2] - box[0] + (Dtype)1; const Dtype box_h = box[3] - box[1] + (Dtype)1; // check if new box's size >= threshold return (box_w >= min_box_W) * (box_h >= min_box_H); } template <typename Dtype> static void sort_box(Dtype list_cpu[], const int start, const int end, const int num_top) { const Dtype pivot_score = list_cpu[start * 5 + 4]; int left = start + 1, right = end; Dtype temp[5]; while (left <= right) { while (left <= end && list_cpu[left * 5 + 4] >= pivot_score) ++left; while (right > start && list_cpu[right * 5 + 4] <= pivot_score) --right; if (left <= right) { for (int i = 0; i < 5; ++i) { temp[i] = list_cpu[left * 5 + i]; } for (int i = 0; i < 5; ++i) { list_cpu[left * 5 + i] = list_cpu[right * 5 + i]; } for (int i = 0; i < 5; ++i) { list_cpu[right * 5 + i] = temp[i]; } ++left; --right; } } if (right > start) { for (int i = 0; i < 5; ++i) { temp[i] = list_cpu[start * 5 + i]; } for (int i = 0; i < 5; ++i) { list_cpu[start * 5 + i] = list_cpu[right * 5 + i]; } for (int i = 0; i < 5; ++i) { list_cpu[right * 5 + i] = temp[i]; } } if (start < right - 1) { sort_box(list_cpu, start, right - 1, num_top); } if (right + 1 < num_top && right + 1 < end) { sort_box(list_cpu, right + 1, end, num_top); } } template <typename Dtype> __global__ static void enumerate_proposals_gpu(const int nthreads, const Dtype bottom4d[], const Dtype d_anchor4d[], const Dtype anchors[], Dtype proposals[], const int num_anchors, const int bottom_H, const int bottom_W, const Dtype img_H, const Dtype img_W, const Dtype min_box_H, const Dtype min_box_W, const int feat_stride) { CUDA_KERNEL_LOOP(index, nthreads) { const int h = index / num_anchors / bottom_W; const int w = (index / num_anchors) % bottom_W; const int k = index % num_anchors; const Dtype x = w * feat_stride; const Dtype y = h * feat_stride; const Dtype* p_box = d_anchor4d + h * bottom_W + w; const Dtype* p_score = bottom4d + h * bottom_W + w; const int bottom_area = bottom_H * bottom_W; const Dtype dx = p_box[(k * 4 + 0) * bottom_area]; const Dtype dy = p_box[(k * 4 + 1) * bottom_area]; const Dtype d_log_w = p_box[(k * 4 + 2) * bottom_area]; const Dtype d_log_h = p_box[(k * 4 + 3) * 
bottom_area]; Dtype* const p_proposal = proposals + index * 5; p_proposal[0] = x + anchors[k * 4 + 0]; p_proposal[1] = y + anchors[k * 4 + 1]; p_proposal[2] = x + anchors[k * 4 + 2]; p_proposal[3] = y + anchors[k * 4 + 3]; p_proposal[4] = transform_box(p_proposal, dx, dy, d_log_w, d_log_h, img_W, img_H, min_box_W, min_box_H) * p_score[k * bottom_area]; } } template <typename Dtype> __global__ static void retrieve_rois_gpu(const int nthreads, const int item_index, const Dtype proposals[], const int roi_indices[], Dtype rois[], Dtype roi_scores[]) { CUDA_KERNEL_LOOP(index, nthreads) { const Dtype* const proposals_index = proposals + roi_indices[index] * 5; rois[index * 5 + 0] = item_index; rois[index * 5 + 1] = proposals_index[0]; rois[index * 5 + 2] = proposals_index[1]; rois[index * 5 + 3] = proposals_index[2]; rois[index * 5 + 4] = proposals_index[3]; if (roi_scores) { roi_scores[index] = proposals_index[4]; } } } template <typename Dtype> void ProposalLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* p_bottom_item = bottom[0]->gpu_data(); const Dtype* p_d_anchor_item = bottom[1]->gpu_data(); const Dtype* p_img_info_cpu = bottom[2]->cpu_data(); Dtype* p_roi_item = top[0]->mutable_gpu_data(); Dtype* p_score_item = (top.size() > 1) ? top[1]->mutable_gpu_data() : NULL; vector<int> proposals_shape(2); vector<int> top_shape(2); proposals_shape[0] = 0; proposals_shape[1] = 5; top_shape[0] = 0; top_shape[1] = 5; for (int n = 0; n < bottom[0]->shape(0); ++n) { // bottom shape: (2 x num_anchors) x H x W const int bottom_H = bottom[0]->height(); const int bottom_W = bottom[0]->width(); // input image height & width const Dtype img_H = p_img_info_cpu[0]; const Dtype img_W = p_img_info_cpu[1]; // minimum box width & height const Dtype min_box_H = min_size_; const Dtype min_box_W = min_size_; // number of all proposals = num_anchors * H * W const int num_proposals = anchors_.shape(0) * bottom_H * bottom_W; // number of top-n proposals before NMS const int pre_nms_topn = std::min(num_proposals, pre_nms_topn_); // number of final RoIs int num_rois = 0; // enumerate all proposals // num_proposals = num_anchors * H * W // (x1, y1, x2, y2, score) for each proposal // NOTE: for bottom, only foreground scores are passed proposals_shape[0] = num_proposals; proposals_.Reshape(proposals_shape); enumerate_proposals_gpu<Dtype><<<CAFFE_GET_BLOCKS(num_proposals), CAFFE_CUDA_NUM_THREADS>>>( num_proposals, p_bottom_item + num_proposals, p_d_anchor_item, anchors_.gpu_data(), proposals_.mutable_gpu_data(), anchors_.shape(0), bottom_H, bottom_W, img_H, img_W, min_box_H, min_box_W, feat_stride_); CUDA_POST_KERNEL_CHECK; sort_box(proposals_.mutable_cpu_data(), 0, num_proposals - 1, pre_nms_topn_); nms_gpu(pre_nms_topn, proposals_.gpu_data(), &nms_mask_, roi_indices_.mutable_cpu_data(), &num_rois, 0, nms_thresh_, post_nms_topn_); retrieve_rois_gpu<Dtype><<<CAFFE_GET_BLOCKS(num_rois), CAFFE_CUDA_NUM_THREADS>>>( num_rois, n, proposals_.gpu_data(), roi_indices_.gpu_data(), p_roi_item, p_score_item); CUDA_POST_KERNEL_CHECK; top_shape[0] += num_rois; p_bottom_item += bottom[0]->offset(1); p_d_anchor_item += bottom[1]->offset(1); p_roi_item += num_rois * 5; p_score_item += num_rois * 1; } top[0]->Reshape(top_shape); if (top.size() > 1) { top_shape.pop_back(); top[1]->Reshape(top_shape); } } INSTANTIATE_LAYER_GPU_FUNCS(ProposalLayer); } // namespace caffe
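// --- Illustrative sketch (not part of the original file) ---------------------
// transform_box above decodes the (dx, dy, d_log_w, d_log_h) regression deltas
// around the anchor-box center, clips the result to the image, and reports
// whether it still meets the minimum size. A host-side reference of the same
// arithmetic, handy for unit-testing the kernel (float instead of the Dtype
// template; function name is hypothetical, not Caffe API):
#include <algorithm>
#include <cmath>

static int transform_box_ref(float box[4], float dx, float dy,
                             float d_log_w, float d_log_h,
                             float img_w, float img_h,
                             float min_w, float min_h) {
    const float w = box[2] - box[0] + 1.0f;
    const float h = box[3] - box[1] + 1.0f;
    const float ctr_x = box[0] + 0.5f * w;
    const float ctr_y = box[1] + 0.5f * h;

    // Center deltas are relative to box size; width/height deltas in log space.
    const float pred_ctr_x = dx * w + ctr_x;
    const float pred_ctr_y = dy * h + ctr_y;
    const float pred_w = std::exp(d_log_w) * w;
    const float pred_h = std::exp(d_log_h) * h;

    box[0] = std::max(0.0f, std::min(pred_ctr_x - 0.5f * pred_w, img_w - 1.0f));
    box[1] = std::max(0.0f, std::min(pred_ctr_y - 0.5f * pred_h, img_h - 1.0f));
    box[2] = std::max(0.0f, std::min(pred_ctr_x + 0.5f * pred_w, img_w - 1.0f));
    box[3] = std::max(0.0f, std::min(pred_ctr_y + 0.5f * pred_h, img_h - 1.0f));

    const float out_w = box[2] - box[0] + 1.0f;
    const float out_h = box[3] - box[1] + 1.0f;
    return (out_w >= min_w) && (out_h >= min_h);
}
// -----------------------------------------------------------------------------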
646300eccc1b597d1cf26c3ac2293e5347ee9673.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @precisions normal z -> c d s @author Peng Du @author Tingxing Dong @author Mark Gates @author Azzam Haidar This file implements lower case, and is called by ztrtri_kernel.cu. It's convenient to have separate files for lower & upper, to diff the sources. */ #include "magma_internal.h" #define TRTRI_NONBATCHED #include "ztrtri.cuh" #include "ztrtri_lower_device.cuh" /******************************************************************************/ __global__ void ztrtri_diag_lower_kernel( magma_diag_t diag, int n, const magmaDoubleComplex *A, int lda, magmaDoubleComplex *d_dinvA) { ztrtri_diag_lower_device(diag, n, A, lda, d_dinvA); } /******************************************************************************/ __global__ void triple_zgemm16_part1_lower_kernel( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { triple_zgemm16_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_zgemm16_part2_lower_kernel( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { triple_zgemm16_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_zgemm32_part1_lower_kernel( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { triple_zgemm32_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_zgemm32_part2_lower_kernel( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { triple_zgemm32_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_zgemm64_part1_lower_kernel( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { triple_zgemm64_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_zgemm64_part2_lower_kernel( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { triple_zgemm64_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_zgemm_above64_part1_lower_kernel( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { triple_zgemm_above64_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_zgemm_above64_part2_lower_kernel( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { triple_zgemm_above64_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_zgemm_above64_part3_lower_kernel( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { 
triple_zgemm_above64_part3_lower_device( n, Ain, lda, d_dinvA, jb, npages); }
646300eccc1b597d1cf26c3ac2293e5347ee9673.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @precisions normal z -> c d s @author Peng Du @author Tingxing Dong @author Mark Gates @author Azzam Haidar This file implements lower case, and is called by ztrtri_kernel.cu. It's convenient to have separate files for lower & upper, to diff the sources. */ #include "magma_internal.h" #define TRTRI_NONBATCHED #include "ztrtri.cuh" #include "ztrtri_lower_device.cuh" /******************************************************************************/ __global__ void ztrtri_diag_lower_kernel( magma_diag_t diag, int n, const magmaDoubleComplex *A, int lda, magmaDoubleComplex *d_dinvA) { ztrtri_diag_lower_device(diag, n, A, lda, d_dinvA); } /******************************************************************************/ __global__ void triple_zgemm16_part1_lower_kernel( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { triple_zgemm16_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_zgemm16_part2_lower_kernel( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { triple_zgemm16_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_zgemm32_part1_lower_kernel( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { triple_zgemm32_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_zgemm32_part2_lower_kernel( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { triple_zgemm32_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_zgemm64_part1_lower_kernel( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { triple_zgemm64_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_zgemm64_part2_lower_kernel( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { triple_zgemm64_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_zgemm_above64_part1_lower_kernel( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { triple_zgemm_above64_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_zgemm_above64_part2_lower_kernel( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { triple_zgemm_above64_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_zgemm_above64_part3_lower_kernel( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { triple_zgemm_above64_part3_lower_device( n, Ain, lda, d_dinvA, jb, npages); }
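// --- Illustrative sketch (not part of the original file) ---------------------
// These kernels implement MAGMA's blocked triangular inverse: ztrtri_diag_lower
// inverts small diagonal blocks in place, and the triple_zgemm* kernels combine
// them via the block identity
//   inv([T11 0; T21 T22]) = [inv(T11) 0; -inv(T22)*T21*inv(T11)  inv(T22)].
// A plain host reference of the per-block step (unblocked forward substitution
// on a real lower-triangular matrix; not MAGMA API):
#include <cstdio>

// Invert an n x n lower-triangular matrix stored column-major (lda >= n).
static void trtri_lower_ref(int n, const double* T, int lda, double* Tinv) {
    for (int j = 0; j < n; ++j) {
        for (int i = 0; i < n; ++i) Tinv[i + j * lda] = 0.0;
        Tinv[j + j * lda] = 1.0 / T[j + j * lda];
        // Forward-substitute T * x = e_j for the entries below the diagonal.
        for (int i = j + 1; i < n; ++i) {
            double s = 0.0;
            for (int k = j; k < i; ++k) s += T[i + k * lda] * Tinv[k + j * lda];
            Tinv[i + j * lda] = -s / T[i + i * lda];
        }
    }
}

int main() {
    // T = [2 0; 3 4] (column-major); inv(T) = [0.5 0; -0.375 0.25].
    const double T[4] = {2.0, 3.0, 0.0, 4.0};
    double Tinv[4];
    trtri_lower_ref(2, T, 2, Tinv);
    printf("%g %g\n%g %g\n", Tinv[0], Tinv[2], Tinv[1], Tinv[3]);
    return 0;
}
// -----------------------------------------------------------------------------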
9508c21e7a629cfbc76646b0898b3381004199a6.hip
// !!! This is a file automatically generated by hipify!!! /* Author: Cao Thanh Tung, Qi Meng Date: 15/03/2011 File Name: cudaMissing.cu This file include all CUDA code to perform the inserting missing sites step =============================================================================== Copyright (c) 2011, School of Computing, National University of Singapore. All rights reserved. Project homepage: http://www.comp.nus.edu.sg/~tants/delaunay.html If you use GPU-DT and you like it or have comments on its usefulness etc., we would love to hear from you at <[email protected]>. You may share with us your experience and any possibilities that we may improve the work/code. =============================================================================== Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of the National University of University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission from the National University of Singapore. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma warning(disable: 4311 4312) #include <hip/device_functions.h> #include <stdio.h> #include <string.h> #include "../gpudt.h" #include "hip/hip_runtime.h" #include "common.h" #include "cudaCCW.h" #include "cudaScanLargeArray.h" #define MAXINT 2147483647 /*********************************************************** * Declarations ***********************************************************/ #define WBLOCK 256 #define INSERT_TRIANGLE(v0, v1, v2, tri) \ tmp = tri; \ cnewtri[tmp] = step; \ ctriangles[tmp * 9 + 3] = v2; \ ctriangles[tmp * 9 + 4] = v0; \ ctriangles[tmp * 9 + 5] = v1; \ ctriangles[tmp * 9 + 6] = atomicExch(&cvertarr[v0], (tmp << 2)); \ ctriangles[tmp * 9 + 7] = atomicExch(&cvertarr[v1], (tmp << 2) | 1); \ ctriangles[tmp * 9 + 8] = atomicExch(&cvertarr[v2], (tmp << 2) | 2); \ #define SET_TRIANGLE(vOrg, vDest, vApex, nOrg, nDest, nApex, tri, ori) \ ctriangles[(tri) * 9 + 3 + ((ori) + 1) % 3] = (vOrg); \ ctriangles[(tri) * 9 + 3 + ((ori) + 2) % 3] = (vDest); \ ctriangles[(tri) * 9 + 3 + (ori)] = (vApex); \ ctriangles[(tri) * 9 + 6 + (ori)] = (nOrg); \ ctriangles[(tri) * 9 + 6 + ((ori) + 1) % 3] = (nDest); \ ctriangles[(tri) * 9 + 6 + ((ori) + 2) % 3] = (nApex) #define UPDATE_TEMP_LINK(pTriOri, pNext) \ if ((pTriOri) >= 0) \ ctriangles[decode_tri(pTriOri) * 9 + 6 + decode_ori(pTriOri)] = -(pNext) #define UPDATE_LINK(pTriOri, pNext) \ if ((pTriOri) >= 0) \ ctriangles[decode_tri(pTriOri) * 9 + decode_ori(pTriOri)] = (pNext) /************************************************************** * Exported methods **************************************************************/ extern "C" void cudaMissing(); /************************************************************** * Definitions **************************************************************/ // Decode an oriented triangle. // An oriented triangle consists of 32 bits. // - 30 highest bits represent the triangle index, // - 2 lowest bits represent the orientation (the starting vertex, 0, 1 or 2) #define decode_tri(x) ((x) >> 2) #define decode_ori(x) ((x) & 3) #define encode_tri(tri, ori) (((tri) << 2) | (ori)) #define MAX(x, y) ((x) < (y) ? (y) : (x)) /************************************************************ * Variables and functions shared with the main module ************************************************************/ extern int nTris, nVerts, nPoints,nConstraints; extern int *ctriangles; extern int *cvertarr; extern int *tvertices; extern REAL2 *covertices; extern short *cnewtri; extern int step; extern int *cflag; /******************************************************************* * Fill an array with increasing numbers *******************************************************************/ __global__ void kernelFillIncrement(int *list, int start, int length) { int x = blockIdx.x * blockDim.x + threadIdx.x; int noThreads = blockDim.x * gridDim.x; for (; x < length; x += noThreads) list[x] = start + x; } /******************************************************************** * Collect all dead triangles into a list. ********************************************************************/ __global__ void kernelMarkDeadTriangles(int *cmarker, short *cnewtri, int nTris) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nTris) return ; cmarker[x] = (cnewtri[x] >= 0 ? 
0 : 1); } __global__ void kernelCollectDeadTriangles(int *cdeadTri, short *cnewtri, int *cmarker, int nTris) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nTris || cnewtri[x] >= 0) return ; int id = cmarker[x]; cdeadTri[id] = x; } /******************************************************************** * Locate the triangle which we are gonna insert a missing site to. * - If the anchor of the missing site is not yet inserted, skip * - Locate the triangle and mark it to avoid two insertions into * the same triangle. * - Guarantee that the missing site is not on the boundary due to * huge fake boundary added. ********************************************************************/ __global__ void kernelLocateTriangleContainer(int *ctriangles, int *cvertarr, int *tvertices, int *clocation, int *ctags, REAL2 *covertices, int nVerts, int *cflag) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nVerts || cvertarr[x] != -1) return ; int anchor = tvertices[x]; if (cvertarr[anchor] < 0) { clocation[x] = MAXINT; return ; } REAL2 v = covertices[x]; REAL2 vanchor = covertices[anchor]; REAL ccDest, ccApex, ccOpposite; int pNextTri, pTri, pOri; pNextTri = cvertarr[anchor]; pTri = decode_tri(pNextTri); pOri = decode_ori(pNextTri); int pDest, pApex, pOrg, pTmp; REAL2 vDest, vApex, vOrg, vTmp; pDest = ctriangles[pTri * 9 + 3 + (pOri + 2) % 3]; // Dest vDest = covertices[pDest]; ccDest = cuda_ccw(vanchor, vDest, v); do { pApex = ctriangles[pTri * 9 + 3 + pOri]; // apex vApex = covertices[pApex]; ccApex = cuda_ccw(vanchor, vApex, v); if (ccDest >= 0.0 && ccApex <= 0.0) // Inside the angle break; pDest = pApex; vDest = vApex; ccDest = ccApex; pNextTri = ctriangles[pTri * 9 + (pOri + 2) % 3]; pTri = decode_tri(pNextTri); pOri = decode_ori(pNextTri); } while (true); // Found an angle, now look for the actual triangle // containing me. ccOpposite = cuda_ccw(vDest, vApex, v); if (ccOpposite < 0.0) { // It's not right here, need to walk a bit further while (true) { // Get the opposite triangle pNextTri = ctriangles[pTri * 9 + (pOri + 1) % 3]; //if (pNextTri < 0) { // cvertarr[x] = -100; // clocation[x] = encode_tri(pTri, (pOri + 1) % 3); // return ; //} pTri = decode_tri(pNextTri); // Rotate the triangle so that the org is opposite the previous org pOri = (decode_ori(pNextTri) + 2) % 3; pOrg = ctriangles[pTri * 9 + 3 + (pOri + 1) % 3]; vOrg = covertices[pOrg]; pTmp = pDest; pDest = pApex; pApex = pTmp; vTmp = vDest; vDest = vApex; vApex = vTmp; ccDest = cuda_ccw(vOrg, vDest, v); ccApex = cuda_ccw(vApex, vOrg, v); bool moveleft; if (ccDest >= 0.0) if (ccApex >= 0.0) // Found it! break; else moveleft = false; else if (ccApex >= 0.0) moveleft = true; else moveleft = (vOrg.x - v.x) * (vApex.x - vDest.x) + (vOrg.y - v.y) * (vApex.y - vDest.y) > 0.0; if (moveleft) { pOri = (pOri + 2) % 3; pApex = pDest; pDest = pOrg; vApex = vDest; vDest = vOrg; ccOpposite = ccDest; // Orientation is unimportant } else { pOri = (pOri + 1) % 3; pDest = pApex; pApex = pOrg; vDest = vApex; vApex = vOrg; ccOpposite = ccApex; } } } int c0 = 0; if (ccDest == 0.0) c0++; if (ccApex == 0.0) c0++; if (ccOpposite == 0.0) c0++; if (c0 == 0) { // Easiest case, it's right here! clocation[x] = pNextTri + 1; // Mark to indicate that it's a simple case. atomicMin(&ctags[pTri], x); } else if (c0 > 1) { // Duplicate point clocation[x] = pNextTri + 1; cvertarr[x] = -2; return ; } else { // On an edge. // Make sure our 'location' triangle always face toward that edge // (i.e. 
that edge will be opposite to the origin) if (ccDest == 0.0) pOri = (pOri + 2) % 3; else if (ccApex == 0.0) pOri = (pOri + 1) % 3; clocation[x] = -encode_tri(pTri, pOri) - 1; // To avoid deadlock when 3 sites want to insert on 3 edges, // and they try to mark 3 pairs triangles: (a, b), (b, c), (c, a) atomicMin(&ctags[pTri], x); atomicMin(&ctags[decode_tri(ctriangles[pTri * 9 + (pOri + 1) % 3])], x); } *cflag = 1; } /*************************************************************************** * Determine which missing point insertion can be take place, * mark those triangles that need to be deleted. ***************************************************************************/ __global__ void kernelPreprocessTriangles(int *ctriangles, int *cvertarr, int *clocation, int *ctags, int *tvertices, short *cnewtri, int *cmarker, BYTE *caffected, int nVerts, int step) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nVerts || cvertarr[x] != -1) return ; int pNextTri = clocation[x]; if (pNextTri == MAXINT) return ; int pTri = decode_tri(abs(pNextTri) - 1); int pOri = decode_ori(abs(pNextTri) - 1); int popp, pOppTri, pOppOri; bool success; if (pNextTri >= 0) // one triangle success = (ctags[pTri] == x); else { popp = (ctriangles[pTri * 9 + (pOri + 1) % 3]); pOppTri = decode_tri(popp); pOppOri = decode_ori(popp); success = (ctags[pTri] == x && ctags[pOppTri] == x); } if (success) { cmarker[x] = 2; cnewtri[pTri] = -step; caffected[ctriangles[pTri * 9 + 3]] = 1; caffected[ctriangles[pTri * 9 + 4]] = 1; caffected[ctriangles[pTri * 9 + 5]] = 1; if (pNextTri < 0) { cnewtri[pOppTri] = -step; caffected[ctriangles[pOppTri * 9 + 3 + pOppOri]] = 1; } } } /************************************************************ * Fix the vertex array for those affected sites ************************************************************/ __global__ void kernelFixVertexArrayMissing(int *ctriangles, int *cvertarr, BYTE *caffected, short *cnewtri, int nVerts) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nVerts || caffected[x] != 1) return ; int p = cvertarr[x], pnext = p; // Find the first valid triangle while (pnext >= 0 && cnewtri[decode_tri(pnext)] < 0) pnext = ctriangles[decode_tri(pnext) * 9 + 6 + decode_ori(pnext)]; if (pnext != p) cvertarr[x] = pnext; while (pnext >= 0) { // Find an invalid triangle do { p = pnext; pnext = ctriangles[decode_tri(p) * 9 + 6 + decode_ori(p)]; } while (pnext >= 0 && cnewtri[decode_tri(pnext)] >= 0); if (pnext >= 0) { // Now pnext is deleted, so we fix the link for p. 
// Find the next valid triangle while (pnext >= 0 && cnewtri[decode_tri(pnext)] < 0) pnext = ctriangles[decode_tri(pnext) * 9 + 6 + decode_ori(pnext)]; ctriangles[decode_tri(p) * 9 + 6 + decode_ori(p)] = pnext; } } } __global__ void kernelInsertMissingSites(int *ctriangles, int *cvertarr, int *clocation, int *cmarker, int *cavailtri, int *cprefix, short *cnewtri, int nVerts, int step) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nVerts || cmarker[x] != 2) return ; int pNextTri = clocation[x]; int pTri = decode_tri(abs(pNextTri) - 1); int pOri = decode_ori(abs(pNextTri) - 1); int pOrg, pDest, pApex, pOpposite; int t1, t2, t3, t4, tmp; int offset = cprefix[x]; t1 = pTri; t2 = cavailtri[offset]; t3 = cavailtri[offset + 1]; pApex = ctriangles[pTri * 9 + 3 + pOri]; pOrg = ctriangles[pTri * 9 + 3 + (pOri + 1) % 3]; pDest = ctriangles[pTri * 9 + 3 + (pOri + 2) % 3]; if (pNextTri >= 0) { // one triangle INSERT_TRIANGLE(pOrg, pDest, x, t1); INSERT_TRIANGLE(pDest, pApex, x, t2); INSERT_TRIANGLE(pApex, pOrg, x, t3); } else { int nDest = ctriangles[pTri * 9 + (pOri + 1) % 3]; int pOppTri = decode_tri(nDest); int pOppOri = decode_ori(nDest); pOpposite = ctriangles[pOppTri * 9 + 3 + pOppOri]; t4 = pOppTri; INSERT_TRIANGLE(pOrg, pDest, x, t1); INSERT_TRIANGLE(pDest, pOpposite, x, t2); INSERT_TRIANGLE(pOpposite, pApex, x, t3); INSERT_TRIANGLE(pApex, pOrg, x, t4); } } /****************************************************************** * Update the links between triangles after adding new triangles ******************************************************************/ __global__ void kernelUpdateMissingTriangleLinks(int *ctriangles, int *cvertarr, short *cnewtri, int nTris, int step) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nTris || cnewtri[x] != step) return ; int p0, p1, p2, n0 = -1, n1 = -1, n2 = -1; int nCounter, pNextTri, pTri, pOri, pTri9; int x9 = x * 9; p2 = ctriangles[x9 + 3]; p1 = ctriangles[x9 + 5]; p0 = ctriangles[x9 + 4]; nCounter = 0; // orientation 0 // Travel through the list of triangles sharing vertex 0 with this triangle. // In this list we can find at most two triangles sharing edge (p0, p1) and // (p2, p0) with our triangle. 
pNextTri = cvertarr[p0]; while (pNextTri >= 0 && nCounter < 2) { pTri = decode_tri(pNextTri); pOri = decode_ori(pNextTri); pTri9 = pTri * 9; if (p2 == ctriangles[pTri9 + 3 + (pOri + 2) % 3]) { // NextDest n2 = pNextTri; ctriangles[pTri9 + pOri] = (x << 2) | 2; nCounter++; } if (p1 == ctriangles[pTri9 + 3 + pOri]) { // NextApex n0 = (pTri << 2) | ((pOri + 2) % 3); ctriangles[pTri9 + (pOri + 2) % 3] = (x << 2); nCounter++; } pNextTri = ctriangles[pTri9 + 6 + pOri]; } // orientation 1 // Find the triangle with edge (p1, p2) pNextTri = cvertarr[p1]; while (pNextTri >= 0) { pTri = decode_tri(pNextTri); pOri = decode_ori(pNextTri); pTri9 = pTri * 9; if (p2 == ctriangles[pTri9 + 3 + pOri]) { // NextApex n1 = (pTri << 2) | ((pOri + 2) % 3); ctriangles[pTri9 + (pOri + 2) % 3] = (x << 2) | 1; break ; } pNextTri = ctriangles[pTri9 + 6 + pOri]; } ctriangles[x9 + 0] = n0; ctriangles[x9 + 1] = n1; ctriangles[x9 + 2] = n2; } /******************************************************************** * Fix vertex array ********************************************************************/ __global__ void kernelFixVertArray_Missing(int *ctriangles,int nTris, int *cvertarr) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nTris) return ; int v0 = ctriangles[x * 9 + 4]; int v1 = ctriangles[x * 9 + 5]; int v2 = ctriangles[x * 9 + 3]; ctriangles[x * 9 + 6] = atomicExch(&cvertarr[v0], (x << 2)); ctriangles[x * 9 + 7] = atomicExch(&cvertarr[v1], (x << 2) | 1); ctriangles[x * 9 + 8] = atomicExch(&cvertarr[v2], (x << 2) | 2); } __global__ void kernelMarkValidTriangles1(short *cnewtri, int *cvalid, int nTris) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nTris) return ; cvalid[x] = (cnewtri[x] >= 0) ? 1 : 0; } __global__ void kernelCollectEmptySlots1(short *cnewtri, int *cprefix, int *cempty, int nTris) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nTris || cnewtri[x] >= 0) return ; int id = x - cprefix[x]; cempty[id] = x; } __global__ void kernelFillEmptySlots1(short *cnewtri, int *cprefix, int *cempty, int *ctriangles, int nTris, int newnTris, int offset) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nTris || cnewtri[x] < 0) return ; int value; if (x < newnTris) value = x; else { value = cempty[cprefix[x] - offset]; for (int i = 0; i < 9; i++) ctriangles[value * 9 + i] = ctriangles[x * 9 + i]; } cprefix[x] = value; } __global__ void kernelFixIndices1(int *ctriangles, int *newindex, int nTris) { __shared__ int ct[WBLOCK * 9]; int tId = threadIdx.x; int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x, x9 = x * 9; int i, id; if (x >= nTris) return ; // Cooperatively read all triangles processed by one block for (i = 0, id = tId; i < 9; i++, id += WBLOCK) ct[id] = ctriangles[x9 + id]; __syncthreads(); if (x + tId < nTris) { i = tId * 9; if (ct[i] >= 0) ct[i] = encode_tri(newindex[decode_tri(ct[i])], decode_ori(ct[i])); i++; if (ct[i] >= 0) ct[i] = encode_tri(newindex[decode_tri(ct[i])], decode_ori(ct[i])); i++; if (ct[i] >= 0) ct[i] = encode_tri(newindex[decode_tri(ct[i])], decode_ori(ct[i])); i++; i++; i++; i++; if (ct[i] >= 0) ct[i] = encode_tri(newindex[decode_tri(ct[i])], decode_ori(ct[i])); i++; if (ct[i] >= 0) ct[i] = encode_tri(newindex[decode_tri(ct[i])], decode_ori(ct[i])); i++; if (ct[i] >= 0) ct[i] = encode_tri(newindex[decode_tri(ct[i])], decode_ori(ct[i])); } __syncthreads(); for (i = 0, id = tId; i < 9; i++, id += WBLOCK) ctriangles[x9 + id] = ct[id]; } 
/******************************************************************** * Insert missing sites caused by overlapping or bad case shifting ********************************************************************/ void cudaMissing() { cutilSafeCall( hipMemcpyToSymbol(constData, hostConst, 13 * sizeof(REAL)) ); // Collect dead triangles, insert new triangles to these slots first. int *cdeadtri, *cmarker, *cprefix; cutilSafeCall( hipMalloc((void **) &cprefix, 2 * nVerts * sizeof(int)) ); cutilSafeCall( hipMalloc((void **) &cmarker, 2 * nVerts * sizeof(int)) ); dim3 block = dim3(WBLOCK); dim3 grid = dim3(STRIPE, nTris / (STRIPE * block.x) + 1); int lastItem; // Mark all dead triangles as 1 in cmarker array, 0 if not. hipLaunchKernelGGL(( kernelMarkDeadTriangles), dim3(grid), dim3(block) , 0, 0, cmarker, cnewtri, nTris); cutilCheckError(); cutilSafeCall( hipMemcpy(&lastItem, cmarker + nTris - 1, sizeof(int), hipMemcpyDeviceToHost) ); // Use prefix sum to compute the offset prescanArray(cprefix, cmarker, nTris); // Temporary release what we no longer need. cutilSafeCall( hipFree(cmarker)); // Compute the size needed for the list of dead triangles // We also store the unused slots (after nTris but less than 2 * nVerts) int deadCounter; cutilSafeCall( hipMemcpy(&deadCounter, cprefix + nTris - 1, sizeof(int), hipMemcpyDeviceToHost) ); deadCounter += lastItem; int tailTri = nVerts * 2 - nTris; int deadListSize = deadCounter + tailTri; cutilSafeCall( hipMalloc((void **) &cdeadtri, deadListSize * sizeof(int)) ); // Collect these dead triangles into the hipLaunchKernelGGL(( kernelCollectDeadTriangles), dim3(grid), dim3(block) , 0, 0, cdeadtri, cnewtri, cprefix, nTris); cutilCheckError(); //printf("Dead triangles: %i\n", deadCounter); grid = dim3(256); //tailTri / block.x + 1); hipLaunchKernelGGL(( kernelFillIncrement), dim3(grid), dim3(block) , 0, 0, cdeadtri + deadCounter, nTris, tailTri); cutilCheckError(); /******************************************************** * Process missing sites ********************************************************/ int flag, *clocation; BYTE *caffected; cutilSafeCall( hipMalloc((void **) &caffected, nVerts * sizeof(BYTE)) ); cutilSafeCall( hipMalloc((void **) &clocation, nVerts * sizeof(int)) ); cutilSafeCall( hipMalloc((void **) &cmarker, nVerts * sizeof(int)) ); block = dim3(128); int triUsed = 0; do { // cprefix will be used as a marker for voting which insertion can be processed cutilSafeCall( hipMemset(cprefix, 127, nVerts * 2 * sizeof(int)) ); cutilSafeCall( hipMemset(cflag, 0, sizeof(int)) ); // Locate triangles containing the missing sites grid = dim3(STRIPE, nVerts / (STRIPE * block.x) + 1); hipLaunchKernelGGL(( kernelLocateTriangleContainer), dim3(grid), dim3(block) , 0, 0, ctriangles, cvertarr, tvertices, clocation, cprefix, covertices, nVerts, cflag); cutilCheckError(); cutilSafeCall( hipMemcpy(&flag, cflag, sizeof(int), hipMemcpyDeviceToHost) ); if (flag > 0) { cutilSafeCall( hipMemset(cmarker, 0, nVerts * sizeof(int)) ); cutilSafeCall( hipMemset(caffected, 0, nVerts) ); // Determine which missing point insertion can be done in this pass hipLaunchKernelGGL(( kernelPreprocessTriangles), dim3(grid), dim3(block) , 0, 0, ctriangles, cvertarr, clocation, cprefix, tvertices, cnewtri, cmarker, caffected, nVerts, step); // In cmarker we have the number of new triangles // that will be generated by inserting each site (0 or 2). prescanArray(cprefix, cmarker, nVerts); // We remove the container triangle and fix the vertex array. 
hipLaunchKernelGGL(( kernelFixVertexArrayMissing), dim3(grid), dim3(block) , 0, 0, ctriangles, cvertarr, caffected, cnewtri, nVerts); // We then insert three new triangles for each missing site inserted. hipLaunchKernelGGL(( kernelInsertMissingSites), dim3(grid), dim3(block) , 0, 0, ctriangles, cvertarr, clocation, cmarker, cdeadtri + triUsed, cprefix, cnewtri, nVerts, step); // Update the offset in the dead triangle list cutilSafeCall( hipMemcpy(&lastItem, cmarker + nVerts - 1, sizeof(int), hipMemcpyDeviceToHost) ); int used; cutilSafeCall( hipMemcpy(&used, cprefix + nVerts - 1, sizeof(int), hipMemcpyDeviceToHost) ); triUsed += used + lastItem; int newsize = MAX(nTris, nTris - deadCounter + triUsed); // Update links between the new triangles and the old one. grid = dim3(STRIPE, newsize / (STRIPE * block.x) + 1); hipLaunchKernelGGL(( kernelUpdateMissingTriangleLinks), dim3(grid), dim3(block) , 0, 0, ctriangles, cvertarr, cnewtri, newsize, step); //printf("--------Insert missing sites - step %i ; Inserted %i triangles\n", step, used + lastItem); } step++; } while (flag > 0); // We do not keep track of the dead triangles after this, // because after removing the fake boundary, there would be a lot more, and they // will be recompute and recompact by then. deadCounter -= triUsed; // If we have used up all the dead triangles and more, we update nTris if (deadCounter < 0) nTris -= deadCounter; /******* DONE *******/ /********************************************************* * Compact the triangle list *********************************************************/ cutilSafeCall( hipFree(cmarker)); cutilSafeCall( hipFree(clocation)); cutilSafeCall( hipFree(caffected)); cutilSafeCall( hipFree(cprefix)); cutilSafeCall( hipFree(tvertices)); cutilSafeCall( hipFree(cdeadtri) ); /********************************************************* * Compact the triangle list *********************************************************/ if(deadCounter>0) { int *cvalid, *cprefix1; cutilSafeCall( hipMalloc((void **) &cvalid, 2 * nVerts * sizeof(int)) ); cutilSafeCall( hipMalloc((void **) &cprefix1, 2 * nVerts * sizeof(int)) ); block = dim3(WBLOCK); grid = dim3(STRIPE, nTris / (STRIPE * block.x) + 1); // Mark the valid triangles in the list hipLaunchKernelGGL(( kernelMarkValidTriangles1), dim3(grid), dim3(block) , 0, 0, cnewtri, cvalid, nTris); cutilCheckError(); // Compute the offset of them in the new list prescanArray(cprefix1, cvalid, nTris); int newnTris, lastitem, offset; cutilSafeCall( hipMemcpy(&newnTris, cprefix1 + nTris - 1, sizeof(int), hipMemcpyDeviceToHost) ); cutilSafeCall( hipMemcpy(&lastitem, cvalid + nTris - 1, sizeof(int), hipMemcpyDeviceToHost) ); newnTris += lastitem; cutilSafeCall( hipMemcpy(&offset, cprefix1 + newnTris, sizeof(int), hipMemcpyDeviceToHost) ); // printf("nTris = %i, new nTris = %i\n", nTris, newnTris); // Find all empty slots in the list hipLaunchKernelGGL(( kernelCollectEmptySlots1), dim3(grid), dim3(block) , 0, 0, cnewtri, cprefix1, cvalid, nTris); cutilCheckError(); // Move those valid triangles at the end of the list // to the holes in the list. 
grid = dim3(STRIPE, nTris / (STRIPE * block.x) + 1); hipLaunchKernelGGL(( kernelFillEmptySlots1), dim3(grid), dim3(block) , 0, 0, cnewtri, cprefix1, cvalid, ctriangles, nTris, newnTris, offset); cutilCheckError(); // Fix the links after the index of our triangles are mixed up grid = dim3(STRIPE, newnTris / (STRIPE * block.x) + 1); hipLaunchKernelGGL(( kernelFixIndices1), dim3(grid), dim3(block) , 0, 0, ctriangles, cprefix1, newnTris); cutilCheckError(); cutilSafeCall( hipFree(cprefix1)); cutilSafeCall( hipFree(cvalid)); nTris = newnTris; } }
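// --- Illustrative sketch (not part of the original file) ---------------------
// cudaMissing() above repeatedly uses a mark / prefix-sum / scatter pattern
// (e.g. kernelMarkDeadTriangles -> prescanArray -> kernelCollectDeadTriangles)
// to compact sparse sets of triangle slots. The same idea in a minimal
// standalone form, with Thrust's exclusive_scan standing in for GPU-DT's
// prescanArray (the "dead slot" predicate below is mocked up, not taken from
// the code above):
#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/scan.h>

__global__ void mark_dead(const short* state, int* marker, int n) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    if (x < n) marker[x] = (state[x] < 0) ? 1 : 0;     // 1 = dead slot
}

__global__ void collect_dead(const short* state, const int* offset, int* out, int n) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    if (x < n && state[x] < 0) out[offset[x]] = x;     // scatter by scanned offset
}

int main() {
    const int n = 8;
    short hState[n] = {1, -1, 2, -3, -2, 0, 5, -4};    // negative = dead
    thrust::device_vector<short> state(hState, hState + n);
    thrust::device_vector<int> marker(n), offset(n), dead(n);

    mark_dead<<<1, 64>>>(thrust::raw_pointer_cast(state.data()),
                         thrust::raw_pointer_cast(marker.data()), n);
    thrust::exclusive_scan(marker.begin(), marker.end(), offset.begin());
    collect_dead<<<1, 64>>>(thrust::raw_pointer_cast(state.data()),
                            thrust::raw_pointer_cast(offset.data()),
                            thrust::raw_pointer_cast(dead.data()), n);

    int count = (int)offset[n - 1] + (int)marker[n - 1];
    printf("dead slots: %d (expected 4: indices 1, 3, 4, 7)\n", count);
    return 0;
}
// -----------------------------------------------------------------------------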
9508c21e7a629cfbc76646b0898b3381004199a6.cu
/* Author: Cao Thanh Tung, Qi Meng Date: 15/03/2011 File Name: cudaMissing.cu This file include all CUDA code to perform the inserting missing sites step =============================================================================== Copyright (c) 2011, School of Computing, National University of Singapore. All rights reserved. Project homepage: http://www.comp.nus.edu.sg/~tants/delaunay.html If you use GPU-DT and you like it or have comments on its usefulness etc., we would love to hear from you at <[email protected]>. You may share with us your experience and any possibilities that we may improve the work/code. =============================================================================== Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of the National University of University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission from the National University of Singapore. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma warning(disable: 4311 4312) #include <device_functions.h> #include <stdio.h> #include <string.h> #include "../gpudt.h" #include "cuda.h" #include "common.h" #include "cudaCCW.h" #include "cudaScanLargeArray.h" #define MAXINT 2147483647 /*********************************************************** * Declarations ***********************************************************/ #define WBLOCK 256 #define INSERT_TRIANGLE(v0, v1, v2, tri) \ tmp = tri; \ cnewtri[tmp] = step; \ ctriangles[tmp * 9 + 3] = v2; \ ctriangles[tmp * 9 + 4] = v0; \ ctriangles[tmp * 9 + 5] = v1; \ ctriangles[tmp * 9 + 6] = atomicExch(&cvertarr[v0], (tmp << 2)); \ ctriangles[tmp * 9 + 7] = atomicExch(&cvertarr[v1], (tmp << 2) | 1); \ ctriangles[tmp * 9 + 8] = atomicExch(&cvertarr[v2], (tmp << 2) | 2); \ #define SET_TRIANGLE(vOrg, vDest, vApex, nOrg, nDest, nApex, tri, ori) \ ctriangles[(tri) * 9 + 3 + ((ori) + 1) % 3] = (vOrg); \ ctriangles[(tri) * 9 + 3 + ((ori) + 2) % 3] = (vDest); \ ctriangles[(tri) * 9 + 3 + (ori)] = (vApex); \ ctriangles[(tri) * 9 + 6 + (ori)] = (nOrg); \ ctriangles[(tri) * 9 + 6 + ((ori) + 1) % 3] = (nDest); \ ctriangles[(tri) * 9 + 6 + ((ori) + 2) % 3] = (nApex) #define UPDATE_TEMP_LINK(pTriOri, pNext) \ if ((pTriOri) >= 0) \ ctriangles[decode_tri(pTriOri) * 9 + 6 + decode_ori(pTriOri)] = -(pNext) #define UPDATE_LINK(pTriOri, pNext) \ if ((pTriOri) >= 0) \ ctriangles[decode_tri(pTriOri) * 9 + decode_ori(pTriOri)] = (pNext) /************************************************************** * Exported methods **************************************************************/ extern "C" void cudaMissing(); /************************************************************** * Definitions **************************************************************/ // Decode an oriented triangle. // An oriented triangle consists of 32 bits. // - 30 highest bits represent the triangle index, // - 2 lowest bits represent the orientation (the starting vertex, 0, 1 or 2) #define decode_tri(x) ((x) >> 2) #define decode_ori(x) ((x) & 3) #define encode_tri(tri, ori) (((tri) << 2) | (ori)) #define MAX(x, y) ((x) < (y) ? (y) : (x)) /************************************************************ * Variables and functions shared with the main module ************************************************************/ extern int nTris, nVerts, nPoints,nConstraints; extern int *ctriangles; extern int *cvertarr; extern int *tvertices; extern REAL2 *covertices; extern short *cnewtri; extern int step; extern int *cflag; /******************************************************************* * Fill an array with increasing numbers *******************************************************************/ __global__ void kernelFillIncrement(int *list, int start, int length) { int x = blockIdx.x * blockDim.x + threadIdx.x; int noThreads = blockDim.x * gridDim.x; for (; x < length; x += noThreads) list[x] = start + x; } /******************************************************************** * Collect all dead triangles into a list. ********************************************************************/ __global__ void kernelMarkDeadTriangles(int *cmarker, short *cnewtri, int nTris) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nTris) return ; cmarker[x] = (cnewtri[x] >= 0 ? 
0 : 1); } __global__ void kernelCollectDeadTriangles(int *cdeadTri, short *cnewtri, int *cmarker, int nTris) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nTris || cnewtri[x] >= 0) return ; int id = cmarker[x]; cdeadTri[id] = x; } /******************************************************************** * Locate the triangle which we are gonna insert a missing site to. * - If the anchor of the missing site is not yet inserted, skip * - Locate the triangle and mark it to avoid two insertions into * the same triangle. * - Guarantee that the missing site is not on the boundary due to * huge fake boundary added. ********************************************************************/ __global__ void kernelLocateTriangleContainer(int *ctriangles, int *cvertarr, int *tvertices, int *clocation, int *ctags, REAL2 *covertices, int nVerts, int *cflag) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nVerts || cvertarr[x] != -1) return ; int anchor = tvertices[x]; if (cvertarr[anchor] < 0) { clocation[x] = MAXINT; return ; } REAL2 v = covertices[x]; REAL2 vanchor = covertices[anchor]; REAL ccDest, ccApex, ccOpposite; int pNextTri, pTri, pOri; pNextTri = cvertarr[anchor]; pTri = decode_tri(pNextTri); pOri = decode_ori(pNextTri); int pDest, pApex, pOrg, pTmp; REAL2 vDest, vApex, vOrg, vTmp; pDest = ctriangles[pTri * 9 + 3 + (pOri + 2) % 3]; // Dest vDest = covertices[pDest]; ccDest = cuda_ccw(vanchor, vDest, v); do { pApex = ctriangles[pTri * 9 + 3 + pOri]; // apex vApex = covertices[pApex]; ccApex = cuda_ccw(vanchor, vApex, v); if (ccDest >= 0.0 && ccApex <= 0.0) // Inside the angle break; pDest = pApex; vDest = vApex; ccDest = ccApex; pNextTri = ctriangles[pTri * 9 + (pOri + 2) % 3]; pTri = decode_tri(pNextTri); pOri = decode_ori(pNextTri); } while (true); // Found an angle, now look for the actual triangle // containing me. ccOpposite = cuda_ccw(vDest, vApex, v); if (ccOpposite < 0.0) { // It's not right here, need to walk a bit further while (true) { // Get the opposite triangle pNextTri = ctriangles[pTri * 9 + (pOri + 1) % 3]; //if (pNextTri < 0) { // cvertarr[x] = -100; // clocation[x] = encode_tri(pTri, (pOri + 1) % 3); // return ; //} pTri = decode_tri(pNextTri); // Rotate the triangle so that the org is opposite the previous org pOri = (decode_ori(pNextTri) + 2) % 3; pOrg = ctriangles[pTri * 9 + 3 + (pOri + 1) % 3]; vOrg = covertices[pOrg]; pTmp = pDest; pDest = pApex; pApex = pTmp; vTmp = vDest; vDest = vApex; vApex = vTmp; ccDest = cuda_ccw(vOrg, vDest, v); ccApex = cuda_ccw(vApex, vOrg, v); bool moveleft; if (ccDest >= 0.0) if (ccApex >= 0.0) // Found it! break; else moveleft = false; else if (ccApex >= 0.0) moveleft = true; else moveleft = (vOrg.x - v.x) * (vApex.x - vDest.x) + (vOrg.y - v.y) * (vApex.y - vDest.y) > 0.0; if (moveleft) { pOri = (pOri + 2) % 3; pApex = pDest; pDest = pOrg; vApex = vDest; vDest = vOrg; ccOpposite = ccDest; // Orientation is unimportant } else { pOri = (pOri + 1) % 3; pDest = pApex; pApex = pOrg; vDest = vApex; vApex = vOrg; ccOpposite = ccApex; } } } int c0 = 0; if (ccDest == 0.0) c0++; if (ccApex == 0.0) c0++; if (ccOpposite == 0.0) c0++; if (c0 == 0) { // Easiest case, it's right here! clocation[x] = pNextTri + 1; // Mark to indicate that it's a simple case. atomicMin(&ctags[pTri], x); } else if (c0 > 1) { // Duplicate point clocation[x] = pNextTri + 1; cvertarr[x] = -2; return ; } else { // On an edge. // Make sure our 'location' triangle always face toward that edge // (i.e. 
that edge will be opposite to the origin) if (ccDest == 0.0) pOri = (pOri + 2) % 3; else if (ccApex == 0.0) pOri = (pOri + 1) % 3; clocation[x] = -encode_tri(pTri, pOri) - 1; // To avoid deadlock when 3 sites want to insert on 3 edges, // and they try to mark 3 pairs triangles: (a, b), (b, c), (c, a) atomicMin(&ctags[pTri], x); atomicMin(&ctags[decode_tri(ctriangles[pTri * 9 + (pOri + 1) % 3])], x); } *cflag = 1; } /*************************************************************************** * Determine which missing point insertion can be take place, * mark those triangles that need to be deleted. ***************************************************************************/ __global__ void kernelPreprocessTriangles(int *ctriangles, int *cvertarr, int *clocation, int *ctags, int *tvertices, short *cnewtri, int *cmarker, BYTE *caffected, int nVerts, int step) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nVerts || cvertarr[x] != -1) return ; int pNextTri = clocation[x]; if (pNextTri == MAXINT) return ; int pTri = decode_tri(abs(pNextTri) - 1); int pOri = decode_ori(abs(pNextTri) - 1); int popp, pOppTri, pOppOri; bool success; if (pNextTri >= 0) // one triangle success = (ctags[pTri] == x); else { popp = (ctriangles[pTri * 9 + (pOri + 1) % 3]); pOppTri = decode_tri(popp); pOppOri = decode_ori(popp); success = (ctags[pTri] == x && ctags[pOppTri] == x); } if (success) { cmarker[x] = 2; cnewtri[pTri] = -step; caffected[ctriangles[pTri * 9 + 3]] = 1; caffected[ctriangles[pTri * 9 + 4]] = 1; caffected[ctriangles[pTri * 9 + 5]] = 1; if (pNextTri < 0) { cnewtri[pOppTri] = -step; caffected[ctriangles[pOppTri * 9 + 3 + pOppOri]] = 1; } } } /************************************************************ * Fix the vertex array for those affected sites ************************************************************/ __global__ void kernelFixVertexArrayMissing(int *ctriangles, int *cvertarr, BYTE *caffected, short *cnewtri, int nVerts) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nVerts || caffected[x] != 1) return ; int p = cvertarr[x], pnext = p; // Find the first valid triangle while (pnext >= 0 && cnewtri[decode_tri(pnext)] < 0) pnext = ctriangles[decode_tri(pnext) * 9 + 6 + decode_ori(pnext)]; if (pnext != p) cvertarr[x] = pnext; while (pnext >= 0) { // Find an invalid triangle do { p = pnext; pnext = ctriangles[decode_tri(p) * 9 + 6 + decode_ori(p)]; } while (pnext >= 0 && cnewtri[decode_tri(pnext)] >= 0); if (pnext >= 0) { // Now pnext is deleted, so we fix the link for p. 
// Find the next valid triangle while (pnext >= 0 && cnewtri[decode_tri(pnext)] < 0) pnext = ctriangles[decode_tri(pnext) * 9 + 6 + decode_ori(pnext)]; ctriangles[decode_tri(p) * 9 + 6 + decode_ori(p)] = pnext; } } } __global__ void kernelInsertMissingSites(int *ctriangles, int *cvertarr, int *clocation, int *cmarker, int *cavailtri, int *cprefix, short *cnewtri, int nVerts, int step) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nVerts || cmarker[x] != 2) return ; int pNextTri = clocation[x]; int pTri = decode_tri(abs(pNextTri) - 1); int pOri = decode_ori(abs(pNextTri) - 1); int pOrg, pDest, pApex, pOpposite; int t1, t2, t3, t4, tmp; int offset = cprefix[x]; t1 = pTri; t2 = cavailtri[offset]; t3 = cavailtri[offset + 1]; pApex = ctriangles[pTri * 9 + 3 + pOri]; pOrg = ctriangles[pTri * 9 + 3 + (pOri + 1) % 3]; pDest = ctriangles[pTri * 9 + 3 + (pOri + 2) % 3]; if (pNextTri >= 0) { // one triangle INSERT_TRIANGLE(pOrg, pDest, x, t1); INSERT_TRIANGLE(pDest, pApex, x, t2); INSERT_TRIANGLE(pApex, pOrg, x, t3); } else { int nDest = ctriangles[pTri * 9 + (pOri + 1) % 3]; int pOppTri = decode_tri(nDest); int pOppOri = decode_ori(nDest); pOpposite = ctriangles[pOppTri * 9 + 3 + pOppOri]; t4 = pOppTri; INSERT_TRIANGLE(pOrg, pDest, x, t1); INSERT_TRIANGLE(pDest, pOpposite, x, t2); INSERT_TRIANGLE(pOpposite, pApex, x, t3); INSERT_TRIANGLE(pApex, pOrg, x, t4); } } /****************************************************************** * Update the links between triangles after adding new triangles ******************************************************************/ __global__ void kernelUpdateMissingTriangleLinks(int *ctriangles, int *cvertarr, short *cnewtri, int nTris, int step) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nTris || cnewtri[x] != step) return ; int p0, p1, p2, n0 = -1, n1 = -1, n2 = -1; int nCounter, pNextTri, pTri, pOri, pTri9; int x9 = x * 9; p2 = ctriangles[x9 + 3]; p1 = ctriangles[x9 + 5]; p0 = ctriangles[x9 + 4]; nCounter = 0; // orientation 0 // Travel through the list of triangles sharing vertex 0 with this triangle. // In this list we can find at most two triangles sharing edge (p0, p1) and // (p2, p0) with our triangle. 
pNextTri = cvertarr[p0]; while (pNextTri >= 0 && nCounter < 2) { pTri = decode_tri(pNextTri); pOri = decode_ori(pNextTri); pTri9 = pTri * 9; if (p2 == ctriangles[pTri9 + 3 + (pOri + 2) % 3]) { // NextDest n2 = pNextTri; ctriangles[pTri9 + pOri] = (x << 2) | 2; nCounter++; } if (p1 == ctriangles[pTri9 + 3 + pOri]) { // NextApex n0 = (pTri << 2) | ((pOri + 2) % 3); ctriangles[pTri9 + (pOri + 2) % 3] = (x << 2); nCounter++; } pNextTri = ctriangles[pTri9 + 6 + pOri]; } // orientation 1 // Find the triangle with edge (p1, p2) pNextTri = cvertarr[p1]; while (pNextTri >= 0) { pTri = decode_tri(pNextTri); pOri = decode_ori(pNextTri); pTri9 = pTri * 9; if (p2 == ctriangles[pTri9 + 3 + pOri]) { // NextApex n1 = (pTri << 2) | ((pOri + 2) % 3); ctriangles[pTri9 + (pOri + 2) % 3] = (x << 2) | 1; break ; } pNextTri = ctriangles[pTri9 + 6 + pOri]; } ctriangles[x9 + 0] = n0; ctriangles[x9 + 1] = n1; ctriangles[x9 + 2] = n2; } /******************************************************************** * Fix vertex array ********************************************************************/ __global__ void kernelFixVertArray_Missing(int *ctriangles,int nTris, int *cvertarr) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nTris) return ; int v0 = ctriangles[x * 9 + 4]; int v1 = ctriangles[x * 9 + 5]; int v2 = ctriangles[x * 9 + 3]; ctriangles[x * 9 + 6] = atomicExch(&cvertarr[v0], (x << 2)); ctriangles[x * 9 + 7] = atomicExch(&cvertarr[v1], (x << 2) | 1); ctriangles[x * 9 + 8] = atomicExch(&cvertarr[v2], (x << 2) | 2); } __global__ void kernelMarkValidTriangles1(short *cnewtri, int *cvalid, int nTris) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nTris) return ; cvalid[x] = (cnewtri[x] >= 0) ? 1 : 0; } __global__ void kernelCollectEmptySlots1(short *cnewtri, int *cprefix, int *cempty, int nTris) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nTris || cnewtri[x] >= 0) return ; int id = x - cprefix[x]; cempty[id] = x; } __global__ void kernelFillEmptySlots1(short *cnewtri, int *cprefix, int *cempty, int *ctriangles, int nTris, int newnTris, int offset) { int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if (x >= nTris || cnewtri[x] < 0) return ; int value; if (x < newnTris) value = x; else { value = cempty[cprefix[x] - offset]; for (int i = 0; i < 9; i++) ctriangles[value * 9 + i] = ctriangles[x * 9 + i]; } cprefix[x] = value; } __global__ void kernelFixIndices1(int *ctriangles, int *newindex, int nTris) { __shared__ int ct[WBLOCK * 9]; int tId = threadIdx.x; int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x, x9 = x * 9; int i, id; if (x >= nTris) return ; // Cooperatively read all triangles processed by one block for (i = 0, id = tId; i < 9; i++, id += WBLOCK) ct[id] = ctriangles[x9 + id]; __syncthreads(); if (x + tId < nTris) { i = tId * 9; if (ct[i] >= 0) ct[i] = encode_tri(newindex[decode_tri(ct[i])], decode_ori(ct[i])); i++; if (ct[i] >= 0) ct[i] = encode_tri(newindex[decode_tri(ct[i])], decode_ori(ct[i])); i++; if (ct[i] >= 0) ct[i] = encode_tri(newindex[decode_tri(ct[i])], decode_ori(ct[i])); i++; i++; i++; i++; if (ct[i] >= 0) ct[i] = encode_tri(newindex[decode_tri(ct[i])], decode_ori(ct[i])); i++; if (ct[i] >= 0) ct[i] = encode_tri(newindex[decode_tri(ct[i])], decode_ori(ct[i])); i++; if (ct[i] >= 0) ct[i] = encode_tri(newindex[decode_tri(ct[i])], decode_ori(ct[i])); } __syncthreads(); for (i = 0, id = tId; i < 9; i++, id += WBLOCK) ctriangles[x9 + id] = ct[id]; } 
/******************************************************************** * Insert missing sites caused by overlapping or bad case shifting ********************************************************************/ void cudaMissing() { cutilSafeCall( cudaMemcpyToSymbol(constData, hostConst, 13 * sizeof(REAL)) ); // Collect dead triangles, insert new triangles to these slots first. int *cdeadtri, *cmarker, *cprefix; cutilSafeCall( cudaMalloc((void **) &cprefix, 2 * nVerts * sizeof(int)) ); cutilSafeCall( cudaMalloc((void **) &cmarker, 2 * nVerts * sizeof(int)) ); dim3 block = dim3(WBLOCK); dim3 grid = dim3(STRIPE, nTris / (STRIPE * block.x) + 1); int lastItem; // Mark all dead triangles as 1 in cmarker array, 0 if not. kernelMarkDeadTriangles<<< grid, block >>>(cmarker, cnewtri, nTris); cutilCheckError(); cutilSafeCall( cudaMemcpy(&lastItem, cmarker + nTris - 1, sizeof(int), cudaMemcpyDeviceToHost) ); // Use prefix sum to compute the offset prescanArray(cprefix, cmarker, nTris); // Temporary release what we no longer need. cutilSafeCall( cudaFree(cmarker)); // Compute the size needed for the list of dead triangles // We also store the unused slots (after nTris but less than 2 * nVerts) int deadCounter; cutilSafeCall( cudaMemcpy(&deadCounter, cprefix + nTris - 1, sizeof(int), cudaMemcpyDeviceToHost) ); deadCounter += lastItem; int tailTri = nVerts * 2 - nTris; int deadListSize = deadCounter + tailTri; cutilSafeCall( cudaMalloc((void **) &cdeadtri, deadListSize * sizeof(int)) ); // Collect these dead triangles into the kernelCollectDeadTriangles<<< grid, block >>>(cdeadtri, cnewtri, cprefix, nTris); cutilCheckError(); //printf("Dead triangles: %i\n", deadCounter); grid = dim3(256); //tailTri / block.x + 1); kernelFillIncrement<<< grid, block >>>(cdeadtri + deadCounter, nTris, tailTri); cutilCheckError(); /******************************************************** * Process missing sites ********************************************************/ int flag, *clocation; BYTE *caffected; cutilSafeCall( cudaMalloc((void **) &caffected, nVerts * sizeof(BYTE)) ); cutilSafeCall( cudaMalloc((void **) &clocation, nVerts * sizeof(int)) ); cutilSafeCall( cudaMalloc((void **) &cmarker, nVerts * sizeof(int)) ); block = dim3(128); int triUsed = 0; do { // cprefix will be used as a marker for voting which insertion can be processed cutilSafeCall( cudaMemset(cprefix, 127, nVerts * 2 * sizeof(int)) ); cutilSafeCall( cudaMemset(cflag, 0, sizeof(int)) ); // Locate triangles containing the missing sites grid = dim3(STRIPE, nVerts / (STRIPE * block.x) + 1); kernelLocateTriangleContainer<<< grid, block >>>(ctriangles, cvertarr, tvertices, clocation, cprefix, covertices, nVerts, cflag); cutilCheckError(); cutilSafeCall( cudaMemcpy(&flag, cflag, sizeof(int), cudaMemcpyDeviceToHost) ); if (flag > 0) { cutilSafeCall( cudaMemset(cmarker, 0, nVerts * sizeof(int)) ); cutilSafeCall( cudaMemset(caffected, 0, nVerts) ); // Determine which missing point insertion can be done in this pass kernelPreprocessTriangles<<< grid, block >>>(ctriangles, cvertarr, clocation, cprefix, tvertices, cnewtri, cmarker, caffected, nVerts, step); // In cmarker we have the number of new triangles // that will be generated by inserting each site (0 or 2). prescanArray(cprefix, cmarker, nVerts); // We remove the container triangle and fix the vertex array. kernelFixVertexArrayMissing<<< grid, block >>>(ctriangles, cvertarr, caffected, cnewtri, nVerts); // We then insert three new triangles for each missing site inserted. 
kernelInsertMissingSites<<< grid, block >>>(ctriangles, cvertarr, clocation, cmarker, cdeadtri + triUsed, cprefix, cnewtri, nVerts, step); // Update the offset in the dead triangle list cutilSafeCall( cudaMemcpy(&lastItem, cmarker + nVerts - 1, sizeof(int), cudaMemcpyDeviceToHost) ); int used; cutilSafeCall( cudaMemcpy(&used, cprefix + nVerts - 1, sizeof(int), cudaMemcpyDeviceToHost) ); triUsed += used + lastItem; int newsize = MAX(nTris, nTris - deadCounter + triUsed); // Update links between the new triangles and the old one. grid = dim3(STRIPE, newsize / (STRIPE * block.x) + 1); kernelUpdateMissingTriangleLinks<<< grid, block >>>(ctriangles, cvertarr, cnewtri, newsize, step); //printf("--------Insert missing sites - step %i ; Inserted %i triangles\n", step, used + lastItem); } step++; } while (flag > 0); // We do not keep track of the dead triangles after this, // because after removing the fake boundary, there would be a lot more, and they // will be recompute and recompact by then. deadCounter -= triUsed; // If we have used up all the dead triangles and more, we update nTris if (deadCounter < 0) nTris -= deadCounter; /******* DONE *******/ /********************************************************* * Compact the triangle list *********************************************************/ cutilSafeCall( cudaFree(cmarker)); cutilSafeCall( cudaFree(clocation)); cutilSafeCall( cudaFree(caffected)); cutilSafeCall( cudaFree(cprefix)); cutilSafeCall( cudaFree(tvertices)); cutilSafeCall( cudaFree(cdeadtri) ); /********************************************************* * Compact the triangle list *********************************************************/ if(deadCounter>0) { int *cvalid, *cprefix1; cutilSafeCall( cudaMalloc((void **) &cvalid, 2 * nVerts * sizeof(int)) ); cutilSafeCall( cudaMalloc((void **) &cprefix1, 2 * nVerts * sizeof(int)) ); block = dim3(WBLOCK); grid = dim3(STRIPE, nTris / (STRIPE * block.x) + 1); // Mark the valid triangles in the list kernelMarkValidTriangles1<<< grid, block >>>(cnewtri, cvalid, nTris); cutilCheckError(); // Compute the offset of them in the new list prescanArray(cprefix1, cvalid, nTris); int newnTris, lastitem, offset; cutilSafeCall( cudaMemcpy(&newnTris, cprefix1 + nTris - 1, sizeof(int), cudaMemcpyDeviceToHost) ); cutilSafeCall( cudaMemcpy(&lastitem, cvalid + nTris - 1, sizeof(int), cudaMemcpyDeviceToHost) ); newnTris += lastitem; cutilSafeCall( cudaMemcpy(&offset, cprefix1 + newnTris, sizeof(int), cudaMemcpyDeviceToHost) ); // printf("nTris = %i, new nTris = %i\n", nTris, newnTris); // Find all empty slots in the list kernelCollectEmptySlots1<<< grid, block >>>(cnewtri, cprefix1, cvalid, nTris); cutilCheckError(); // Move those valid triangles at the end of the list // to the holes in the list. grid = dim3(STRIPE, nTris / (STRIPE * block.x) + 1); kernelFillEmptySlots1<<< grid, block >>>(cnewtri, cprefix1, cvalid, ctriangles, nTris, newnTris, offset); cutilCheckError(); // Fix the links after the index of our triangles are mixed up grid = dim3(STRIPE, newnTris / (STRIPE * block.x) + 1); kernelFixIndices1<<< grid, block >>>(ctriangles, cprefix1, newnTris); cutilCheckError(); cutilSafeCall( cudaFree(cprefix1)); cutilSafeCall( cudaFree(cvalid)); nTris = newnTris; } }
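All of the kernels in the entry above linearize a two-dimensional launch grid with (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x, and the host code shapes the grid as dim3(STRIPE, n / (STRIPE * block.x) + 1) so that very large triangle and vertex counts do not overflow the grid's x-dimension. The standalone sketch below shows the same launch and indexing pattern in isolation; kStripe and kBlock are illustrative stand-ins for the file's STRIPE and WBLOCK constants, which are defined elsewhere in the original source and not shown here.

// Minimal, self-contained sketch of the flattened 2D-grid indexing used by the
// kernels above. kStripe/kBlock are stand-ins for STRIPE/WBLOCK; the values
// below are illustrative only.
#include <cstdio>
#include <cuda_runtime.h>

static const int kStripe = 128;   // stand-in for STRIPE
static const int kBlock  = 256;   // stand-in for WBLOCK

__global__ void fillIota(int *out, int n) {
    // Same linearization as the triangulation kernels: the 2D grid is
    // flattened into a single element index, with a bounds guard for the
    // overshoot threads of the last stripe.
    int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
    if (x >= n) return;
    out[x] = x;
}

int main() {
    const int n = 1 << 20;
    int *d_out = nullptr;
    cudaMalloc(&d_out, n * sizeof(int));

    dim3 block(kBlock);
    // Grid shape mirrors the host code above: x-dimension fixed to kStripe,
    // y-dimension sized so that kStripe * gridDim.y * blockDim.x >= n.
    dim3 grid(kStripe, n / (kStripe * block.x) + 1);
    fillIota<<<grid, block>>>(d_out, n);
    cudaDeviceSynchronize();

    int last = 0;
    cudaMemcpy(&last, d_out + n - 1, sizeof(int), cudaMemcpyDeviceToHost);
    printf("last element = %d (expected %d)\n", last, n - 1);
    cudaFree(d_out);
    return 0;
}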
4f214709d69906b0e7bbe3c5d2224e297b8b1a05.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from sparse-iter/blas/zmergeblockkrylov.cu, normal z -> c, Tue Aug 30 09:38:47 2016 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 16 #define PRECISION_c // These routines merge multiple kernels from qmr into one. /* -------------------------------------------------------------------------- */ __global__ void magma_cmergeblockkrylov_kernel( int num_rows, int num_cols, magmaFloatComplex *alpha, magmaFloatComplex *p, magmaFloatComplex *x ) { int num_vecs = num_cols; int row = blockIdx.x * blockDim.x + threadIdx.x; int vec = blockIdx.y; if ( row<num_rows ) { magmaFloatComplex val = x[ row + vec * num_rows ]; for( int j=0; j<num_vecs; j++ ){ magmaFloatComplex lalpha = alpha[ j * num_vecs + vec ]; magmaFloatComplex xval = p[ row + j * num_rows ]; val += lalpha * xval; } x[ row + vec * num_rows ] = val; } } /** Purpose ------- Mergels multiple operations into one kernel: v = y / rho y = y / rho w = wt / psi z = z / psi @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] alpha magmaFloatComplex_ptr matrix containing all SKP @param[in] p magmaFloatComplex_ptr search directions @param[in,out] x magmaFloatComplex_ptr approximation vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_cmergeblockkrylov( magma_int_t num_rows, magma_int_t num_cols, magmaFloatComplex_ptr alpha, magmaFloatComplex_ptr p, magmaFloatComplex_ptr x, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE, num_cols ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_cmergeblockkrylov_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, alpha, p, x ); return MAGMA_SUCCESS; }
4f214709d69906b0e7bbe3c5d2224e297b8b1a05.cu
/* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from sparse-iter/blas/zmergeblockkrylov.cu, normal z -> c, Tue Aug 30 09:38:47 2016 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 16 #define PRECISION_c // These routines merge multiple kernels from qmr into one. /* -------------------------------------------------------------------------- */ __global__ void magma_cmergeblockkrylov_kernel( int num_rows, int num_cols, magmaFloatComplex *alpha, magmaFloatComplex *p, magmaFloatComplex *x ) { int num_vecs = num_cols; int row = blockIdx.x * blockDim.x + threadIdx.x; int vec = blockIdx.y; if ( row<num_rows ) { magmaFloatComplex val = x[ row + vec * num_rows ]; for( int j=0; j<num_vecs; j++ ){ magmaFloatComplex lalpha = alpha[ j * num_vecs + vec ]; magmaFloatComplex xval = p[ row + j * num_rows ]; val += lalpha * xval; } x[ row + vec * num_rows ] = val; } } /** Purpose ------- Mergels multiple operations into one kernel: v = y / rho y = y / rho w = wt / psi z = z / psi @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] alpha magmaFloatComplex_ptr matrix containing all SKP @param[in] p magmaFloatComplex_ptr search directions @param[in,out] x magmaFloatComplex_ptr approximation vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_cmergeblockkrylov( magma_int_t num_rows, magma_int_t num_cols, magmaFloatComplex_ptr alpha, magmaFloatComplex_ptr p, magmaFloatComplex_ptr x, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE, num_cols ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_cmergeblockkrylov_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>> ( num_rows, num_cols, alpha, p, x ); return MAGMA_SUCCESS; }
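The magma_cmergeblockkrylov_kernel in the pair above folds the block-Krylov update of x(:, vec) by the search directions p and the SKP matrix alpha into a single kernel, one thread per row. The sketch below re-implements the same arithmetic in plain CUDA with real floats instead of magmaFloatComplex so it can be compiled and checked on its own. The launch shape here (one grid-y slice per column) is a simplification chosen for clarity and is not the wrapper's Bs/Gs configuration, and none of the names below are MAGMA APIs.

// Standalone sketch of the merged block-Krylov update performed by
// magma_cmergeblockkrylov_kernel, with the same memory indexing but using
// float instead of magmaFloatComplex. The driver is illustrative only.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

__global__ void merge_blockkrylov(int num_rows, int num_cols,
                                  const float *alpha, const float *p, float *x) {
    int num_vecs = num_cols;
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    int vec = blockIdx.y;
    if (row < num_rows) {
        float val = x[row + vec * num_rows];
        for (int j = 0; j < num_vecs; ++j)
            val += alpha[j * num_vecs + vec] * p[row + j * num_rows];
        x[row + vec * num_rows] = val;
    }
}

int main() {
    const int m = 1000, n = 4;  // num_rows, num_cols
    std::vector<float> alpha(n * n, 0.5f), p(m * n, 1.0f), x(m * n, 2.0f);

    float *dA, *dP, *dX;
    cudaMalloc(&dA, alpha.size() * sizeof(float));
    cudaMalloc(&dP, p.size() * sizeof(float));
    cudaMalloc(&dX, x.size() * sizeof(float));
    cudaMemcpy(dA, alpha.data(), alpha.size() * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dP, p.data(), p.size() * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dX, x.data(), x.size() * sizeof(float), cudaMemcpyHostToDevice);

    // One thread per row, one grid-y slice per column (a simplified launch
    // shape chosen for this sketch).
    dim3 block(256);
    dim3 grid((m + block.x - 1) / block.x, n);
    merge_blockkrylov<<<grid, block>>>(m, n, dA, dP, dX);

    cudaMemcpy(x.data(), dX, x.size() * sizeof(float), cudaMemcpyDeviceToHost);
    // Every entry should now be 2 + 4 * (0.5 * 1) = 4.
    printf("x[0] = %f\n", x[0]);
    cudaFree(dA); cudaFree(dP); cudaFree(dX);
    return 0;
}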
5a8c5c9d8805cd0bbe27d5e4e49694440453201d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cudaCompress/util/Quantize.h> #include <cudaCompress/tools/Operator.h> #include <cudaCompress/tools/Functor.h> #include <cudaCompress/cudaUtil.h> #include <cudaCompress/InstanceImpl.h> #include <cudaCompress/reduce/reduce_app.cui> #include "QuantizeKernels.cui" namespace cudaCompress { namespace util { void floatToByte2D(byte* dpQuant, uint channelCount, uint channel, const float* dpData, uint sizeX, uint sizeY, uint rowPitchSrc) { if(rowPitchSrc == 0) rowPitchSrc = sizeX; dim3 blockSize(64, 4); uint blockCountX = (sizeX + blockSize.x - 1) / blockSize.x; uint blockCountY = (sizeY + blockSize.y - 1) / blockSize.y; dim3 blockCount(blockCountX, blockCountY); hipLaunchKernelGGL(( quantize2Dkernel), dim3(blockCount), dim3(blockSize), 0, 0, dpQuant + channel, channelCount, dpData, sizeX, sizeY, rowPitchSrc); cudaCheckMsg("quantize2Dkernel execution failed"); } void byteToFloat2D(float* dpData, const byte* dpQuant, uint channelCount, uint channel, uint sizeX, uint sizeY, uint rowPitchDst) { if(rowPitchDst == 0) rowPitchDst = sizeX; dim3 blockSize(64, 4); uint blockCountX = (sizeX + blockSize.x - 1) / blockSize.x; uint blockCountY = (sizeY + blockSize.y - 1) / blockSize.y; dim3 blockCount(blockCountX, blockCountY); hipLaunchKernelGGL(( unquantize2Dkernel), dim3(blockCount), dim3(blockSize), 0, 0, dpData, dpQuant + channel, channelCount, sizeX, sizeY, rowPitchDst); cudaCheckMsg("unquantize2Dkernel execution failed"); } void symbolize(ushort* dpSymbols, const short* dpData, uint sizeX, uint sizeY, uint sizeZ, uint rowPitchSrc, uint slicePitchSrc) { if(rowPitchSrc == 0) rowPitchSrc = sizeX; if(slicePitchSrc == 0) slicePitchSrc = rowPitchSrc * sizeY; dim3 blockSize(64, 4, 1); uint blockCountX = (sizeX + blockSize.x - 1) / blockSize.x; uint blockCountY = (sizeY + blockSize.y - 1) / blockSize.y; uint blockCountZ = (sizeZ + blockSize.z - 1) / blockSize.z; dim3 blockCount(blockCountX, blockCountY, blockCountZ); hipLaunchKernelGGL(( symbolizeKernel), dim3(blockCount), dim3(blockSize), 0, 0, dpSymbols, dpData, sizeX, sizeY, sizeZ, rowPitchSrc, slicePitchSrc); cudaCheckMsg("symbolizeKernel execution failed"); } void unsymbolize(short* dpData, const ushort* dpSymbols, uint sizeX, uint sizeY, uint sizeZ, uint rowPitchDst, uint slicePitchDst) { if(rowPitchDst == 0) rowPitchDst = sizeX; dim3 blockSize(64, 4, 1); uint blockCountX = (sizeX + blockSize.x - 1) / blockSize.x; uint blockCountY = (sizeY + blockSize.y - 1) / blockSize.y; uint blockCountZ = (sizeZ + blockSize.z - 1) / blockSize.z; dim3 blockCount(blockCountX, blockCountY, blockCountZ); hipLaunchKernelGGL(( unsymbolizeKernel), dim3(blockCount), dim3(blockSize), 0, 0, dpData, dpSymbols, sizeX, sizeY, sizeZ, rowPitchDst, slicePitchDst); cudaCheckMsg("unsymbolizeKernel execution failed"); } template<typename Symbol> void quantizeToSymbols(Symbol* dpQuant, const float* dpData, uint sizeX, uint sizeY, uint sizeZ, float quantizationStep, uint rowPitchSrc, uint slicePitchSrc, EQuantizeType quantType) { if(rowPitchSrc == 0) rowPitchSrc = sizeX; if(slicePitchSrc == 0) slicePitchSrc = rowPitchSrc * sizeY; dim3 blockSize(64, 4, 1); uint blockCountX = (sizeX + blockSize.x - 1) / blockSize.x; uint blockCountY = (sizeY + blockSize.y - 1) / blockSize.y; uint blockCountZ = (sizeZ + blockSize.z - 1) / blockSize.z; dim3 blockCount(blockCountX, blockCountY, blockCountZ); #define CASE(Type) \ case Type: \ hipLaunchKernelGGL(( quantizeToSymbolsKernel<Symbol, Type>) \ , 
dim3(blockCount), dim3(blockSize), 0, 0, \ dpQuant, dpData, 1.0f / quantizationStep, sizeX, sizeY, sizeZ, rowPitchSrc, slicePitchSrc); \ break; switch(quantType) { CASE(QUANTIZE_DEADZONE); CASE(QUANTIZE_UNIFORM); } #undef CASE cudaCheckMsg("quantizeToSymbolsKernel execution failed"); } template<typename Symbol> void quantizeToSymbolsRoundtrip(Symbol* dpQuant, float* dpData, uint sizeX, uint sizeY, uint sizeZ, float quantizationStep, uint rowPitchSrc, uint slicePitchSrc, EQuantizeType quantType) { if(rowPitchSrc == 0) rowPitchSrc = sizeX; if(slicePitchSrc == 0) slicePitchSrc = rowPitchSrc * sizeY; dim3 blockSize(64, 4, 1); uint blockCountX = (sizeX + blockSize.x - 1) / blockSize.x; uint blockCountY = (sizeY + blockSize.y - 1) / blockSize.y; uint blockCountZ = (sizeZ + blockSize.z - 1) / blockSize.z; dim3 blockCount(blockCountX, blockCountY, blockCountZ); #define CASE(Type) \ case Type: \ hipLaunchKernelGGL(( quantizeToSymbolsRoundtripKernel<Symbol, Type>) \ , dim3(blockCount), dim3(blockSize), 0, 0, \ dpQuant, dpData, quantizationStep, 1.0f / quantizationStep, sizeX, sizeY, sizeZ, rowPitchSrc, slicePitchSrc); \ break; switch(quantType) { CASE(QUANTIZE_DEADZONE); CASE(QUANTIZE_UNIFORM); } #undef CASE cudaCheckMsg("quantizeToSymbolsRoundtripKernel execution failed"); } template<typename Symbol> void unquantizeFromSymbols(float* dpData, const Symbol* dpQuant, uint sizeX, uint sizeY, uint sizeZ, float quantizationStep, uint rowPitchDst, uint slicePitchDst, EQuantizeType quantType) { if(rowPitchDst == 0) rowPitchDst = sizeX; if(slicePitchDst == 0) slicePitchDst = rowPitchDst * sizeY; dim3 blockSize(64, 4, 1); uint blockCountX = (sizeX + blockSize.x - 1) / blockSize.x; uint blockCountY = (sizeY + blockSize.y - 1) / blockSize.y; uint blockCountZ = (sizeZ + blockSize.z - 1) / blockSize.z; dim3 blockCount(blockCountX, blockCountY, blockCountZ); #define CASE(Type) \ case Type: \ hipLaunchKernelGGL(( unquantizeFromSymbolsKernel<Symbol, Type>) \ , dim3(blockCount), dim3(blockSize), 0, 0, \ dpData, dpQuant, quantizationStep, sizeX, sizeY, sizeZ, rowPitchDst, slicePitchDst); \ break; switch(quantType) { CASE(QUANTIZE_DEADZONE); CASE(QUANTIZE_UNIFORM); } #undef CASE cudaCheckMsg("unquantizeFromSymbolsKernel execution failed"); } void quantizeToSymbols(ushort* dpSymbols, const float* dpData, uint sizeX, uint sizeY, uint sizeZ, float quantizationStep, uint rowPitchSrc, uint slicePitchSrc, EQuantizeType quantType) { quantizeToSymbols<ushort>(dpSymbols, dpData, sizeX, sizeY, sizeZ, quantizationStep, rowPitchSrc, slicePitchSrc, quantType); } void quantizeToSymbolsRoundtrip(ushort* dpSymbols, float* dpData, uint sizeX, uint sizeY, uint sizeZ, float quantizationStep, uint rowPitchSrc, uint slicePitchSrc, EQuantizeType quantType) { quantizeToSymbolsRoundtrip<ushort>(dpSymbols, dpData, sizeX, sizeY, sizeZ, quantizationStep, rowPitchSrc, slicePitchSrc, quantType); } void unquantizeFromSymbols(float* dpData, const ushort* dpSymbols, uint sizeX, uint sizeY, uint sizeZ, float quantizationStep, uint rowPitchDst, uint slicePitchDst, EQuantizeType quantType) { unquantizeFromSymbols<ushort>(dpData, dpSymbols, sizeX, sizeY, sizeZ, quantizationStep, rowPitchDst, slicePitchDst, quantType); } void quantizeToSymbols(uint* dpSymbols, const float* dpData, uint sizeX, uint sizeY, uint sizeZ, float quantizationStep, uint rowPitchSrc, uint slicePitchSrc, EQuantizeType quantType) { quantizeToSymbols<uint>(dpSymbols, dpData, sizeX, sizeY, sizeZ, quantizationStep, rowPitchSrc, slicePitchSrc, quantType); } void 
quantizeToSymbolsRoundtrip(uint* dpSymbols, float* dpData, uint sizeX, uint sizeY, uint sizeZ, float quantizationStep, uint rowPitchSrc, uint slicePitchSrc, EQuantizeType quantType) { quantizeToSymbolsRoundtrip<uint>(dpSymbols, dpData, sizeX, sizeY, sizeZ, quantizationStep, rowPitchSrc, slicePitchSrc, quantType); } void unquantizeFromSymbols(float* dpData, const uint* dpSymbols, uint sizeX, uint sizeY, uint sizeZ, float quantizationStep, uint rowPitchDst, uint slicePitchDst, EQuantizeType quantType) { unquantizeFromSymbols<uint>(dpData, dpSymbols, sizeX, sizeY, sizeZ, quantizationStep, rowPitchDst, slicePitchDst, quantType); } void quantizeToSymbols2D(ushort* dpSymbols, const float* dpData, uint sizeX, uint sizeY, float quantizationStep, uint rowPitchSrc, EQuantizeType quantType) { quantizeToSymbols(dpSymbols, dpData, sizeX, sizeY, 1, quantizationStep, rowPitchSrc, 0, quantType); } void quantizeToSymbolsRoundtrip2D(ushort* dpSymbols, float* dpData, uint sizeX, uint sizeY, float quantizationStep, uint rowPitchSrc, EQuantizeType quantType) { quantizeToSymbolsRoundtrip(dpSymbols, dpData, sizeX, sizeY, 1, quantizationStep, rowPitchSrc, 0, quantType); } void unquantizeFromSymbols2D(float* dpData, const ushort* dpSymbols, uint sizeX, uint sizeY, float quantizationStep, uint rowPitchDst, EQuantizeType quantType) { unquantizeFromSymbols(dpData, dpSymbols, sizeX, sizeY, 1, quantizationStep, rowPitchDst, 0, quantType); } void quantizeToSymbols2D(uint* dpSymbols, const float* dpData, uint sizeX, uint sizeY, float quantizationStep, uint rowPitchSrc, EQuantizeType quantType) { quantizeToSymbols(dpSymbols, dpData, sizeX, sizeY, 1, quantizationStep, rowPitchSrc, 0, quantType); } void quantizeToSymbolsRoundtrip2D(uint* dpSymbols, float* dpData, uint sizeX, uint sizeY, float quantizationStep, uint rowPitchSrc, EQuantizeType quantType) { quantizeToSymbolsRoundtrip(dpSymbols, dpData, sizeX, sizeY, 1, quantizationStep, rowPitchSrc, 0, quantType); } void unquantizeFromSymbols2D(float* dpData, const uint* dpSymbols, uint sizeX, uint sizeY, float quantizationStep, uint rowPitchDst, EQuantizeType quantType) { unquantizeFromSymbols(dpData, dpSymbols, sizeX, sizeY, 1, quantizationStep, rowPitchDst, 0, quantType); } void quantizeToShort(short* dpQuant, const float* dpData, uint sizeX, uint sizeY, uint sizeZ, float quantizationStep, uint rowPitchSrc, uint slicePitchSrc, EQuantizeType quantType) { if(rowPitchSrc == 0) rowPitchSrc = sizeX; if(slicePitchSrc == 0) slicePitchSrc = rowPitchSrc * sizeY; dim3 blockSize(64, 4, 1); uint blockCountX = (sizeX + blockSize.x - 1) / blockSize.x; uint blockCountY = (sizeY + blockSize.y - 1) / blockSize.y; uint blockCountZ = (sizeZ + blockSize.z - 1) / blockSize.z; dim3 blockCount(blockCountX, blockCountY, blockCountZ); #define CASE(Type) \ case Type: \ hipLaunchKernelGGL(( quantizeToShortKernel<Type>) \ , dim3(blockCount), dim3(blockSize), 0, 0, \ dpQuant, dpData, 1.0f / quantizationStep, sizeX, sizeY, sizeZ, rowPitchSrc, slicePitchSrc); \ break; switch(quantType) { CASE(QUANTIZE_DEADZONE); CASE(QUANTIZE_UNIFORM); } #undef CASE cudaCheckMsg("quantizeToShortKernel execution failed"); } void unquantizeFromShort(float* dpData, const short* dpQuant, uint sizeX, uint sizeY, uint sizeZ, float quantizationStep, uint rowPitchDst, uint slicePitchDst, EQuantizeType quantType) { if(rowPitchDst == 0) rowPitchDst = sizeX; if(slicePitchDst == 0) slicePitchDst = rowPitchDst * sizeY; dim3 blockSize(64, 4, 1); uint blockCountX = (sizeX + blockSize.x - 1) / blockSize.x; uint blockCountY = (sizeY + 
blockSize.y - 1) / blockSize.y; uint blockCountZ = (sizeZ + blockSize.z - 1) / blockSize.z; dim3 blockCount(blockCountX, blockCountY, blockCountZ); #define CASE(Type) \ case Type: \ hipLaunchKernelGGL(( unquantizeFromShortKernel<Type>) \ , dim3(blockCount), dim3(blockSize), 0, 0, \ dpData, dpQuant, quantizationStep, sizeX, sizeY, sizeZ, rowPitchDst, slicePitchDst); \ break; switch(quantType) { CASE(QUANTIZE_DEADZONE); CASE(QUANTIZE_UNIFORM); } #undef CASE cudaCheckMsg("unquantizeFromShortKernel execution failed"); } void quantizeDifferenceToSymbols2D(ushort* dpSymbols, const float* dpData, float quantizationStep, const byte* dpReference, uint channelCount, uint channel, uint sizeX, uint sizeY, uint rowPitchSrc, EQuantizeType quantType) { if(rowPitchSrc == 0) rowPitchSrc = sizeX; dim3 blockSize(64, 4); uint blockCountX = (sizeX + blockSize.x - 1) / blockSize.x; uint blockCountY = (sizeY + blockSize.y - 1) / blockSize.y; dim3 blockCount(blockCountX, blockCountY); #define CASE(Type) \ case Type: \ hipLaunchKernelGGL(( quantizeDifferenceToSymbols2Dkernel<Type>) \ , dim3(blockCount), dim3(blockSize), 0, 0, \ dpSymbols, dpData, 1.0f / quantizationStep, dpReference + channel, channelCount, sizeX, sizeY, rowPitchSrc); \ break; switch(quantType) { CASE(QUANTIZE_DEADZONE); CASE(QUANTIZE_UNIFORM); } #undef CASE cudaCheckMsg("quantizeDifferenceToSymbols2Dkernel execution failed"); } void quantizeDifferenceToSymbolsRoundtrip2D(ushort* dpSymbols, float* dpData, float quantizationStep, const byte* dpReference, uint channelCount, uint channel, uint sizeX, uint sizeY, uint rowPitchSrc, EQuantizeType quantType) { if(rowPitchSrc == 0) rowPitchSrc = sizeX; dim3 blockSize(64, 4); uint blockCountX = (sizeX + blockSize.x - 1) / blockSize.x; uint blockCountY = (sizeY + blockSize.y - 1) / blockSize.y; dim3 blockCount(blockCountX, blockCountY); #define CASE(Type) \ case Type: \ hipLaunchKernelGGL(( quantizeDifferenceToSymbolsRoundtrip2Dkernel<Type>) \ , dim3(blockCount), dim3(blockSize), 0, 0, \ dpSymbols, dpData, quantizationStep, 1.0f / quantizationStep, dpReference + channel, channelCount, sizeX, sizeY, rowPitchSrc); \ break; switch(quantType) { CASE(QUANTIZE_DEADZONE); CASE(QUANTIZE_UNIFORM); } #undef CASE cudaCheckMsg("quantizeDifferenceToSymbolsRoundtrip2Dkernel execution failed"); } void unquantizeDifferenceFromSymbols2D(float* dpData, const ushort* dpSymbols, float quantizationStep, const byte* dpReference, uint channelCount, uint channel, uint sizeX, uint sizeY, uint rowPitchDst, EQuantizeType quantType, hipStream_t stream) { if(rowPitchDst == 0) rowPitchDst = sizeX; dim3 blockSize(64, 4); uint blockCountX = (sizeX + blockSize.x - 1) / blockSize.x; uint blockCountY = (sizeY + blockSize.y - 1) / blockSize.y; dim3 blockCount(blockCountX, blockCountY); #define CASE(Type) \ case Type: \ hipLaunchKernelGGL(( unquantizeDifferenceFromSymbols2Dkernel<Type>) \ , dim3(blockCount), dim3(blockSize), 0, stream, \ dpData, dpSymbols, quantizationStep, dpReference + channel, channelCount, sizeX, sizeY, rowPitchDst); \ break; switch(quantType) { CASE(QUANTIZE_DEADZONE); CASE(QUANTIZE_UNIFORM); } #undef CASE cudaCheckMsg("unquantizeDifferenceFromSymbols2Dkernel execution failed"); } void getMaxAbs(Instance* pInstance, const float* dpImage, uint elemCount, float* dpValMax) { if(dpValMax != NULL) { reduceArray<float, OperatorMax<float>, FunctorAbs<float> >(dpValMax, dpImage, elemCount, pInstance->m_pReducePlan); cudaCheckMsg("getVolumeFloatMaxAbs: Error in reduceArray"); } } } }
5a8c5c9d8805cd0bbe27d5e4e49694440453201d.cu
#include <cudaCompress/util/Quantize.h> #include <cudaCompress/tools/Operator.h> #include <cudaCompress/tools/Functor.h> #include <cudaCompress/cudaUtil.h> #include <cudaCompress/InstanceImpl.h> #include <cudaCompress/reduce/reduce_app.cui> #include "QuantizeKernels.cui" namespace cudaCompress { namespace util { void floatToByte2D(byte* dpQuant, uint channelCount, uint channel, const float* dpData, uint sizeX, uint sizeY, uint rowPitchSrc) { if(rowPitchSrc == 0) rowPitchSrc = sizeX; dim3 blockSize(64, 4); uint blockCountX = (sizeX + blockSize.x - 1) / blockSize.x; uint blockCountY = (sizeY + blockSize.y - 1) / blockSize.y; dim3 blockCount(blockCountX, blockCountY); quantize2Dkernel<<<blockCount, blockSize>>>(dpQuant + channel, channelCount, dpData, sizeX, sizeY, rowPitchSrc); cudaCheckMsg("quantize2Dkernel execution failed"); } void byteToFloat2D(float* dpData, const byte* dpQuant, uint channelCount, uint channel, uint sizeX, uint sizeY, uint rowPitchDst) { if(rowPitchDst == 0) rowPitchDst = sizeX; dim3 blockSize(64, 4); uint blockCountX = (sizeX + blockSize.x - 1) / blockSize.x; uint blockCountY = (sizeY + blockSize.y - 1) / blockSize.y; dim3 blockCount(blockCountX, blockCountY); unquantize2Dkernel<<<blockCount, blockSize>>>(dpData, dpQuant + channel, channelCount, sizeX, sizeY, rowPitchDst); cudaCheckMsg("unquantize2Dkernel execution failed"); } void symbolize(ushort* dpSymbols, const short* dpData, uint sizeX, uint sizeY, uint sizeZ, uint rowPitchSrc, uint slicePitchSrc) { if(rowPitchSrc == 0) rowPitchSrc = sizeX; if(slicePitchSrc == 0) slicePitchSrc = rowPitchSrc * sizeY; dim3 blockSize(64, 4, 1); uint blockCountX = (sizeX + blockSize.x - 1) / blockSize.x; uint blockCountY = (sizeY + blockSize.y - 1) / blockSize.y; uint blockCountZ = (sizeZ + blockSize.z - 1) / blockSize.z; dim3 blockCount(blockCountX, blockCountY, blockCountZ); symbolizeKernel<<<blockCount, blockSize>>>(dpSymbols, dpData, sizeX, sizeY, sizeZ, rowPitchSrc, slicePitchSrc); cudaCheckMsg("symbolizeKernel execution failed"); } void unsymbolize(short* dpData, const ushort* dpSymbols, uint sizeX, uint sizeY, uint sizeZ, uint rowPitchDst, uint slicePitchDst) { if(rowPitchDst == 0) rowPitchDst = sizeX; dim3 blockSize(64, 4, 1); uint blockCountX = (sizeX + blockSize.x - 1) / blockSize.x; uint blockCountY = (sizeY + blockSize.y - 1) / blockSize.y; uint blockCountZ = (sizeZ + blockSize.z - 1) / blockSize.z; dim3 blockCount(blockCountX, blockCountY, blockCountZ); unsymbolizeKernel<<<blockCount, blockSize>>>(dpData, dpSymbols, sizeX, sizeY, sizeZ, rowPitchDst, slicePitchDst); cudaCheckMsg("unsymbolizeKernel execution failed"); } template<typename Symbol> void quantizeToSymbols(Symbol* dpQuant, const float* dpData, uint sizeX, uint sizeY, uint sizeZ, float quantizationStep, uint rowPitchSrc, uint slicePitchSrc, EQuantizeType quantType) { if(rowPitchSrc == 0) rowPitchSrc = sizeX; if(slicePitchSrc == 0) slicePitchSrc = rowPitchSrc * sizeY; dim3 blockSize(64, 4, 1); uint blockCountX = (sizeX + blockSize.x - 1) / blockSize.x; uint blockCountY = (sizeY + blockSize.y - 1) / blockSize.y; uint blockCountZ = (sizeZ + blockSize.z - 1) / blockSize.z; dim3 blockCount(blockCountX, blockCountY, blockCountZ); #define CASE(Type) \ case Type: \ quantizeToSymbolsKernel<Symbol, Type> \ <<<blockCount, blockSize>>> \ (dpQuant, dpData, 1.0f / quantizationStep, sizeX, sizeY, sizeZ, rowPitchSrc, slicePitchSrc); \ break; switch(quantType) { CASE(QUANTIZE_DEADZONE); CASE(QUANTIZE_UNIFORM); } #undef CASE cudaCheckMsg("quantizeToSymbolsKernel execution 
failed"); } template<typename Symbol> void quantizeToSymbolsRoundtrip(Symbol* dpQuant, float* dpData, uint sizeX, uint sizeY, uint sizeZ, float quantizationStep, uint rowPitchSrc, uint slicePitchSrc, EQuantizeType quantType) { if(rowPitchSrc == 0) rowPitchSrc = sizeX; if(slicePitchSrc == 0) slicePitchSrc = rowPitchSrc * sizeY; dim3 blockSize(64, 4, 1); uint blockCountX = (sizeX + blockSize.x - 1) / blockSize.x; uint blockCountY = (sizeY + blockSize.y - 1) / blockSize.y; uint blockCountZ = (sizeZ + blockSize.z - 1) / blockSize.z; dim3 blockCount(blockCountX, blockCountY, blockCountZ); #define CASE(Type) \ case Type: \ quantizeToSymbolsRoundtripKernel<Symbol, Type> \ <<<blockCount, blockSize>>> \ (dpQuant, dpData, quantizationStep, 1.0f / quantizationStep, sizeX, sizeY, sizeZ, rowPitchSrc, slicePitchSrc); \ break; switch(quantType) { CASE(QUANTIZE_DEADZONE); CASE(QUANTIZE_UNIFORM); } #undef CASE cudaCheckMsg("quantizeToSymbolsRoundtripKernel execution failed"); } template<typename Symbol> void unquantizeFromSymbols(float* dpData, const Symbol* dpQuant, uint sizeX, uint sizeY, uint sizeZ, float quantizationStep, uint rowPitchDst, uint slicePitchDst, EQuantizeType quantType) { if(rowPitchDst == 0) rowPitchDst = sizeX; if(slicePitchDst == 0) slicePitchDst = rowPitchDst * sizeY; dim3 blockSize(64, 4, 1); uint blockCountX = (sizeX + blockSize.x - 1) / blockSize.x; uint blockCountY = (sizeY + blockSize.y - 1) / blockSize.y; uint blockCountZ = (sizeZ + blockSize.z - 1) / blockSize.z; dim3 blockCount(blockCountX, blockCountY, blockCountZ); #define CASE(Type) \ case Type: \ unquantizeFromSymbolsKernel<Symbol, Type> \ <<<blockCount, blockSize>>> \ (dpData, dpQuant, quantizationStep, sizeX, sizeY, sizeZ, rowPitchDst, slicePitchDst); \ break; switch(quantType) { CASE(QUANTIZE_DEADZONE); CASE(QUANTIZE_UNIFORM); } #undef CASE cudaCheckMsg("unquantizeFromSymbolsKernel execution failed"); } void quantizeToSymbols(ushort* dpSymbols, const float* dpData, uint sizeX, uint sizeY, uint sizeZ, float quantizationStep, uint rowPitchSrc, uint slicePitchSrc, EQuantizeType quantType) { quantizeToSymbols<ushort>(dpSymbols, dpData, sizeX, sizeY, sizeZ, quantizationStep, rowPitchSrc, slicePitchSrc, quantType); } void quantizeToSymbolsRoundtrip(ushort* dpSymbols, float* dpData, uint sizeX, uint sizeY, uint sizeZ, float quantizationStep, uint rowPitchSrc, uint slicePitchSrc, EQuantizeType quantType) { quantizeToSymbolsRoundtrip<ushort>(dpSymbols, dpData, sizeX, sizeY, sizeZ, quantizationStep, rowPitchSrc, slicePitchSrc, quantType); } void unquantizeFromSymbols(float* dpData, const ushort* dpSymbols, uint sizeX, uint sizeY, uint sizeZ, float quantizationStep, uint rowPitchDst, uint slicePitchDst, EQuantizeType quantType) { unquantizeFromSymbols<ushort>(dpData, dpSymbols, sizeX, sizeY, sizeZ, quantizationStep, rowPitchDst, slicePitchDst, quantType); } void quantizeToSymbols(uint* dpSymbols, const float* dpData, uint sizeX, uint sizeY, uint sizeZ, float quantizationStep, uint rowPitchSrc, uint slicePitchSrc, EQuantizeType quantType) { quantizeToSymbols<uint>(dpSymbols, dpData, sizeX, sizeY, sizeZ, quantizationStep, rowPitchSrc, slicePitchSrc, quantType); } void quantizeToSymbolsRoundtrip(uint* dpSymbols, float* dpData, uint sizeX, uint sizeY, uint sizeZ, float quantizationStep, uint rowPitchSrc, uint slicePitchSrc, EQuantizeType quantType) { quantizeToSymbolsRoundtrip<uint>(dpSymbols, dpData, sizeX, sizeY, sizeZ, quantizationStep, rowPitchSrc, slicePitchSrc, quantType); } void unquantizeFromSymbols(float* dpData, const uint* 
dpSymbols, uint sizeX, uint sizeY, uint sizeZ, float quantizationStep, uint rowPitchDst, uint slicePitchDst, EQuantizeType quantType) { unquantizeFromSymbols<uint>(dpData, dpSymbols, sizeX, sizeY, sizeZ, quantizationStep, rowPitchDst, slicePitchDst, quantType); } void quantizeToSymbols2D(ushort* dpSymbols, const float* dpData, uint sizeX, uint sizeY, float quantizationStep, uint rowPitchSrc, EQuantizeType quantType) { quantizeToSymbols(dpSymbols, dpData, sizeX, sizeY, 1, quantizationStep, rowPitchSrc, 0, quantType); } void quantizeToSymbolsRoundtrip2D(ushort* dpSymbols, float* dpData, uint sizeX, uint sizeY, float quantizationStep, uint rowPitchSrc, EQuantizeType quantType) { quantizeToSymbolsRoundtrip(dpSymbols, dpData, sizeX, sizeY, 1, quantizationStep, rowPitchSrc, 0, quantType); } void unquantizeFromSymbols2D(float* dpData, const ushort* dpSymbols, uint sizeX, uint sizeY, float quantizationStep, uint rowPitchDst, EQuantizeType quantType) { unquantizeFromSymbols(dpData, dpSymbols, sizeX, sizeY, 1, quantizationStep, rowPitchDst, 0, quantType); } void quantizeToSymbols2D(uint* dpSymbols, const float* dpData, uint sizeX, uint sizeY, float quantizationStep, uint rowPitchSrc, EQuantizeType quantType) { quantizeToSymbols(dpSymbols, dpData, sizeX, sizeY, 1, quantizationStep, rowPitchSrc, 0, quantType); } void quantizeToSymbolsRoundtrip2D(uint* dpSymbols, float* dpData, uint sizeX, uint sizeY, float quantizationStep, uint rowPitchSrc, EQuantizeType quantType) { quantizeToSymbolsRoundtrip(dpSymbols, dpData, sizeX, sizeY, 1, quantizationStep, rowPitchSrc, 0, quantType); } void unquantizeFromSymbols2D(float* dpData, const uint* dpSymbols, uint sizeX, uint sizeY, float quantizationStep, uint rowPitchDst, EQuantizeType quantType) { unquantizeFromSymbols(dpData, dpSymbols, sizeX, sizeY, 1, quantizationStep, rowPitchDst, 0, quantType); } void quantizeToShort(short* dpQuant, const float* dpData, uint sizeX, uint sizeY, uint sizeZ, float quantizationStep, uint rowPitchSrc, uint slicePitchSrc, EQuantizeType quantType) { if(rowPitchSrc == 0) rowPitchSrc = sizeX; if(slicePitchSrc == 0) slicePitchSrc = rowPitchSrc * sizeY; dim3 blockSize(64, 4, 1); uint blockCountX = (sizeX + blockSize.x - 1) / blockSize.x; uint blockCountY = (sizeY + blockSize.y - 1) / blockSize.y; uint blockCountZ = (sizeZ + blockSize.z - 1) / blockSize.z; dim3 blockCount(blockCountX, blockCountY, blockCountZ); #define CASE(Type) \ case Type: \ quantizeToShortKernel<Type> \ <<<blockCount, blockSize>>> \ (dpQuant, dpData, 1.0f / quantizationStep, sizeX, sizeY, sizeZ, rowPitchSrc, slicePitchSrc); \ break; switch(quantType) { CASE(QUANTIZE_DEADZONE); CASE(QUANTIZE_UNIFORM); } #undef CASE cudaCheckMsg("quantizeToShortKernel execution failed"); } void unquantizeFromShort(float* dpData, const short* dpQuant, uint sizeX, uint sizeY, uint sizeZ, float quantizationStep, uint rowPitchDst, uint slicePitchDst, EQuantizeType quantType) { if(rowPitchDst == 0) rowPitchDst = sizeX; if(slicePitchDst == 0) slicePitchDst = rowPitchDst * sizeY; dim3 blockSize(64, 4, 1); uint blockCountX = (sizeX + blockSize.x - 1) / blockSize.x; uint blockCountY = (sizeY + blockSize.y - 1) / blockSize.y; uint blockCountZ = (sizeZ + blockSize.z - 1) / blockSize.z; dim3 blockCount(blockCountX, blockCountY, blockCountZ); #define CASE(Type) \ case Type: \ unquantizeFromShortKernel<Type> \ <<<blockCount, blockSize>>> \ (dpData, dpQuant, quantizationStep, sizeX, sizeY, sizeZ, rowPitchDst, slicePitchDst); \ break; switch(quantType) { CASE(QUANTIZE_DEADZONE); CASE(QUANTIZE_UNIFORM); 
} #undef CASE cudaCheckMsg("unquantizeFromShortKernel execution failed"); } void quantizeDifferenceToSymbols2D(ushort* dpSymbols, const float* dpData, float quantizationStep, const byte* dpReference, uint channelCount, uint channel, uint sizeX, uint sizeY, uint rowPitchSrc, EQuantizeType quantType) { if(rowPitchSrc == 0) rowPitchSrc = sizeX; dim3 blockSize(64, 4); uint blockCountX = (sizeX + blockSize.x - 1) / blockSize.x; uint blockCountY = (sizeY + blockSize.y - 1) / blockSize.y; dim3 blockCount(blockCountX, blockCountY); #define CASE(Type) \ case Type: \ quantizeDifferenceToSymbols2Dkernel<Type> \ <<<blockCount, blockSize>>> \ (dpSymbols, dpData, 1.0f / quantizationStep, dpReference + channel, channelCount, sizeX, sizeY, rowPitchSrc); \ break; switch(quantType) { CASE(QUANTIZE_DEADZONE); CASE(QUANTIZE_UNIFORM); } #undef CASE cudaCheckMsg("quantizeDifferenceToSymbols2Dkernel execution failed"); } void quantizeDifferenceToSymbolsRoundtrip2D(ushort* dpSymbols, float* dpData, float quantizationStep, const byte* dpReference, uint channelCount, uint channel, uint sizeX, uint sizeY, uint rowPitchSrc, EQuantizeType quantType) { if(rowPitchSrc == 0) rowPitchSrc = sizeX; dim3 blockSize(64, 4); uint blockCountX = (sizeX + blockSize.x - 1) / blockSize.x; uint blockCountY = (sizeY + blockSize.y - 1) / blockSize.y; dim3 blockCount(blockCountX, blockCountY); #define CASE(Type) \ case Type: \ quantizeDifferenceToSymbolsRoundtrip2Dkernel<Type> \ <<<blockCount, blockSize>>> \ (dpSymbols, dpData, quantizationStep, 1.0f / quantizationStep, dpReference + channel, channelCount, sizeX, sizeY, rowPitchSrc); \ break; switch(quantType) { CASE(QUANTIZE_DEADZONE); CASE(QUANTIZE_UNIFORM); } #undef CASE cudaCheckMsg("quantizeDifferenceToSymbolsRoundtrip2Dkernel execution failed"); } void unquantizeDifferenceFromSymbols2D(float* dpData, const ushort* dpSymbols, float quantizationStep, const byte* dpReference, uint channelCount, uint channel, uint sizeX, uint sizeY, uint rowPitchDst, EQuantizeType quantType, cudaStream_t stream) { if(rowPitchDst == 0) rowPitchDst = sizeX; dim3 blockSize(64, 4); uint blockCountX = (sizeX + blockSize.x - 1) / blockSize.x; uint blockCountY = (sizeY + blockSize.y - 1) / blockSize.y; dim3 blockCount(blockCountX, blockCountY); #define CASE(Type) \ case Type: \ unquantizeDifferenceFromSymbols2Dkernel<Type> \ <<<blockCount, blockSize, 0, stream>>> \ (dpData, dpSymbols, quantizationStep, dpReference + channel, channelCount, sizeX, sizeY, rowPitchDst); \ break; switch(quantType) { CASE(QUANTIZE_DEADZONE); CASE(QUANTIZE_UNIFORM); } #undef CASE cudaCheckMsg("unquantizeDifferenceFromSymbols2Dkernel execution failed"); } void getMaxAbs(Instance* pInstance, const float* dpImage, uint elemCount, float* dpValMax) { if(dpValMax != NULL) { reduceArray<float, OperatorMax<float>, FunctorAbs<float> >(dpValMax, dpImage, elemCount, pInstance->m_pReducePlan); cudaCheckMsg("getVolumeFloatMaxAbs: Error in reduceArray"); } } } }
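The EQuantizeType switch in both versions of this file dispatches to either a dead-zone or a uniform quantization kernel defined in QuantizeKernels.cui, which is not included in this entry. The host-only sketch below illustrates the textbook difference between the two scalar quantizers (uniform mid-tread rounding versus a widened zero bin); it is a generic illustration under that assumption, not the exact formulas of the hidden kernels.

// Generic scalar illustration of the two quantizer types selected above.
// The actual device kernels live in QuantizeKernels.cui (not shown), so the
// formulas here are the common textbook variants, used only to show why a
// dead-zone quantizer drives small coefficients to zero more aggressively.
#include <cmath>
#include <cstdio>

// Uniform (mid-tread): round to the nearest multiple of the step.
int quantize_uniform(float x, float stepInv) {
    return (int)std::lrintf(x * stepInv);
}

// Dead-zone: truncation toward zero makes the zero bin twice as wide as the
// other bins, which is useful after a wavelet transform.
int quantize_deadzone(float x, float stepInv) {
    return (int)std::truncf(x * stepInv);
}

float dequantize_uniform(int q, float step) { return q * step; }

float dequantize_deadzone(int q, float step) {
    // Reconstruct at the bin midpoint (zero stays zero).
    return (q == 0) ? 0.0f : (q > 0 ? (q + 0.5f) * step : (q - 0.5f) * step);
}

int main() {
    const float step = 0.25f, stepInv = 1.0f / step;
    const float xs[] = {-0.6f, -0.2f, 0.1f, 0.3f, 0.9f};
    for (float x : xs) {
        int qu = quantize_uniform(x, stepInv);
        int qd = quantize_deadzone(x, stepInv);
        printf("x=% .2f  uniform: q=%d xhat=% .3f   deadzone: q=%d xhat=% .3f\n",
               x, qu, dequantize_uniform(qu, step), qd, dequantize_deadzone(qd, step));
    }
    return 0;
}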
fe12b48c3654a155d3a5a131f500eea34125d300.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <ATen/native/hip/SortingCommon.cuh> #include <THH/THHThrustAllocator.cuh> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/sort.h> namespace at { namespace native { void index_put_with_sort_kernel_thrust_helper(Tensor &linearIndex, Tensor &orig_indices, Tensor &sorted_indices, int64_t num_indices) { sorted_indices.copy_(linearIndex); const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::hip::par(allocator).on(stream); using device_ptr = thrust::device_ptr<int64_t>; // Fill sortedOrigIndices with sequential indices const auto count_iter = thrust::counting_iterator<int64_t>(0); auto orig_data = device_ptr(orig_indices.data_ptr<int64_t>()); thrust::copy(policy, count_iter, count_iter + num_indices, orig_data); // Sort the inputs into sorted with the corresponding indices; we // don't need a stable or multidimensional sort, so just use Thrust // directly // Sort; a stable sort is not required // NB - not passing comparator causes thrust to use radix sort, and it hurts perf A LOT, at least for medium (few K) sized indices auto sorted_data = device_ptr(sorted_indices.data_ptr<int64_t>()); thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data, LTOp<int64_t>()); } }}
fe12b48c3654a155d3a5a131f500eea34125d300.cu
#include <ATen/ATen.h> #include <ATen/native/cuda/SortingCommon.cuh> #include <THC/THCThrustAllocator.cuh> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/sort.h> namespace at { namespace native { void index_put_with_sort_kernel_thrust_helper(Tensor &linearIndex, Tensor &orig_indices, Tensor &sorted_indices, int64_t num_indices) { sorted_indices.copy_(linearIndex); const cudaStream_t stream = at::cuda::getCurrentCUDAStream(); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::cuda::par(allocator).on(stream); using device_ptr = thrust::device_ptr<int64_t>; // Fill sortedOrigIndices with sequential indices const auto count_iter = thrust::counting_iterator<int64_t>(0); auto orig_data = device_ptr(orig_indices.data_ptr<int64_t>()); thrust::copy(policy, count_iter, count_iter + num_indices, orig_data); // Sort the inputs into sorted with the corresponding indices; we // don't need a stable or multidimensional sort, so just use Thrust // directly // Sort; a stable sort is not required // NB - not passing comparator causes thrust to use radix sort, and it hurts perf A LOT, at least for medium (few K) sized indices auto sorted_data = device_ptr(sorted_indices.data_ptr<int64_t>()); thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data, LTOp<int64_t>()); } }}
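The helper above fills orig_indices from a counting iterator and then sorts the linearized indices while carrying the original positions along with thrust::sort_by_key. Below is a minimal standalone sketch of that pattern, using thrust's default device execution policy instead of ATen's stream and caching allocator, and without the explicit LTOp comparator used in the original.

// Minimal standalone sketch of the sort pattern used above: copy a counting
// iterator into `orig`, then sort the keys while permuting `orig` alongside,
// so orig[i] tells you where sorted key i originally lived.
#include <cstdio>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/sort.h>

int main() {
    thrust::device_vector<long long> keys(5);
    keys[0] = 42; keys[1] = 7; keys[2] = 7; keys[3] = 0; keys[4] = 13;

    // Sequential original positions 0..n-1, as in the helper above.
    thrust::device_vector<long long> orig(keys.size());
    thrust::counting_iterator<long long> first(0);
    thrust::copy(first, first + keys.size(), orig.begin());

    // Sort keys ascending and permute orig to match.
    thrust::sort_by_key(keys.begin(), keys.end(), orig.begin());

    for (size_t i = 0; i < keys.size(); ++i)
        printf("key=%lld came from index %lld\n",
               (long long)keys[i], (long long)orig[i]);
    return 0;
}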
fd38f4b7e70f22bfa9130243a7a00229657c76ad.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * GridTools * * Copyright (c) 2014-2019, ETH Zurich * All rights reserved. * * Please, refer to the LICENSE file in the root directory. * SPDX-License-Identifier: BSD-3-Clause */ #include "../../tools/multiplet.hpp" #include "gtest/gtest.h" #include <gridtools/common/gt_assert.hpp> #include <gridtools/storage/data_store.hpp> #include <gridtools/storage/storage_cuda/cuda_storage.hpp> #include <gridtools/storage/storage_cuda/cuda_storage_info.hpp> #include <gridtools/storage/storage_cuda/data_view_helpers.hpp> using namespace gridtools; const int c_x = 3 /* < 32 for this test */, c_y = 5, c_z = 7; template <typename View> __global__ void mul2(View s) { bool correct_dims = (s.template total_length<0>() == c_x) && (s.template total_length<1>() == c_y) && (s.template total_length<2>() == c_z); bool correct_size = (s.padded_total_length() == 32 * c_y * c_z); s(0, 0, 0) *= (2 * correct_dims * correct_size); s(1, 0, 0) *= (2 * correct_dims * correct_size); } TEST(DataViewTest, Simple) { typedef cuda_storage_info<0, layout_map<2, 1, 0>> storage_info_t; typedef data_store<cuda_storage<double>, storage_info_t> data_store_t; // create and allocate a data_store GT_CONSTEXPR storage_info_t si(c_x, c_y, c_z); data_store_t ds(si); // create a rw view and fill with some data data_view<data_store_t> dv = make_host_view(ds); GT_STATIC_ASSERT((is_data_view<decltype(dv)>::value), "is_data_view check failed"); dv(0, 0, 0) = 50; dv(1, 0, 0) = 60; // check if interface works ASSERT_TRUE((si.length<0>() == dv.length<0>())); ASSERT_TRUE((si.length<1>() == dv.length<1>())); ASSERT_TRUE((si.length<2>() == dv.length<2>())); ASSERT_TRUE((si.total_length<0>() == dv.total_length<0>())); ASSERT_TRUE((si.total_length<1>() == dv.total_length<1>())); ASSERT_TRUE((si.total_length<2>() == dv.total_length<2>())); ASSERT_TRUE((si.begin<0>() == dv.begin<0>())); ASSERT_TRUE((si.begin<1>() == dv.begin<1>())); ASSERT_TRUE((si.begin<2>() == dv.begin<2>())); ASSERT_TRUE((si.total_begin<0>() == dv.total_begin<0>())); ASSERT_TRUE((si.total_begin<1>() == dv.total_begin<1>())); ASSERT_TRUE((si.total_begin<2>() == dv.total_begin<2>())); ASSERT_TRUE((si.end<0>() == dv.end<0>())); ASSERT_TRUE((si.end<1>() == dv.end<1>())); ASSERT_TRUE((si.end<2>() == dv.end<2>())); ASSERT_TRUE((si.total_end<0>() == dv.total_end<0>())); ASSERT_TRUE((si.total_end<1>() == dv.total_end<1>())); ASSERT_TRUE((si.total_end<2>() == dv.total_end<2>())); ASSERT_TRUE((si.padded_total_length() == dv.padded_total_length())); ASSERT_TRUE(si.index(1, 0, 1) == c_y * 32 + 1); // check if data is there EXPECT_EQ(50, dv(0, 0, 0)); EXPECT_EQ(dv(1, 0, 0), 60); // create a ro view data_view<data_store_t, access_mode::read_only> dvro = make_host_view<access_mode::read_only>(ds); // check if data is the same EXPECT_EQ(50, dvro(0, 0, 0)); EXPECT_EQ(dvro(1, 0, 0), 60); // views are valid (ds <--> dv and ds <--> dvro) EXPECT_TRUE(check_consistency(ds, dv)); EXPECT_TRUE(check_consistency(ds, dvro)); EXPECT_TRUE(dv.valid()); EXPECT_TRUE(dvro.valid()); // sync, create a device view and call kernel ds.sync(); auto devv = make_device_view(ds); GT_STATIC_ASSERT((is_data_view<decltype(devv)>::value), "is_data_view check failed"); EXPECT_TRUE(check_consistency(ds, devv)); EXPECT_FALSE(check_consistency(ds, dv)); EXPECT_FALSE(check_consistency(ds, dvro)); EXPECT_TRUE(devv.valid()); EXPECT_FALSE(dv.valid()); EXPECT_FALSE(dvro.valid()); hipLaunchKernelGGL(( mul2), dim3(1), dim3(1), 0, 0, devv); // sync and 
check if read only host view is valid ds.sync(); EXPECT_FALSE(check_consistency(ds, devv)); EXPECT_FALSE(check_consistency(ds, dv)); EXPECT_TRUE(check_consistency(ds, dvro)); EXPECT_FALSE(devv.valid()); EXPECT_FALSE(dv.valid()); EXPECT_TRUE(dvro.valid()); // check if data is the same EXPECT_EQ(100, dvro(0, 0, 0)); EXPECT_EQ(dvro(1, 0, 0), 120); // create and allocate a second storage data_store_t ds_tmp(si); // again create a view data_view<data_store_t> dv_tmp = make_host_view<access_mode::read_write>(ds_tmp); // the combination ds_tmp <--> dv/dvro is not a valid view EXPECT_FALSE(check_consistency(ds, dv_tmp)); EXPECT_FALSE(check_consistency(ds_tmp, devv)); EXPECT_FALSE(check_consistency(ds_tmp, dvro)); EXPECT_TRUE(check_consistency(ds_tmp, dv_tmp)); EXPECT_TRUE(dv_tmp.valid()); EXPECT_FALSE(devv.valid()); EXPECT_TRUE(dvro.valid()); EXPECT_TRUE(dv_tmp.valid()); // destroy a storage, this should also invalidate the views ds.reset(); EXPECT_FALSE(check_consistency(ds, dv)); EXPECT_FALSE(check_consistency(ds, dvro)); } TEST(DataViewTest, ZeroSize) { typedef cuda_storage_info<0, layout_map<0>> storage_info_t; typedef data_store<cuda_storage<double>, storage_info_t> data_store_t; // create and allocate a data_store data_store_t ds; make_host_view<access_mode::read_only>(ds); make_device_view<access_mode::read_only>(ds); } TEST(DataViewTest, Looping) { typedef cuda_storage_info<0, layout_map<0, 1, 2>, halo<1, 2, 3>> storage_info_t; storage_info_t si(2 + 2, 2 + 4, 2 + 6); typedef data_store<cuda_storage<triplet>, storage_info_t> data_store_t; data_store_t ds(si, [](int i, int j, int k) { return triplet{i, j, k}; }, "ds"); auto view = make_host_view<access_mode::read_write>(ds); for (int i = view.begin<0>(); i <= view.end<0>(); ++i) { for (int j = view.begin<1>(); j <= view.end<1>(); ++j) { for (int k = view.begin<2>(); k <= view.end<2>(); ++k) { EXPECT_EQ(view(i, j, k), (triplet{i, j, k})); } } } for (int i = view.total_begin<0>(); i <= view.total_end<0>(); ++i) { for (int j = view.total_begin<1>(); j <= view.total_end<1>(); ++j) { for (int k = view.total_begin<2>(); k <= view.total_end<2>(); ++k) { EXPECT_EQ(view(i, j, k), (triplet{i, j, k})); } } } } TEST(DataViewTest, TargetView) { typedef cuda_storage_info<0, layout_map<0, 1, 2>, halo<1, 2, 3>> storage_info_t; storage_info_t si(2 + 2, 2 + 4, 2 + 6); typedef data_store<cuda_storage<triplet>, storage_info_t> data_store_t; data_store_t ds(si, [](int i, int j, int k) { return triplet{i, j, k}; }, "ds"); auto target_view = make_target_view<access_mode::read_only>(ds); auto device_view = make_device_view<access_mode::read_only>(ds); ASSERT_EQ(advanced::get_raw_pointer_of(device_view), advanced::get_raw_pointer_of(target_view)); } TEST(DataViewTest, CheckMemorySpace) { typedef cuda_storage_info<0, layout_map<0, 1, 2>, halo<1, 2, 3>> storage_info_t; storage_info_t si(2 + 2 * 1, 2 + 2 * 3, 2 + 2 * 3); typedef data_store<cuda_storage<int>, storage_info_t> data_store_t; data_store_t ds(si, -1, "ds"); auto view = make_device_view<access_mode::read_write>(ds); EXPECT_THROW(view(0, 0, 1), std::runtime_error); }
fd38f4b7e70f22bfa9130243a7a00229657c76ad.cu
/* * GridTools * * Copyright (c) 2014-2019, ETH Zurich * All rights reserved. * * Please, refer to the LICENSE file in the root directory. * SPDX-License-Identifier: BSD-3-Clause */ #include "../../tools/multiplet.hpp" #include "gtest/gtest.h" #include <gridtools/common/gt_assert.hpp> #include <gridtools/storage/data_store.hpp> #include <gridtools/storage/storage_cuda/cuda_storage.hpp> #include <gridtools/storage/storage_cuda/cuda_storage_info.hpp> #include <gridtools/storage/storage_cuda/data_view_helpers.hpp> using namespace gridtools; const int c_x = 3 /* < 32 for this test */, c_y = 5, c_z = 7; template <typename View> __global__ void mul2(View s) { bool correct_dims = (s.template total_length<0>() == c_x) && (s.template total_length<1>() == c_y) && (s.template total_length<2>() == c_z); bool correct_size = (s.padded_total_length() == 32 * c_y * c_z); s(0, 0, 0) *= (2 * correct_dims * correct_size); s(1, 0, 0) *= (2 * correct_dims * correct_size); } TEST(DataViewTest, Simple) { typedef cuda_storage_info<0, layout_map<2, 1, 0>> storage_info_t; typedef data_store<cuda_storage<double>, storage_info_t> data_store_t; // create and allocate a data_store GT_CONSTEXPR storage_info_t si(c_x, c_y, c_z); data_store_t ds(si); // create a rw view and fill with some data data_view<data_store_t> dv = make_host_view(ds); GT_STATIC_ASSERT((is_data_view<decltype(dv)>::value), "is_data_view check failed"); dv(0, 0, 0) = 50; dv(1, 0, 0) = 60; // check if interface works ASSERT_TRUE((si.length<0>() == dv.length<0>())); ASSERT_TRUE((si.length<1>() == dv.length<1>())); ASSERT_TRUE((si.length<2>() == dv.length<2>())); ASSERT_TRUE((si.total_length<0>() == dv.total_length<0>())); ASSERT_TRUE((si.total_length<1>() == dv.total_length<1>())); ASSERT_TRUE((si.total_length<2>() == dv.total_length<2>())); ASSERT_TRUE((si.begin<0>() == dv.begin<0>())); ASSERT_TRUE((si.begin<1>() == dv.begin<1>())); ASSERT_TRUE((si.begin<2>() == dv.begin<2>())); ASSERT_TRUE((si.total_begin<0>() == dv.total_begin<0>())); ASSERT_TRUE((si.total_begin<1>() == dv.total_begin<1>())); ASSERT_TRUE((si.total_begin<2>() == dv.total_begin<2>())); ASSERT_TRUE((si.end<0>() == dv.end<0>())); ASSERT_TRUE((si.end<1>() == dv.end<1>())); ASSERT_TRUE((si.end<2>() == dv.end<2>())); ASSERT_TRUE((si.total_end<0>() == dv.total_end<0>())); ASSERT_TRUE((si.total_end<1>() == dv.total_end<1>())); ASSERT_TRUE((si.total_end<2>() == dv.total_end<2>())); ASSERT_TRUE((si.padded_total_length() == dv.padded_total_length())); ASSERT_TRUE(si.index(1, 0, 1) == c_y * 32 + 1); // check if data is there EXPECT_EQ(50, dv(0, 0, 0)); EXPECT_EQ(dv(1, 0, 0), 60); // create a ro view data_view<data_store_t, access_mode::read_only> dvro = make_host_view<access_mode::read_only>(ds); // check if data is the same EXPECT_EQ(50, dvro(0, 0, 0)); EXPECT_EQ(dvro(1, 0, 0), 60); // views are valid (ds <--> dv and ds <--> dvro) EXPECT_TRUE(check_consistency(ds, dv)); EXPECT_TRUE(check_consistency(ds, dvro)); EXPECT_TRUE(dv.valid()); EXPECT_TRUE(dvro.valid()); // sync, create a device view and call kernel ds.sync(); auto devv = make_device_view(ds); GT_STATIC_ASSERT((is_data_view<decltype(devv)>::value), "is_data_view check failed"); EXPECT_TRUE(check_consistency(ds, devv)); EXPECT_FALSE(check_consistency(ds, dv)); EXPECT_FALSE(check_consistency(ds, dvro)); EXPECT_TRUE(devv.valid()); EXPECT_FALSE(dv.valid()); EXPECT_FALSE(dvro.valid()); mul2<<<1, 1>>>(devv); // sync and check if read only host view is valid ds.sync(); EXPECT_FALSE(check_consistency(ds, devv)); 
EXPECT_FALSE(check_consistency(ds, dv)); EXPECT_TRUE(check_consistency(ds, dvro)); EXPECT_FALSE(devv.valid()); EXPECT_FALSE(dv.valid()); EXPECT_TRUE(dvro.valid()); // check if data is the same EXPECT_EQ(100, dvro(0, 0, 0)); EXPECT_EQ(dvro(1, 0, 0), 120); // create and allocate a second storage data_store_t ds_tmp(si); // again create a view data_view<data_store_t> dv_tmp = make_host_view<access_mode::read_write>(ds_tmp); // the combination ds_tmp <--> dv/dvro is not a valid view EXPECT_FALSE(check_consistency(ds, dv_tmp)); EXPECT_FALSE(check_consistency(ds_tmp, devv)); EXPECT_FALSE(check_consistency(ds_tmp, dvro)); EXPECT_TRUE(check_consistency(ds_tmp, dv_tmp)); EXPECT_TRUE(dv_tmp.valid()); EXPECT_FALSE(devv.valid()); EXPECT_TRUE(dvro.valid()); EXPECT_TRUE(dv_tmp.valid()); // destroy a storage, this should also invalidate the views ds.reset(); EXPECT_FALSE(check_consistency(ds, dv)); EXPECT_FALSE(check_consistency(ds, dvro)); } TEST(DataViewTest, ZeroSize) { typedef cuda_storage_info<0, layout_map<0>> storage_info_t; typedef data_store<cuda_storage<double>, storage_info_t> data_store_t; // create and allocate a data_store data_store_t ds; make_host_view<access_mode::read_only>(ds); make_device_view<access_mode::read_only>(ds); } TEST(DataViewTest, Looping) { typedef cuda_storage_info<0, layout_map<0, 1, 2>, halo<1, 2, 3>> storage_info_t; storage_info_t si(2 + 2, 2 + 4, 2 + 6); typedef data_store<cuda_storage<triplet>, storage_info_t> data_store_t; data_store_t ds(si, [](int i, int j, int k) { return triplet{i, j, k}; }, "ds"); auto view = make_host_view<access_mode::read_write>(ds); for (int i = view.begin<0>(); i <= view.end<0>(); ++i) { for (int j = view.begin<1>(); j <= view.end<1>(); ++j) { for (int k = view.begin<2>(); k <= view.end<2>(); ++k) { EXPECT_EQ(view(i, j, k), (triplet{i, j, k})); } } } for (int i = view.total_begin<0>(); i <= view.total_end<0>(); ++i) { for (int j = view.total_begin<1>(); j <= view.total_end<1>(); ++j) { for (int k = view.total_begin<2>(); k <= view.total_end<2>(); ++k) { EXPECT_EQ(view(i, j, k), (triplet{i, j, k})); } } } } TEST(DataViewTest, TargetView) { typedef cuda_storage_info<0, layout_map<0, 1, 2>, halo<1, 2, 3>> storage_info_t; storage_info_t si(2 + 2, 2 + 4, 2 + 6); typedef data_store<cuda_storage<triplet>, storage_info_t> data_store_t; data_store_t ds(si, [](int i, int j, int k) { return triplet{i, j, k}; }, "ds"); auto target_view = make_target_view<access_mode::read_only>(ds); auto device_view = make_device_view<access_mode::read_only>(ds); ASSERT_EQ(advanced::get_raw_pointer_of(device_view), advanced::get_raw_pointer_of(target_view)); } TEST(DataViewTest, CheckMemorySpace) { typedef cuda_storage_info<0, layout_map<0, 1, 2>, halo<1, 2, 3>> storage_info_t; storage_info_t si(2 + 2 * 1, 2 + 2 * 3, 2 + 2 * 3); typedef data_store<cuda_storage<int>, storage_info_t> data_store_t; data_store_t ds(si, -1, "ds"); auto view = make_device_view<access_mode::read_write>(ds); EXPECT_THROW(view(0, 0, 1), std::runtime_error); }
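The test above drives the host-view/device-view lifecycle: a view is only valid for the memory space that the last ds.sync() made current, and check_consistency() reports whether a given view still matches its data_store. Below is a minimal sketch of that round trip, assuming the same GridTools cuda_storage headers and calls used in the test; the scale kernel and round_trip function are illustrative names, not part of the library.

#include <gridtools/storage/data_store.hpp>
#include <gridtools/storage/storage_cuda/cuda_storage.hpp>
#include <gridtools/storage/storage_cuda/cuda_storage_info.hpp>
#include <gridtools/storage/storage_cuda/data_view_helpers.hpp>

using namespace gridtools;

// trivial kernel operating through a device view, as mul2 does in the test above
template <typename View>
__global__ void scale(View v) { v(0, 0, 0) *= 2.0; }

bool round_trip() {
    typedef cuda_storage_info<0, layout_map<2, 1, 0>> storage_info_t;
    typedef data_store<cuda_storage<double>, storage_info_t> data_store_t;

    storage_info_t si(3, 5, 7);
    data_store_t ds(si);

    auto hv = make_host_view(ds);      // read-write host view
    hv(0, 0, 0) = 50;

    ds.sync();                         // push host data to the device side
    auto dv = make_device_view(ds);    // device view is now the valid one
    scale<<<1, 1>>>(dv);

    ds.sync();                         // pull the result back; device view goes stale
    auto ro = make_host_view<access_mode::read_only>(ds);
    return check_consistency(ds, ro) && ro(0, 0, 0) == 100;
}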
148789724fd3828fb60febe55312b1248c506fbf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include <cuda_utils.cuh> #include <cuml/common/cuml_allocator.hpp> #include <iostream> #include "cache/cache.cuh" #include "test_utils.h" namespace MLCommon { namespace Cache { class CacheTest : public ::testing::Test { protected: void SetUp() override { CUDA_CHECK(hipStreamCreate(&stream)); allocator = std::shared_ptr<deviceAllocator>(new defaultDeviceAllocator()); allocate(x_dev, n_rows * n_cols); updateDevice(x_dev, x_host, n_rows * n_cols, stream); allocate(tile_dev, n_rows * n_cols); allocate(keys_dev, n); allocate(is_cached, n); allocate(cache_idx_dev, n); updateDevice(keys_dev, keys_host, n, stream); allocate(zeroone_dev, n); allocate(int_array_dev, 12); updateDevice(zeroone_dev, zeroone_host, n, stream); allocate(argfirst_dev, n_rows); } void TearDown() override { CUDA_CHECK(hipFree(x_dev)); CUDA_CHECK(hipFree(tile_dev)); CUDA_CHECK(hipFree(keys_dev)); CUDA_CHECK(hipFree(cache_idx_dev)); CUDA_CHECK(hipFree(is_cached)); CUDA_CHECK(hipFree(zeroone_dev)); CUDA_CHECK(hipFree(int_array_dev)); CUDA_CHECK(hipFree(argfirst_dev)); CUDA_CHECK(hipStreamDestroy(stream)); } int n_rows = 10; int n_cols = 2; int n = 10; float *x_dev; int *keys_dev; int *cache_idx_dev; int *int_array_dev; float x_host[20] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}; float *tile_dev; int keys_host[10] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; int zeroone_host[10] = {0, 0, 0, 0, 0, 1, 1, 1, 1, 1}; int *zeroone_dev; int *argfirst_dev; std::shared_ptr<deviceAllocator> allocator; hipStream_t stream; bool *is_cached; }; __global__ void test_argfirst(const int *array, int n, int *res) { int k = threadIdx.x; res[k] = arg_first_ge(array, n, k); } TEST_F(CacheTest, TestArgFirst) { int argfirst_host[10] = {0, 1, 1, 1, 2, 2, 4, 4, 6, 7}; updateDevice(argfirst_dev, argfirst_host, 10, stream); hipLaunchKernelGGL(( test_argfirst), dim3(1), dim3(10), 0, 0, argfirst_dev, 10, int_array_dev); int idx_exp[10] = {0, 1, 4, 6, 6, 8, 8, 9, 10, 10}; EXPECT_TRUE(devArrMatchHost(idx_exp, int_array_dev, 10, Compare<int>())); } __global__ void test_nth_occurrence(const int *array, int n, int val, int *res) { int k = threadIdx.x; res[k] = find_nth_occurrence(array, n, val, k); } TEST_F(CacheTest, TestNthOccurrence) { hipLaunchKernelGGL(( test_nth_occurrence), dim3(1), dim3(10), 0, 0, zeroone_dev, 10, 0, int_array_dev); int idx_exp[10] = {0, 1, 2, 3, 4, -1, -1, -1, -1, -1}; EXPECT_TRUE(devArrMatchHost(idx_exp, int_array_dev, 10, Compare<int>())); hipLaunchKernelGGL(( test_nth_occurrence), dim3(1), dim3(10), 0, 0, zeroone_dev, 10, 1, int_array_dev); int idx_exp2[10] = {5, 6, 7, 8, 9, -1, -1, -1, -1, -1}; EXPECT_TRUE(devArrMatchHost(idx_exp2, int_array_dev, 10, Compare<int>())); } template <int nthreads, int associativity> __global__ void test_rank_set_entries(const int *array, int n, int *res) { const 
int items_per_thread = ceildiv(associativity, nthreads); __shared__ int rank[items_per_thread * nthreads]; rank_set_entries<nthreads, associativity>(array, n, rank); int block_offset = blockIdx.x * associativity; for (int i = 0; i < items_per_thread; i++) { int k = threadIdx.x * items_per_thread + i; if (k < associativity && block_offset + k < n) res[block_offset + k] = rank[k]; } } TEST_F(CacheTest, TestRankEntries) { // Three cache sets, with 4 elements each int val[12] = {12, 11, 10, 9, 8, 6, 7, 5, 4, 1, 2, 3}; updateDevice(int_array_dev, val, 12, stream); const int nthreads = 4; hipLaunchKernelGGL(( test_rank_set_entries<nthreads, 4>) , dim3(3), dim3(nthreads), 0, 0, int_array_dev, 12, int_array_dev); // expect that each block is sorted separately // the indices that sorts the block are the following int idx_exp[12] = {3, 2, 1, 0, 3, 1, 2, 0, 3, 0, 1, 2}; EXPECT_TRUE(devArrMatchHost(idx_exp, int_array_dev, 12, Compare<int>())); // do the same with less than 4 threads const int nthreads3 = 3; updateDevice(int_array_dev, val, 12, stream); hipLaunchKernelGGL(( test_rank_set_entries<nthreads3, 4>) , dim3(3), dim3(nthreads3), 0, 0, int_array_dev, 12, int_array_dev); EXPECT_TRUE(devArrMatchHost(idx_exp, int_array_dev, 12, Compare<int>())); } TEST_F(CacheTest, TestSimple) { float cache_size = 5 * sizeof(float) * n_cols / (1024 * 1024.0); Cache<float, 2> cache(allocator, stream, n_cols, cache_size); ASSERT_EQ(cache.GetSize(), 4); cache.GetCacheIdx(keys_dev, n, cache_idx_dev, is_cached, stream); EXPECT_TRUE(devArrMatch(false, is_cached, n, Compare<bool>())); int cache_set[10] = {0, 1, 0, 1, 0, 1, 0, 1, 0, 1}; EXPECT_TRUE(devArrMatchHost(cache_set, cache_idx_dev, n, Compare<int>())); int n_cached = 1; cache.GetCacheIdxPartitioned(keys_dev, n, cache_idx_dev, &n_cached, stream); EXPECT_EQ(n_cached, 0); } TEST_F(CacheTest, TestAssignCacheIdx) { float cache_size = 5 * sizeof(float) * n_cols / (1024 * 1024.0); Cache<float, 2> cache(allocator, stream, n_cols, cache_size); ASSERT_EQ(cache.GetSize(), 4); int n_cached; cache.GetCacheIdxPartitioned(keys_dev, n, cache_idx_dev, &n_cached, stream); cache.AssignCacheIdx(keys_dev, n, cache_idx_dev, stream); int cache_idx_exp[10] = {0, 1, -1, -1, -1, 2, 3, -1, -1, -1}; int keys_exp[10] = {8, 6, 4, 2, 0, 9, 7, 5, 3, 1}; EXPECT_TRUE(devArrMatchHost(cache_idx_exp, cache_idx_dev, n, Compare<int>())); EXPECT_TRUE(devArrMatchHost(keys_exp, keys_dev, n, Compare<int>())); // Now the elements that have been assigned a cache slot are considered cached // A subsequent cache lookup should give us their cache indices. 
updateDevice(keys_dev, keys_host, n, stream); cache.GetCacheIdxPartitioned(keys_dev, n, cache_idx_dev, &n_cached, stream); ASSERT_EQ(n_cached, 4); int keys_exp2[4] = {6, 7, 8, 9}; EXPECT_TRUE(devArrMatchHost(keys_exp2, keys_dev, n_cached, Compare<int>())); int cache_idx_exp2[4] = {1, 3, 0, 2}; EXPECT_TRUE( devArrMatchHost(cache_idx_exp2, cache_idx_dev, n_cached, Compare<int>())); // Find cache slots, when not available int non_cached = n - n_cached; cache.AssignCacheIdx(keys_dev + n_cached, non_cached, cache_idx_dev + n_cached, stream); int cache_idx_exp3[6] = {-1, -1, -1, -1, -1, -1}; EXPECT_TRUE(devArrMatchHost(cache_idx_exp3, cache_idx_dev + n_cached, non_cached, Compare<int>())); } TEST_F(CacheTest, TestEvict) { float cache_size = 8 * sizeof(float) * n_cols / (1024 * 1024.0); Cache<float, 4> cache(allocator, stream, n_cols, cache_size); ASSERT_EQ(cache.GetSize(), 8); int n_cached; cache.GetCacheIdxPartitioned(keys_dev, 5, cache_idx_dev, &n_cached, stream); ASSERT_EQ(n_cached, 0); cache.AssignCacheIdx(keys_dev, 5, cache_idx_dev, stream); int cache_idx_exp[5] = {0, 1, 2, 4, 5}; int keys_exp[5] = {4, 2, 0, 3, 1}; EXPECT_TRUE(devArrMatchHost(cache_idx_exp, cache_idx_dev, 5, Compare<int>())); EXPECT_TRUE(devArrMatchHost(keys_exp, keys_dev, 5, Compare<int>())); int idx_host[10] = {2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; updateDevice(keys_dev, idx_host, 10, stream); cache.GetCacheIdxPartitioned(keys_dev, 10, cache_idx_dev, &n_cached, stream); EXPECT_EQ(n_cached, 3); int cache_idx_exp2[3] = {1, 4, 0}; EXPECT_TRUE( devArrMatchHost(cache_idx_exp2, cache_idx_dev, 3, Compare<int>())); cache.AssignCacheIdx(keys_dev + n_cached, 10 - n_cached, cache_idx_dev + n_cached, stream); int keys_exp3[10] = {2, 3, 4, 10, 8, 6, 11, 9, 7, 5}; int cache_idx_exp3[10] = {1, 4, 0, 3, 2, -1, 6, 7, 5, -1}; EXPECT_TRUE(devArrMatchHost(keys_exp3, keys_dev, 10, Compare<int>())); EXPECT_TRUE( devArrMatchHost(cache_idx_exp3, cache_idx_dev, 10, Compare<int>())); } TEST_F(CacheTest, TestStoreCollect) { float cache_size = 8 * sizeof(float) * n_cols / (1024 * 1024.0); Cache<float, 4> cache(allocator, stream, n_cols, cache_size); ASSERT_EQ(cache.GetSize(), 8); int n_cached; cache.GetCacheIdxPartitioned(keys_dev, 5, cache_idx_dev, &n_cached, stream); cache.AssignCacheIdx(keys_dev, 5, cache_idx_dev, stream); cache.GetCacheIdxPartitioned(keys_dev, 5, cache_idx_dev, &n_cached, stream); cache.StoreVecs(x_dev, 10, n_cached, cache_idx_dev, stream, keys_dev); cache.GetCacheIdxPartitioned(keys_dev, 5, cache_idx_dev, &n_cached, stream); cache.GetVecs(cache_idx_dev, n_cached, tile_dev, stream); int cache_idx_host[10]; updateHost(cache_idx_host, cache_idx_dev, n_cached, stream); int keys_host[10]; updateHost(keys_host, keys_dev, n_cached, stream); CUDA_CHECK(hipStreamSynchronize(stream)); for (int i = 0; i < n_cached; i++) { EXPECT_TRUE(devArrMatch(x_dev + keys_host[i] * n_cols, tile_dev + i * n_cols, n_cols, Compare<int>())) << "vector " << i; } for (int k = 0; k < 4; k++) { cache.GetCacheIdxPartitioned(keys_dev, 10, cache_idx_dev, &n_cached, stream); if (k == 0) { EXPECT_EQ(n_cached, 5); } else { EXPECT_EQ(n_cached, 8); } cache.AssignCacheIdx(keys_dev + n_cached, 10 - n_cached, cache_idx_dev + n_cached, stream); cache.StoreVecs(x_dev, 10, 10 - n_cached, cache_idx_dev + n_cached, stream, keys_dev + n_cached); cache.GetVecs(cache_idx_dev, 10, tile_dev, stream); updateHost(cache_idx_host, cache_idx_dev, 10, stream); updateHost(keys_host, keys_dev, 10, stream); CUDA_CHECK(hipStreamSynchronize(stream)); for (int i = 0; i < 10; i++) { if 
(cache_idx_host[i] >= 0) { EXPECT_TRUE(devArrMatch(x_dev + keys_host[i] * n_cols, tile_dev + i * n_cols, n_cols, Compare<int>())) << "vector " << i; } } } } }; // end namespace Cache }; // end namespace MLCommon
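For reference, the cache-set expectation in TestSimple above (cache_set[10] = {0, 1, 0, 1, ...}) is what key % n_sets produces when the cache holds GetSize() = 4 vectors with associativity 2, i.e. two sets. A small host-side illustration of that expectation follows; it is purely illustrative and does not touch the Cache internals.

#include <cstdio>

int main() {
    const int cache_entries = 4;   // Cache<float, 2> sized for 5 vectors -> GetSize() == 4
    const int associativity = 2;
    const int n_sets = cache_entries / associativity;   // 2 sets
    for (int key = 0; key < 10; ++key)
        printf("key %d -> set %d\n", key, key % n_sets); // prints 0,1,0,1,... as the test expects
    return 0;
}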
148789724fd3828fb60febe55312b1248c506fbf.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include <cuda_utils.cuh> #include <cuml/common/cuml_allocator.hpp> #include <iostream> #include "cache/cache.cuh" #include "test_utils.h" namespace MLCommon { namespace Cache { class CacheTest : public ::testing::Test { protected: void SetUp() override { CUDA_CHECK(cudaStreamCreate(&stream)); allocator = std::shared_ptr<deviceAllocator>(new defaultDeviceAllocator()); allocate(x_dev, n_rows * n_cols); updateDevice(x_dev, x_host, n_rows * n_cols, stream); allocate(tile_dev, n_rows * n_cols); allocate(keys_dev, n); allocate(is_cached, n); allocate(cache_idx_dev, n); updateDevice(keys_dev, keys_host, n, stream); allocate(zeroone_dev, n); allocate(int_array_dev, 12); updateDevice(zeroone_dev, zeroone_host, n, stream); allocate(argfirst_dev, n_rows); } void TearDown() override { CUDA_CHECK(cudaFree(x_dev)); CUDA_CHECK(cudaFree(tile_dev)); CUDA_CHECK(cudaFree(keys_dev)); CUDA_CHECK(cudaFree(cache_idx_dev)); CUDA_CHECK(cudaFree(is_cached)); CUDA_CHECK(cudaFree(zeroone_dev)); CUDA_CHECK(cudaFree(int_array_dev)); CUDA_CHECK(cudaFree(argfirst_dev)); CUDA_CHECK(cudaStreamDestroy(stream)); } int n_rows = 10; int n_cols = 2; int n = 10; float *x_dev; int *keys_dev; int *cache_idx_dev; int *int_array_dev; float x_host[20] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}; float *tile_dev; int keys_host[10] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; int zeroone_host[10] = {0, 0, 0, 0, 0, 1, 1, 1, 1, 1}; int *zeroone_dev; int *argfirst_dev; std::shared_ptr<deviceAllocator> allocator; cudaStream_t stream; bool *is_cached; }; __global__ void test_argfirst(const int *array, int n, int *res) { int k = threadIdx.x; res[k] = arg_first_ge(array, n, k); } TEST_F(CacheTest, TestArgFirst) { int argfirst_host[10] = {0, 1, 1, 1, 2, 2, 4, 4, 6, 7}; updateDevice(argfirst_dev, argfirst_host, 10, stream); test_argfirst<<<1, 10>>>(argfirst_dev, 10, int_array_dev); int idx_exp[10] = {0, 1, 4, 6, 6, 8, 8, 9, 10, 10}; EXPECT_TRUE(devArrMatchHost(idx_exp, int_array_dev, 10, Compare<int>())); } __global__ void test_nth_occurrence(const int *array, int n, int val, int *res) { int k = threadIdx.x; res[k] = find_nth_occurrence(array, n, val, k); } TEST_F(CacheTest, TestNthOccurrence) { test_nth_occurrence<<<1, 10>>>(zeroone_dev, 10, 0, int_array_dev); int idx_exp[10] = {0, 1, 2, 3, 4, -1, -1, -1, -1, -1}; EXPECT_TRUE(devArrMatchHost(idx_exp, int_array_dev, 10, Compare<int>())); test_nth_occurrence<<<1, 10>>>(zeroone_dev, 10, 1, int_array_dev); int idx_exp2[10] = {5, 6, 7, 8, 9, -1, -1, -1, -1, -1}; EXPECT_TRUE(devArrMatchHost(idx_exp2, int_array_dev, 10, Compare<int>())); } template <int nthreads, int associativity> __global__ void test_rank_set_entries(const int *array, int n, int *res) { const int items_per_thread = ceildiv(associativity, nthreads); __shared__ int rank[items_per_thread * nthreads]; rank_set_entries<nthreads, associativity>(array, n, rank); int block_offset = 
blockIdx.x * associativity; for (int i = 0; i < items_per_thread; i++) { int k = threadIdx.x * items_per_thread + i; if (k < associativity && block_offset + k < n) res[block_offset + k] = rank[k]; } } TEST_F(CacheTest, TestRankEntries) { // Three cache sets, with 4 elements each int val[12] = {12, 11, 10, 9, 8, 6, 7, 5, 4, 1, 2, 3}; updateDevice(int_array_dev, val, 12, stream); const int nthreads = 4; test_rank_set_entries<nthreads, 4> <<<3, nthreads>>>(int_array_dev, 12, int_array_dev); // expect that each block is sorted separately // the indices that sorts the block are the following int idx_exp[12] = {3, 2, 1, 0, 3, 1, 2, 0, 3, 0, 1, 2}; EXPECT_TRUE(devArrMatchHost(idx_exp, int_array_dev, 12, Compare<int>())); // do the same with less than 4 threads const int nthreads3 = 3; updateDevice(int_array_dev, val, 12, stream); test_rank_set_entries<nthreads3, 4> <<<3, nthreads3>>>(int_array_dev, 12, int_array_dev); EXPECT_TRUE(devArrMatchHost(idx_exp, int_array_dev, 12, Compare<int>())); } TEST_F(CacheTest, TestSimple) { float cache_size = 5 * sizeof(float) * n_cols / (1024 * 1024.0); Cache<float, 2> cache(allocator, stream, n_cols, cache_size); ASSERT_EQ(cache.GetSize(), 4); cache.GetCacheIdx(keys_dev, n, cache_idx_dev, is_cached, stream); EXPECT_TRUE(devArrMatch(false, is_cached, n, Compare<bool>())); int cache_set[10] = {0, 1, 0, 1, 0, 1, 0, 1, 0, 1}; EXPECT_TRUE(devArrMatchHost(cache_set, cache_idx_dev, n, Compare<int>())); int n_cached = 1; cache.GetCacheIdxPartitioned(keys_dev, n, cache_idx_dev, &n_cached, stream); EXPECT_EQ(n_cached, 0); } TEST_F(CacheTest, TestAssignCacheIdx) { float cache_size = 5 * sizeof(float) * n_cols / (1024 * 1024.0); Cache<float, 2> cache(allocator, stream, n_cols, cache_size); ASSERT_EQ(cache.GetSize(), 4); int n_cached; cache.GetCacheIdxPartitioned(keys_dev, n, cache_idx_dev, &n_cached, stream); cache.AssignCacheIdx(keys_dev, n, cache_idx_dev, stream); int cache_idx_exp[10] = {0, 1, -1, -1, -1, 2, 3, -1, -1, -1}; int keys_exp[10] = {8, 6, 4, 2, 0, 9, 7, 5, 3, 1}; EXPECT_TRUE(devArrMatchHost(cache_idx_exp, cache_idx_dev, n, Compare<int>())); EXPECT_TRUE(devArrMatchHost(keys_exp, keys_dev, n, Compare<int>())); // Now the elements that have been assigned a cache slot are considered cached // A subsequent cache lookup should give us their cache indices. 
updateDevice(keys_dev, keys_host, n, stream); cache.GetCacheIdxPartitioned(keys_dev, n, cache_idx_dev, &n_cached, stream); ASSERT_EQ(n_cached, 4); int keys_exp2[4] = {6, 7, 8, 9}; EXPECT_TRUE(devArrMatchHost(keys_exp2, keys_dev, n_cached, Compare<int>())); int cache_idx_exp2[4] = {1, 3, 0, 2}; EXPECT_TRUE( devArrMatchHost(cache_idx_exp2, cache_idx_dev, n_cached, Compare<int>())); // Find cache slots, when not available int non_cached = n - n_cached; cache.AssignCacheIdx(keys_dev + n_cached, non_cached, cache_idx_dev + n_cached, stream); int cache_idx_exp3[6] = {-1, -1, -1, -1, -1, -1}; EXPECT_TRUE(devArrMatchHost(cache_idx_exp3, cache_idx_dev + n_cached, non_cached, Compare<int>())); } TEST_F(CacheTest, TestEvict) { float cache_size = 8 * sizeof(float) * n_cols / (1024 * 1024.0); Cache<float, 4> cache(allocator, stream, n_cols, cache_size); ASSERT_EQ(cache.GetSize(), 8); int n_cached; cache.GetCacheIdxPartitioned(keys_dev, 5, cache_idx_dev, &n_cached, stream); ASSERT_EQ(n_cached, 0); cache.AssignCacheIdx(keys_dev, 5, cache_idx_dev, stream); int cache_idx_exp[5] = {0, 1, 2, 4, 5}; int keys_exp[5] = {4, 2, 0, 3, 1}; EXPECT_TRUE(devArrMatchHost(cache_idx_exp, cache_idx_dev, 5, Compare<int>())); EXPECT_TRUE(devArrMatchHost(keys_exp, keys_dev, 5, Compare<int>())); int idx_host[10] = {2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; updateDevice(keys_dev, idx_host, 10, stream); cache.GetCacheIdxPartitioned(keys_dev, 10, cache_idx_dev, &n_cached, stream); EXPECT_EQ(n_cached, 3); int cache_idx_exp2[3] = {1, 4, 0}; EXPECT_TRUE( devArrMatchHost(cache_idx_exp2, cache_idx_dev, 3, Compare<int>())); cache.AssignCacheIdx(keys_dev + n_cached, 10 - n_cached, cache_idx_dev + n_cached, stream); int keys_exp3[10] = {2, 3, 4, 10, 8, 6, 11, 9, 7, 5}; int cache_idx_exp3[10] = {1, 4, 0, 3, 2, -1, 6, 7, 5, -1}; EXPECT_TRUE(devArrMatchHost(keys_exp3, keys_dev, 10, Compare<int>())); EXPECT_TRUE( devArrMatchHost(cache_idx_exp3, cache_idx_dev, 10, Compare<int>())); } TEST_F(CacheTest, TestStoreCollect) { float cache_size = 8 * sizeof(float) * n_cols / (1024 * 1024.0); Cache<float, 4> cache(allocator, stream, n_cols, cache_size); ASSERT_EQ(cache.GetSize(), 8); int n_cached; cache.GetCacheIdxPartitioned(keys_dev, 5, cache_idx_dev, &n_cached, stream); cache.AssignCacheIdx(keys_dev, 5, cache_idx_dev, stream); cache.GetCacheIdxPartitioned(keys_dev, 5, cache_idx_dev, &n_cached, stream); cache.StoreVecs(x_dev, 10, n_cached, cache_idx_dev, stream, keys_dev); cache.GetCacheIdxPartitioned(keys_dev, 5, cache_idx_dev, &n_cached, stream); cache.GetVecs(cache_idx_dev, n_cached, tile_dev, stream); int cache_idx_host[10]; updateHost(cache_idx_host, cache_idx_dev, n_cached, stream); int keys_host[10]; updateHost(keys_host, keys_dev, n_cached, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); for (int i = 0; i < n_cached; i++) { EXPECT_TRUE(devArrMatch(x_dev + keys_host[i] * n_cols, tile_dev + i * n_cols, n_cols, Compare<int>())) << "vector " << i; } for (int k = 0; k < 4; k++) { cache.GetCacheIdxPartitioned(keys_dev, 10, cache_idx_dev, &n_cached, stream); if (k == 0) { EXPECT_EQ(n_cached, 5); } else { EXPECT_EQ(n_cached, 8); } cache.AssignCacheIdx(keys_dev + n_cached, 10 - n_cached, cache_idx_dev + n_cached, stream); cache.StoreVecs(x_dev, 10, 10 - n_cached, cache_idx_dev + n_cached, stream, keys_dev + n_cached); cache.GetVecs(cache_idx_dev, 10, tile_dev, stream); updateHost(cache_idx_host, cache_idx_dev, 10, stream); updateHost(keys_host, keys_dev, 10, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); for (int i = 0; i < 10; i++) { if 
(cache_idx_host[i] >= 0) { EXPECT_TRUE(devArrMatch(x_dev + keys_host[i] * n_cols, tile_dev + i * n_cols, n_cols, Compare<int>())) << "vector " << i; } } } } }; // end namespace Cache }; // end namespace MLCommon
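The .hip/.cu pair above shows the mechanical rewrite hipify applies to every kernel launch in this set: the CUDA triple-chevron form becomes hipLaunchKernelGGL with explicit dim3 grid and block arguments followed by the shared-memory size and stream (both 0 here), and each cuda* runtime call maps to its hip* counterpart. A minimal standalone CUDA sketch with the corresponding HIP forms noted in comments (the fill kernel and buffer names are illustrative):

#include <cstdio>

__global__ void fill(int *out, int value) { out[threadIdx.x] = value; }

int main() {
    int *d_out;
    cudaMalloc(&d_out, 10 * sizeof(int));   // HIP: hipMalloc(&d_out, 10 * sizeof(int));

    // CUDA launch; hipify rewrites it to:
    // hipLaunchKernelGGL((fill), dim3(1), dim3(10), 0, 0, d_out, 42);
    fill<<<1, 10>>>(d_out, 42);

    int h_out[10];
    cudaMemcpy(h_out, d_out, sizeof(h_out), // HIP: hipMemcpy(..., hipMemcpyDeviceToHost);
               cudaMemcpyDeviceToHost);
    printf("%d\n", h_out[0]);
    cudaFree(d_out);                        // HIP: hipFree(d_out);
    return 0;
}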
68e4f8b06221f78087d4e56bf920314564f6702c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void kernel( int *a )
{
    // flat index from the 2-D block/thread coordinates
    int ix  = blockIdx.x*blockDim.x + threadIdx.x;
    int iy  = blockIdx.y*blockDim.y + threadIdx.y;
    int idx = iy * blockDim.x * gridDim.x + ix;

    a[idx] = a[idx] + 1;
}

int main()
{
    int *host_array;
    int *dev_array;

    host_array = (int *) malloc(sizeof(int)*16);
    hipMalloc(&dev_array, sizeof(int)*16);
    hipMemset(dev_array, 0, sizeof(int)*16);   // zero all 16 ints of the device buffer

    dim3 block(2,2);
    dim3 threadPerBlock(2,2);
    hipLaunchKernelGGL(( kernel), dim3(block), dim3(threadPerBlock), 0, 0, dev_array);

    hipMemcpy(host_array, dev_array, sizeof(int)*16, hipMemcpyDeviceToHost);

    for(int i = 0; i < 16; i++)
        printf(" %d ", host_array[i]);
    printf("\n");

    free(host_array);
    hipFree(dev_array);
    hipDeviceReset();
    return 0;
}
68e4f8b06221f78087d4e56bf920314564f6702c.cu
#include <stdio.h>

__global__ void kernel( int *a )
{
    // flat index from the 2-D block/thread coordinates
    int ix  = blockIdx.x*blockDim.x + threadIdx.x;
    int iy  = blockIdx.y*blockDim.y + threadIdx.y;
    int idx = iy * blockDim.x * gridDim.x + ix;

    a[idx] = a[idx] + 1;
}

int main()
{
    int *host_array;
    int *dev_array;

    host_array = (int *) malloc(sizeof(int)*16);
    cudaMalloc(&dev_array, sizeof(int)*16);
    cudaMemset(dev_array, 0, sizeof(int)*16);   // zero all 16 ints of the device buffer

    dim3 block(2,2);
    dim3 threadPerBlock(2,2);
    kernel<<<block, threadPerBlock>>>(dev_array);

    cudaMemcpy(host_array, dev_array, sizeof(int)*16, cudaMemcpyDeviceToHost);

    for(int i = 0; i < 16; i++)
        printf(" %d ", host_array[i]);
    printf("\n");

    free(host_array);
    cudaFree(dev_array);
    cudaDeviceReset();
    return 0;
}
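Neither half of the pair above checks the return codes of its runtime calls, unlike the CUDA_CHECK-wrapped tests elsewhere in this set. A sketch of the same program with a small checking helper follows; checkCuda is a local, illustrative macro, not a library API.

#include <cstdio>
#include <cstdlib>

#define checkCuda(call)                                            \
    do {                                                           \
        cudaError_t err__ = (call);                                \
        if (err__ != cudaSuccess) {                                \
            fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,     \
                    cudaGetErrorString(err__));                    \
            exit(EXIT_FAILURE);                                    \
        }                                                          \
    } while (0)

__global__ void kernel(int *a) {
    int ix  = blockIdx.x * blockDim.x + threadIdx.x;
    int iy  = blockIdx.y * blockDim.y + threadIdx.y;
    int idx = iy * blockDim.x * gridDim.x + ix;
    a[idx] += 1;
}

int main() {
    int host_array[16];
    int *dev_array;
    checkCuda(cudaMalloc(&dev_array, sizeof(int) * 16));
    checkCuda(cudaMemset(dev_array, 0, sizeof(int) * 16));
    kernel<<<dim3(2, 2), dim3(2, 2)>>>(dev_array);
    checkCuda(cudaGetLastError());   // surfaces launch-time errors
    checkCuda(cudaMemcpy(host_array, dev_array, sizeof(host_array),
                         cudaMemcpyDeviceToHost));
    checkCuda(cudaFree(dev_array));
    return 0;
}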
ed47fef69327d72692036069e6539622368bf4e2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "simple_lock.h" #define NBLOCKS_TRUE 512 #define NTHREADS_TRUE 512 * 2 __global__ void blockCounterUnlocked( int *nblocks ){ if(threadIdx.x == 0){ *nblocks = *nblocks + 1; } } __global__ void blockCounter1( Lock lock, int *nblocks ){ if(threadIdx.x == 0){ lock.lock(); *nblocks = *nblocks + 1; lock.unlock(); } } // THIS KERNEL WILL CREATE A DIVERGENCE CONDITION // AND STALL OUT. DON'T USE IT. __global__ void blockCounter2( Lock lock, int *nblocks ){ lock.lock(); if(threadIdx.x == 0){ *nblocks = *nblocks + 1 ; } lock.unlock(); } int main(){ int nblocks_host, *nblocks_dev; Lock lock; float elapsedTime; hipEvent_t start, stop; hipMalloc((void**) &nblocks_dev, sizeof(int)); //blockCounterUnlocked: nblocks_host = 0; hipMemcpy( nblocks_dev, &nblocks_host, sizeof(int), hipMemcpyHostToDevice ); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord( start, 0 ); hipLaunchKernelGGL(( blockCounterUnlocked), dim3(NBLOCKS_TRUE), dim3(NTHREADS_TRUE), 0, 0, nblocks_dev); hipEventRecord( stop, 0 ); hipEventSynchronize( stop ); hipEventElapsedTime( &elapsedTime, start, stop ); hipEventDestroy( start ); hipEventDestroy( stop ); hipMemcpy( &nblocks_host, nblocks_dev, sizeof(int), hipMemcpyDeviceToHost ); printf("blockCounterUnlocked <<< %d, %d >>> () counted %d blocks in %f ms.\n", NBLOCKS_TRUE, NTHREADS_TRUE, nblocks_host, elapsedTime); //blockCounter1: nblocks_host = 0; hipMemcpy( nblocks_dev, &nblocks_host, sizeof(int), hipMemcpyHostToDevice ); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord( start, 0 ); hipLaunchKernelGGL(( blockCounter1), dim3(NBLOCKS_TRUE), dim3(NTHREADS_TRUE), 0, 0, lock, nblocks_dev); hipEventRecord( stop, 0 ); hipEventSynchronize( stop ); hipEventElapsedTime( &elapsedTime, start, stop ); hipEventDestroy( start ); hipEventDestroy( stop ); hipMemcpy( &nblocks_host, nblocks_dev, sizeof(int), hipMemcpyDeviceToHost ); printf("blockCounter1 <<< %d, %d >>> () counted %d blocks in %f ms.\n", NBLOCKS_TRUE, NTHREADS_TRUE, nblocks_host, elapsedTime); hipFree(nblocks_dev); }
ed47fef69327d72692036069e6539622368bf4e2.cu
#include <stdio.h> #include "simple_lock.h" #define NBLOCKS_TRUE 512 #define NTHREADS_TRUE 512 * 2 __global__ void blockCounterUnlocked( int *nblocks ){ if(threadIdx.x == 0){ *nblocks = *nblocks + 1; } } __global__ void blockCounter1( Lock lock, int *nblocks ){ if(threadIdx.x == 0){ lock.lock(); *nblocks = *nblocks + 1; lock.unlock(); } } // THIS KERNEL WILL CREATE A DIVERGENCE CONDITION // AND STALL OUT. DON'T USE IT. __global__ void blockCounter2( Lock lock, int *nblocks ){ lock.lock(); if(threadIdx.x == 0){ *nblocks = *nblocks + 1 ; } lock.unlock(); } int main(){ int nblocks_host, *nblocks_dev; Lock lock; float elapsedTime; cudaEvent_t start, stop; cudaMalloc((void**) &nblocks_dev, sizeof(int)); //blockCounterUnlocked: nblocks_host = 0; cudaMemcpy( nblocks_dev, &nblocks_host, sizeof(int), cudaMemcpyHostToDevice ); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord( start, 0 ); blockCounterUnlocked<<<NBLOCKS_TRUE, NTHREADS_TRUE>>>(nblocks_dev); cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); cudaEventElapsedTime( &elapsedTime, start, stop ); cudaEventDestroy( start ); cudaEventDestroy( stop ); cudaMemcpy( &nblocks_host, nblocks_dev, sizeof(int), cudaMemcpyDeviceToHost ); printf("blockCounterUnlocked <<< %d, %d >>> () counted %d blocks in %f ms.\n", NBLOCKS_TRUE, NTHREADS_TRUE, nblocks_host, elapsedTime); //blockCounter1: nblocks_host = 0; cudaMemcpy( nblocks_dev, &nblocks_host, sizeof(int), cudaMemcpyHostToDevice ); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord( start, 0 ); blockCounter1<<<NBLOCKS_TRUE, NTHREADS_TRUE>>>(lock, nblocks_dev); cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); cudaEventElapsedTime( &elapsedTime, start, stop ); cudaEventDestroy( start ); cudaEventDestroy( stop ); cudaMemcpy( &nblocks_host, nblocks_dev, sizeof(int), cudaMemcpyDeviceToHost ); printf("blockCounter1 <<< %d, %d >>> () counted %d blocks in %f ms.\n", NBLOCKS_TRUE, NTHREADS_TRUE, nblocks_host, elapsedTime); cudaFree(nblocks_dev); }
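simple_lock.h is not included in this dump, so the Lock type used above is an assumption; tests of this shape are usually built on a spinlock over a device integer, sketched below as a guess at what that header provides. The divergence warning on blockCounter2 follows from such a spinlock: when every thread of a warp calls lock(), only one lane can acquire it, and on hardware with lockstep warp execution that lane cannot reach unlock() while its siblings keep spinning.

#include <cuda_runtime.h>

// Hypothetical spinlock, assumed to resemble the Lock in simple_lock.h.
struct Lock {
    int *mutex;

    Lock() {
        cudaMalloc((void **)&mutex, sizeof(int));
        cudaMemset(mutex, 0, sizeof(int));   // 0 == unlocked
    }

    // Freed explicitly: the object is passed to kernels by value,
    // so a freeing destructor would run on every copy.
    void destroy() { cudaFree(mutex); }

    __device__ void lock() {
        while (atomicCAS(mutex, 0, 1) != 0) { /* spin until we flip 0 -> 1 */ }
    }

    __device__ void unlock() {
        __threadfence();        // make the critical section's writes visible first
        atomicExch(mutex, 0);
    }
};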
d7bbde174ef48024dd1ff8b3aedbdb77791218cd.hip
// !!! This is a file automatically generated by hipify!!! /** * covariance.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Updated by Grigori Fursin (http://cTuning.org/lab/people/gfursin) * to work with Collective Mind Framework and OpenME interfqce for automatic * and collective tuning and data mining: http://cTuning.org * */ #ifndef WINDOWS #include <unistd.h> #endif #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <hip/hip_runtime.h> #include "polybench.h" #ifdef OPENME #include <openme.h> #endif //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 1.05 #define GPU_DEVICE 0 /* Problem size */ #define M 512 //2048 #define N 512 //2048 /* Thread block dimensions for kernel 1*/ #define DIM_THREAD_BLOCK_KERNEL_1_X 256 #define DIM_THREAD_BLOCK_KERNEL_1_Y 1 /* Thread block dimensions for kernel 2*/ #define DIM_THREAD_BLOCK_KERNEL_2_X 32 #define DIM_THREAD_BLOCK_KERNEL_2_Y 8 /* Thread block dimensions for kernel 3*/ #define DIM_THREAD_BLOCK_KERNEL_3_X 256 #define DIM_THREAD_BLOCK_KERNEL_3_Y 1 #define sqrt_of_array_cell(x,j) sqrt(x[j]) #define FLOAT_N 3214212.01 #define EPS 0.005 /* Can switch DATA_TYPE between float and double */ # ifndef DATA_TYPE # define DATA_TYPE float # endif void init_arrays(DATA_TYPE* data) { int i, j; for (i = 1; i < (M+1); i++) { for (j = 1; j < (N+1); j++) { data[i*(N+1) + j] = ((DATA_TYPE) i*j) / M; } } } void covariance(DATA_TYPE* data, DATA_TYPE* symmat, DATA_TYPE* mean) { int i, j, j1,j2; /* Determine mean of column vectors of input data matrix */ for (j = 1; j < (M+1); j++) { mean[j] = 0.0; for (i = 1; i < (N+1); i++) { mean[j] += data[i*(M+1) + j]; } mean[j] /= FLOAT_N; } /* Center the column vectors. */ for (i = 1; i < (N+1); i++) { for (j = 1; j < (M+1); j++) { data[i*(M+1) + j] -= mean[j]; } } /* Calculate the m * m covariance matrix. 
*/ for (j1 = 1; j1 < (M+1); j1++) { for (j2 = j1; j2 < (M+1); j2++) { symmat[j1*(M+1) + j2] = 0.0; for (i = 1; i < N+1; i++) { symmat[j1*(M+1) + j2] += data[i*(M+1) + j1] * data[i*(M+1) + j2]; } symmat[j2*(M+1) + j1] = symmat[j1*(M+1) + j2]; } } } void compareResults(DATA_TYPE* symmat, DATA_TYPE* symmat_outputFromGpu) { int i,j,fail; fail = 0; for (i=1; i < (M+1); i++) { for (j=1; j < (N+1); j++) { if (percentDiff(symmat[i*(N+1) + j], symmat_outputFromGpu[i*(N+1) + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { int devID = 0; hipError_t error; hipDeviceProp_t deviceProp; error = hipGetDevice(&devID); hipGetDeviceProperties(&deviceProp, GPU_DEVICE); if (deviceProp.computeMode == hipComputeModeProhibited) { printf("Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n"); exit(EXIT_SUCCESS); } if (error != hipSuccess) printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); else printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); hipSetDevice( GPU_DEVICE ); } __global__ void mean_kernel(DATA_TYPE *mean, DATA_TYPE *data) { int j = blockIdx.x * blockDim.x + threadIdx.x + 1; if ((j >= 1) && (j < (M+1))) { mean[j] = 0.0; int i; for(i = 1; i < (N+1); i++) { mean[j] += data[i * (M+1) + j]; } mean[j] /= (DATA_TYPE)FLOAT_N; } } __global__ void reduce_kernel(DATA_TYPE *mean, DATA_TYPE *data) { int j = blockIdx.x * blockDim.x + threadIdx.x + 1; int i = blockIdx.y * blockDim.y + threadIdx.y + 1; if ((i >= 1) && (i < (N+1)) && (j >= 1) && (j < (M+1))) { data[i * (M+1) + j] -= mean[j]; } } __global__ void covar_kernel(DATA_TYPE *symmat, DATA_TYPE *data) { int j1 = blockIdx.x * blockDim.x + threadIdx.x + 1; int i, j2; if ((j1 >= 1) && (j1 < (M+1))) { for (j2 = j1; j2 < (M+1); j2++) { symmat[j1*(M+1) + j2] = 0.0; for(i = 1; i < (N+1); i++) { symmat[j1 * (M+1) + j2] += data[i *(M+1) + j1] * data[i *(M+1) + j2]; } symmat[j2 * (M+1) + j1] = symmat[j1 * (M+1) + j2]; } } } void covarianceCuda(DATA_TYPE* data, DATA_TYPE* symmat, DATA_TYPE* mean, DATA_TYPE* symmat_outputFromGpu) { hipError_t error; double t_start, t_end; DATA_TYPE *data_gpu; DATA_TYPE *mean_gpu; DATA_TYPE *symmat_gpu; error=hipMalloc((void **)&data_gpu, sizeof(DATA_TYPE) * (M+1) * (N+1)); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMalloc((void **)&symmat_gpu, sizeof(DATA_TYPE) * (M+1) * (M+1)); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMalloc((void **)&mean_gpu, sizeof(DATA_TYPE) * (M+1)); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMemcpy(data_gpu, data, sizeof(DATA_TYPE) * (M+1) * (N+1), hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMemcpy(symmat_gpu, symmat, sizeof(DATA_TYPE) * (M+1) * (M+1), hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMemcpy(mean_gpu, mean, sizeof(DATA_TYPE) * (M+1), hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMalloc d_A 
returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } dim3 block1(DIM_THREAD_BLOCK_KERNEL_1_X, DIM_THREAD_BLOCK_KERNEL_1_Y); dim3 grid1((size_t)(ceil((float)M) / ((float)DIM_THREAD_BLOCK_KERNEL_1_X)), 1); dim3 block2(DIM_THREAD_BLOCK_KERNEL_2_X, DIM_THREAD_BLOCK_KERNEL_2_Y); dim3 grid2((size_t)(ceil((float)M) / ((float)DIM_THREAD_BLOCK_KERNEL_2_X)), (size_t)(ceil((float)N) / ((float)DIM_THREAD_BLOCK_KERNEL_2_X))); dim3 block3(DIM_THREAD_BLOCK_KERNEL_3_X, DIM_THREAD_BLOCK_KERNEL_3_Y); dim3 grid3((size_t)(ceil((float)M) / ((float)DIM_THREAD_BLOCK_KERNEL_3_X)), 1); // t_start = rtclock(); hipLaunchKernelGGL(( mean_kernel), dim3(grid1), dim3(block1), 0, 0, mean_gpu,data_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( reduce_kernel), dim3(grid2), dim3(block2), 0, 0, mean_gpu,data_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( covar_kernel), dim3(grid3), dim3(block3), 0, 0, symmat_gpu,data_gpu); hipDeviceSynchronize(); // t_end = rtclock(); // fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); error=hipMemcpy(symmat_outputFromGpu, symmat_gpu, sizeof(DATA_TYPE) * (M+1) * (N+1), hipMemcpyDeviceToHost); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } hipFree(data_gpu); hipFree(symmat_gpu); hipFree(mean_gpu); } int main() { /* Prepare ctuning vars */ long ct_repeat=0; long ct_repeat_max=1; double t_start, t_end; DATA_TYPE* data; DATA_TYPE* symmat; DATA_TYPE* mean; DATA_TYPE* symmat_outputFromGpu; #ifdef OPENME openme_init(NULL,NULL,NULL,0); openme_callback("PROGRAM_START", NULL); #endif /* Run kernel. */ if (getenv("CT_REPEAT_MAIN")!=NULL) ct_repeat_max=atol(getenv("CT_REPEAT_MAIN")); data = (DATA_TYPE*)malloc((M+1)*(N+1)*sizeof(DATA_TYPE)); symmat = (DATA_TYPE*)malloc((M+1)*(M+1)*sizeof(DATA_TYPE)); mean = (DATA_TYPE*)malloc((M+1)*sizeof(DATA_TYPE)); symmat_outputFromGpu = (DATA_TYPE*)malloc((M+1)*(M+1)*sizeof(DATA_TYPE)); srand(1); init_arrays(data); GPU_argv_init(); #ifdef OPENME openme_callback("ACC_KERNEL_START", NULL); #endif for (ct_repeat=0; ct_repeat<ct_repeat_max; ct_repeat++) { covarianceCuda(data, symmat, mean, symmat_outputFromGpu); } #ifdef OPENME openme_callback("ACC_KERNEL_END", NULL); #endif srand(1); init_arrays(data); #ifdef OPENME openme_callback("KERNEL_START", NULL); #endif for (ct_repeat=0; ct_repeat<ct_repeat_max; ct_repeat++) { covariance(data, symmat, mean); } #ifdef OPENME openme_callback("KERNEL_END", NULL); #endif compareResults(symmat, symmat_outputFromGpu); free(data); free(symmat); free(mean); free(symmat_outputFromGpu); #ifdef OPENME openme_callback("PROGRAM_END", NULL); #endif return 0; }
d7bbde174ef48024dd1ff8b3aedbdb77791218cd.cu
/** * covariance.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Updated by Grigori Fursin (http://cTuning.org/lab/people/gfursin) * to work with Collective Mind Framework and OpenME interfqce for automatic * and collective tuning and data mining: http://cTuning.org * */ #ifndef WINDOWS #include <unistd.h> #endif #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <cuda.h> #include "polybench.h" #ifdef OPENME #include <openme.h> #endif //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 1.05 #define GPU_DEVICE 0 /* Problem size */ #define M 512 //2048 #define N 512 //2048 /* Thread block dimensions for kernel 1*/ #define DIM_THREAD_BLOCK_KERNEL_1_X 256 #define DIM_THREAD_BLOCK_KERNEL_1_Y 1 /* Thread block dimensions for kernel 2*/ #define DIM_THREAD_BLOCK_KERNEL_2_X 32 #define DIM_THREAD_BLOCK_KERNEL_2_Y 8 /* Thread block dimensions for kernel 3*/ #define DIM_THREAD_BLOCK_KERNEL_3_X 256 #define DIM_THREAD_BLOCK_KERNEL_3_Y 1 #define sqrt_of_array_cell(x,j) sqrt(x[j]) #define FLOAT_N 3214212.01 #define EPS 0.005 /* Can switch DATA_TYPE between float and double */ # ifndef DATA_TYPE # define DATA_TYPE float # endif void init_arrays(DATA_TYPE* data) { int i, j; for (i = 1; i < (M+1); i++) { for (j = 1; j < (N+1); j++) { data[i*(N+1) + j] = ((DATA_TYPE) i*j) / M; } } } void covariance(DATA_TYPE* data, DATA_TYPE* symmat, DATA_TYPE* mean) { int i, j, j1,j2; /* Determine mean of column vectors of input data matrix */ for (j = 1; j < (M+1); j++) { mean[j] = 0.0; for (i = 1; i < (N+1); i++) { mean[j] += data[i*(M+1) + j]; } mean[j] /= FLOAT_N; } /* Center the column vectors. */ for (i = 1; i < (N+1); i++) { for (j = 1; j < (M+1); j++) { data[i*(M+1) + j] -= mean[j]; } } /* Calculate the m * m covariance matrix. 
*/ for (j1 = 1; j1 < (M+1); j1++) { for (j2 = j1; j2 < (M+1); j2++) { symmat[j1*(M+1) + j2] = 0.0; for (i = 1; i < N+1; i++) { symmat[j1*(M+1) + j2] += data[i*(M+1) + j1] * data[i*(M+1) + j2]; } symmat[j2*(M+1) + j1] = symmat[j1*(M+1) + j2]; } } } void compareResults(DATA_TYPE* symmat, DATA_TYPE* symmat_outputFromGpu) { int i,j,fail; fail = 0; for (i=1; i < (M+1); i++) { for (j=1; j < (N+1); j++) { if (percentDiff(symmat[i*(N+1) + j], symmat_outputFromGpu[i*(N+1) + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { int devID = 0; cudaError_t error; cudaDeviceProp deviceProp; error = cudaGetDevice(&devID); cudaGetDeviceProperties(&deviceProp, GPU_DEVICE); if (deviceProp.computeMode == cudaComputeModeProhibited) { printf("Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n"); exit(EXIT_SUCCESS); } if (error != cudaSuccess) printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); else printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); cudaSetDevice( GPU_DEVICE ); } __global__ void mean_kernel(DATA_TYPE *mean, DATA_TYPE *data) { int j = blockIdx.x * blockDim.x + threadIdx.x + 1; if ((j >= 1) && (j < (M+1))) { mean[j] = 0.0; int i; for(i = 1; i < (N+1); i++) { mean[j] += data[i * (M+1) + j]; } mean[j] /= (DATA_TYPE)FLOAT_N; } } __global__ void reduce_kernel(DATA_TYPE *mean, DATA_TYPE *data) { int j = blockIdx.x * blockDim.x + threadIdx.x + 1; int i = blockIdx.y * blockDim.y + threadIdx.y + 1; if ((i >= 1) && (i < (N+1)) && (j >= 1) && (j < (M+1))) { data[i * (M+1) + j] -= mean[j]; } } __global__ void covar_kernel(DATA_TYPE *symmat, DATA_TYPE *data) { int j1 = blockIdx.x * blockDim.x + threadIdx.x + 1; int i, j2; if ((j1 >= 1) && (j1 < (M+1))) { for (j2 = j1; j2 < (M+1); j2++) { symmat[j1*(M+1) + j2] = 0.0; for(i = 1; i < (N+1); i++) { symmat[j1 * (M+1) + j2] += data[i *(M+1) + j1] * data[i *(M+1) + j2]; } symmat[j2 * (M+1) + j1] = symmat[j1 * (M+1) + j2]; } } } void covarianceCuda(DATA_TYPE* data, DATA_TYPE* symmat, DATA_TYPE* mean, DATA_TYPE* symmat_outputFromGpu) { cudaError_t error; double t_start, t_end; DATA_TYPE *data_gpu; DATA_TYPE *mean_gpu; DATA_TYPE *symmat_gpu; error=cudaMalloc((void **)&data_gpu, sizeof(DATA_TYPE) * (M+1) * (N+1)); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=cudaMalloc((void **)&symmat_gpu, sizeof(DATA_TYPE) * (M+1) * (M+1)); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=cudaMalloc((void **)&mean_gpu, sizeof(DATA_TYPE) * (M+1)); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=cudaMemcpy(data_gpu, data, sizeof(DATA_TYPE) * (M+1) * (N+1), cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=cudaMemcpy(symmat_gpu, symmat, sizeof(DATA_TYPE) * (M+1) * (M+1), cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=cudaMemcpy(mean_gpu, mean, sizeof(DATA_TYPE) * (M+1), cudaMemcpyHostToDevice); if (error != cudaSuccess) 
{ printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } dim3 block1(DIM_THREAD_BLOCK_KERNEL_1_X, DIM_THREAD_BLOCK_KERNEL_1_Y); dim3 grid1((size_t)(ceil((float)M) / ((float)DIM_THREAD_BLOCK_KERNEL_1_X)), 1); dim3 block2(DIM_THREAD_BLOCK_KERNEL_2_X, DIM_THREAD_BLOCK_KERNEL_2_Y); dim3 grid2((size_t)(ceil((float)M) / ((float)DIM_THREAD_BLOCK_KERNEL_2_X)), (size_t)(ceil((float)N) / ((float)DIM_THREAD_BLOCK_KERNEL_2_X))); dim3 block3(DIM_THREAD_BLOCK_KERNEL_3_X, DIM_THREAD_BLOCK_KERNEL_3_Y); dim3 grid3((size_t)(ceil((float)M) / ((float)DIM_THREAD_BLOCK_KERNEL_3_X)), 1); // t_start = rtclock(); mean_kernel<<<grid1, block1>>>(mean_gpu,data_gpu); cudaThreadSynchronize(); reduce_kernel<<<grid2, block2>>>(mean_gpu,data_gpu); cudaThreadSynchronize(); covar_kernel<<<grid3, block3>>>(symmat_gpu,data_gpu); cudaThreadSynchronize(); // t_end = rtclock(); // fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); error=cudaMemcpy(symmat_outputFromGpu, symmat_gpu, sizeof(DATA_TYPE) * (M+1) * (N+1), cudaMemcpyDeviceToHost); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } cudaFree(data_gpu); cudaFree(symmat_gpu); cudaFree(mean_gpu); } int main() { /* Prepare ctuning vars */ long ct_repeat=0; long ct_repeat_max=1; double t_start, t_end; DATA_TYPE* data; DATA_TYPE* symmat; DATA_TYPE* mean; DATA_TYPE* symmat_outputFromGpu; #ifdef OPENME openme_init(NULL,NULL,NULL,0); openme_callback("PROGRAM_START", NULL); #endif /* Run kernel. */ if (getenv("CT_REPEAT_MAIN")!=NULL) ct_repeat_max=atol(getenv("CT_REPEAT_MAIN")); data = (DATA_TYPE*)malloc((M+1)*(N+1)*sizeof(DATA_TYPE)); symmat = (DATA_TYPE*)malloc((M+1)*(M+1)*sizeof(DATA_TYPE)); mean = (DATA_TYPE*)malloc((M+1)*sizeof(DATA_TYPE)); symmat_outputFromGpu = (DATA_TYPE*)malloc((M+1)*(M+1)*sizeof(DATA_TYPE)); srand(1); init_arrays(data); GPU_argv_init(); #ifdef OPENME openme_callback("ACC_KERNEL_START", NULL); #endif for (ct_repeat=0; ct_repeat<ct_repeat_max; ct_repeat++) { covarianceCuda(data, symmat, mean, symmat_outputFromGpu); } #ifdef OPENME openme_callback("ACC_KERNEL_END", NULL); #endif srand(1); init_arrays(data); #ifdef OPENME openme_callback("KERNEL_START", NULL); #endif for (ct_repeat=0; ct_repeat<ct_repeat_max; ct_repeat++) { covariance(data, symmat, mean); } #ifdef OPENME openme_callback("KERNEL_END", NULL); #endif compareResults(symmat, symmat_outputFromGpu); free(data); free(symmat); free(mean); free(symmat_outputFromGpu); #ifdef OPENME openme_callback("PROGRAM_END", NULL); #endif return 0; }
bafa9f17504506d5ff613dd4820d1957bc924b2c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // MIT License // // Copyright (c) 2018 Advanced Micro Devices, Inc. All Rights Reserved. // // Permission is hereby granted, free of charge, to any person // obtaining a copy of this software and associated documentation // files (the "Software"), to deal in the Software without // restriction, including without limitation the rights to use, copy, // modify, merge, publish, distribute, sublicense, and/or sell copies // of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be // included in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS // BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN // ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN // CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. // These test only check if the code compiles, we don't test // functionality yet. // Reference: Cuda Toolkit v 9.2.88 // 1.3 Single Presicion Mathematical Functions // 1.5 Single Presicion Intrinsics #include <stdio.h> #include <hip/hip_host_runtime_api.h> #define N 10 __global__ void testFloatMath(float *b) { int i = blockIdx.x; float f = (float) i; float dummy; float dummy2; int idummy; if (i<N) { // 1.3 Single Presicion Mathematical Functions b[i] = acosf(f); b[i] += acoshf(f); b[i] += asinf(f); b[i] += asinhf(f); b[i] += atan2f(f,f); b[i] += atanf(f); b[i] += atanhf(f); b[i] += cbrtf(f); b[i] += ceilf(f); //b[i] += copysign(f, -f); // Fixme: Add to cuda_open headers b[i] += cosf(f); b[i] += coshf(f); b[i] += cospif(f); b[i] += cyl_bessel_i0f(f); b[i] += cyl_bessel_i1f(f); b[i] += erfcf(f); b[i] += erfcinvf(f); b[i] += erfcxf(f); b[i] += erff(f); b[i] += erfinvf(f); b[i] += exp10f(f); b[i] += exp2f(f); b[i] += expf(f); b[i] += expm1f(f); b[i] += fabsf(f); b[i] += fdimf(f,f); b[i] += fdividef(f,f); b[i] += floorf(f); b[i] += fmaf(f,f,f); b[i] += fmaxf(f,f); b[i] += fminf(f,f); b[i] += fmodf(f,f); // b[i] += frexpf(f, &idummy); // Fixme: missing function _nv_frexpf b[i] += hypotf(f,f); b[i] += (float) ilogbf(f); b[i] += isfinite(f); b[i] += isinf(f); b[i] += isnan(f); b[i] += j0f(f); b[i] += j1f(f); // b[i] += jnf(1,f); // Fixme: missing function _nv_jnf b[i] += ldexpf(f,1); b[i] += lgammaf(f); b[i] += (float) llrintf(f); b[i] += (float) llroundf(f); b[i] += log10f(f); b[i] += log1pf(f); b[i] += log2f(f); b[i] += logbf(f); b[i] += logf(f); b[i] += (float) lrintf(f); b[i] += (float) lroundf(f); // b[i] += modff(f, &dummy); // Fixme: missing function _nv_modff // b[i] += nanf(""); // Fixme: Add to cuda_open headers b[i] += nearbyintf(f); b[i] += nextafterf(f,f); b[i] += norm3df(f,f,f); b[i] += norm4df(f,f,f,f); b[i] += normcdff(f); b[i] += normcdfinvf(f); // b[i] += normf(1,&f); // Fixme: missing function __nv_normf b[i] += powf(f,f); b[i] += rcbrtf(f); b[i] += remainderf(f,f); // b[i] += remquof(f,f, &idummy); // Fixme: missing function __nv_remquof b[i] += rhypotf(f,f); b[i] += rintf(f); // b[i] += rnorm3df(f,f,f); // Fixme: missing function __nv_rnorm3df // b[i] += rnorm4df(f,f,f,f); // Fixme: missing function __nv_rnorm4df // b[i] += rnormf(1, &f); // 
Fixme: missing function __nv_rnormf b[i] += roundf(f); b[i] += rsqrtf(f); //b[i] += scalblnf(f, 1); // Fixme: missing function __nv_scalbnf //b[i] += scalbnf(f, 1); // Fixme: missing function __nv_scalbnf b[i] += signbit(f); // sincosf(f, &dummy, &dummy2); // Fixme: missing function __nv_sincosf // sincospif(f, &dummy, &dummy2); // Fixme: missing function __nv_sincospif b[i] += sinf(f); b[i] += sinhf(f); b[i] += sinpif(f); b[i] += sqrtf(f); b[i] += tanf(f); b[i] += tanhf(f); b[i] += tgammaf(f); b[i] += truncf(f); b[i] += y0f(f); b[i] += y1f(f); // b[i] += ynf(1,f); // Fixme: missing function __nv_ynf // 1.5 Single Presicion Intrinsics b[i] += __cosf(f); b[i] += __exp10f(f); b[i] += __expf(f); // b[i] += __fadd_rd(f, f); // Fixme: missing function __nv_fadd_rd // b[i] += __fadd_rn(f, f); // Fixme: missing function __nv_fadd_rn // b[i] += __fadd_ru(f, f); // Fixme: missing function __nv_fadd_ru // b[i] += __fadd_rz(f, f); // Fixme: missing function __nv_fadd_rz // b[i] += __fdiv_rd(f, f); // Fixme: missing function __nv_fdiv_rd // b[i] += __fdiv_rn(f, f); // Fixme: missing function __nv_fdiv_rn // b[i] += __fdiv_ru(f, f); // Fixme: missing function __nv_fdiv_ru // b[i] += __fdiv_rz(f, f); // Fixme: missing function __nv_fdiv_rz b[i] += __fdividef(f, f); // b[i] += __fmaf_rd(f, f, f); // Fixme: missing function __nv_fmaf_rd // b[i] += __fmaf_rn(f, f, f); // Fixme: missing function __nv_fmaf_rn // b[i] += __fmaf_ru(f, f, f); // Fixme: missing function __nv_fmaf_ru // b[i] += __fmaf_rz(f, f, f); // Fixme: missing function __nv_fmaf_rz // b[i] += __fmul_rd(f, f); // Fixme: missing function: __nv_fmul_rd // b[i] += __fmul_rn(f, f); // Fixme: missing function: __nv_fmul_rn // b[i] += __fmul_ru(f, f); // Fixme: missing function: __nv_fmul_ru // b[i] += __fmul_rz(f, f); // Fixme: missing function: __nv_fmul_rz // b[i] += __frcp_rd(f); // Fixme: missing function: __nv_frcp_rd // b[i] += __frcp_rn(f); // Fixme: missing function: __nv_frcp_rn // b[i] += __frcp_ru(f); // Fixme: missing function: __nv_frcp_ru // b[i] += __frcp_rz(f); // Fixme: missing function: __nv_frcp_rz // b[i] += __fsqrt_rd(f); // Fixme: missing function: __nv_fsqrt_rd // b[i] += __fsqrt_rn(f); // Fixme: missing function: __nv_fsqrt_rn // b[i] += __fsqrt_ru(f); // Fixme: missing function: __nv_fsqrt_ru // b[i] += __fsqrt_rz(f); // Fixme: missing function: __nv_fsqrt_rz // b[i] += __fsub_rd(f, f); // Fixme: missinf function: __nv_fsub_rd b[i] += __log10f(f); b[i] += __log2f(f); b[i] += __logf(f); b[i] += __powf(f, f); b[i] += __saturatef(f); // __sincosf(f, &dummy, &dummy2); // Fixme: indirect call error to __nv_fast_sincosf b[i] += __sinf(f); b[i] += __tanf(f); } } void printArray(float *array) { printf("["); bool first = true; for (int i = 0; i<N; ++i) { if (first) { printf("%f", array[i]); first = false; } else { printf(", %f", array[i]); } } printf("]"); } void printHipError(hipError_t error) { printf("Hip Error: %s\n", hipGetErrorString(error)); } bool hipCallSuccessful(hipError_t error) { if (error != hipSuccess) printHipError(error); return error == hipSuccess; } bool deviceCanCompute(int deviceID) { bool canCompute = false; hipDeviceProp_t deviceProp; bool devicePropIsAvailable = hipCallSuccessful(hipGetDeviceProperties(&deviceProp, deviceID)); if (devicePropIsAvailable) { canCompute = deviceProp.computeMode != hipComputeModeProhibited; if (!canCompute) printf("Compute mode is prohibited\n"); } return canCompute; } bool deviceIsAvailable(int *deviceID) { return hipCallSuccessful(hipGetDevice(deviceID)); } // We always use 
device 0 bool haveComputeDevice() { int deviceID = 0; return deviceIsAvailable(&deviceID) && deviceCanCompute(deviceID); } int main() { float hostArray[N]; if (!haveComputeDevice()) { printf("No compute device available\n"); return 0; } for (int i = 0; i<N; ++i) hostArray[i] = 0.0; printf("Array content before kernel:\n"); printArray(hostArray); printf("\n"); float *deviceArray; if (!hipCallSuccessful(hipMalloc((void **)&deviceArray, N*sizeof(float)))) { printf("Unable to allocate device memory\n"); return 0; } hipLaunchKernelGGL((testFloatMath), dim3(N), dim3(1), 0, 0, deviceArray); if (hipCallSuccessful(hipMemcpy(hostArray, deviceArray, N * sizeof(float), hipMemcpyDeviceToHost))) { printf("Array content after kernel:\n"); printArray(hostArray); printf("\n"); } else { printf("Unable to copy memory from device to host\n"); } hipFree(deviceArray); return 0; }
bafa9f17504506d5ff613dd4820d1957bc924b2c.cu
// MIT License // // Copyright (c) 2018 Advanced Micro Devices, Inc. All Rights Reserved. // // Permission is hereby granted, free of charge, to any person // obtaining a copy of this software and associated documentation // files (the "Software"), to deal in the Software without // restriction, including without limitation the rights to use, copy, // modify, merge, publish, distribute, sublicense, and/or sell copies // of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be // included in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS // BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN // ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN // CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. // These test only check if the code compiles, we don't test // functionality yet. // Reference: Cuda Toolkit v 9.2.88 // 1.3 Single Presicion Mathematical Functions // 1.5 Single Presicion Intrinsics #include <stdio.h> #include <hip/hip_host_runtime_api.h> #define N 10 __global__ void testFloatMath(float *b) { int i = blockIdx.x; float f = (float) i; float dummy; float dummy2; int idummy; if (i<N) { // 1.3 Single Presicion Mathematical Functions b[i] = acosf(f); b[i] += acoshf(f); b[i] += asinf(f); b[i] += asinhf(f); b[i] += atan2f(f,f); b[i] += atanf(f); b[i] += atanhf(f); b[i] += cbrtf(f); b[i] += ceilf(f); //b[i] += copysign(f, -f); // Fixme: Add to cuda_open headers b[i] += cosf(f); b[i] += coshf(f); b[i] += cospif(f); b[i] += cyl_bessel_i0f(f); b[i] += cyl_bessel_i1f(f); b[i] += erfcf(f); b[i] += erfcinvf(f); b[i] += erfcxf(f); b[i] += erff(f); b[i] += erfinvf(f); b[i] += exp10f(f); b[i] += exp2f(f); b[i] += expf(f); b[i] += expm1f(f); b[i] += fabsf(f); b[i] += fdimf(f,f); b[i] += fdividef(f,f); b[i] += floorf(f); b[i] += fmaf(f,f,f); b[i] += fmaxf(f,f); b[i] += fminf(f,f); b[i] += fmodf(f,f); // b[i] += frexpf(f, &idummy); // Fixme: missing function _nv_frexpf b[i] += hypotf(f,f); b[i] += (float) ilogbf(f); b[i] += isfinite(f); b[i] += isinf(f); b[i] += isnan(f); b[i] += j0f(f); b[i] += j1f(f); // b[i] += jnf(1,f); // Fixme: missing function _nv_jnf b[i] += ldexpf(f,1); b[i] += lgammaf(f); b[i] += (float) llrintf(f); b[i] += (float) llroundf(f); b[i] += log10f(f); b[i] += log1pf(f); b[i] += log2f(f); b[i] += logbf(f); b[i] += logf(f); b[i] += (float) lrintf(f); b[i] += (float) lroundf(f); // b[i] += modff(f, &dummy); // Fixme: missing function _nv_modff // b[i] += nanf(""); // Fixme: Add to cuda_open headers b[i] += nearbyintf(f); b[i] += nextafterf(f,f); b[i] += norm3df(f,f,f); b[i] += norm4df(f,f,f,f); b[i] += normcdff(f); b[i] += normcdfinvf(f); // b[i] += normf(1,&f); // Fixme: missing function __nv_normf b[i] += powf(f,f); b[i] += rcbrtf(f); b[i] += remainderf(f,f); // b[i] += remquof(f,f, &idummy); // Fixme: missing function __nv_remquof b[i] += rhypotf(f,f); b[i] += rintf(f); // b[i] += rnorm3df(f,f,f); // Fixme: missing function __nv_rnorm3df // b[i] += rnorm4df(f,f,f,f); // Fixme: missing function __nv_rnorm4df // b[i] += rnormf(1, &f); // Fixme: missing function __nv_rnormf b[i] += roundf(f); b[i] += rsqrtf(f); //b[i] += 
scalblnf(f, 1); // Fixme: missing function __nv_scalbnf //b[i] += scalbnf(f, 1); // Fixme: missing function __nv_scalbnf b[i] += signbit(f); // sincosf(f, &dummy, &dummy2); // Fixme: missing function __nv_sincosf // sincospif(f, &dummy, &dummy2); // Fixme: missing function __nv_sincospif b[i] += sinf(f); b[i] += sinhf(f); b[i] += sinpif(f); b[i] += sqrtf(f); b[i] += tanf(f); b[i] += tanhf(f); b[i] += tgammaf(f); b[i] += truncf(f); b[i] += y0f(f); b[i] += y1f(f); // b[i] += ynf(1,f); // Fixme: missing function __nv_ynf // 1.5 Single Presicion Intrinsics b[i] += __cosf(f); b[i] += __exp10f(f); b[i] += __expf(f); // b[i] += __fadd_rd(f, f); // Fixme: missing function __nv_fadd_rd // b[i] += __fadd_rn(f, f); // Fixme: missing function __nv_fadd_rn // b[i] += __fadd_ru(f, f); // Fixme: missing function __nv_fadd_ru // b[i] += __fadd_rz(f, f); // Fixme: missing function __nv_fadd_rz // b[i] += __fdiv_rd(f, f); // Fixme: missing function __nv_fdiv_rd // b[i] += __fdiv_rn(f, f); // Fixme: missing function __nv_fdiv_rn // b[i] += __fdiv_ru(f, f); // Fixme: missing function __nv_fdiv_ru // b[i] += __fdiv_rz(f, f); // Fixme: missing function __nv_fdiv_rz b[i] += __fdividef(f, f); // b[i] += __fmaf_rd(f, f, f); // Fixme: missing function __nv_fmaf_rd // b[i] += __fmaf_rn(f, f, f); // Fixme: missing function __nv_fmaf_rn // b[i] += __fmaf_ru(f, f, f); // Fixme: missing function __nv_fmaf_ru // b[i] += __fmaf_rz(f, f, f); // Fixme: missing function __nv_fmaf_rz // b[i] += __fmul_rd(f, f); // Fixme: missing function: __nv_fmul_rd // b[i] += __fmul_rn(f, f); // Fixme: missing function: __nv_fmul_rn // b[i] += __fmul_ru(f, f); // Fixme: missing function: __nv_fmul_ru // b[i] += __fmul_rz(f, f); // Fixme: missing function: __nv_fmul_rz // b[i] += __frcp_rd(f); // Fixme: missing function: __nv_frcp_rd // b[i] += __frcp_rn(f); // Fixme: missing function: __nv_frcp_rn // b[i] += __frcp_ru(f); // Fixme: missing function: __nv_frcp_ru // b[i] += __frcp_rz(f); // Fixme: missing function: __nv_frcp_rz // b[i] += __fsqrt_rd(f); // Fixme: missing function: __nv_fsqrt_rd // b[i] += __fsqrt_rn(f); // Fixme: missing function: __nv_fsqrt_rn // b[i] += __fsqrt_ru(f); // Fixme: missing function: __nv_fsqrt_ru // b[i] += __fsqrt_rz(f); // Fixme: missing function: __nv_fsqrt_rz // b[i] += __fsub_rd(f, f); // Fixme: missinf function: __nv_fsub_rd b[i] += __log10f(f); b[i] += __log2f(f); b[i] += __logf(f); b[i] += __powf(f, f); b[i] += __saturatef(f); // __sincosf(f, &dummy, &dummy2); // Fixme: indirect call error to __nv_fast_sincosf b[i] += __sinf(f); b[i] += __tanf(f); } } void printArray(float *array) { printf("["); bool first = true; for (int i = 0; i<N; ++i) { if (first) { printf("%f", array[i]); first = false; } else { printf(", %f", array[i]); } } printf("]"); } void printHipError(hipError_t error) { printf("Hip Error: %s\n", hipGetErrorString(error)); } bool hipCallSuccessful(hipError_t error) { if (error != hipSuccess) printHipError(error); return error == hipSuccess; } bool deviceCanCompute(int deviceID) { bool canCompute = false; hipDeviceProp_t deviceProp; bool devicePropIsAvailable = hipCallSuccessful(hipGetDeviceProperties(&deviceProp, deviceID)); if (devicePropIsAvailable) { canCompute = deviceProp.computeMode != hipComputeModeProhibited; if (!canCompute) printf("Compute mode is prohibited\n"); } return canCompute; } bool deviceIsAvailable(int *deviceID) { return hipCallSuccessful(hipGetDevice(deviceID)); } // We always use device 0 bool haveComputeDevice() { int deviceID = 0; return 
deviceIsAvailable(&deviceID) && deviceCanCompute(deviceID); } int main() { float hostArray[N]; if (!haveComputeDevice()) { printf("No compute device available\n"); return 0; } for (int i = 0; i<N; ++i) hostArray[i] = 0.0; printf("Array content before kernel:\n"); printArray(hostArray); printf("\n"); float *deviceArray; if (!hipCallSuccessful(hipMalloc((void **)&deviceArray, N*sizeof(float)))) { printf("Unable to allocate device memory\n"); return 0; } hipLaunchKernelGGL((testFloatMath), dim3(N), dim3(1), 0, 0, deviceArray); if (hipCallSuccessful(hipMemcpy(hostArray, deviceArray, N * sizeof(float), hipMemcpyDeviceToHost))) { printf("Array content after kernel:\n"); printArray(hostArray); printf("\n"); } else { printf("Unable to copy memory from device to host\n"); } hipFree(deviceArray); return 0; }
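The pairs in this set differ mainly in API spelling: hipify rewrites the triple-chevron kernel launch and the cuda* runtime calls into hipLaunchKernelGGL and hip* equivalents. A minimal sketch of that mapping, using a hypothetical `scale` kernel that is not part of any file in this set:

#include <cuda_runtime.h>

__global__ void scale(float *v, float s, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;   // one thread per element
    if (i < n) v[i] *= s;
}

int main() {
    const int n = 256;
    float *d = nullptr;
    cudaMalloc(&d, n * sizeof(float));               // cudaMalloc        <-> hipMalloc
    scale<<<(n + 127) / 128, 128>>>(d, 2.0f, n);     // kernel<<<g,b>>>() <-> hipLaunchKernelGGL(kernel, g, b, 0, 0, ...)
    cudaDeviceSynchronize();                         // cudaDeviceSynchronize <-> hipDeviceSynchronize
    cudaFree(d);                                     // cudaFree          <-> hipFree
    return 0;
}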
166ad631007946d188ff852ad2089e5bd05d2d25.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @generated from sparse-iter/blas/zlobpcg_residuals.cu normal z -> d, Tue Feb 9 16:05:41 2016 */ #include "magmasparse_internal.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 #define BLOCK_SIZEx 32 #define BLOCK_SIZEy 16 #define PRECISION_d // copied from dnrm2.cu in trunk/magmablas // ---------------------------------------- // Does sum reduction of array x, leaving total in x[0]. // Contents of x are destroyed in the process. // With k threads, can reduce array up to 2*k in size. // Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0) // Having n as template parameter allows compiler to evaluate some conditions at compile time. template< int n > __device__ void sum_reduce( /*int n,*/ int i, magmaDouble_ptr x ) { __syncthreads(); if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); } if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); } if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); } if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); } if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); } if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); } // probably don't need __syncthreads for < 16 threads // because of implicit warp level synchronization. if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); } if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); } if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); } if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); } if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); } } // end sum_reduce __global__ void magma_dlobpcg_res_kernel( magma_int_t num_rows, magma_int_t num_vecs, magmaDouble_ptr evals, double * X, double * R, magmaDouble_ptr res) { int row = blockIdx.x * blockDim.x + threadIdx.x; // global row index if ( row < num_rows) { for( int i=0; i < num_vecs; i++ ) { R[row + i*num_rows] = R[row + i*num_rows] + MAGMA_D_MAKE( -evals[i], 0.0 ) * X[ row + i*num_rows ]; } } } /* magmablas_dnrm2_kernel( int m, double * da, int ldda, double * dxnorm ) { const int i = threadIdx.x; magmaDouble_ptr dx = da + blockIdx.x * ldda; __shared__ double sum[ BLOCK_SIZE ]; double re, lsum; // get norm of dx lsum = 0; for( int j = i; j < m; j += BLOCK_SIZE ) { #if (defined(PRECISION_s) || defined(PRECISION_d)) re = dx[j]; lsum += re*re; #else re = MAGMA_D_REAL( dx[j] ); double im = MAGMA_D_IMAG( dx[j] ); lsum += re*re + im*im; #endif } sum[i] = lsum; sum_reduce< BLOCK_SIZE >( i, sum ); if (i==0) res[blockIdx.x] = sqrt(sum[0]); } */ /** Purpose ------- This routine computes for Block-LOBPCG, the set of residuals. R = Ax - x evalues It replaces: for(int i=0; i < n; i++) { magma_daxpy(m, MAGMA_D_MAKE(-evalues[i],0),blockX+i*m,1,blockR+i*m,1); } The memory layout of x is: / x1[0] x2[0] x3[0] \ | x1[1] x2[1] x3[1] | x = | x1[2] x2[2] x3[2] | = x1[0] x1[1] x1[2] x1[3] x1[4] x2[0] x2[1] . 
| x1[3] x2[3] x3[3] | \ x1[4] x2[4] x3[4] / Arguments --------- @param[in] num_rows magma_int_t number of rows @param[in] num_vecs magma_int_t number of vectors @param[in] evalues magmaDouble_ptr array of eigenvalues/approximations @param[in] X magmaDouble_ptr block of eigenvector approximations @param[in] R magmaDouble_ptr block of residuals @param[in] res magmaDouble_ptr array of residuals @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_daux ********************************************************************/ extern "C" magma_int_t magma_dlobpcg_res( magma_int_t num_rows, magma_int_t num_vecs, magmaDouble_ptr evalues, magmaDouble_ptr X, magmaDouble_ptr R, magmaDouble_ptr res, magma_queue_t queue ) { // every thread handles one row magma_int_t block_size = BLOCK_SIZE; dim3 threads( block_size ); dim3 grid( magma_ceildiv( num_rows, block_size ) ); hipLaunchKernelGGL(( magma_dlobpcg_res_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , num_rows, num_vecs, evalues, X, R, res ); return MAGMA_SUCCESS; }
166ad631007946d188ff852ad2089e5bd05d2d25.cu
/* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @generated from sparse-iter/blas/zlobpcg_residuals.cu normal z -> d, Tue Feb 9 16:05:41 2016 */ #include "magmasparse_internal.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 #define BLOCK_SIZEx 32 #define BLOCK_SIZEy 16 #define PRECISION_d // copied from dnrm2.cu in trunk/magmablas // ---------------------------------------- // Does sum reduction of array x, leaving total in x[0]. // Contents of x are destroyed in the process. // With k threads, can reduce array up to 2*k in size. // Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0) // Having n as template parameter allows compiler to evaluate some conditions at compile time. template< int n > __device__ void sum_reduce( /*int n,*/ int i, magmaDouble_ptr x ) { __syncthreads(); if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); } if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); } if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); } if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); } if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); } if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); } // probably don't need __syncthreads for < 16 threads // because of implicit warp level synchronization. if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); } if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); } if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); } if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); } if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); } } // end sum_reduce __global__ void magma_dlobpcg_res_kernel( magma_int_t num_rows, magma_int_t num_vecs, magmaDouble_ptr evals, double * X, double * R, magmaDouble_ptr res) { int row = blockIdx.x * blockDim.x + threadIdx.x; // global row index if ( row < num_rows) { for( int i=0; i < num_vecs; i++ ) { R[row + i*num_rows] = R[row + i*num_rows] + MAGMA_D_MAKE( -evals[i], 0.0 ) * X[ row + i*num_rows ]; } } } /* magmablas_dnrm2_kernel( int m, double * da, int ldda, double * dxnorm ) { const int i = threadIdx.x; magmaDouble_ptr dx = da + blockIdx.x * ldda; __shared__ double sum[ BLOCK_SIZE ]; double re, lsum; // get norm of dx lsum = 0; for( int j = i; j < m; j += BLOCK_SIZE ) { #if (defined(PRECISION_s) || defined(PRECISION_d)) re = dx[j]; lsum += re*re; #else re = MAGMA_D_REAL( dx[j] ); double im = MAGMA_D_IMAG( dx[j] ); lsum += re*re + im*im; #endif } sum[i] = lsum; sum_reduce< BLOCK_SIZE >( i, sum ); if (i==0) res[blockIdx.x] = sqrt(sum[0]); } */ /** Purpose ------- This routine computes for Block-LOBPCG, the set of residuals. R = Ax - x evalues It replaces: for(int i=0; i < n; i++) { magma_daxpy(m, MAGMA_D_MAKE(-evalues[i],0),blockX+i*m,1,blockR+i*m,1); } The memory layout of x is: / x1[0] x2[0] x3[0] \ | x1[1] x2[1] x3[1] | x = | x1[2] x2[2] x3[2] | = x1[0] x1[1] x1[2] x1[3] x1[4] x2[0] x2[1] . 
| x1[3] x2[3] x3[3] | \ x1[4] x2[4] x3[4] / Arguments --------- @param[in] num_rows magma_int_t number of rows @param[in] num_vecs magma_int_t number of vectors @param[in] evalues magmaDouble_ptr array of eigenvalues/approximations @param[in] X magmaDouble_ptr block of eigenvector approximations @param[in] R magmaDouble_ptr block of residuals @param[in] res magmaDouble_ptr array of residuals @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_daux ********************************************************************/ extern "C" magma_int_t magma_dlobpcg_res( magma_int_t num_rows, magma_int_t num_vecs, magmaDouble_ptr evalues, magmaDouble_ptr X, magmaDouble_ptr R, magmaDouble_ptr res, magma_queue_t queue ) { // every thread handles one row magma_int_t block_size = BLOCK_SIZE; dim3 threads( block_size ); dim3 grid( magma_ceildiv( num_rows, block_size ) ); magma_dlobpcg_res_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( num_rows, num_vecs, evalues, X, R, res ); return MAGMA_SUCCESS; }
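Both versions of this MAGMA file launch one thread per matrix row and sweep the eigenvector block column by column in column-major storage. A stripped-down restatement of that residual update, using plain doubles and a local ceildiv in place of the MAGMA types (a sketch, not MAGMA code):

#include <cuda_runtime.h>

__host__ __device__ inline int ceildiv(int a, int b) { return (a + b - 1) / b; }

__global__ void lobpcg_residual(int num_rows, int num_vecs,
                                const double *evals, const double *X, double *R) {
    int row = blockIdx.x * blockDim.x + threadIdx.x;   // global row index
    if (row < num_rows) {
        for (int i = 0; i < num_vecs; ++i)             // column i starts at offset i*num_rows
            R[row + i * num_rows] -= evals[i] * X[row + i * num_rows];
    }
}

// Launch mirrors the wrapper above:
//   dim3 threads(512);
//   dim3 grid(ceildiv(num_rows, 512));
//   lobpcg_residual<<<grid, threads, 0, stream>>>(num_rows, num_vecs, evals, X, R);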
d1f8980e8dcfadc6be759f6911aac78920574786.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <vector> #include "af/eigen_extension.h" #include <hip/hip_runtime.h> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/imgproc/imgproc.hpp> #include "af/helper.cuh" #include "af/dataset.h" #include "af/TSDFVolume.h" #include "af/MarchingCubes.h" #include <af/CameraModel.h> #include <fstream> #include <vector> #define STR1(x) #x #define STR(x) STR1(x) #define _VNAME(x) #x #define Print(x) std::cout<<_VNAME(x)<<":\n"<<x<<std::endl; typedef Eigen::Matrix<float, 6, 6> Mat6f; typedef Eigen::Matrix<float, 6, 1> Vec6f; bool depthToVertexMap(const Mat3f &K, const cv::Mat &depth, cv::Mat &vertexMap) { if (depth.type() != CV_32FC1 || depth.empty()) return false; int w = depth.cols; int h = depth.rows; vertexMap = cv::Mat::zeros(h, w, CV_32FC3); float fx = K(0, 0); float fy = K(1, 1); float cx = K(0, 2); float cy = K(1, 2); float fxInv = 1.0f / fx; float fyInv = 1.0f / fy; float* ptrVert = (float*)vertexMap.data; int tmp = 0; const float* ptrDepth = (const float*)depth.data; for (int y = 0; y < h; ++y) { for (int x = 0; x < w; ++x) { float depthMeter = ptrDepth[y*w + x]; float x0 = (float(x) - cx) * fxInv; float y0 = (float(y) - cy) * fyInv; size_t off = (y*w + x) * 3; ptrVert[off] = x0 * depthMeter; ptrVert[off+1] = y0 * depthMeter; ptrVert[off+2] = depthMeter; } } return true; } Vec3f centroid(const cv::Mat &vertexMap) { Vec3f centroid(0.0, 0.0, 0.0); size_t cnt = 0; for (int y = 0; y < vertexMap.rows; ++y) { for (int x = 0; x < vertexMap.cols; ++x) { cv::Vec3f pt = vertexMap.at<cv::Vec3f>(y, x); if (pt.val[2] > 0.0) { Vec3f pt3(pt.val[0], pt.val[1], pt.val[2]); centroid += pt3; ++cnt; } } } centroid /= float(cnt); return centroid; } int main(int argc, char *argv[]) { // default input sequence in folder std::string dataFolder() = std::string(STR(SDF2SDF_SOURCE_DIR)) + "/data/umbrella/data"; // parse command line parameters const char *params = { "{i|input| |input rgb-d sequence}" "{f|frames|551|number of frames to process (0=all)}" "{n|iterations|100|max number of GD iterations}" }; cv::CommandLineParser cmd(argc, argv, params); // input sequence // download from http://campar.in.tum.de/personal/slavcheva/3d-printed-dataset/index.html std::string inputSequence = cmd.get<std::string>("input"); if (inputSequence.empty()) { inputSequence = dataFolder(); } std::cout << "input sequence: " << inputSequence << std::endl; // number of frames to process size_t frames = (size_t)cmd.get<int>("frames"); std::cout << "# frames: " << frames << std::endl; // max number of GD iterations size_t iterations = (size_t)cmd.get<int>("iterations"); std::cout << "iterations: " << iterations << std::endl; // initialize cuda context hipDeviceSynchronize(); CUDA_CHECK; // load camera intrinsics Eigen::Matrix3f K; Eigen::Matrix3f Kcolor; if (!loadIntrinsics(inputSequence + "/depthIntrinsics.txt", K)) { std::cerr << "No depth intrinsics file found!" << std::endl; return 1; } if (!loadIntrinsics(inputSequence + "/colorIntrinsics.txt", K)) { std::cerr << "No color intrinsics file found!" 
<< std::endl; return 1; } std::cout << "K depth: " << std::endl << K << std::endl; std::cout << "K color: " << std::endl << Kcolor << std::endl; CameraModel::KDepth = K; CameraModel::KColor = Kcolor; // create tsdf volume Vec3i volDim(256, 256, 256); Vec3f volSize(0.2f, 0.2f, 0.2f); //Vec3f volSize(1.0f, 1.0f, 1.0f); Vec3f voxelSize = volSize.cwiseQuotient(volDim.cast<float>()); std::cout << "voxelSize: " << voxelSize.transpose() << std::endl; float delta = 0.02f; TSDFVolume* tsdfReference = new TSDFVolume(volDim, volSize, K); tsdfReference->setDelta(delta); TSDFVolume* tsdfCurrent; TSDFVolume* tsdfResult = new TSDFVolume(volDim, volSize, K); tsdfResult->setDelta(delta); // process frames Mat4f poseVolume = Mat4f::Identity(); cv::Mat color, depth, mask; std::ifstream myfile(inputSequence + "/synthetic_circle_poses.txt"); int iteration; std::vector<double> vec(16,0); Mat4f pose_gt; Mat4f pose_gt_global = Mat4f::Identity(); for (size_t i = 0; i < frames; ++i) { std::cout << "Frame " << i << "..." << std::endl; // load input frame if (!loadFrame(inputSequence, i, color, depth, mask)) { // std::cerr << "Frame " << i << " could not be loaded!" << std::endl; //return 1; break; } // filter depth values outside of mask filterDepth(mask, depth); // get initial volume pose from centroid of first depth map if (i == 0) { // initial pose for volume by computing centroid of first depth/vertex map cv::Mat vertMap; depthToVertexMap(K, depth, vertMap); Vec3f transCentroid = centroid(vertMap); poseVolume.topRightCorner<3,1>() = transCentroid; std::cout << "pose centroid" << std::endl << poseVolume << std::endl; tsdfReference->integrate(poseVolume, color, depth); tsdfResult->integrate(poseVolume, color, depth); myfile >> iteration; for(int i=0;i<16;++i){ myfile>>vec[i]; pose_gt(i/4, i%4) = vec[i]; } // Print(pose_gt); } else { tsdfCurrent = new TSDFVolume(volDim, volSize, K); tsdfCurrent->setDelta(delta); tsdfCurrent->integrate(poseVolume, color, depth); Mat4f found_pose = findPose(poseVolume, *tsdfReference, *tsdfCurrent, 0.00005, 0.002); pose_gt_global *= found_pose.inverse(); TSDFVolume* tsdf = new TSDFVolume(volDim, volSize, K); tsdf->setDelta(delta); tsdf->integrate(poseVolume, color, depth); construct_sdf(*tsdf, *tsdfResult, pose_gt_global); delete tsdf; TSDFVolume *temp = tsdfReference; tsdfReference = tsdfCurrent; tsdfCurrent = temp; delete tsdfCurrent; } } // extract mesh using marching cubes std::cout << "Extracting mesh..." << std::endl; MarchingCubes mc(volDim, volSize); Mesh outputMesh; mc.computeIsoSurface(outputMesh, tsdfResult->ptrTsdf(), tsdfResult->ptrTsdfWeights(), tsdfResult->ptrColorR(), tsdfResult->ptrColorG(), tsdfResult->ptrColorB()); // save mesh std::cout << "Saving mesh..." << std::endl; const std::string meshFilename = inputSequence + "/mesh3.ply"; if (!outputMesh.savePly(meshFilename)) { std::cerr << "Could not save mesh!" << std::endl; } // clean up //delete tsdfCurrent; delete tsdfReference; delete tsdfResult; cv::destroyAllWindows(); return 0; }
d1f8980e8dcfadc6be759f6911aac78920574786.cu
#include <iostream> #include <vector> #include "af/eigen_extension.h" #include <cuda_runtime.h> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/imgproc/imgproc.hpp> #include "af/helper.cuh" #include "af/dataset.h" #include "af/TSDFVolume.h" #include "af/MarchingCubes.h" #include <af/CameraModel.h> #include <fstream> #include <vector> #define STR1(x) #x #define STR(x) STR1(x) #define _VNAME(x) #x #define Print(x) std::cout<<_VNAME(x)<<":\n"<<x<<std::endl; typedef Eigen::Matrix<float, 6, 6> Mat6f; typedef Eigen::Matrix<float, 6, 1> Vec6f; bool depthToVertexMap(const Mat3f &K, const cv::Mat &depth, cv::Mat &vertexMap) { if (depth.type() != CV_32FC1 || depth.empty()) return false; int w = depth.cols; int h = depth.rows; vertexMap = cv::Mat::zeros(h, w, CV_32FC3); float fx = K(0, 0); float fy = K(1, 1); float cx = K(0, 2); float cy = K(1, 2); float fxInv = 1.0f / fx; float fyInv = 1.0f / fy; float* ptrVert = (float*)vertexMap.data; int tmp = 0; const float* ptrDepth = (const float*)depth.data; for (int y = 0; y < h; ++y) { for (int x = 0; x < w; ++x) { float depthMeter = ptrDepth[y*w + x]; float x0 = (float(x) - cx) * fxInv; float y0 = (float(y) - cy) * fyInv; size_t off = (y*w + x) * 3; ptrVert[off] = x0 * depthMeter; ptrVert[off+1] = y0 * depthMeter; ptrVert[off+2] = depthMeter; } } return true; } Vec3f centroid(const cv::Mat &vertexMap) { Vec3f centroid(0.0, 0.0, 0.0); size_t cnt = 0; for (int y = 0; y < vertexMap.rows; ++y) { for (int x = 0; x < vertexMap.cols; ++x) { cv::Vec3f pt = vertexMap.at<cv::Vec3f>(y, x); if (pt.val[2] > 0.0) { Vec3f pt3(pt.val[0], pt.val[1], pt.val[2]); centroid += pt3; ++cnt; } } } centroid /= float(cnt); return centroid; } int main(int argc, char *argv[]) { // default input sequence in folder std::string dataFolder() = std::string(STR(SDF2SDF_SOURCE_DIR)) + "/data/umbrella/data"; // parse command line parameters const char *params = { "{i|input| |input rgb-d sequence}" "{f|frames|551|number of frames to process (0=all)}" "{n|iterations|100|max number of GD iterations}" }; cv::CommandLineParser cmd(argc, argv, params); // input sequence // download from http://campar.in.tum.de/personal/slavcheva/3d-printed-dataset/index.html std::string inputSequence = cmd.get<std::string>("input"); if (inputSequence.empty()) { inputSequence = dataFolder(); } std::cout << "input sequence: " << inputSequence << std::endl; // number of frames to process size_t frames = (size_t)cmd.get<int>("frames"); std::cout << "# frames: " << frames << std::endl; // max number of GD iterations size_t iterations = (size_t)cmd.get<int>("iterations"); std::cout << "iterations: " << iterations << std::endl; // initialize cuda context cudaDeviceSynchronize(); CUDA_CHECK; // load camera intrinsics Eigen::Matrix3f K; Eigen::Matrix3f Kcolor; if (!loadIntrinsics(inputSequence + "/depthIntrinsics.txt", K)) { std::cerr << "No depth intrinsics file found!" << std::endl; return 1; } if (!loadIntrinsics(inputSequence + "/colorIntrinsics.txt", K)) { std::cerr << "No color intrinsics file found!" 
<< std::endl; return 1; } std::cout << "K depth: " << std::endl << K << std::endl; std::cout << "K color: " << std::endl << Kcolor << std::endl; CameraModel::KDepth = K; CameraModel::KColor = Kcolor; // create tsdf volume Vec3i volDim(256, 256, 256); Vec3f volSize(0.2f, 0.2f, 0.2f); //Vec3f volSize(1.0f, 1.0f, 1.0f); Vec3f voxelSize = volSize.cwiseQuotient(volDim.cast<float>()); std::cout << "voxelSize: " << voxelSize.transpose() << std::endl; float delta = 0.02f; TSDFVolume* tsdfReference = new TSDFVolume(volDim, volSize, K); tsdfReference->setDelta(delta); TSDFVolume* tsdfCurrent; TSDFVolume* tsdfResult = new TSDFVolume(volDim, volSize, K); tsdfResult->setDelta(delta); // process frames Mat4f poseVolume = Mat4f::Identity(); cv::Mat color, depth, mask; std::ifstream myfile(inputSequence + "/synthetic_circle_poses.txt"); int iteration; std::vector<double> vec(16,0); Mat4f pose_gt; Mat4f pose_gt_global = Mat4f::Identity(); for (size_t i = 0; i < frames; ++i) { std::cout << "Frame " << i << "..." << std::endl; // load input frame if (!loadFrame(inputSequence, i, color, depth, mask)) { // std::cerr << "Frame " << i << " could not be loaded!" << std::endl; //return 1; break; } // filter depth values outside of mask filterDepth(mask, depth); // get initial volume pose from centroid of first depth map if (i == 0) { // initial pose for volume by computing centroid of first depth/vertex map cv::Mat vertMap; depthToVertexMap(K, depth, vertMap); Vec3f transCentroid = centroid(vertMap); poseVolume.topRightCorner<3,1>() = transCentroid; std::cout << "pose centroid" << std::endl << poseVolume << std::endl; tsdfReference->integrate(poseVolume, color, depth); tsdfResult->integrate(poseVolume, color, depth); myfile >> iteration; for(int i=0;i<16;++i){ myfile>>vec[i]; pose_gt(i/4, i%4) = vec[i]; } // Print(pose_gt); } else { tsdfCurrent = new TSDFVolume(volDim, volSize, K); tsdfCurrent->setDelta(delta); tsdfCurrent->integrate(poseVolume, color, depth); Mat4f found_pose = findPose(poseVolume, *tsdfReference, *tsdfCurrent, 0.00005, 0.002); pose_gt_global *= found_pose.inverse(); TSDFVolume* tsdf = new TSDFVolume(volDim, volSize, K); tsdf->setDelta(delta); tsdf->integrate(poseVolume, color, depth); construct_sdf(*tsdf, *tsdfResult, pose_gt_global); delete tsdf; TSDFVolume *temp = tsdfReference; tsdfReference = tsdfCurrent; tsdfCurrent = temp; delete tsdfCurrent; } } // extract mesh using marching cubes std::cout << "Extracting mesh..." << std::endl; MarchingCubes mc(volDim, volSize); Mesh outputMesh; mc.computeIsoSurface(outputMesh, tsdfResult->ptrTsdf(), tsdfResult->ptrTsdfWeights(), tsdfResult->ptrColorR(), tsdfResult->ptrColorG(), tsdfResult->ptrColorB()); // save mesh std::cout << "Saving mesh..." << std::endl; const std::string meshFilename = inputSequence + "/mesh3.ply"; if (!outputMesh.savePly(meshFilename)) { std::cerr << "Could not save mesh!" << std::endl; } // clean up //delete tsdfCurrent; delete tsdfReference; delete tsdfResult; cv::destroyAllWindows(); return 0; }
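depthToVertexMap in both versions back-projects each pixel through the pinhole intrinsics K before the volume centroid is computed. The same formula sketched as a CUDA kernel for illustration; the original does this on the CPU, and the kernel name and float3 output layout are assumptions, not project API:

#include <cuda_runtime.h>

__global__ void backproject(const float *depth, float3 *verts, int w, int h,
                            float fx, float fy, float cx, float cy) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= w || y >= h) return;
    float d = depth[y * w + x];            // depth in meters
    float X = (x - cx) / fx * d;           // same math as the host loop: x0 = (x - cx) / fx
    float Y = (y - cy) / fy * d;           //                              y0 = (y - cy) / fy
    verts[y * w + x] = make_float3(X, Y, d);
}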
fd4f82e2bcbe286c1db39850092f5142cddf3ba6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "./amsoftmax-inl.h" #include <math.h> namespace mshadow { namespace cuda { #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) template<typename DType> __global__ void AmSoftmaxForwardKernel(const Tensor<gpu, 2, DType> x, const Tensor<gpu, 2, DType> w, const Tensor<gpu, 1, DType> label, Tensor<gpu, 2, DType> out, Tensor<gpu, 2, DType> oout, const DType margin, const DType s) { const int n = x.size(0); //batch size const int feature_dim = x.size(1); //embedding size, 512 for example const int m = w.size(0);//num classes const DType cos_m = cos(margin); const DType sin_m = sin(margin); CUDA_KERNEL_LOOP(i, n) { const int yi = static_cast<int>(label[i]); const DType fo_i_yi = out[i][yi]; oout[i][0] = fo_i_yi; if(fo_i_yi>=0.0) { const DType cos_t = fo_i_yi / s; const DType sin_t = sqrt(1.0-cos_t*cos_t); out[i][yi] = fo_i_yi*cos_m - (s*sin_t*sin_m); } } } template<typename DType> inline void AmSoftmaxForward(const Tensor<gpu, 2, DType> &x, const Tensor<gpu, 2, DType> &w, const Tensor<gpu, 1, DType> &label, const Tensor<gpu, 2, DType> &out, const Tensor<gpu, 2, DType> &oout, const DType margin, const DType s) { const int n = x.size(0); const int m = w.size(0); dim3 dimBlock(kBaseThreadNum); dim3 dimGrid((n + kBaseThreadNum - 1) / kBaseThreadNum); hipLaunchKernelGGL(( AmSoftmaxForwardKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, x, w, label, out, oout, margin, s); } template<typename DType> __global__ void AmSoftmaxBackwardXKernel(const Tensor<gpu, 2, DType> x, const Tensor<gpu, 2, DType> w, const Tensor<gpu, 1, DType> label, const Tensor<gpu, 2, DType> out, const Tensor<gpu, 2, DType> oout, const Tensor<gpu, 2, DType> o_grad, Tensor<gpu, 2, DType> x_grad, const Tensor<gpu, 2, DType> workspace, const DType margin, const DType s) { const int nthreads = x.size(0) * x.size(1); //const int nthreads = x.size(0); const int feature_dim = x.size(1); const DType cos_m = cos(margin); const DType nsin_m = sin(margin)*-1.0; const DType ss = s*s; CUDA_KERNEL_LOOP(idx, nthreads) { const int i = idx / feature_dim; const int l = idx % feature_dim; //const int i = idx; const int yi = static_cast<int>(label[i]); if(oout[i][0]>=0.0) { //x_grad[i][l] -= o_grad[i][yi] * w[yi][l]; //c = 1-cost*cost, = sint*sint const DType cost = oout[i][0]/s; const DType c = 1.0-cost*cost; const DType dc_dx = -2.0/ss*oout[i][0]*w[yi][l]; const DType d_sint_dc = 1.0/(2*sqrt(c)); const DType d_sint_dx = dc_dx*d_sint_dc; const DType df_dx = cos_m*w[yi][l] + s*nsin_m*d_sint_dx; x_grad[i][l] += o_grad[i][yi] * (df_dx - w[yi][l]); } } } template<typename DType> __global__ void AmSoftmaxBackwardWKernel(const Tensor<gpu, 2, DType> x, const Tensor<gpu, 2, DType> w, const Tensor<gpu, 1, DType> label, const Tensor<gpu, 2, DType> out, const Tensor<gpu, 2, DType> oout, const Tensor<gpu, 2, DType> o_grad, Tensor<gpu, 2, DType> w_grad, const Tensor<gpu, 2, DType> workspace, const DType margin, const DType s) { const int nthreads = w.size(0) * w.size(1); const int n = x.size(0); const int feature_dim = w.size(1); const DType cos_m = cos(margin); const DType nsin_m = sin(margin)*-1.0; const DType ss = s*s; CUDA_KERNEL_LOOP(idx, nthreads) { const int j = idx / feature_dim; const int l = idx % feature_dim; DType dw = 0; for (int i = 0; i < n; ++i) { const int yi = static_cast<int>(label[i]); if (yi == j&&oout[i][0]>=0.0) { const DType cost = oout[i][0]/s; const DType c = 
1.0-cost*cost; const DType dc_dw = -2.0/ss*oout[i][0]*x[i][l]; const DType d_sint_dc = 1.0/(2*sqrt(c)); const DType d_sint_dw = dc_dw*d_sint_dc; const DType df_dw = cos_m*x[i][l] + s*nsin_m*d_sint_dw; dw += o_grad[i][yi] * (df_dw - x[i][l]); } } w_grad[j][l] += dw; } } template<typename DType> inline void AmSoftmaxBackward(const Tensor<gpu, 2, DType> &x, const Tensor<gpu, 2, DType> &w, const Tensor<gpu, 1, DType> &label, const Tensor<gpu, 2, DType> &out, const Tensor<gpu, 2, DType> &oout, const Tensor<gpu, 2, DType> &o_grad, const Tensor<gpu, 2, DType> &x_grad, const Tensor<gpu, 2, DType> &w_grad, const Tensor<gpu, 2, DType> &workspace, const DType margin, const DType s) { const int n = x.size(0); const int feature_dim = x.size(1); const int m = w.size(0); dim3 dimBlock(kBaseThreadNum); dim3 dimGrid((n + kBaseThreadNum - 1) / kBaseThreadNum); dimGrid.x = ((n * feature_dim + kBaseThreadNum - 1) / kBaseThreadNum); hipLaunchKernelGGL(( AmSoftmaxBackwardXKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, x, w, label, out, oout, o_grad, x_grad, workspace, margin, s); dimGrid.x = ((m * feature_dim + kBaseThreadNum - 1) / kBaseThreadNum); hipLaunchKernelGGL(( AmSoftmaxBackwardWKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, x, w, label, out, oout, o_grad, w_grad, workspace, margin, s); } } // namespace cuda template<typename DType> inline void AmSoftmaxForward(const Tensor<gpu, 2, DType> &x, const Tensor<gpu, 2, DType> &w, const Tensor<gpu, 1, DType> &label, const Tensor<gpu, 2, DType> &out, const Tensor<gpu, 2, DType> &oout, const DType margin, const DType s) { cuda::AmSoftmaxForward(x, w, label, out, oout, margin, s); } template<typename DType> inline void AmSoftmaxBackward(const Tensor<gpu, 2, DType> &x, const Tensor<gpu, 2, DType> &w, const Tensor<gpu, 1, DType> &label, const Tensor<gpu, 2, DType> &out, const Tensor<gpu, 2, DType> &oout, const Tensor<gpu, 2, DType> &o_grad, const Tensor<gpu, 2, DType> &x_grad, const Tensor<gpu, 2, DType> &w_grad, const Tensor<gpu, 2, DType> &workspace, const DType margin, const DType s) { cuda::AmSoftmaxBackward(x, w, label, out, oout, o_grad, x_grad, w_grad, workspace, margin, s); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator *CreateOp<gpu>(AmSoftmaxParam param, int dtype) { Operator *op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new AmSoftmaxOp<gpu, DType>(param); }) return op; } } // namespace op } // namespace mxnet
fd4f82e2bcbe286c1db39850092f5142cddf3ba6.cu
#include "./amsoftmax-inl.h" #include <math.h> namespace mshadow { namespace cuda { #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) template<typename DType> __global__ void AmSoftmaxForwardKernel(const Tensor<gpu, 2, DType> x, const Tensor<gpu, 2, DType> w, const Tensor<gpu, 1, DType> label, Tensor<gpu, 2, DType> out, Tensor<gpu, 2, DType> oout, const DType margin, const DType s) { const int n = x.size(0); //batch size const int feature_dim = x.size(1); //embedding size, 512 for example const int m = w.size(0);//num classes const DType cos_m = cos(margin); const DType sin_m = sin(margin); CUDA_KERNEL_LOOP(i, n) { const int yi = static_cast<int>(label[i]); const DType fo_i_yi = out[i][yi]; oout[i][0] = fo_i_yi; if(fo_i_yi>=0.0) { const DType cos_t = fo_i_yi / s; const DType sin_t = sqrt(1.0-cos_t*cos_t); out[i][yi] = fo_i_yi*cos_m - (s*sin_t*sin_m); } } } template<typename DType> inline void AmSoftmaxForward(const Tensor<gpu, 2, DType> &x, const Tensor<gpu, 2, DType> &w, const Tensor<gpu, 1, DType> &label, const Tensor<gpu, 2, DType> &out, const Tensor<gpu, 2, DType> &oout, const DType margin, const DType s) { const int n = x.size(0); const int m = w.size(0); dim3 dimBlock(kBaseThreadNum); dim3 dimGrid((n + kBaseThreadNum - 1) / kBaseThreadNum); AmSoftmaxForwardKernel<<<dimGrid, dimBlock>>>(x, w, label, out, oout, margin, s); } template<typename DType> __global__ void AmSoftmaxBackwardXKernel(const Tensor<gpu, 2, DType> x, const Tensor<gpu, 2, DType> w, const Tensor<gpu, 1, DType> label, const Tensor<gpu, 2, DType> out, const Tensor<gpu, 2, DType> oout, const Tensor<gpu, 2, DType> o_grad, Tensor<gpu, 2, DType> x_grad, const Tensor<gpu, 2, DType> workspace, const DType margin, const DType s) { const int nthreads = x.size(0) * x.size(1); //const int nthreads = x.size(0); const int feature_dim = x.size(1); const DType cos_m = cos(margin); const DType nsin_m = sin(margin)*-1.0; const DType ss = s*s; CUDA_KERNEL_LOOP(idx, nthreads) { const int i = idx / feature_dim; const int l = idx % feature_dim; //const int i = idx; const int yi = static_cast<int>(label[i]); if(oout[i][0]>=0.0) { //x_grad[i][l] -= o_grad[i][yi] * w[yi][l]; //c = 1-cost*cost, = sint*sint const DType cost = oout[i][0]/s; const DType c = 1.0-cost*cost; const DType dc_dx = -2.0/ss*oout[i][0]*w[yi][l]; const DType d_sint_dc = 1.0/(2*sqrt(c)); const DType d_sint_dx = dc_dx*d_sint_dc; const DType df_dx = cos_m*w[yi][l] + s*nsin_m*d_sint_dx; x_grad[i][l] += o_grad[i][yi] * (df_dx - w[yi][l]); } } } template<typename DType> __global__ void AmSoftmaxBackwardWKernel(const Tensor<gpu, 2, DType> x, const Tensor<gpu, 2, DType> w, const Tensor<gpu, 1, DType> label, const Tensor<gpu, 2, DType> out, const Tensor<gpu, 2, DType> oout, const Tensor<gpu, 2, DType> o_grad, Tensor<gpu, 2, DType> w_grad, const Tensor<gpu, 2, DType> workspace, const DType margin, const DType s) { const int nthreads = w.size(0) * w.size(1); const int n = x.size(0); const int feature_dim = w.size(1); const DType cos_m = cos(margin); const DType nsin_m = sin(margin)*-1.0; const DType ss = s*s; CUDA_KERNEL_LOOP(idx, nthreads) { const int j = idx / feature_dim; const int l = idx % feature_dim; DType dw = 0; for (int i = 0; i < n; ++i) { const int yi = static_cast<int>(label[i]); if (yi == j&&oout[i][0]>=0.0) { const DType cost = oout[i][0]/s; const DType c = 1.0-cost*cost; const DType dc_dw = -2.0/ss*oout[i][0]*x[i][l]; const DType d_sint_dc = 1.0/(2*sqrt(c)); const DType d_sint_dw = 
dc_dw*d_sint_dc; const DType df_dw = cos_m*x[i][l] + s*nsin_m*d_sint_dw; dw += o_grad[i][yi] * (df_dw - x[i][l]); } } w_grad[j][l] += dw; } } template<typename DType> inline void AmSoftmaxBackward(const Tensor<gpu, 2, DType> &x, const Tensor<gpu, 2, DType> &w, const Tensor<gpu, 1, DType> &label, const Tensor<gpu, 2, DType> &out, const Tensor<gpu, 2, DType> &oout, const Tensor<gpu, 2, DType> &o_grad, const Tensor<gpu, 2, DType> &x_grad, const Tensor<gpu, 2, DType> &w_grad, const Tensor<gpu, 2, DType> &workspace, const DType margin, const DType s) { const int n = x.size(0); const int feature_dim = x.size(1); const int m = w.size(0); dim3 dimBlock(kBaseThreadNum); dim3 dimGrid((n + kBaseThreadNum - 1) / kBaseThreadNum); dimGrid.x = ((n * feature_dim + kBaseThreadNum - 1) / kBaseThreadNum); AmSoftmaxBackwardXKernel<<<dimGrid, dimBlock>>>(x, w, label, out, oout, o_grad, x_grad, workspace, margin, s); dimGrid.x = ((m * feature_dim + kBaseThreadNum - 1) / kBaseThreadNum); AmSoftmaxBackwardWKernel<<<dimGrid, dimBlock>>>(x, w, label, out, oout, o_grad, w_grad, workspace, margin, s); } } // namespace cuda template<typename DType> inline void AmSoftmaxForward(const Tensor<gpu, 2, DType> &x, const Tensor<gpu, 2, DType> &w, const Tensor<gpu, 1, DType> &label, const Tensor<gpu, 2, DType> &out, const Tensor<gpu, 2, DType> &oout, const DType margin, const DType s) { cuda::AmSoftmaxForward(x, w, label, out, oout, margin, s); } template<typename DType> inline void AmSoftmaxBackward(const Tensor<gpu, 2, DType> &x, const Tensor<gpu, 2, DType> &w, const Tensor<gpu, 1, DType> &label, const Tensor<gpu, 2, DType> &out, const Tensor<gpu, 2, DType> &oout, const Tensor<gpu, 2, DType> &o_grad, const Tensor<gpu, 2, DType> &x_grad, const Tensor<gpu, 2, DType> &w_grad, const Tensor<gpu, 2, DType> &workspace, const DType margin, const DType s) { cuda::AmSoftmaxBackward(x, w, label, out, oout, o_grad, x_grad, w_grad, workspace, margin, s); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator *CreateOp<gpu>(AmSoftmaxParam param, int dtype) { Operator *op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new AmSoftmaxOp<gpu, DType>(param); }) return op; } } // namespace op } // namespace mxnet
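The forward kernel in this pair treats the target logit as f = s*cos(t) and replaces it with s*cos(t+m) via the angle-addition identity cos(t+m) = cos(t)cos(m) - sin(t)sin(m). The same arithmetic isolated as a host-side reference (names are illustrative only, not part of the operator):

#include <cmath>

float margin_logit(float f, float s, float m) {
    float cos_t = f / s;                                  // recover cos(t) from the scaled logit
    float sin_t = std::sqrt(1.0f - cos_t * cos_t);
    return f * std::cos(m) - s * sin_t * std::sin(m);     // = s * cos(t + m)
}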
be981b19026e50bda3a765f424fafe23a4f5a62d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <unistd.h> #include <rocblas.h> #include <thrust/extrema.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <iostream> #include <stdlib.h> #define DSIZE 1000000 // nTPB should be a power-of-2 #define nTPB 1024 #define MAX_KERNEL_BLOCKS 65536 #define MAX_BLOCKS ((DSIZE/nTPB)+1) #define MIN(a,b) ((a>b)?b:a) #define FLOAT_MIN -1.0f #include <time.h> #include <sys/time.h> unsigned long long dtime_usec(unsigned long long prev){ #define USECPSEC 1000000ULL timeval tv1; gettimeofday(&tv1,0); return ((tv1.tv_sec * USECPSEC)+tv1.tv_usec) - prev; } __device__ volatile float blk_vals[MAX_BLOCKS]; __device__ volatile int blk_idxs[MAX_BLOCKS]; __device__ int blk_num = 0; template <typename T> __global__ void max_idx_kernel(const T *data, const int dsize, int *result){ __shared__ volatile T vals[nTPB]; __shared__ volatile int idxs[nTPB]; __shared__ volatile int last_block; int idx = threadIdx.x+blockDim.x*blockIdx.x; // idx is the current data's index last_block = 0; T my_val = FLOAT_MIN; int my_idx = -1; // sweep from global memory while (idx < dsize){ // idx is in the array if (data[idx] > my_val) { my_val = data[idx]; my_idx = idx; // find the max data } idx += blockDim.x*gridDim.x; // if the array is very long } // populate shared memory vals[threadIdx.x] = my_val; idxs[threadIdx.x] = my_idx; // load to the shared memory __syncthreads(); // sweep in shared memory for (int i = (nTPB>>1); i > 0; i>>=1) { // i = 512, 265, ... if (threadIdx.x < i) { // threadIdx.x is between 0 to 1023 if (vals[threadIdx.x] < vals[threadIdx.x + i]) { vals[threadIdx.x] = vals[threadIdx.x+i]; idxs[threadIdx.x] = idxs[threadIdx.x+i]; } } __syncthreads(); } // perform block-level reduction if (!threadIdx.x) { // threadIdx.x is 0 , and there stores the max value blk_vals[blockIdx.x] = vals[0]; blk_idxs[blockIdx.x] = idxs[0]; // store per block's max value into if (atomicAdd(&blk_num, 1) == gridDim.x - 1) // then I am the last block last_block = 1; } __syncthreads(); if (last_block){ idx = threadIdx.x; my_val = FLOAT_MIN; my_idx = -1; while (idx < gridDim.x){ if (blk_vals[idx] > my_val) { my_val = blk_vals[idx]; my_idx = blk_idxs[idx]; } idx += blockDim.x; } // populate shared memory vals[threadIdx.x] = my_val; idxs[threadIdx.x] = my_idx; __syncthreads(); // sweep in shared memory for (int i = (nTPB>>1); i > 0; i>>=1){ if (threadIdx.x < i) { if (vals[threadIdx.x] < vals[threadIdx.x + i]) { vals[threadIdx.x] = vals[threadIdx.x+i]; idxs[threadIdx.x] = idxs[threadIdx.x+i]; } } __syncthreads(); } if (!threadIdx.x) *result = idxs[0]; } } int main(){ int nrElements = DSIZE; float *d_vector, *h_vector; h_vector = new float[DSIZE]; for (int i = 0; i < DSIZE; i++) h_vector[i] = rand()/(float)RAND_MAX; h_vector[10] = 10; // create definite max element hipblasHandle_t my_handle; hipblasStatus_t my_status = hipblasCreate(&my_handle); hipMalloc(&d_vector, DSIZE*sizeof(float)); hipMemcpy(d_vector, h_vector, DSIZE*sizeof(float), hipMemcpyHostToDevice); int max_index = 0; int *d_max_index; hipMalloc(&d_max_index, sizeof(int)); unsigned long long dtime = dtime_usec(0); hipLaunchKernelGGL(( max_idx_kernel), dim3(MIN(MAX_KERNEL_BLOCKS, ((DSIZE+nTPB-1)/nTPB))), dim3(nTPB), 0, 0, d_vector, DSIZE, d_max_index); hipDeviceSynchronize(); dtime = dtime_usec(dtime); std::cout << "kernel time: " << dtime/(float)USECPSEC; hipMemcpy(&max_index, d_max_index, sizeof(int), hipMemcpyDeviceToHost); std::cout << " max index: " << max_index 
<< std::endl; return 0; }
be981b19026e50bda3a765f424fafe23a4f5a62d.cu
#include <unistd.h> #include <cublas_v2.h> #include <thrust/extrema.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <iostream> #include <stdlib.h> #define DSIZE 1000000 // nTPB should be a power-of-2 #define nTPB 1024 #define MAX_KERNEL_BLOCKS 65536 #define MAX_BLOCKS ((DSIZE/nTPB)+1) #define MIN(a,b) ((a>b)?b:a) #define FLOAT_MIN -1.0f #include <time.h> #include <sys/time.h> unsigned long long dtime_usec(unsigned long long prev){ #define USECPSEC 1000000ULL timeval tv1; gettimeofday(&tv1,0); return ((tv1.tv_sec * USECPSEC)+tv1.tv_usec) - prev; } __device__ volatile float blk_vals[MAX_BLOCKS]; __device__ volatile int blk_idxs[MAX_BLOCKS]; __device__ int blk_num = 0; template <typename T> __global__ void max_idx_kernel(const T *data, const int dsize, int *result){ __shared__ volatile T vals[nTPB]; __shared__ volatile int idxs[nTPB]; __shared__ volatile int last_block; int idx = threadIdx.x+blockDim.x*blockIdx.x; // idx is the current data's index last_block = 0; T my_val = FLOAT_MIN; int my_idx = -1; // sweep from global memory while (idx < dsize){ // idx is in the array if (data[idx] > my_val) { my_val = data[idx]; my_idx = idx; // find the max data } idx += blockDim.x*gridDim.x; // if the array is very long } // populate shared memory vals[threadIdx.x] = my_val; idxs[threadIdx.x] = my_idx; // load to the shared memory __syncthreads(); // sweep in shared memory for (int i = (nTPB>>1); i > 0; i>>=1) { // i = 512, 265, ... if (threadIdx.x < i) { // threadIdx.x is between 0 to 1023 if (vals[threadIdx.x] < vals[threadIdx.x + i]) { vals[threadIdx.x] = vals[threadIdx.x+i]; idxs[threadIdx.x] = idxs[threadIdx.x+i]; } } __syncthreads(); } // perform block-level reduction if (!threadIdx.x) { // threadIdx.x is 0 , and there stores the max value blk_vals[blockIdx.x] = vals[0]; blk_idxs[blockIdx.x] = idxs[0]; // store per block's max value into if (atomicAdd(&blk_num, 1) == gridDim.x - 1) // then I am the last block last_block = 1; } __syncthreads(); if (last_block){ idx = threadIdx.x; my_val = FLOAT_MIN; my_idx = -1; while (idx < gridDim.x){ if (blk_vals[idx] > my_val) { my_val = blk_vals[idx]; my_idx = blk_idxs[idx]; } idx += blockDim.x; } // populate shared memory vals[threadIdx.x] = my_val; idxs[threadIdx.x] = my_idx; __syncthreads(); // sweep in shared memory for (int i = (nTPB>>1); i > 0; i>>=1){ if (threadIdx.x < i) { if (vals[threadIdx.x] < vals[threadIdx.x + i]) { vals[threadIdx.x] = vals[threadIdx.x+i]; idxs[threadIdx.x] = idxs[threadIdx.x+i]; } } __syncthreads(); } if (!threadIdx.x) *result = idxs[0]; } } int main(){ int nrElements = DSIZE; float *d_vector, *h_vector; h_vector = new float[DSIZE]; for (int i = 0; i < DSIZE; i++) h_vector[i] = rand()/(float)RAND_MAX; h_vector[10] = 10; // create definite max element cublasHandle_t my_handle; cublasStatus_t my_status = cublasCreate(&my_handle); cudaMalloc(&d_vector, DSIZE*sizeof(float)); cudaMemcpy(d_vector, h_vector, DSIZE*sizeof(float), cudaMemcpyHostToDevice); int max_index = 0; int *d_max_index; cudaMalloc(&d_max_index, sizeof(int)); unsigned long long dtime = dtime_usec(0); max_idx_kernel<<<MIN(MAX_KERNEL_BLOCKS, ((DSIZE+nTPB-1)/nTPB)), nTPB>>>(d_vector, DSIZE, d_max_index); cudaDeviceSynchronize(); dtime = dtime_usec(dtime); std::cout << "kernel time: " << dtime/(float)USECPSEC; cudaMemcpy(&max_index, d_max_index, sizeof(int), cudaMemcpyDeviceToHost); std::cout << " max index: " << max_index << std::endl; return 0; }
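Both versions of this file include thrust/extrema.h but never call it. For comparison with the hand-rolled two-stage block reduction, the same argmax can be written with Thrust (a sketch; it assumes the data already resides on the device):

#include <thrust/extrema.h>
#include <thrust/device_ptr.h>

int thrust_argmax(const float *d_vector, int n) {
    thrust::device_ptr<const float> p(d_vector);
    thrust::device_ptr<const float> it = thrust::max_element(p, p + n);  // reduction runs on the GPU
    return static_cast<int>(it - p);                                     // index of the maximum element
}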
c9d07cff3e5434220c78417b2f14704ed50a9db3.hip
// !!! This is a file automatically generated by hipify!!! /*! Count triangles using the per-edge binary search */ #include <iostream> #include <roctracer/roctx.h> #include <clara/clara.hpp> #include <fmt/format.h> #include "pangolin/algorithm/tc_task_gpu.cuh" #include "pangolin/configure.hpp" #include "pangolin/file/bmtx_stream.hpp" #include "pangolin/init.hpp" #include "pangolin/sparse/csr_binned.hpp" struct RunOptions { std::string path; //!< path for graph std::string sep; //!< seperator for output std::vector<int> gpus; int dimBlock; int iters; bool taskSync; //!< sync after each kernel call bool readMostly; bool accessedBy; bool prefetchAsync; }; template <typename NodeIndex, typename EdgeIndex> int run(RunOptions &opts) { using namespace pangolin; typedef typename pangolin::DiEdge<NodeIndex> Edge; typedef TaskGPUTC TC; typedef TC::Task Task; auto gpus = opts.gpus; if (gpus.empty()) { LOG(warn, "no GPUs provided on command line, using GPU 0"); gpus.push_back(0); } // make streams std::vector<Stream> streams; for (const auto gpu : gpus) { streams.push_back(Stream(gpu)); } // read data auto start = std::chrono::system_clock::now(); auto bmtx = pangolin::open_bmtx_stream(opts.path); LOG(info, "{}: rows={} cols={} entries={}", opts.path, bmtx.num_rows(), bmtx.num_cols(), bmtx.nnz()); std::vector<Edge> edges; { decltype(bmtx)::edge_type we; while (bmtx.readEdge(we)) { edges.push_back(Edge(we.src, we.dst)); } } double elapsed = (std::chrono::system_clock::now() - start).count() / 1e9; LOG(info, "read_data time {}s", elapsed); LOG(debug, "read {} edges", edges.size()); start = std::chrono::system_clock::now(); CSRBinned<NodeIndex, EdgeIndex> csr(bmtx.num_rows(), bmtx.nnz()); for (const auto &edge : edges) { csr.add_next_edge(edge); } csr.finish_edges(); elapsed = (std::chrono::system_clock::now() - start).count() / 1e9; LOG(info, "csr time {}s", elapsed); // Make counters std::vector<TC> counters; for (size_t i = 0; i < gpus.size(); ++i) { counters.push_back(std::move(TC(streams[i].ref()))); } // Build tasks std::vector<Task> tasks; for (size_t i = 0; i < csr.num_partitions(); ++i) { for (size_t j = 0; j < csr.num_partitions(); ++j) { for (size_t k = 0; k < csr.num_partitions(); ++k) { tasks.push_back({i, j, k}); } } } LOG(info, "{} tasks", tasks.size()); size_t gpuIdx = 0; for (const auto task : tasks) { auto &tc = counters[gpuIdx]; LOG(debug, "task {} {} {} on counter {}", task.i, task.j, task.k, gpuIdx); roctxRangePush(fmt::format("{} {} {}", task.i, task.j, task.k).c_str()); tc.count_async(csr.two_col_view(task.j, task.k), task); if (opts.taskSync) { tc.sync(); LOG(debug, "finished task {} {} {}", task.i, task.j, task.k, gpuIdx); } roctxRangePop(); gpuIdx = (gpuIdx + 1) % gpus.size(); } uint64_t total = 0; for (auto &counter : counters) { LOG(info, "waiting on GPU {}", counter.device()); counter.sync(); total += counter.count(); } fmt::print("{}\n", total); return 0; } void print_header(const RunOptions &opts) { fmt::print("benchmark{0}bs{0}graph{0}nodes{0}edges{0}tris", opts.sep); for (int i = 0; i < opts.iters; ++i) { fmt::print("{}readMostly{}", opts.sep, i); } for (int i = 0; i < opts.iters; ++i) { fmt::print("{}accessedBy{}", opts.sep, i); } for (int i = 0; i < opts.iters; ++i) { fmt::print("{}prefetchAsync{}", opts.sep, i); } for (int i = 0; i < opts.iters; ++i) { fmt::print("{}count{}", opts.sep, i); } for (int i = 0; i < opts.iters; ++i) { fmt::print("{}count_teps{}", opts.sep, i); } fmt::print("\n"); } int main(int argc, char **argv) { pangolin::init(); RunOptions opts; 
opts.sep = ","; opts.dimBlock = 512; opts.iters = 1; opts.readMostly = false; opts.accessedBy = false; opts.prefetchAsync = false; opts.taskSync = false; bool help = false; bool debug = false; bool verbose = false; bool onlyPrintHeader = false; bool wide = false; clara::Parser cli; cli = cli | clara::Help(help); cli = cli | clara::Opt(debug)["--debug"]("print debug messages to stderr"); cli = cli | clara::Opt(verbose)["--verbose"]("print verbose messages to stderr"); cli = cli | clara::Opt(onlyPrintHeader)["--header"]("print the header for the times output and quit"); cli = cli | clara::Opt(wide)["--wide"]("64-bit node IDs"); cli = cli | clara::Opt(opts.gpus, "dev ids")["-g"]("gpus to use"); cli = cli | clara::Opt(opts.dimBlock, "block-dim")["--bs"]("Number of threads in a block"); cli = cli | clara::Opt(opts.readMostly)["--read-mostly"]("mark data as read-mostly by all gpus before kernel"); cli = cli | clara::Opt(opts.accessedBy)["--accessed-by"]("mark data as accessed-by all GPUs before kernel"); cli = cli | clara::Opt(opts.prefetchAsync)["--prefetch-async"]("prefetch data to all GPUs before kernel"); cli = cli | clara::Opt(opts.taskSync)["--task-sync"]("sync stream after each task"); cli = cli | clara::Opt(opts.iters, "N")["-n"]("number of counts"); cli = cli | clara::Arg(opts.path, "graph file")("Path to adjacency list").required(); auto result = cli.parse(clara::Args(argc, argv)); if (!result) { LOG(error, "Error in command line: {}", result.errorMessage()); exit(1); } if (help) { std::cout << cli; return 0; } // set logging level if (verbose) { pangolin::logger::set_level(pangolin::logger::Level::TRACE); } else if (debug) { pangolin::logger::set_level(pangolin::logger::Level::DEBUG); } // log command line before much else happens { std::string cmd; for (int i = 0; i < argc; ++i) { if (i != 0) { cmd += " "; } cmd += argv[i]; } LOG(debug, cmd); } LOG(debug, "pangolin version: {}.{}.{}", PANGOLIN_VERSION_MAJOR, PANGOLIN_VERSION_MINOR, PANGOLIN_VERSION_PATCH); LOG(debug, "pangolin branch: {}", PANGOLIN_GIT_REFSPEC); LOG(debug, "pangolin sha: {}", PANGOLIN_GIT_HASH); LOG(debug, "pangolin changes: {}", PANGOLIN_GIT_LOCAL_CHANGES); #ifndef NDEBUG LOG(warn, "Not a release build"); #endif if (onlyPrintHeader) { print_header(opts); return 0; } if (wide) { return run<uint32_t, uint64_t>(opts); } else { return run<uint32_t, uint32_t>(opts); } }
c9d07cff3e5434220c78417b2f14704ed50a9db3.cu
/*! Count triangles using the per-edge binary search */ #include <iostream> #include <nvToolsExt.h> #include <clara/clara.hpp> #include <fmt/format.h> #include "pangolin/algorithm/tc_task_gpu.cuh" #include "pangolin/configure.hpp" #include "pangolin/file/bmtx_stream.hpp" #include "pangolin/init.hpp" #include "pangolin/sparse/csr_binned.hpp" struct RunOptions { std::string path; //!< path for graph std::string sep; //!< seperator for output std::vector<int> gpus; int dimBlock; int iters; bool taskSync; //!< sync after each kernel call bool readMostly; bool accessedBy; bool prefetchAsync; }; template <typename NodeIndex, typename EdgeIndex> int run(RunOptions &opts) { using namespace pangolin; typedef typename pangolin::DiEdge<NodeIndex> Edge; typedef TaskGPUTC TC; typedef TC::Task Task; auto gpus = opts.gpus; if (gpus.empty()) { LOG(warn, "no GPUs provided on command line, using GPU 0"); gpus.push_back(0); } // make streams std::vector<Stream> streams; for (const auto gpu : gpus) { streams.push_back(Stream(gpu)); } // read data auto start = std::chrono::system_clock::now(); auto bmtx = pangolin::open_bmtx_stream(opts.path); LOG(info, "{}: rows={} cols={} entries={}", opts.path, bmtx.num_rows(), bmtx.num_cols(), bmtx.nnz()); std::vector<Edge> edges; { decltype(bmtx)::edge_type we; while (bmtx.readEdge(we)) { edges.push_back(Edge(we.src, we.dst)); } } double elapsed = (std::chrono::system_clock::now() - start).count() / 1e9; LOG(info, "read_data time {}s", elapsed); LOG(debug, "read {} edges", edges.size()); start = std::chrono::system_clock::now(); CSRBinned<NodeIndex, EdgeIndex> csr(bmtx.num_rows(), bmtx.nnz()); for (const auto &edge : edges) { csr.add_next_edge(edge); } csr.finish_edges(); elapsed = (std::chrono::system_clock::now() - start).count() / 1e9; LOG(info, "csr time {}s", elapsed); // Make counters std::vector<TC> counters; for (size_t i = 0; i < gpus.size(); ++i) { counters.push_back(std::move(TC(streams[i].ref()))); } // Build tasks std::vector<Task> tasks; for (size_t i = 0; i < csr.num_partitions(); ++i) { for (size_t j = 0; j < csr.num_partitions(); ++j) { for (size_t k = 0; k < csr.num_partitions(); ++k) { tasks.push_back({i, j, k}); } } } LOG(info, "{} tasks", tasks.size()); size_t gpuIdx = 0; for (const auto task : tasks) { auto &tc = counters[gpuIdx]; LOG(debug, "task {} {} {} on counter {}", task.i, task.j, task.k, gpuIdx); nvtxRangePush(fmt::format("{} {} {}", task.i, task.j, task.k).c_str()); tc.count_async(csr.two_col_view(task.j, task.k), task); if (opts.taskSync) { tc.sync(); LOG(debug, "finished task {} {} {}", task.i, task.j, task.k, gpuIdx); } nvtxRangePop(); gpuIdx = (gpuIdx + 1) % gpus.size(); } uint64_t total = 0; for (auto &counter : counters) { LOG(info, "waiting on GPU {}", counter.device()); counter.sync(); total += counter.count(); } fmt::print("{}\n", total); return 0; } void print_header(const RunOptions &opts) { fmt::print("benchmark{0}bs{0}graph{0}nodes{0}edges{0}tris", opts.sep); for (int i = 0; i < opts.iters; ++i) { fmt::print("{}readMostly{}", opts.sep, i); } for (int i = 0; i < opts.iters; ++i) { fmt::print("{}accessedBy{}", opts.sep, i); } for (int i = 0; i < opts.iters; ++i) { fmt::print("{}prefetchAsync{}", opts.sep, i); } for (int i = 0; i < opts.iters; ++i) { fmt::print("{}count{}", opts.sep, i); } for (int i = 0; i < opts.iters; ++i) { fmt::print("{}count_teps{}", opts.sep, i); } fmt::print("\n"); } int main(int argc, char **argv) { pangolin::init(); RunOptions opts; opts.sep = ","; opts.dimBlock = 512; opts.iters = 1; opts.readMostly = 
false; opts.accessedBy = false; opts.prefetchAsync = false; opts.taskSync = false; bool help = false; bool debug = false; bool verbose = false; bool onlyPrintHeader = false; bool wide = false; clara::Parser cli; cli = cli | clara::Help(help); cli = cli | clara::Opt(debug)["--debug"]("print debug messages to stderr"); cli = cli | clara::Opt(verbose)["--verbose"]("print verbose messages to stderr"); cli = cli | clara::Opt(onlyPrintHeader)["--header"]("print the header for the times output and quit"); cli = cli | clara::Opt(wide)["--wide"]("64-bit node IDs"); cli = cli | clara::Opt(opts.gpus, "dev ids")["-g"]("gpus to use"); cli = cli | clara::Opt(opts.dimBlock, "block-dim")["--bs"]("Number of threads in a block"); cli = cli | clara::Opt(opts.readMostly)["--read-mostly"]("mark data as read-mostly by all gpus before kernel"); cli = cli | clara::Opt(opts.accessedBy)["--accessed-by"]("mark data as accessed-by all GPUs before kernel"); cli = cli | clara::Opt(opts.prefetchAsync)["--prefetch-async"]("prefetch data to all GPUs before kernel"); cli = cli | clara::Opt(opts.taskSync)["--task-sync"]("sync stream after each task"); cli = cli | clara::Opt(opts.iters, "N")["-n"]("number of counts"); cli = cli | clara::Arg(opts.path, "graph file")("Path to adjacency list").required(); auto result = cli.parse(clara::Args(argc, argv)); if (!result) { LOG(error, "Error in command line: {}", result.errorMessage()); exit(1); } if (help) { std::cout << cli; return 0; } // set logging level if (verbose) { pangolin::logger::set_level(pangolin::logger::Level::TRACE); } else if (debug) { pangolin::logger::set_level(pangolin::logger::Level::DEBUG); } // log command line before much else happens { std::string cmd; for (int i = 0; i < argc; ++i) { if (i != 0) { cmd += " "; } cmd += argv[i]; } LOG(debug, cmd); } LOG(debug, "pangolin version: {}.{}.{}", PANGOLIN_VERSION_MAJOR, PANGOLIN_VERSION_MINOR, PANGOLIN_VERSION_PATCH); LOG(debug, "pangolin branch: {}", PANGOLIN_GIT_REFSPEC); LOG(debug, "pangolin sha: {}", PANGOLIN_GIT_HASH); LOG(debug, "pangolin changes: {}", PANGOLIN_GIT_LOCAL_CHANGES); #ifndef NDEBUG LOG(warn, "Not a release build"); #endif if (onlyPrintHeader) { print_header(opts); return 0; } if (wide) { return run<uint32_t, uint64_t>(opts); } else { return run<uint32_t, uint32_t>(opts); } }
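run() in this pair creates one stream per GPU and hands the (i, j, k) tasks out round-robin, then waits on every counter at the end. The same dispatch pattern sketched with the raw CUDA runtime instead of pangolin::Stream; the work kernel and integer task are placeholders, not project API:

#include <cuda_runtime.h>
#include <vector>

__global__ void work(int task) { /* placeholder per-task kernel */ }

void dispatch(const std::vector<int> &gpus, int num_tasks) {
    std::vector<cudaStream_t> streams(gpus.size());
    for (size_t g = 0; g < gpus.size(); ++g) {
        cudaSetDevice(gpus[g]);                    // one stream per device
        cudaStreamCreate(&streams[g]);
    }
    size_t g = 0;
    for (int t = 0; t < num_tasks; ++t) {          // round-robin, like gpuIdx = (gpuIdx + 1) % gpus.size()
        cudaSetDevice(gpus[g]);
        work<<<1, 1, 0, streams[g]>>>(t);
        g = (g + 1) % gpus.size();
    }
    for (size_t i = 0; i < gpus.size(); ++i) {     // drain all devices, like counter.sync()
        cudaSetDevice(gpus[i]);
        cudaStreamSynchronize(streams[i]);
        cudaStreamDestroy(streams[i]);
    }
}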
bdf9cdbbda39ef069193e8f455ca23a86eb16a43.hip
// !!! This is a file automatically generated by hipify!!! #include <backend/base/base_broadcast_v2.h> #include "device_launch_parameters.h" #include <hip/hip_runtime.h> #include <kernels/gpu/operator_on_gpu.h> #include "kernels/gpu/cuda_context.h" #include "utils/ctxmgr_lite.h" #include "core/device_context.h" #ifdef TS_USE_CUBLAS #include "kernels/gpu/math_cublas.h" #else #include "kernels/gpu/math_gpu.h" #endif #include "kernels/gpu/gpu_kernel.h" #include "global/operator_factory.h" #include "global/fp16_operator_factory.h" #include "backend/name.h" #include <numeric> namespace ts { namespace gpu { class BroadcastV2 : public OperatorOnGPU<base::BroadcastV2> { public: using self = BroadcastV2; using supper = OperatorOnGPU<base::BroadcastV2>; BroadcastV2() = default; void broadcast(const Tensor &x, Tensor &out) override; void broad_with_bias(const Tensor &x, Tensor &out, int dim) override; void broadcast_with_scalar(const Tensor &x, Tensor &out) override; }; template<typename T> static __global__ void gpu_broadcast_kernel(int count, const T *C, T *out, GpuHypeShape C_shape, GpuHypeShape out_shape) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= count) return; int out_index = index; int in_index = 0; auto out_weight_it = out_shape.weights + 1; auto in_weight_it = C_shape.weights + 1; /* ============================================ */ auto in_shape_it = C_shape.shape; /* ============================================ */ for (int times = out_shape.dims - 1; times; --times) { auto coord = index / *out_weight_it; /* ============================================ */ coord %= *in_shape_it; ++in_shape_it; /* ============================================ */ in_index += coord * *in_weight_it; index %= *out_weight_it; ++out_weight_it; ++in_weight_it; } auto coord = index; /* ============================================ */ coord %= *in_shape_it; /* ============================================ */ in_index += coord; /* ++++++++++++++++++++++++++++++++++++++++++++ */ out[out_index] = C[in_index]; } template<typename T> static inline void gpu_broadcast_compute_run(const Tensor &C, Tensor &out) { auto gpu_hype_shape = MakeGPUHypeShape(C.device(), {C.sizes(), out.sizes()}); auto &C_hype_shape = gpu_hype_shape.second[0]; auto &out_hype_shape = gpu_hype_shape.second[1]; auto count = out.count(); RUN_KERNEL(gpu_broadcast_kernel<T>, CUDA_BLOCK(count, CUDA_THREAD_NUM), CUDA_THREAD_NUM, count, C.data<T>(), out.data<T>(), C_hype_shape, out_hype_shape); } template<typename T> static __global__ void gpu_broadcast_with_scalar_kernel(int N, T *dst, T val) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= N) return; dst[index] = val; } template<typename T> static inline void gpu_broadcast_with_scalar(const Tensor &x, Tensor &out) { auto val = x.data<T>()[0]; auto pout = out.data<T>(); auto count = out.count(); RUN_KERNEL(gpu_broadcast_with_scalar_kernel<T>, CUDA_BLOCK(count, CUDA_THREAD_NUM), CUDA_THREAD_NUM, count, pout, val); } template<typename T> static __global__ void gpu_broadcast_with_bias_kernel(int N, T *dst, int channels, int count, const T *px) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= N) return; auto c = index / count % channels; dst[index] = px[c]; } template<typename T> static inline void gpu_broadcast_with_bias(const Tensor &x, Tensor &out, int dim) { auto px = x.data<T>(); auto pout = out.data<T>(); auto &out_shape = out.sizes(); // auto number = std::accumulate(out_shape.begin(), out_shape.begin() + dim, 1, std::multiplies<int>()); auto count = 
std::accumulate(out_shape.begin() + dim + 1, out_shape.end(), 1, std::multiplies<int>()); auto channels = out_shape[dim]; auto N = out.count(); RUN_KERNEL(gpu_broadcast_with_bias_kernel<T>, CUDA_BLOCK(N, CUDA_THREAD_NUM), CUDA_THREAD_NUM, N, pout, channels, count, px); } void BroadcastV2::broadcast(const Tensor &x, Tensor &out) { DTYPE dtype = out.dtype(); switch (type_bytes(dtype)) { #define DECLARE_COMPUTE_RUN(WIDTH, TYPE) case WIDTH: { gpu_broadcast_compute_run<TYPE>(x, out); break; } DECLARE_COMPUTE_RUN(1, uint8_t) DECLARE_COMPUTE_RUN(2, uint16_t) DECLARE_COMPUTE_RUN(4, uint32_t) DECLARE_COMPUTE_RUN(8, uint64_t) #undef DECLARE_COMPUTE_RUN default: { TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject; break; } } } void BroadcastV2::broad_with_bias(const Tensor &x, Tensor &out, int dim) { DTYPE dtype = out.dtype(); switch (type_bytes(dtype)) { #define DECLARE_COMPUTE_RUN(WIDTH, TYPE) case WIDTH: { gpu_broadcast_with_bias<TYPE>(x, out, dim); break; } DECLARE_COMPUTE_RUN(1, uint8_t) DECLARE_COMPUTE_RUN(2, uint16_t) DECLARE_COMPUTE_RUN(4, uint32_t) DECLARE_COMPUTE_RUN(8, uint64_t) #undef DECLARE_COMPUTE_RUN default: { TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject; break; } } } void BroadcastV2::broadcast_with_scalar(const Tensor &x, Tensor &out) { DTYPE dtype = out.dtype(); switch (type_bytes(dtype)) { #define DECLARE_COMPUTE_RUN(WIDTH, TYPE) case WIDTH: { gpu_broadcast_with_scalar<TYPE>(x, out); break; } DECLARE_COMPUTE_RUN(1, uint8_t) DECLARE_COMPUTE_RUN(2, uint16_t) DECLARE_COMPUTE_RUN(4, uint32_t) DECLARE_COMPUTE_RUN(8, uint64_t) #undef DECLARE_COMPUTE_RUN default: { TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject; break; } } } } } using namespace ts; using namespace gpu; TS_REGISTER_OPERATOR(BroadcastV2, GPU, "broadcast") #ifdef TS_USE_CUDA_FP16 TS_REGISTER_FP16_OPERATOR(BroadcastV2, GPU, "broadcast") #endif
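gpu_broadcast_with_bias_kernel above recovers the channel of a flat output index by dividing out the product of the trailing dimensions and wrapping over the channel count. That index arithmetic on its own, as a minimal standalone kernel (illustrative names, not the TensorStack API):

#include <cuda_runtime.h>

__global__ void broadcast_bias(int n, float *dst, int channels, int inner, const float *bias) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    int c = (i / inner) % channels;   // strip the inner stride, wrap over the channel axis
    dst[i] = bias[c];                 // every element of channel c gets bias[c]
}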
bdf9cdbbda39ef069193e8f455ca23a86eb16a43.cu
#include <backend/base/base_broadcast_v2.h> #include "device_launch_parameters.h" #include <cuda_runtime.h> #include <kernels/gpu/operator_on_gpu.h> #include "kernels/gpu/cuda_context.h" #include "utils/ctxmgr_lite.h" #include "core/device_context.h" #ifdef TS_USE_CUBLAS #include "kernels/gpu/math_cublas.h" #else #include "kernels/gpu/math_gpu.h" #endif #include "kernels/gpu/gpu_kernel.h" #include "global/operator_factory.h" #include "global/fp16_operator_factory.h" #include "backend/name.h" #include <numeric> namespace ts { namespace gpu { class BroadcastV2 : public OperatorOnGPU<base::BroadcastV2> { public: using self = BroadcastV2; using supper = OperatorOnGPU<base::BroadcastV2>; BroadcastV2() = default; void broadcast(const Tensor &x, Tensor &out) override; void broad_with_bias(const Tensor &x, Tensor &out, int dim) override; void broadcast_with_scalar(const Tensor &x, Tensor &out) override; }; template<typename T> static __global__ void gpu_broadcast_kernel(int count, const T *C, T *out, GpuHypeShape C_shape, GpuHypeShape out_shape) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= count) return; int out_index = index; int in_index = 0; auto out_weight_it = out_shape.weights + 1; auto in_weight_it = C_shape.weights + 1; /* ============================================ */ auto in_shape_it = C_shape.shape; /* ============================================ */ for (int times = out_shape.dims - 1; times; --times) { auto coord = index / *out_weight_it; /* ============================================ */ coord %= *in_shape_it; ++in_shape_it; /* ============================================ */ in_index += coord * *in_weight_it; index %= *out_weight_it; ++out_weight_it; ++in_weight_it; } auto coord = index; /* ============================================ */ coord %= *in_shape_it; /* ============================================ */ in_index += coord; /* ++++++++++++++++++++++++++++++++++++++++++++ */ out[out_index] = C[in_index]; } template<typename T> static inline void gpu_broadcast_compute_run(const Tensor &C, Tensor &out) { auto gpu_hype_shape = MakeGPUHypeShape(C.device(), {C.sizes(), out.sizes()}); auto &C_hype_shape = gpu_hype_shape.second[0]; auto &out_hype_shape = gpu_hype_shape.second[1]; auto count = out.count(); RUN_KERNEL(gpu_broadcast_kernel<T>, CUDA_BLOCK(count, CUDA_THREAD_NUM), CUDA_THREAD_NUM, count, C.data<T>(), out.data<T>(), C_hype_shape, out_hype_shape); } template<typename T> static __global__ void gpu_broadcast_with_scalar_kernel(int N, T *dst, T val) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= N) return; dst[index] = val; } template<typename T> static inline void gpu_broadcast_with_scalar(const Tensor &x, Tensor &out) { auto val = x.data<T>()[0]; auto pout = out.data<T>(); auto count = out.count(); RUN_KERNEL(gpu_broadcast_with_scalar_kernel<T>, CUDA_BLOCK(count, CUDA_THREAD_NUM), CUDA_THREAD_NUM, count, pout, val); } template<typename T> static __global__ void gpu_broadcast_with_bias_kernel(int N, T *dst, int channels, int count, const T *px) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= N) return; auto c = index / count % channels; dst[index] = px[c]; } template<typename T> static inline void gpu_broadcast_with_bias(const Tensor &x, Tensor &out, int dim) { auto px = x.data<T>(); auto pout = out.data<T>(); auto &out_shape = out.sizes(); // auto number = std::accumulate(out_shape.begin(), out_shape.begin() + dim, 1, std::multiplies<int>()); auto count = std::accumulate(out_shape.begin() + dim + 1, out_shape.end(), 1, 
std::multiplies<int>()); auto channels = out_shape[dim]; auto N = out.count(); RUN_KERNEL(gpu_broadcast_with_bias_kernel<T>, CUDA_BLOCK(N, CUDA_THREAD_NUM), CUDA_THREAD_NUM, N, pout, channels, count, px); } void BroadcastV2::broadcast(const Tensor &x, Tensor &out) { DTYPE dtype = out.dtype(); switch (type_bytes(dtype)) { #define DECLARE_COMPUTE_RUN(WIDTH, TYPE) case WIDTH: { gpu_broadcast_compute_run<TYPE>(x, out); break; } DECLARE_COMPUTE_RUN(1, uint8_t) DECLARE_COMPUTE_RUN(2, uint16_t) DECLARE_COMPUTE_RUN(4, uint32_t) DECLARE_COMPUTE_RUN(8, uint64_t) #undef DECLARE_COMPUTE_RUN default: { TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject; break; } } } void BroadcastV2::broad_with_bias(const Tensor &x, Tensor &out, int dim) { DTYPE dtype = out.dtype(); switch (type_bytes(dtype)) { #define DECLARE_COMPUTE_RUN(WIDTH, TYPE) case WIDTH: { gpu_broadcast_with_bias<TYPE>(x, out, dim); break; } DECLARE_COMPUTE_RUN(1, uint8_t) DECLARE_COMPUTE_RUN(2, uint16_t) DECLARE_COMPUTE_RUN(4, uint32_t) DECLARE_COMPUTE_RUN(8, uint64_t) #undef DECLARE_COMPUTE_RUN default: { TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject; break; } } } void BroadcastV2::broadcast_with_scalar(const Tensor &x, Tensor &out) { DTYPE dtype = out.dtype(); switch (type_bytes(dtype)) { #define DECLARE_COMPUTE_RUN(WIDTH, TYPE) case WIDTH: { gpu_broadcast_with_scalar<TYPE>(x, out); break; } DECLARE_COMPUTE_RUN(1, uint8_t) DECLARE_COMPUTE_RUN(2, uint16_t) DECLARE_COMPUTE_RUN(4, uint32_t) DECLARE_COMPUTE_RUN(8, uint64_t) #undef DECLARE_COMPUTE_RUN default: { TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject; break; } } } } } using namespace ts; using namespace gpu; TS_REGISTER_OPERATOR(BroadcastV2, GPU, "broadcast") #ifdef TS_USE_CUDA_FP16 TS_REGISTER_FP16_OPERATOR(BroadcastV2, GPU, "broadcast") #endif
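The pair above is dominated by the index arithmetic in gpu_broadcast_kernel, which walks the output "weights" to recover per-dimension coordinates and folds them back into an input offset, wrapping every coordinate by the input extent so size-1 dimensions broadcast. The sketch below is an illustrative host-side restatement of that mapping, not code from either file: the helper name, the example shapes {1,3,1} -> {2,3,4}, and the use of conventional strides (last stride 1) are all assumptions, and GpuHypeShape's exact weight layout (the `+ 1` offsets in the kernel suggest slot 0 holds the total element count) is not reproduced here.

// Illustrative sketch only (hypothetical helper, conventional strides with last stride 1).
#include <cstdio>

static int broadcast_source_index(int out_index,
                                  const int* out_strides, const int* in_strides,
                                  const int* in_shape, int dims) {
    int in_index = 0;
    for (int d = 0; d < dims; ++d) {
        int coord = out_index / out_strides[d];  // coordinate of this output element along dim d
        out_index %= out_strides[d];
        coord %= in_shape[d];                    // wraps to 0 wherever the input extent is 1
        in_index += coord * in_strides[d];
    }
    return in_index;
}

int main() {
    // input shape {1,3,1} broadcast to output shape {2,3,4}
    const int out_strides[3] = {12, 4, 1}, in_strides[3] = {3, 1, 1}, in_shape[3] = {1, 3, 1};
    for (int i = 0; i < 24; ++i)
        printf("out %2d reads in %d\n", i, broadcast_source_index(i, out_strides, in_strides, in_shape, 3));
    return 0;
}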
2658bb16ec4a217ec036853b1bfb22582c4c4e33.hip
// !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/HIPApplyUtils.cuh>

namespace {

using namespace at;

template<typename scalar_t>
void kl_div_backward_kernel(const Tensor& grad_input, const Tensor& target, const Tensor& grad) {
  at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, scalar_t>(
      grad_input,
      target,
      grad,
      [] __device__(
          scalar_t& grad_input_val,
          const scalar_t& target_val,
          const scalar_t& grad_val) {
        if (target_val > 0) {
          grad_input_val = -target_val * grad_val;
        }
      });
}

} // namespace

namespace at { namespace native {

Tensor kl_div_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, int64_t reduction) {
  auto grad_input = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  Tensor grad_expand = grad.expand_as(input);
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "kl_div_backward_cuda", [&]() {
    kl_div_backward_kernel<scalar_t>(grad_input, target, grad_expand);
  });
  if (reduction == at::Reduction::Mean) {
    return grad_input / input.numel();
  }
  return grad_input;
}

}} // namespace at::native
2658bb16ec4a217ec036853b1bfb22582c4c4e33.cu
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>

namespace {

using namespace at;

template<typename scalar_t>
void kl_div_backward_kernel(const Tensor& grad_input, const Tensor& target, const Tensor& grad) {
  at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, scalar_t>(
      grad_input,
      target,
      grad,
      [] __device__(
          scalar_t& grad_input_val,
          const scalar_t& target_val,
          const scalar_t& grad_val) {
        if (target_val > 0) {
          grad_input_val = -target_val * grad_val;
        }
      });
}

} // namespace

namespace at { namespace native {

Tensor kl_div_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, int64_t reduction) {
  auto grad_input = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  Tensor grad_expand = grad.expand_as(input);
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "kl_div_backward_cuda", [&]() {
    kl_div_backward_kernel<scalar_t>(grad_input, target, grad_expand);
  });
  if (reduction == at::Reduction::Mean) {
    return grad_input / input.numel();
  }
  return grad_input;
}

}} // namespace at::native
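The guarded write grad_input_val = -target_val * grad_val in both versions follows from the pointwise form of kl_div in ATen, which treats input as log-probabilities; a short derivation (my restatement, not taken from the source tree):

\[
\ell(x, y) = y\,(\log y - x), \qquad \frac{\partial \ell}{\partial x} = -y,
\]

so by the chain rule grad_input = -target * grad, and entries with target <= 0 simply keep the zero written by at::zeros_like.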
9d6ea3a9538b8e5e8e2ba8eea58b28d6c4be8ded.hip
// !!! This is a file automatically generated by hipify!!!
#include "gaussian_multi_gpu_rdma.h"

void _report(hipError_t result, char const *const func, const char *const file, int const line) {
    if(result) {
        fprintf(stderr, "CUDA error at %s:%d code = %d (%s) \"%s\" \n",
                file, line, result, hipGetErrorString(result), func);
        // Make sure we call CUDA Device Reset before exiting
        hipDeviceReset();
        exit(-1);
    }
}

int setupPeerToPeer(int GPUCount) {
    int canAccessPeer;

    for(int i = 0; i < GPUCount; i++) {
        checkCudaErrors( hipSetDevice(i) );
    }

    for(int i = 0; i < GPUCount; i++) {
        checkCudaErrors( hipSetDevice(i) );
        for(int j = 0; j < GPUCount; j++) {
            if(i == j) continue;
            checkCudaErrors( hipDeviceCanAccessPeer(&canAccessPeer, i, j) );
            if(canAccessPeer) {
                printf("Can access memory of device %d from device %d\n", j, i);
                checkCudaErrors( hipDeviceEnablePeerAccess(j, 0) );
            } else {
                printf("Can not access memory of device %d from device %d\n", j, i);
                return 0;
            }
        }
    }

    return 1;
}

int testPeerToPeer(int GPUCount) {
    char** buffer;
    int buffersize = 1024 * sizeof(char);

    buffer = (char**) malloc(GPUCount * sizeof(char*));
    for(int i = 0; i < GPUCount; i++) {
        checkCudaErrors( hipSetDevice(i) );
        checkCudaErrors( hipMalloc((void**)&buffer[i], buffersize) );
    }

    for(int i = 0; i < GPUCount; i++) {
        for(int j = 0; j < GPUCount; j++) {
            if(i == j) continue;
            checkCudaErrors( hipMemcpyPeer(buffer[i], i, buffer[j], j, buffersize) );
        }
    }

    for(int i = 0; i < GPUCount; i++) {
        checkCudaErrors( hipFree(buffer[i]) );
    }

    return 1;
}
9d6ea3a9538b8e5e8e2ba8eea58b28d6c4be8ded.cu
#include "gaussian_multi_gpu_rdma.h" void _report(cudaError_t result, char const *const func, const char *const file, int const line) { if(result) { fprintf(stderr, "CUDA error at %s:%d code = %d (%s) \"%s\" \n", file, line, result, cudaGetErrorString(result), func); // Make sure we call CUDA Device Reset before exiting cudaDeviceReset(); exit(-1); } } int setupPeerToPeer(int GPUCount) { int canAccessPeer; for(int i = 0; i < GPUCount; i++) { checkCudaErrors( cudaSetDevice(i) ); } for(int i = 0; i < GPUCount; i++) { checkCudaErrors( cudaSetDevice(i) ); for(int j = 0; j < GPUCount; j++) { if(i == j) continue; checkCudaErrors( cudaDeviceCanAccessPeer(&canAccessPeer, i, j) ); if(canAccessPeer) { printf("Can access memory of device %d from device %d\n", j, i); checkCudaErrors( cudaDeviceEnablePeerAccess(j, 0) ); } else { printf("Can not access memory of device %d from device %d\n", j, i); return 0; } } } return 1; } int testPeerToPeer(int GPUCount) { char** buffer; int buffersize = 1024 * sizeof(char); buffer = (char**) malloc(GPUCount * sizeof(char*)); for(int i = 0; i < GPUCount; i++) { checkCudaErrors( cudaSetDevice(i) ); checkCudaErrors( cudaMalloc((void**)&buffer[i], buffersize) ); } for(int i = 0; i < GPUCount; i++) { for(int j = 0; j < GPUCount; j++) { if(i == j) continue; checkCudaErrors( cudaMemcpyPeer(buffer[i], i, buffer[j], j, buffersize) ); } } for(int i = 0; i < GPUCount; i++) { checkCudaErrors( cudaFree(buffer[i]) ); } return 1; }
3b646a34765e610ba74294d78330e31dd5b8f788.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>

#include "common.h"
#include "thrust.h"

namespace StreamCompaction {
    namespace Thrust {
        using StreamCompaction::Common::PerformanceTimer;
        PerformanceTimer& timer()
        {
            static PerformanceTimer timer;
            return timer;
        }
        /**
         * Performs prefix-sum (aka scan) on idata, storing the result into odata.
         */
        void scan(int n, int *odata, const int *idata) {
            int *dev_idata, *dev_odata;
            hipMalloc((void**)&dev_idata, n * sizeof(int));
            hipMemcpy(dev_idata, idata, n * sizeof(int), hipMemcpyHostToDevice);
            hipMalloc((void**)&dev_odata, n * sizeof(int));

            thrust::device_ptr<int> dev_thrust_odata(dev_odata);
            thrust::device_ptr<int> dev_thrust_idata(dev_idata);

            timer().startGpuTimer();
            // TODO use `thrust::exclusive_scan`
            // example: for device_vectors dv_in and dv_out:
            thrust::exclusive_scan(dev_thrust_idata, dev_thrust_idata + n, dev_thrust_odata);
            timer().endGpuTimer();

            hipMemcpy(odata, thrust::raw_pointer_cast(dev_thrust_odata), n * sizeof(int), hipMemcpyDeviceToHost);
        }
    }
}
3b646a34765e610ba74294d78330e31dd5b8f788.cu
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>

#include "common.h"
#include "thrust.h"

namespace StreamCompaction {
    namespace Thrust {
        using StreamCompaction::Common::PerformanceTimer;
        PerformanceTimer& timer()
        {
            static PerformanceTimer timer;
            return timer;
        }
        /**
         * Performs prefix-sum (aka scan) on idata, storing the result into odata.
         */
        void scan(int n, int *odata, const int *idata) {
            int *dev_idata, *dev_odata;
            cudaMalloc((void**)&dev_idata, n * sizeof(int));
            cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
            cudaMalloc((void**)&dev_odata, n * sizeof(int));

            thrust::device_ptr<int> dev_thrust_odata(dev_odata);
            thrust::device_ptr<int> dev_thrust_idata(dev_idata);

            timer().startGpuTimer();
            // TODO use `thrust::exclusive_scan`
            // example: for device_vectors dv_in and dv_out:
            thrust::exclusive_scan(dev_thrust_idata, dev_thrust_idata + n, dev_thrust_odata);
            timer().endGpuTimer();

            cudaMemcpy(odata, thrust::raw_pointer_cast(dev_thrust_odata), n * sizeof(int), cudaMemcpyDeviceToHost);
        }
    }
}
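hipify leaves the Thrust calls in this pair untouched (rocThrust ships the same headers and names); only the raw runtime allocation and copy calls around them are renamed, and neither version ever frees dev_idata or dev_odata. The sketch below, with the invented name scan_with_vectors, shows the same exclusive scan written with thrust::device_vector so storage is released automatically; it is an alternative sketch, not the project's code.

// Sketch only: the same exclusive scan via thrust::device_vector (identical under CUDA and HIP).
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/copy.h>

void scan_with_vectors(int n, int* odata, const int* idata) {
    thrust::device_vector<int> d_in(idata, idata + n);   // host -> device copy
    thrust::device_vector<int> d_out(n);
    thrust::exclusive_scan(d_in.begin(), d_in.end(), d_out.begin());
    thrust::copy(d_out.begin(), d_out.end(), odata);     // device -> host copy
}   // d_in and d_out free their device memory here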
c6f843dcfe2fe69091c4ee710ba63340b65b2923.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <helper_cuda.h> #include "cipher.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdlib.h> #include <stdio.h> #include <math.h> #include <time.h> #define NUM_OF_THREADS 1024 // maximum number of threads per block // run on the kernel __global__ void addCalculateKernel(char *cipher_text, int text_len, unsigned char *key, int key_len_as_byts, int* d_global_all_ascii) { int tid = threadIdx.x; int charId = tid; // shared memory is allocated between threads in a block __shared__ int shared_non_ascii_counter; __shared__ int maximum_non_ascii_allowed; //__shared__ unsigned int key_as_int; if (tid == 0) { shared_non_ascii_counter = 0; maximum_non_ascii_allowed = text_len * MAX_PERCENTAGE_NON_PRINTABLE; // we are allowing some percentage of not-printable ascii if (maximum_non_ascii_allowed <= 0) { maximum_non_ascii_allowed = 1; } /* key_as_int = 0; for (int i = 0; i < key_len_as_byts; i++) { key_as_int *= 256; key_as_int += key[i]; } */ } // sync threads in the same block __syncthreads(); while (charId < text_len) { // out of the borders of the text if (shared_non_ascii_counter < maximum_non_ascii_allowed) { // checks if the number of non-ascii allowed not reached to the maximum cipher_text[charId] ^= key[charId % (key_len_as_byts)]; //if isn't printable ascii characters // 13 and 10 together are "\n". 32 to 126 are all the printable ascii characters. if (!(cipher_text[charId] == LINE_FEED || cipher_text[charId] == CARRIAGE_RETURN || (MIN_PRINTABLE <= cipher_text[charId] && cipher_text[charId] <= MAX_PRINTABLE))) { //if (key_as_int != 111) atomicAdd(&shared_non_ascii_counter, 1); // used only one block at a time //else // printf("key %u, char id: %d, not good int: %u\n", key_as_int, charId, (unsigned char)cipher_text[charId]); } } charId += NUM_OF_THREADS; } // sync threads __syncthreads(); if (tid == 0) { // checks if the number of non-ascii allowed reached to the maximum if (shared_non_ascii_counter > maximum_non_ascii_allowed) { *d_global_all_ascii = 0; } } } int cuda_calc_plain(char *cipher_text, int text_len, unsigned int key_as_int, int key_len_as_byts, char** plain_text) { char *d_temp_text = 0; unsigned char* d_temp_key = 0; int* d_global_all_ascii = 0; int cpu_all_ascii = 1; int num_block = 1;// text_len / NUM_OF_THREADS + 1; hipError_t cudaStatus; int i = key_len_as_byts-1; *plain_text = (char*)calloc(text_len, sizeof(char)); if (!(*plain_text)) { printf("calloc for plain_text faliled."); return -1; } unsigned char* key = (unsigned char*)calloc(key_len_as_byts, sizeof(unsigned char)); if (!key) { printf("calloc for key faliled."); return -1; } while (key_as_int > 0) { key[i] = (unsigned char) (key_as_int % 256); // get the first right byte into the key[i] key_as_int /= 256; // remove the first right byte i--; } // Choose which GPU to run on cudaStatus = hipSetDevice(0); // allocated space - create d_global_all_ascii for cuda cudaStatus = hipMalloc((void**) &d_global_all_ascii, sizeof(int)); if (cudaStatus != hipSuccess) { printf("hipMalloc all_ascii failed."); return -1; } //reset to 1 in all_ascii in gpu cudaStatus = hipMemcpy(d_global_all_ascii, &cpu_all_ascii, sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { printf("hipMemcpy failed."); return -1; } // create d_temp_text for cuda cudaStatus = hipMalloc((void**) &d_temp_text, text_len * sizeof(char)); if (cudaStatus != hipSuccess) { printf("hipMalloc d_temp_text failed."); return -1; } 
//Copy cipher text from cpu to gpu cudaStatus = hipMemcpy(d_temp_text, cipher_text, text_len * sizeof(char), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { printf("hipMemcpy failed."); return -1; } // create d_temp_key for cuda cudaStatus = hipMalloc((void**) &d_temp_key, key_len_as_byts * sizeof(unsigned char)); if (cudaStatus != hipSuccess) { printf("hipMalloc d_temp_key failed."); return -1; } //Copy key from cpu to gpu cudaStatus = hipMemcpy(d_temp_key, key, key_len_as_byts * sizeof(unsigned char), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { printf("hipMemcpy failed."); return -1; } //Launch a kernel on the GPU with 1024 threads for every block // num_block = text_len / NUM_OF_THREADS +1 // NUM_OF_THREADS = 1024 hipLaunchKernelGGL(( addCalculateKernel), dim3(num_block), dim3(NUM_OF_THREADS), 0, 0, d_temp_text, text_len, d_temp_key, key_len_as_byts, d_global_all_ascii); //Copy all_ascii from gpu to cpu cudaStatus = hipMemcpy(&cpu_all_ascii, d_global_all_ascii, sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { printf("hipMemcpy failed."); return -1; } if(cpu_all_ascii){ //Copy plain text from gpu to cpu cudaStatus = hipMemcpy(*plain_text, d_temp_text, text_len * sizeof(char), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { printf("hipMemcpy failed."); return -1; } } // free memory hipFree(d_temp_text); hipFree(d_temp_key); hipFree(d_global_all_ascii); free(key); // return the status that says if all is printable ascii (0-no 1-yes) return cpu_all_ascii; }
c6f843dcfe2fe69091c4ee710ba63340b65b2923.cu
#include <cuda_runtime.h> #include <helper_cuda.h> #include "cipher.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdlib.h> #include <stdio.h> #include <math.h> #include <time.h> #define NUM_OF_THREADS 1024 // maximum number of threads per block // run on the kernel __global__ void addCalculateKernel(char *cipher_text, int text_len, unsigned char *key, int key_len_as_byts, int* d_global_all_ascii) { int tid = threadIdx.x; int charId = tid; // shared memory is allocated between threads in a block __shared__ int shared_non_ascii_counter; __shared__ int maximum_non_ascii_allowed; //__shared__ unsigned int key_as_int; if (tid == 0) { shared_non_ascii_counter = 0; maximum_non_ascii_allowed = text_len * MAX_PERCENTAGE_NON_PRINTABLE; // we are allowing some percentage of not-printable ascii if (maximum_non_ascii_allowed <= 0) { maximum_non_ascii_allowed = 1; } /* key_as_int = 0; for (int i = 0; i < key_len_as_byts; i++) { key_as_int *= 256; key_as_int += key[i]; } */ } // sync threads in the same block __syncthreads(); while (charId < text_len) { // out of the borders of the text if (shared_non_ascii_counter < maximum_non_ascii_allowed) { // checks if the number of non-ascii allowed not reached to the maximum cipher_text[charId] ^= key[charId % (key_len_as_byts)]; //if isn't printable ascii characters // 13 and 10 together are "\n". 32 to 126 are all the printable ascii characters. if (!(cipher_text[charId] == LINE_FEED || cipher_text[charId] == CARRIAGE_RETURN || (MIN_PRINTABLE <= cipher_text[charId] && cipher_text[charId] <= MAX_PRINTABLE))) { //if (key_as_int != 111) atomicAdd(&shared_non_ascii_counter, 1); // used only one block at a time //else // printf("key %u, char id: %d, not good int: %u\n", key_as_int, charId, (unsigned char)cipher_text[charId]); } } charId += NUM_OF_THREADS; } // sync threads __syncthreads(); if (tid == 0) { // checks if the number of non-ascii allowed reached to the maximum if (shared_non_ascii_counter > maximum_non_ascii_allowed) { *d_global_all_ascii = 0; } } } int cuda_calc_plain(char *cipher_text, int text_len, unsigned int key_as_int, int key_len_as_byts, char** plain_text) { char *d_temp_text = 0; unsigned char* d_temp_key = 0; int* d_global_all_ascii = 0; int cpu_all_ascii = 1; int num_block = 1;// text_len / NUM_OF_THREADS + 1; cudaError_t cudaStatus; int i = key_len_as_byts-1; *plain_text = (char*)calloc(text_len, sizeof(char)); if (!(*plain_text)) { printf("calloc for plain_text faliled."); return -1; } unsigned char* key = (unsigned char*)calloc(key_len_as_byts, sizeof(unsigned char)); if (!key) { printf("calloc for key faliled."); return -1; } while (key_as_int > 0) { key[i] = (unsigned char) (key_as_int % 256); // get the first right byte into the key[i] key_as_int /= 256; // remove the first right byte i--; } // Choose which GPU to run on cudaStatus = cudaSetDevice(0); // allocated space - create d_global_all_ascii for cuda cudaStatus = cudaMalloc((void**) &d_global_all_ascii, sizeof(int)); if (cudaStatus != cudaSuccess) { printf("cudaMalloc all_ascii failed."); return -1; } //reset to 1 in all_ascii in gpu cudaStatus = cudaMemcpy(d_global_all_ascii, &cpu_all_ascii, sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { printf("cudaMemcpy failed."); return -1; } // create d_temp_text for cuda cudaStatus = cudaMalloc((void**) &d_temp_text, text_len * sizeof(char)); if (cudaStatus != cudaSuccess) { printf("cudaMalloc d_temp_text failed."); return -1; } //Copy cipher text from cpu to gpu cudaStatus = 
cudaMemcpy(d_temp_text, cipher_text, text_len * sizeof(char), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { printf("cudaMemcpy failed."); return -1; } // create d_temp_key for cuda cudaStatus = cudaMalloc((void**) &d_temp_key, key_len_as_byts * sizeof(unsigned char)); if (cudaStatus != cudaSuccess) { printf("cudaMalloc d_temp_key failed."); return -1; } //Copy key from cpu to gpu cudaStatus = cudaMemcpy(d_temp_key, key, key_len_as_byts * sizeof(unsigned char), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { printf("cudaMemcpy failed."); return -1; } //Launch a kernel on the GPU with 1024 threads for every block // num_block = text_len / NUM_OF_THREADS +1 // NUM_OF_THREADS = 1024 addCalculateKernel<<<num_block, NUM_OF_THREADS>>>(d_temp_text, text_len, d_temp_key, key_len_as_byts, d_global_all_ascii); //Copy all_ascii from gpu to cpu cudaStatus = cudaMemcpy(&cpu_all_ascii, d_global_all_ascii, sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { printf("cudaMemcpy failed."); return -1; } if(cpu_all_ascii){ //Copy plain text from gpu to cpu cudaStatus = cudaMemcpy(*plain_text, d_temp_text, text_len * sizeof(char), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { printf("cudaMemcpy failed."); return -1; } } // free memory cudaFree(d_temp_text); cudaFree(d_temp_key); cudaFree(d_global_all_ascii); free(key); // return the status that says if all is printable ascii (0-no 1-yes) return cpu_all_ascii; }
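The most visible rewrite in this pair, and throughout the dataset, is the kernel-launch syntax: the triple-chevron launch in the .cu file becomes a hipLaunchKernelGGL call with explicit grid, block, shared-memory, and stream arguments in the .hip file. A minimal illustration follows, using an invented scale kernel rather than anything from the cipher code.

// Illustrative sketch only (not from either file above).
__global__ void scale(float* x, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

void launch_scale(float* d_x, float a, int n) {
    int threads = 256;
    int blocks = (n + threads - 1) / threads;
    // CUDA form (as in the .cu file):
    scale<<<blocks, threads>>>(d_x, a, n);
    // HIP form produced by hipify (it also wraps the kernel name in extra parentheses):
    //   hipLaunchKernelGGL(scale, dim3(blocks), dim3(threads), 0, 0, d_x, a, n);
}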
347fdee77ee191fd1ef1bea27cbb413296848bdd.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <iomanip> #include <vector> #include <cmath> #include <string> #include <fstream> // Solver Configuration #define __MPGOS_PERTHREAD_SOLVER_DDE4 //RK4 solver #define __MPGOS_PERTHREAD_PRECISION double #define __MPGOS_PERTHREAD_NT 1024 // NumberOfThreads #define __MPGOS_PERTHREAD_SD 1 // SystemDimension #define __MPGOS_PERTHREAD_DOD 1 // DenseDimension #define __MPGOS_PERTHREAD_NDELAY 2 // NumberOfDelays #define __MPGOS_PERTHREAD_NCP 1 // ControlParameters #define __MPGOS_PERTHREAD_NDO 2005 // NumberOfPointsOfDenseOutput #include "SingleSystem_PerThread_DataStructures.cuh" #include "logistic2_SystemDefinition.cuh" #include "SingleSystem_PerThread_Interface.cuh" using namespace std; #define PI 3.14159265358979323846 void Linspace(vector<double>&, double, double, int); void Discretize(vector<double>&, double f(double), vector<double>); double f0(double t) { if (t < -1.5) return std::cos(4 * PI * t); if (t < -std::sqrt(2.0)) return t * t; if (t < -1.101) return ::exp(t); if (t < -0.5) return 0; return t + 0.5; } double fd0(double t) { if (t < -1.5) return -4 * PI * std::sin(4 * PI * t); if (t < -std::sqrt(2.0)) return 2 * t; if (t < -1.101) return ::exp(t); if (t < -0.5) return 0; return 1.0; } int main() { //run configuration int NumberOfProblems = __MPGOS_PERTHREAD_NT; int BlockSize = 32; int CUDAdevice = 0; PrintPropertiesOfSpecificDevice(CUDAdevice); //parameters and initial conditions double tmin = 0.0; double tmax = 10.0; double dt = 0.001; double pmin = 0; double pmax = 2; vector<double> p(NumberOfProblems,0); Linspace(p,pmin,pmax, NumberOfProblems); //initial points int pointsInit = 200; double tInitMin = -2.0; double tInitMax = 0; vector<double> t0list(pointsInit,0); vector<double> x0list(pointsInit,0); vector<double> xd0list(pointsInit,0); Linspace(t0list,tInitMin,tInitMax, pointsInit); Discretize(x0list,f0,t0list); Discretize(xd0list,fd0,t0list); //initialize solver ProblemSolver Solver(CUDAdevice); //Set Solver options Solver.SolverOption(ThreadsPerBlock, BlockSize); Solver.SolverOption(InitialTimeStep, dt); Solver.SolverOption(ActiveNumberOfThreads, NumberOfProblems); Solver.SolverOption(DenseOutputTimeStep, -1); Solver.SolverOption(DenseOutputVariableIndex, 0, 0); Solver.SolverOption(Delay, 0, 0, 1.0); Solver.SolverOption(Delay, 1, 0, 2.0); //fill solver object for (int i = 0; i < NumberOfProblems; i++) { Solver.SetHost(i,TimeDomain,0,tmin); Solver.SetHost(i,TimeDomain,1,tmax); Solver.SetHost(i,ActualTime,tmin); Solver.SetHost(i,ActualState,0,f0(0.0)); Solver.SetHost(i,ControlParameters,0,p[i]); //fill initial dense output Solver.SetHost(i,DenseIndex,pointsInit); for (int j = 0; j < pointsInit; j++) { Solver.SetHost(i,DenseTime,j,t0list[j]); Solver.SetHost(i,DenseState,0,j,x0list[j]); Solver.SetHost(i,DenseDerivative,0,j,xd0list[j]); } } //synchronize Solver.SynchroniseFromHostToDevice(All); Solver.InsertSynchronisationPoint(); Solver.SynchroniseSolver(); //solve clock_t SimulationStart = clock(); Solver.Solve(); Solver.InsertSynchronisationPoint(); Solver.SynchroniseSolver(); clock_t SimulationTime = clock()-SimulationStart; std::cout << "Simulation Time: "<< 1000*SimulationTime/CLOCKS_PER_SEC << " ms"<<std::endl; //write back to CPU Solver.SynchroniseFromDeviceToHost(All); Solver.InsertSynchronisationPoint(); Solver.SynchroniseSolver(); //write to file ofstream DataFile("logistic2.txt"); DataFile.precision(8); DataFile.flags(ios::scientific); for (int i = 0; i < NumberOfProblems; i++) { 
DataFile.width(13); DataFile << p[i] << ','; DataFile.width(13); DataFile << Solver.GetHost<double>(i, ActualState, 0) << '\n'; } DataFile.flush(); DataFile.close(); cout << "Test finished!" << endl; } void Linspace(vector<double>& x, double B, double E, int N) { double Increment; x[0] = B; if ( N>1 ) { x[N-1] = E; Increment = (E-B)/(N-1); for (int i=1; i<N-1; i++) { x[i] = B + i*Increment; } } } void Discretize(vector<double>& y, double f(double), vector<double> x) { double Increment; for (int i = 0; i < x.size(); i++) { y[i] = f(x[i]); } }
347fdee77ee191fd1ef1bea27cbb413296848bdd.cu
#include <iostream> #include <iomanip> #include <vector> #include <cmath> #include <string> #include <fstream> // Solver Configuration #define __MPGOS_PERTHREAD_SOLVER_DDE4 //RK4 solver #define __MPGOS_PERTHREAD_PRECISION double #define __MPGOS_PERTHREAD_NT 1024 // NumberOfThreads #define __MPGOS_PERTHREAD_SD 1 // SystemDimension #define __MPGOS_PERTHREAD_DOD 1 // DenseDimension #define __MPGOS_PERTHREAD_NDELAY 2 // NumberOfDelays #define __MPGOS_PERTHREAD_NCP 1 // ControlParameters #define __MPGOS_PERTHREAD_NDO 2005 // NumberOfPointsOfDenseOutput #include "SingleSystem_PerThread_DataStructures.cuh" #include "logistic2_SystemDefinition.cuh" #include "SingleSystem_PerThread_Interface.cuh" using namespace std; #define PI 3.14159265358979323846 void Linspace(vector<double>&, double, double, int); void Discretize(vector<double>&, double f(double), vector<double>); double f0(double t) { if (t < -1.5) return std::cos(4 * PI * t); if (t < -std::sqrt(2.0)) return t * t; if (t < -1.101) return std::exp(t); if (t < -0.5) return 0; return t + 0.5; } double fd0(double t) { if (t < -1.5) return -4 * PI * std::sin(4 * PI * t); if (t < -std::sqrt(2.0)) return 2 * t; if (t < -1.101) return std::exp(t); if (t < -0.5) return 0; return 1.0; } int main() { //run configuration int NumberOfProblems = __MPGOS_PERTHREAD_NT; int BlockSize = 32; int CUDAdevice = 0; PrintPropertiesOfSpecificDevice(CUDAdevice); //parameters and initial conditions double tmin = 0.0; double tmax = 10.0; double dt = 0.001; double pmin = 0; double pmax = 2; vector<double> p(NumberOfProblems,0); Linspace(p,pmin,pmax, NumberOfProblems); //initial points int pointsInit = 200; double tInitMin = -2.0; double tInitMax = 0; vector<double> t0list(pointsInit,0); vector<double> x0list(pointsInit,0); vector<double> xd0list(pointsInit,0); Linspace(t0list,tInitMin,tInitMax, pointsInit); Discretize(x0list,f0,t0list); Discretize(xd0list,fd0,t0list); //initialize solver ProblemSolver Solver(CUDAdevice); //Set Solver options Solver.SolverOption(ThreadsPerBlock, BlockSize); Solver.SolverOption(InitialTimeStep, dt); Solver.SolverOption(ActiveNumberOfThreads, NumberOfProblems); Solver.SolverOption(DenseOutputTimeStep, -1); Solver.SolverOption(DenseOutputVariableIndex, 0, 0); Solver.SolverOption(Delay, 0, 0, 1.0); Solver.SolverOption(Delay, 1, 0, 2.0); //fill solver object for (int i = 0; i < NumberOfProblems; i++) { Solver.SetHost(i,TimeDomain,0,tmin); Solver.SetHost(i,TimeDomain,1,tmax); Solver.SetHost(i,ActualTime,tmin); Solver.SetHost(i,ActualState,0,f0(0.0)); Solver.SetHost(i,ControlParameters,0,p[i]); //fill initial dense output Solver.SetHost(i,DenseIndex,pointsInit); for (int j = 0; j < pointsInit; j++) { Solver.SetHost(i,DenseTime,j,t0list[j]); Solver.SetHost(i,DenseState,0,j,x0list[j]); Solver.SetHost(i,DenseDerivative,0,j,xd0list[j]); } } //synchronize Solver.SynchroniseFromHostToDevice(All); Solver.InsertSynchronisationPoint(); Solver.SynchroniseSolver(); //solve clock_t SimulationStart = clock(); Solver.Solve(); Solver.InsertSynchronisationPoint(); Solver.SynchroniseSolver(); clock_t SimulationTime = clock()-SimulationStart; std::cout << "Simulation Time: "<< 1000*SimulationTime/CLOCKS_PER_SEC << " ms"<<std::endl; //write back to CPU Solver.SynchroniseFromDeviceToHost(All); Solver.InsertSynchronisationPoint(); Solver.SynchroniseSolver(); //write to file ofstream DataFile("logistic2.txt"); DataFile.precision(8); DataFile.flags(ios::scientific); for (int i = 0; i < NumberOfProblems; i++) { DataFile.width(13); DataFile << p[i] << ','; 
DataFile.width(13); DataFile << Solver.GetHost<double>(i, ActualState, 0) << '\n'; } DataFile.flush(); DataFile.close(); cout << "Test finished!" << endl; } void Linspace(vector<double>& x, double B, double E, int N) { double Increment; x[0] = B; if ( N>1 ) { x[N-1] = E; Increment = (E-B)/(N-1); for (int i=1; i<N-1; i++) { x[i] = B + i*Increment; } } } void Discretize(vector<double>& y, double f(double), vector<double> x) { double Increment; for (int i = 0; i < x.size(); i++) { y[i] = f(x[i]); } }
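The history pair f0/fd0 loaded into the dense output above is a function and its derivative; restated piecewise with the same breakpoints as the code:

\[
f_0(t)=\begin{cases}
\cos(4\pi t), & t<-1.5,\\
t^{2}, & -1.5\le t<-\sqrt{2},\\
e^{t}, & -\sqrt{2}\le t<-1.101,\\
0, & -1.101\le t<-0.5,\\
t+0.5, & t\ge -0.5,
\end{cases}
\qquad
f_0'(t)=\begin{cases}
-4\pi\sin(4\pi t),\\
2t,\\
e^{t},\\
0,\\
1,
\end{cases}
\]

which is what Discretize samples onto the 200 initial dense-output points before the solve starts.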
ba17e6770e707c079296f4537a0146b75761e416.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <hip/hip_cooperative_groups.h>

#include "reduction.h"

using namespace cooperative_groups;

/*
    Parallel sum reduction using shared memory
    - takes log(n) steps for n input elements
    - uses n threads
    - only works for power-of-2 arrays
*/

/**
    Two warp level primitives are used here for this example
    https://devblogs.nvidia.com/faster-parallel-reductions-kepler/
    https://devblogs.nvidia.com/using-cuda-warp-level-primitives/
 */

__global__ void
atomic_reduction_kernel(float *data_out, float *data_in, int size)
{
    int idx_x = blockIdx.x * blockDim.x + threadIdx.x;

    atomicAdd(&data_out[0], data_in[idx_x]);
}

void atomic_reduction(float *g_outPtr, float *g_inPtr, int size, int n_threads)
{
    int n_blocks = (size + n_threads - 1) / n_threads;
    hipLaunchKernelGGL(( atomic_reduction_kernel), dim3(n_blocks), dim3(n_threads), 0, 0, g_outPtr, g_inPtr, size);
}
ba17e6770e707c079296f4537a0146b75761e416.cu
#include <stdio.h>
#include <cooperative_groups.h>

#include "reduction.h"

using namespace cooperative_groups;

/*
    Parallel sum reduction using shared memory
    - takes log(n) steps for n input elements
    - uses n threads
    - only works for power-of-2 arrays
*/

/**
    Two warp level primitives are used here for this example
    https://devblogs.nvidia.com/faster-parallel-reductions-kepler/
    https://devblogs.nvidia.com/using-cuda-warp-level-primitives/
 */

__global__ void
atomic_reduction_kernel(float *data_out, float *data_in, int size)
{
    int idx_x = blockIdx.x * blockDim.x + threadIdx.x;

    atomicAdd(&data_out[0], data_in[idx_x]);
}

void atomic_reduction(float *g_outPtr, float *g_inPtr, int size, int n_threads)
{
    int n_blocks = (size + n_threads - 1) / n_threads;
    atomic_reduction_kernel<<<n_blocks, n_threads>>>(g_outPtr, g_inPtr, size);
}
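Worth noting in both versions above: atomic_reduction_kernel reads data_in[idx_x] without a bounds check while the launcher rounds n_blocks up, so any size that is not a multiple of n_threads makes the last block read past the end of the input. A guarded sketch is below; the _guarded name is mine and this is not the stored file.

// Sketch only: same reduction with the tail threads of the last block masked off.
__global__ void atomic_reduction_kernel_guarded(float* data_out, const float* data_in, int size)
{
    int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx_x < size)                       // skip out-of-range threads
        atomicAdd(&data_out[0], data_in[idx_x]);
}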
e284fbce725bcf3bead918b033c41ddb16a89ed8.hip
// !!! This is a file automatically generated by hipify!!!
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// Created by mike on 2/10/20.
//

#include <hip/hip_runtime.h>

#include <cmath>

#include "gis/cuda/common/gpu_memory.h"
#include "gis/cuda/functor/st_distance.h"

namespace arctern {
namespace gis {
namespace cuda {
namespace {
inline DEVICE_RUNNABLE double Point2PointDistance(ConstGpuContext& left,
                                                  ConstGpuContext& right,
                                                  int index) {
  auto lv = left.get_value_ptr(index);
  auto rv = right.get_value_ptr(index);
  auto dx = (lv[0] - rv[0]);
  auto dy = (lv[1] - rv[1]);
  return sqrt(dx * dx + dy * dy);
}

__global__ void ST_DistanceKernel(ConstGpuContext left, ConstGpuContext right,
                                  double* result) {
  auto tid = threadIdx.x + blockIdx.x * blockDim.x;
  if (tid < left.size) {
    auto left_tag = left.get_tag(tid);
    auto right_tag = right.get_tag(tid);
    // handle 2d case only for now
    assert(left_tag.get_space_type() == WkbSpaceType::XY);
    assert(right_tag.get_space_type() == WkbSpaceType::XY);
    // handle point to point case only
    if (left_tag.get_category() == WkbCategory::kPoint &&
        right_tag.get_category() == WkbCategory::kPoint) {
      result[tid] = Point2PointDistance(left, right, tid);
    } else {
      result[tid] = NAN;
    }
  }
}
}  // namespace

void ST_Distance(const GeometryVector& left_vec, const GeometryVector& right_vec,
                 double* host_results) {
  assert(left_vec.size() == right_vec.size());
  auto left_ctx_holder = left_vec.CreateReadGpuContext();
  auto right_ctx_holder = right_vec.CreateReadGpuContext();
  auto dev_result = GpuMakeUniqueArray<double>(left_vec.size());
  {
    auto config = GetKernelExecConfig(left_vec.size());
    hipLaunchKernelGGL(( ST_DistanceKernel), dim3(config.grid_dim), dim3(config.block_dim), 0, 0,
        *left_ctx_holder, *right_ctx_holder, dev_result.get());
  }
  GpuMemcpy(host_results, dev_result.get(), left_vec.size());
}

}  // namespace cuda
}  // namespace gis
}  // namespace arctern
e284fbce725bcf3bead918b033c41ddb16a89ed8.cu
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// Created by mike on 2/10/20.
//

#include <cuda_runtime.h>

#include <cmath>

#include "gis/cuda/common/gpu_memory.h"
#include "gis/cuda/functor/st_distance.h"

namespace arctern {
namespace gis {
namespace cuda {
namespace {
inline DEVICE_RUNNABLE double Point2PointDistance(ConstGpuContext& left,
                                                  ConstGpuContext& right,
                                                  int index) {
  auto lv = left.get_value_ptr(index);
  auto rv = right.get_value_ptr(index);
  auto dx = (lv[0] - rv[0]);
  auto dy = (lv[1] - rv[1]);
  return sqrt(dx * dx + dy * dy);
}

__global__ void ST_DistanceKernel(ConstGpuContext left, ConstGpuContext right,
                                  double* result) {
  auto tid = threadIdx.x + blockIdx.x * blockDim.x;
  if (tid < left.size) {
    auto left_tag = left.get_tag(tid);
    auto right_tag = right.get_tag(tid);
    // handle 2d case only for now
    assert(left_tag.get_space_type() == WkbSpaceType::XY);
    assert(right_tag.get_space_type() == WkbSpaceType::XY);
    // handle point to point case only
    if (left_tag.get_category() == WkbCategory::kPoint &&
        right_tag.get_category() == WkbCategory::kPoint) {
      result[tid] = Point2PointDistance(left, right, tid);
    } else {
      result[tid] = NAN;
    }
  }
}
}  // namespace

void ST_Distance(const GeometryVector& left_vec, const GeometryVector& right_vec,
                 double* host_results) {
  assert(left_vec.size() == right_vec.size());
  auto left_ctx_holder = left_vec.CreateReadGpuContext();
  auto right_ctx_holder = right_vec.CreateReadGpuContext();
  auto dev_result = GpuMakeUniqueArray<double>(left_vec.size());
  {
    auto config = GetKernelExecConfig(left_vec.size());
    ST_DistanceKernel<<<config.grid_dim, config.block_dim>>>(
        *left_ctx_holder, *right_ctx_holder, dev_result.get());
  }
  GpuMemcpy(host_results, dev_result.get(), left_vec.size());
}

}  // namespace cuda
}  // namespace gis
}  // namespace arctern
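Point2PointDistance in both files computes sqrt(dx*dx + dy*dy) directly, which is fine at geographic scales but can overflow the intermediate squares for extreme coordinate values; CUDA's math library also provides hypot as a device function. A variant sketch only, with an invented name, not a change to the stored files:

__device__ double point_distance(double x1, double y1, double x2, double y2) {
    return hypot(x2 - x1, y2 - y1);   // avoids overflow/underflow of the intermediate squares
}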
37a34e0401c10c9d17561278d988029f81f607fd.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> namespace{ __device__ __forceinline__ float atomicMin(float* address, float val) { int* address_as_i = (int*) address; int old = *address_as_i, assumed; do { assumed = old; old = atomicCAS(address_as_i, assumed, __float_as_int(fminf(val, __int_as_float(assumed)))); } while (assumed != old); return __int_as_float(old); } __device__ __forceinline__ double atomicMin(double* address, double val) { unsigned long long int* address_as_i = (unsigned long long int*) address; unsigned long long int old = *address_as_i, assumed; do { assumed = old; old = atomicCAS(address_as_i, assumed, __double_as_longlong(fminf(val, __longlong_as_double(assumed)))); } while (assumed != old); return __longlong_as_double(old); } template <typename scalar_t> __device__ __forceinline__ bool check_face_frontside(const scalar_t *face) { return (face[7] - face[1]) * (face[3] - face[0]) < (face[4] - face[1]) * (face[6] - face[0]); } template <typename scalar_t> struct point { public: scalar_t x; scalar_t y; __host__ __device__ scalar_t dot(point<scalar_t> p) { return this->x * p.x + this->y * p.y; }; __host__ __device__ point<scalar_t> operator-(point<scalar_t>& p) { point<scalar_t> np; np.x = this->x - p.x; np.y = this->y - p.y; return np; }; __host__ __device__ point<scalar_t> operator+(point<scalar_t>& p) { point<scalar_t> np; np.x = this->x + p.x; np.y = this->y + p.y; return np; }; __host__ __device__ point<scalar_t> operator*(scalar_t s) { point<scalar_t> np; np.x = s * this->x; np.y = s * this->y; return np; }; }; template <typename scalar_t> __device__ __forceinline__ bool check_pixel_inside(const scalar_t *w) { return w[0] <= 1 && w[0] >= 0 && w[1] <= 1 && w[1] >= 0 && w[2] <= 1 && w[2] >= 0; } template <typename scalar_t> __device__ __forceinline__ void barycentric_weight(scalar_t *w, point<scalar_t> p, point<scalar_t> p0, point<scalar_t> p1, point<scalar_t> p2) { // vectors point<scalar_t> v0, v1, v2; scalar_t s = p.dot(p); v0 = p2 - p0; v1 = p1 - p0; v2 = p - p0; // dot products scalar_t dot00 = v0.dot(v0); //v0.x * v0.x + v0.y * v0.y //np.dot(v0.T, v0) scalar_t dot01 = v0.dot(v1); //v0.x * v1.x + v0.y * v1.y //np.dot(v0.T, v1) scalar_t dot02 = v0.dot(v2); //v0.x * v2.x + v0.y * v2.y //np.dot(v0.T, v2) scalar_t dot11 = v1.dot(v1); //v1.x * v1.x + v1.y * v1.y //np.dot(v1.T, v1) scalar_t dot12 = v1.dot(v2); //v1.x * v2.x + v1.y * v2.y//np.dot(v1.T, v2) // barycentric coordinates scalar_t inverDeno; if(dot00*dot11 - dot01*dot01 == 0) inverDeno = 0; else inverDeno = 1/(dot00*dot11 - dot01*dot01); scalar_t u = (dot11*dot02 - dot01*dot12)*inverDeno; scalar_t v = (dot00*dot12 - dot01*dot02)*inverDeno; // weight w[0] = 1 - u - v; w[1] = v; w[2] = u; } template <typename scalar_t> __global__ void forward_rasterize_cuda_kernel( const scalar_t* __restrict__ face_vertices, //[bz, nf, 3, 3] scalar_t* depth_buffer, int* triangle_buffer, scalar_t* baryw_buffer, int batch_size, int h, int w, int ntri) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size * ntri) { return; } int bn = i/ntri; const scalar_t* face = &face_vertices[i * 9]; scalar_t bw[3]; point<scalar_t> p0, p1, p2, p; p0.x = face[0]; p0.y=face[1]; p1.x = face[3]; p1.y=face[4]; p2.x = face[6]; p2.y=face[7]; int x_min = max((int)ceil(min(p0.x, min(p1.x, p2.x))), 0); int x_max = min((int)floor(max(p0.x, max(p1.x, p2.x))), w - 1); int y_min = max((int)ceil(min(p0.y, min(p1.y, p2.y))), 0); int y_max = 
min((int)floor(max(p0.y, max(p1.y, p2.y))), h - 1); for(int y = y_min; y <= y_max; y++) //h { for(int x = x_min; x <= x_max; x++) //w { p.x = x; p.y = y; barycentric_weight(bw, p, p0, p1, p2); if(((bw[2] >= 0) && (bw[1] >= 0) && (bw[0]>0)) && check_face_frontside(face)) // if((bw[2] >= 0) && (bw[1] >= 0) && (bw[0]>0)) { // const scalar_t zp = 1. / (bw[0] / face[2] + bw[1] / face[5] + bw[2] / face[8]); // printf("%f %f %f \n", (float)zp, (float)face[2], (float)bw[2]); atomicMin(&depth_buffer[bn*h*w + y*w + x], zp); if(depth_buffer[bn*h*w + y*w + x] == zp) { // depth_min = zp; // atomic long long for two int // scalar_t tri_ind = i%ntri; // atomicMin(&depth_buffer[bn*h*w + y*w + x], zp); triangle_buffer[bn*h*w + y*w + x] = (int)(i%ntri); for(int k=0; k<3; k++){ baryw_buffer[bn*h*w*3 + y*w*3 + x*3 + k] = bw[k]; } // buffers[bn*h*w*2 + y*w*2 + x*2 + 1] = p_depth; } } } } } template <typename scalar_t> __global__ void forward_rasterize_colors_cuda_kernel( const scalar_t* __restrict__ face_vertices, //[bz, nf, 3, 3] const scalar_t* __restrict__ face_colors, //[bz, nf, 3, 3] scalar_t* depth_buffer, int* triangle_buffer, scalar_t* images, int batch_size, int h, int w, int ntri) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size * ntri) { return; } int bn = i/ntri; const scalar_t* face = &face_vertices[i * 9]; const scalar_t* color = &face_colors[i * 9]; scalar_t bw[3]; point<scalar_t> p0, p1, p2, p; p0.x = face[0]; p0.y=face[1]; p1.x = face[3]; p1.y=face[4]; p2.x = face[6]; p2.y=face[7]; scalar_t cl[3][3]; for (int num = 0; num < 3; num++) { for (int dim = 0; dim < 3; dim++) { cl[num][dim] = color[3 * num + dim]; //[3p,3rgb] } } int x_min = max((int)ceil(min(p0.x, min(p1.x, p2.x))), 0); int x_max = min((int)floor(max(p0.x, max(p1.x, p2.x))), w - 1); int y_min = max((int)ceil(min(p0.y, min(p1.y, p2.y))), 0); int y_max = min((int)floor(max(p0.y, max(p1.y, p2.y))), h - 1); for(int y = y_min; y <= y_max; y++) //h { for(int x = x_min; x <= x_max; x++) //w { p.x = x; p.y = y; barycentric_weight(bw, p, p0, p1, p2); if(((bw[2] >= 0) && (bw[1] >= 0) && (bw[0]>0)) && check_face_frontside(face)) // if((bw[2] >= 0) && (bw[1] >= 0) && (bw[0]>0)) { scalar_t zp = 1. 
/ (bw[0] / face[2] + bw[1] / face[5] + bw[2] / face[8]); atomicMin(&depth_buffer[bn*h*w + y*w + x], zp); if(depth_buffer[bn*h*w + y*w + x] == zp) { // depth_min = zp; // atomic long long for two int // scalar_t tri_ind = i%ntri; // atomicAdd( (int*)&depth_buffer[bn*h*w + y*w + x], (int)zp); // atomicMin(&depth_buffer[bn*h*w + y*w + x], zp); triangle_buffer[bn*h*w + y*w + x] = (int)(i%ntri); for(int k=0; k<3; k++){ // baryw_buffer[bn*h*w*3 + y*w*3 + x*3 + k] = bw[k]; images[bn*h*w*3 + y*w*3 + x*3 + k] = bw[0]*cl[0][k] + bw[1]*cl[1][k] + bw[2]*cl[2][k]; } // buffers[bn*h*w*2 + y*w*2 + x*2 + 1] = p_depth; } } } } } } std::vector<at::Tensor> forward_rasterize_cuda( at::Tensor face_vertices, at::Tensor depth_buffer, at::Tensor triangle_buffer, at::Tensor baryw_buffer, int h, int w){ const auto batch_size = face_vertices.size(0); const auto ntri = face_vertices.size(1); // print(channel_size) const int threads = 512; const dim3 blocks_1 ((batch_size * ntri - 1) / threads +1); AT_DISPATCH_FLOATING_TYPES(face_vertices.type(), "forward_rasterize_cuda1", ([&] { hipLaunchKernelGGL(( forward_rasterize_cuda_kernel<scalar_t>), dim3(blocks_1), dim3(threads), 0, 0, face_vertices.data<scalar_t>(), depth_buffer.data<scalar_t>(), triangle_buffer.data<int>(), baryw_buffer.data<scalar_t>(), batch_size, h, w, ntri); })); AT_DISPATCH_FLOATING_TYPES(face_vertices.type(), "forward_rasterize_cuda2", ([&] { hipLaunchKernelGGL(( forward_rasterize_cuda_kernel<scalar_t>), dim3(blocks_1), dim3(threads), 0, 0, face_vertices.data<scalar_t>(), depth_buffer.data<scalar_t>(), triangle_buffer.data<int>(), baryw_buffer.data<scalar_t>(), batch_size, h, w, ntri); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) printf("Error in forward_rasterize_cuda_kernel: %s\n", hipGetErrorString(err)); return {depth_buffer, triangle_buffer, baryw_buffer}; } std::vector<at::Tensor> forward_rasterize_colors_cuda( at::Tensor face_vertices, at::Tensor face_colors, at::Tensor depth_buffer, at::Tensor triangle_buffer, at::Tensor images, int h, int w){ const auto batch_size = face_vertices.size(0); const auto ntri = face_vertices.size(1); // print(channel_size) const int threads = 512; const dim3 blocks_1 ((batch_size * ntri - 1) / threads +1); //initial AT_DISPATCH_FLOATING_TYPES(face_vertices.type(), "forward_rasterize_colors_cuda", ([&] { hipLaunchKernelGGL(( forward_rasterize_colors_cuda_kernel<scalar_t>), dim3(blocks_1), dim3(threads), 0, 0, face_vertices.data<scalar_t>(), face_colors.data<scalar_t>(), depth_buffer.data<scalar_t>(), triangle_buffer.data<int>(), images.data<scalar_t>(), batch_size, h, w, ntri); })); AT_DISPATCH_FLOATING_TYPES(face_vertices.type(), "forward_rasterize_colors_cuda", ([&] { hipLaunchKernelGGL(( forward_rasterize_colors_cuda_kernel<scalar_t>), dim3(blocks_1), dim3(threads), 0, 0, face_vertices.data<scalar_t>(), face_colors.data<scalar_t>(), depth_buffer.data<scalar_t>(), triangle_buffer.data<int>(), images.data<scalar_t>(), batch_size, h, w, ntri); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) printf("Error in forward_rasterize_cuda_kernel: %s\n", hipGetErrorString(err)); return {depth_buffer, triangle_buffer, images}; }
37a34e0401c10c9d17561278d988029f81f607fd.cu
#include <ATen/ATen.h> #include <cuda.h> #include <cuda_runtime.h> namespace{ __device__ __forceinline__ float atomicMin(float* address, float val) { int* address_as_i = (int*) address; int old = *address_as_i, assumed; do { assumed = old; old = atomicCAS(address_as_i, assumed, __float_as_int(fminf(val, __int_as_float(assumed)))); } while (assumed != old); return __int_as_float(old); } __device__ __forceinline__ double atomicMin(double* address, double val) { unsigned long long int* address_as_i = (unsigned long long int*) address; unsigned long long int old = *address_as_i, assumed; do { assumed = old; old = atomicCAS(address_as_i, assumed, __double_as_longlong(fminf(val, __longlong_as_double(assumed)))); } while (assumed != old); return __longlong_as_double(old); } template <typename scalar_t> __device__ __forceinline__ bool check_face_frontside(const scalar_t *face) { return (face[7] - face[1]) * (face[3] - face[0]) < (face[4] - face[1]) * (face[6] - face[0]); } template <typename scalar_t> struct point { public: scalar_t x; scalar_t y; __host__ __device__ scalar_t dot(point<scalar_t> p) { return this->x * p.x + this->y * p.y; }; __host__ __device__ point<scalar_t> operator-(point<scalar_t>& p) { point<scalar_t> np; np.x = this->x - p.x; np.y = this->y - p.y; return np; }; __host__ __device__ point<scalar_t> operator+(point<scalar_t>& p) { point<scalar_t> np; np.x = this->x + p.x; np.y = this->y + p.y; return np; }; __host__ __device__ point<scalar_t> operator*(scalar_t s) { point<scalar_t> np; np.x = s * this->x; np.y = s * this->y; return np; }; }; template <typename scalar_t> __device__ __forceinline__ bool check_pixel_inside(const scalar_t *w) { return w[0] <= 1 && w[0] >= 0 && w[1] <= 1 && w[1] >= 0 && w[2] <= 1 && w[2] >= 0; } template <typename scalar_t> __device__ __forceinline__ void barycentric_weight(scalar_t *w, point<scalar_t> p, point<scalar_t> p0, point<scalar_t> p1, point<scalar_t> p2) { // vectors point<scalar_t> v0, v1, v2; scalar_t s = p.dot(p); v0 = p2 - p0; v1 = p1 - p0; v2 = p - p0; // dot products scalar_t dot00 = v0.dot(v0); //v0.x * v0.x + v0.y * v0.y //np.dot(v0.T, v0) scalar_t dot01 = v0.dot(v1); //v0.x * v1.x + v0.y * v1.y //np.dot(v0.T, v1) scalar_t dot02 = v0.dot(v2); //v0.x * v2.x + v0.y * v2.y //np.dot(v0.T, v2) scalar_t dot11 = v1.dot(v1); //v1.x * v1.x + v1.y * v1.y //np.dot(v1.T, v1) scalar_t dot12 = v1.dot(v2); //v1.x * v2.x + v1.y * v2.y//np.dot(v1.T, v2) // barycentric coordinates scalar_t inverDeno; if(dot00*dot11 - dot01*dot01 == 0) inverDeno = 0; else inverDeno = 1/(dot00*dot11 - dot01*dot01); scalar_t u = (dot11*dot02 - dot01*dot12)*inverDeno; scalar_t v = (dot00*dot12 - dot01*dot02)*inverDeno; // weight w[0] = 1 - u - v; w[1] = v; w[2] = u; } template <typename scalar_t> __global__ void forward_rasterize_cuda_kernel( const scalar_t* __restrict__ face_vertices, //[bz, nf, 3, 3] scalar_t* depth_buffer, int* triangle_buffer, scalar_t* baryw_buffer, int batch_size, int h, int w, int ntri) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size * ntri) { return; } int bn = i/ntri; const scalar_t* face = &face_vertices[i * 9]; scalar_t bw[3]; point<scalar_t> p0, p1, p2, p; p0.x = face[0]; p0.y=face[1]; p1.x = face[3]; p1.y=face[4]; p2.x = face[6]; p2.y=face[7]; int x_min = max((int)ceil(min(p0.x, min(p1.x, p2.x))), 0); int x_max = min((int)floor(max(p0.x, max(p1.x, p2.x))), w - 1); int y_min = max((int)ceil(min(p0.y, min(p1.y, p2.y))), 0); int y_max = min((int)floor(max(p0.y, max(p1.y, p2.y))), h - 1); for(int y = y_min; y <= 
y_max; y++) //h { for(int x = x_min; x <= x_max; x++) //w { p.x = x; p.y = y; barycentric_weight(bw, p, p0, p1, p2); if(((bw[2] >= 0) && (bw[1] >= 0) && (bw[0]>0)) && check_face_frontside(face)) // if((bw[2] >= 0) && (bw[1] >= 0) && (bw[0]>0)) { // const scalar_t zp = 1. / (bw[0] / face[2] + bw[1] / face[5] + bw[2] / face[8]); // printf("%f %f %f \n", (float)zp, (float)face[2], (float)bw[2]); atomicMin(&depth_buffer[bn*h*w + y*w + x], zp); if(depth_buffer[bn*h*w + y*w + x] == zp) { // depth_min = zp; // atomic long long for two int // scalar_t tri_ind = i%ntri; // atomicMin(&depth_buffer[bn*h*w + y*w + x], zp); triangle_buffer[bn*h*w + y*w + x] = (int)(i%ntri); for(int k=0; k<3; k++){ baryw_buffer[bn*h*w*3 + y*w*3 + x*3 + k] = bw[k]; } // buffers[bn*h*w*2 + y*w*2 + x*2 + 1] = p_depth; } } } } } template <typename scalar_t> __global__ void forward_rasterize_colors_cuda_kernel( const scalar_t* __restrict__ face_vertices, //[bz, nf, 3, 3] const scalar_t* __restrict__ face_colors, //[bz, nf, 3, 3] scalar_t* depth_buffer, int* triangle_buffer, scalar_t* images, int batch_size, int h, int w, int ntri) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size * ntri) { return; } int bn = i/ntri; const scalar_t* face = &face_vertices[i * 9]; const scalar_t* color = &face_colors[i * 9]; scalar_t bw[3]; point<scalar_t> p0, p1, p2, p; p0.x = face[0]; p0.y=face[1]; p1.x = face[3]; p1.y=face[4]; p2.x = face[6]; p2.y=face[7]; scalar_t cl[3][3]; for (int num = 0; num < 3; num++) { for (int dim = 0; dim < 3; dim++) { cl[num][dim] = color[3 * num + dim]; //[3p,3rgb] } } int x_min = max((int)ceil(min(p0.x, min(p1.x, p2.x))), 0); int x_max = min((int)floor(max(p0.x, max(p1.x, p2.x))), w - 1); int y_min = max((int)ceil(min(p0.y, min(p1.y, p2.y))), 0); int y_max = min((int)floor(max(p0.y, max(p1.y, p2.y))), h - 1); for(int y = y_min; y <= y_max; y++) //h { for(int x = x_min; x <= x_max; x++) //w { p.x = x; p.y = y; barycentric_weight(bw, p, p0, p1, p2); if(((bw[2] >= 0) && (bw[1] >= 0) && (bw[0]>0)) && check_face_frontside(face)) // if((bw[2] >= 0) && (bw[1] >= 0) && (bw[0]>0)) { scalar_t zp = 1. 
/ (bw[0] / face[2] + bw[1] / face[5] + bw[2] / face[8]); atomicMin(&depth_buffer[bn*h*w + y*w + x], zp); if(depth_buffer[bn*h*w + y*w + x] == zp) { // depth_min = zp; // atomic long long for two int // scalar_t tri_ind = i%ntri; // atomicAdd( (int*)&depth_buffer[bn*h*w + y*w + x], (int)zp); // atomicMin(&depth_buffer[bn*h*w + y*w + x], zp); triangle_buffer[bn*h*w + y*w + x] = (int)(i%ntri); for(int k=0; k<3; k++){ // baryw_buffer[bn*h*w*3 + y*w*3 + x*3 + k] = bw[k]; images[bn*h*w*3 + y*w*3 + x*3 + k] = bw[0]*cl[0][k] + bw[1]*cl[1][k] + bw[2]*cl[2][k]; } // buffers[bn*h*w*2 + y*w*2 + x*2 + 1] = p_depth; } } } } } } std::vector<at::Tensor> forward_rasterize_cuda( at::Tensor face_vertices, at::Tensor depth_buffer, at::Tensor triangle_buffer, at::Tensor baryw_buffer, int h, int w){ const auto batch_size = face_vertices.size(0); const auto ntri = face_vertices.size(1); // print(channel_size) const int threads = 512; const dim3 blocks_1 ((batch_size * ntri - 1) / threads +1); AT_DISPATCH_FLOATING_TYPES(face_vertices.type(), "forward_rasterize_cuda1", ([&] { forward_rasterize_cuda_kernel<scalar_t><<<blocks_1, threads>>>( face_vertices.data<scalar_t>(), depth_buffer.data<scalar_t>(), triangle_buffer.data<int>(), baryw_buffer.data<scalar_t>(), batch_size, h, w, ntri); })); AT_DISPATCH_FLOATING_TYPES(face_vertices.type(), "forward_rasterize_cuda2", ([&] { forward_rasterize_cuda_kernel<scalar_t><<<blocks_1, threads>>>( face_vertices.data<scalar_t>(), depth_buffer.data<scalar_t>(), triangle_buffer.data<int>(), baryw_buffer.data<scalar_t>(), batch_size, h, w, ntri); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) printf("Error in forward_rasterize_cuda_kernel: %s\n", cudaGetErrorString(err)); return {depth_buffer, triangle_buffer, baryw_buffer}; } std::vector<at::Tensor> forward_rasterize_colors_cuda( at::Tensor face_vertices, at::Tensor face_colors, at::Tensor depth_buffer, at::Tensor triangle_buffer, at::Tensor images, int h, int w){ const auto batch_size = face_vertices.size(0); const auto ntri = face_vertices.size(1); // print(channel_size) const int threads = 512; const dim3 blocks_1 ((batch_size * ntri - 1) / threads +1); //initial AT_DISPATCH_FLOATING_TYPES(face_vertices.type(), "forward_rasterize_colors_cuda", ([&] { forward_rasterize_colors_cuda_kernel<scalar_t><<<blocks_1, threads>>>( face_vertices.data<scalar_t>(), face_colors.data<scalar_t>(), depth_buffer.data<scalar_t>(), triangle_buffer.data<int>(), images.data<scalar_t>(), batch_size, h, w, ntri); })); AT_DISPATCH_FLOATING_TYPES(face_vertices.type(), "forward_rasterize_colors_cuda", ([&] { forward_rasterize_colors_cuda_kernel<scalar_t><<<blocks_1, threads>>>( face_vertices.data<scalar_t>(), face_colors.data<scalar_t>(), depth_buffer.data<scalar_t>(), triangle_buffer.data<int>(), images.data<scalar_t>(), batch_size, h, w, ntri); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) printf("Error in forward_rasterize_cuda_kernel: %s\n", cudaGetErrorString(err)); return {depth_buffer, triangle_buffer, images}; }
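A detail shared by the .hip and .cu rasterizers above: the double-precision atomicMin overload calls fminf, the single-precision minimum, inside its compare-and-swap loop, so the comparison is effectively done at float precision even though the depth buffer is double. A corrected sketch of that overload follows, renamed atomicMinDouble to make clear it is not the stored code.

// Sketch only: double-precision atomic minimum via atomicCAS, using fmin rather than fminf.
__device__ __forceinline__ double atomicMinDouble(double* address, double val) {
    unsigned long long int* address_as_ull = (unsigned long long int*) address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(fmin(val, __longlong_as_double(assumed))));
    } while (assumed != old);   // retry until no other thread changed the slot in between
    return __longlong_as_double(old);
}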
d4c80f35f5adfd15bdc3429c629bc66c5a78bddd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************** * * Copyright (C) 2015 Culham Centre for Fusion Energy, * United Kingdom Atomic Energy Authority, Oxfordshire OX14 3DB, UK * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ******************************************************************************** * * Program: SPILADY - A Spin-Lattice Dynamics Simulation Program * Version: 1.0 * Date: Aug 2015 * Author: Pui-Wai (Leo) MA * Contact: [email protected] * Address: Culham Centre for Fusion Energy, OX14 3DB, United Kingdom * ********************************************************************************/ #ifdef GPU #include "spilady.h" #include "prototype_GPU.h" /********************************************************************** * GPU prototype ***********************************************************************/ __global__ void LP1ForEn(struct varGPU *var_ptr_d, struct atom_struct *first_atom_ptr_d); __global__ void LP2ForEn(struct varGPU *var_ptr_d, struct atom_struct *first_atom_ptr_d, struct cell_struct *first_cell_ptr_d #if defined MD || defined SLDH || defined SLDHL || defined SLDNC , double *bf_ptr_d, double *sf_ptr_d, double *pr_ptr_d , double *dbf_ptr_d, double *dsf_ptr_d, double *dpr_ptr_d #endif #if defined SDH || defined SDHL || defined SLDH || defined SLDHL ,double *Jij_ptr_d, double *dJij_ptr_d #endif #if defined SDHL || defined SLDHL ,double *LandauA_ptr_d, double *LandauB_ptr_d, double *LandauC_ptr_d, double *LandauD_ptr_d #endif #if defined SLDHL ,double *dLandauA_ptr_d, double *dLandauB_ptr_d, double *dLandauC_ptr_d, double *dLandauD_ptr_d #endif ); __global__ void LP3ForEn(struct varGPU *var_ptr_d, struct atom_struct* first_atom_ptr_d); #ifdef localvol __global__ void LP4ForEn_part1(struct varGPU *var_ptr_d, struct atom_struct *first_atom_ptr_d, double *sum_volume_ptr_d); __global__ void LP4ForEn_part2(double *sum_volume_ptr_d); __global__ void LP5ForEn(struct varGPU *var_ptr_d, struct atom_struct* first_atom_ptr_d, double volume_factor); #endif #if defined MD || defined SLDH || defined SLDHL || defined SLDNC __global__ void LP6ForEn(struct varGPU *var_ptr_d, struct atom_struct* first_atom_ptr_d); __global__ void LP7ForEn_part1(struct varGPU *var_ptr_d, struct atom_struct *first_atom_ptr_d, double *virial_ptr_d); __global__ void LP7ForEn_part2(double *virial_ptr_d); #endif __device__ void inner_loop_d(struct varGPU *var_ptr_d, struct atom_struct *atom_ptr, struct cell_struct *first_cell_ptr_d #if defined MD || defined SLDH || defined SLDHL || defined SLDNC , double *bf_ptr_d, double *sf_ptr_d, double *pr_ptr_d , double *dbf_ptr_d, double *dsf_ptr_d, double *dpr_ptr_d #endif #if defined SDH || defined SDHL || defined SLDH || defined SLDHL ,double *Jij_ptr_d, double *dJij_ptr_d #endif #if defined SDHL || defined SLDHL ,double *LandauA_ptr_d, double *LandauB_ptr_d, double *LandauC_ptr_d, double *LandauD_ptr_d #endif #if defined SLDHL ,double *dLandauA_ptr_d, double 
*dLandauB_ptr_d, double *dLandauC_ptr_d, double *dLandauD_ptr_d #endif ); /********************************************************************** * CPU codes ***********************************************************************/ void calculate_force_energy_GPU(){ hipLaunchKernelGGL(( LP1ForEn), dim3(no_of_blocks), dim3(no_of_threads), 0, 0, var_ptr_d, first_atom_ptr_d); hipLaunchKernelGGL(( LP2ForEn), dim3(no_of_blocks), dim3(no_of_threads), 0, 0, var_ptr_d , first_atom_ptr_d, first_cell_ptr_d #if defined MD || defined SLDH || defined SLDHL || defined SLDNC , bf_ptr_d, sf_ptr_d, pr_ptr_d , dbf_ptr_d, dsf_ptr_d, dpr_ptr_d #endif #if defined SDH || defined SDHL || defined SLDH || defined SLDHL , Jij_ptr_d, dJij_ptr_d #endif #if defined SDHL || defined SLDHL , LandauA_ptr_d, LandauB_ptr_d, LandauC_ptr_d, LandauD_ptr_d #endif #if defined SLDHL , dLandauA_ptr_d, dLandauB_ptr_d, dLandauC_ptr_d, dLandauD_ptr_d #endif ); hipLaunchKernelGGL(( LP3ForEn), dim3(no_of_blocks), dim3(no_of_threads), 0, 0, var_ptr_d, first_atom_ptr_d); #ifdef localvol double sum_volume= 0e0; double* sum_volume_ptr_d; hipMalloc((void**)&sum_volume_ptr_d, no_of_MP*no_of_threads*sizeof(double)); hipLaunchKernelGGL(( LP4ForEn_part1), dim3(no_of_MP), dim3(no_of_threads), 0, 0, var_ptr_d, first_atom_ptr_d, sum_volume_ptr_d); hipLaunchKernelGGL(( LP4ForEn_part2), dim3(no_of_MP), dim3(no_of_threads), 0, 0, sum_volume_ptr_d); hipMemcpy(&sum_volume, sum_volume_ptr_d, sizeof(double), hipMemcpyDeviceToHost); hipFree(sum_volume_ptr_d); hipMemcpy(&box_volume, &(var_ptr_d->box_volume), sizeof(double), hipMemcpyDeviceToHost); double volume_factor = box_volume/sum_volume; hipLaunchKernelGGL(( LP5ForEn), dim3(no_of_blocks), dim3(no_of_threads), 0, 0, var_ptr_d, first_atom_ptr_d, volume_factor); #endif #if defined MD || defined SLDH || defined SLDHL || defined SLDNC hipLaunchKernelGGL(( LP6ForEn), dim3(no_of_blocks), dim3(no_of_threads), 0, 0, var_ptr_d, first_atom_ptr_d); double *virial_ptr_d; hipMalloc((void**)&virial_ptr_d, no_of_MP*no_of_threads*sizeof(double)); hipLaunchKernelGGL(( LP7ForEn_part1), dim3(no_of_MP), dim3(no_of_threads), 0, 0, var_ptr_d, first_atom_ptr_d, virial_ptr_d); hipLaunchKernelGGL(( LP7ForEn_part2), dim3(no_of_MP), dim3(no_of_threads), 0, 0, virial_ptr_d); hipMemcpy(&virial, virial_ptr_d, sizeof(double), hipMemcpyDeviceToHost); hipFree(virial_ptr_d); #endif } void calculate_force_energy(){ calculate_force_energy_GPU(); } /************************************************************************** * GPU codes **************************************************************************/ __global__ void LP1ForEn(struct varGPU *var_ptr_d, struct atom_struct *first_atom_ptr_d){ int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < var_ptr_d->natom){ struct atom_struct *atom_ptr; atom_ptr = first_atom_ptr_d + i; #if defined MD || defined SLDH || defined SLDHL || defined SLDNC atom_ptr->f = vec_zero_d(); atom_ptr->pe = 0e0; atom_ptr->vir = 0e0; atom_ptr->stress11 = 0e0; atom_ptr->stress22 = 0e0; atom_ptr->stress33 = 0e0; atom_ptr->stress12 = 0e0; atom_ptr->stress23 = 0e0; atom_ptr->stress31 = 0e0; #endif #if defined SDH || defined SDHL || defined SLDH || defined SLDHL atom_ptr->me = 0e0; atom_ptr->me0 = 0e0; #endif #ifdef localvol atom_ptr->sum_rij_m1 = 0e0; //Sum rij^-1 atom_ptr->sum_rij_m2 = 0e0; //Sum rij^-2 #endif } } __global__ void LP2ForEn(struct varGPU *var_ptr_d, struct atom_struct *first_atom_ptr_d, struct cell_struct *first_cell_ptr_d #if defined MD || defined SLDH || defined SLDHL || defined SLDNC , double 
*bf_ptr_d, double *sf_ptr_d, double *pr_ptr_d , double *dbf_ptr_d, double *dsf_ptr_d, double *dpr_ptr_d #endif #if defined SDH || defined SDHL || defined SLDH || defined SLDHL ,double *Jij_ptr_d, double *dJij_ptr_d #endif #if defined SDHL || defined SLDHL ,double *LandauA_ptr_d, double *LandauB_ptr_d, double *LandauC_ptr_d, double *LandauD_ptr_d #endif #if defined SLDHL ,double *dLandauA_ptr_d, double *dLandauB_ptr_d, double *dLandauC_ptr_d, double *dLandauD_ptr_d #endif ) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < var_ptr_d->natom) inner_loop_d(var_ptr_d, first_atom_ptr_d + i, first_cell_ptr_d #if defined MD || defined SLDH || defined SLDHL || defined SLDNC , bf_ptr_d, sf_ptr_d, pr_ptr_d , dbf_ptr_d, dsf_ptr_d, dpr_ptr_d #endif #if defined SDH || defined SDHL || defined SLDH || defined SLDHL , Jij_ptr_d, dJij_ptr_d #endif #if defined SDHL || defined SLDHL , LandauA_ptr_d, LandauB_ptr_d, LandauC_ptr_d, LandauD_ptr_d #endif #if defined SLDHL , dLandauA_ptr_d, dLandauB_ptr_d, dLandauC_ptr_d, dLandauD_ptr_d #endif ); } __global__ void LP3ForEn(struct varGPU *var_ptr_d, struct atom_struct* first_atom_ptr_d) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < var_ptr_d->natom){ struct atom_struct *atom_ptr; atom_ptr = first_atom_ptr_d + i; #ifdef localvol double local_radius = 0.5e0*atom_ptr->sum_rij_m1/atom_ptr->sum_rij_m2; atom_ptr->local_volume = 4e0*Pi_num/3e0*pow(local_radius, 3e0); //it is only an estimation!!! #else atom_ptr->local_volume = var_ptr_d->box_volume/var_ptr_d->natom; #endif } } #ifdef localvol __global__ void LP4ForEn_part1(struct varGPU *var_ptr_d, struct atom_struct *first_atom_ptr_d, double *sum_volume_ptr_d){ int i = blockIdx.x*blockDim.x + threadIdx.x; *(sum_volume_ptr_d + i) = 0.0; int area = blockDim.x*gridDim.x; int k = (var_ptr_d->natom - 1)/area + 1; for (int j = 0; j < k; ++j){ int m = i + j*area; if (m < var_ptr_d->natom) { *(sum_volume_ptr_d + i) += (first_atom_ptr_d + m)->local_volume; } } __syncthreads(); } __global__ void LP4ForEn_part2(double *sum_volume_ptr_d){ int depth = blockIdx.x*blockDim.x; if (threadIdx.x == 0){ for (int j = 1; j < blockDim.x; ++j) *(sum_volume_ptr_d + depth) += *(sum_volume_ptr_d + depth + j); } __threadfence(); if (blockIdx.x == 0 && threadIdx.x == 0){ for (int j = 1; j < gridDim.x; ++j) *sum_volume_ptr_d += *(sum_volume_ptr_d + j*blockDim.x); } } __global__ void LP5ForEn(struct varGPU *var_ptr_d, struct atom_struct* first_atom_ptr_d, double volume_factor){ int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < var_ptr_d->natom) (first_atom_ptr_d + i)->local_volume *= volume_factor; } #endif #if defined MD || defined SLDH || defined SLDHL || defined SLDNC __global__ void LP6ForEn(struct varGPU *var_ptr_d, struct atom_struct* first_atom_ptr_d){ int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < var_ptr_d->natom){ struct atom_struct *atom_ptr; atom_ptr = first_atom_ptr_d + i; atom_ptr->stress11 = (pow(atom_ptr->p.x,2)/var_ptr_d->atmass + atom_ptr->stress11/2e0)/atom_ptr->local_volume; atom_ptr->stress22 = (pow(atom_ptr->p.y,2)/var_ptr_d->atmass + atom_ptr->stress22/2e0)/atom_ptr->local_volume; atom_ptr->stress33 = (pow(atom_ptr->p.z,2)/var_ptr_d->atmass + atom_ptr->stress33/2e0)/atom_ptr->local_volume; atom_ptr->stress12 = ((atom_ptr->p.x*atom_ptr->p.y)/var_ptr_d->atmass + atom_ptr->stress12/2e0)/atom_ptr->local_volume; atom_ptr->stress23 = ((atom_ptr->p.y*atom_ptr->p.z)/var_ptr_d->atmass + atom_ptr->stress23/2e0)/atom_ptr->local_volume; atom_ptr->stress31 = ((atom_ptr->p.z*atom_ptr->p.x)/var_ptr_d->atmass + 
atom_ptr->stress31/2e0)/atom_ptr->local_volume; } } __global__ void LP7ForEn_part1(struct varGPU *var_ptr_d, struct atom_struct *first_atom_ptr_d, double *virial_ptr_d){ int i = blockIdx.x*blockDim.x + threadIdx.x; *(virial_ptr_d + i) = 0.0; int area = blockDim.x*gridDim.x; int k = (var_ptr_d->natom - 1)/area + 1; for (int j = 0; j < k; ++j){ int m = i + j*area; if (m < var_ptr_d->natom) { *(virial_ptr_d + i) += (first_atom_ptr_d + m)->vir; } } __syncthreads(); } __global__ void LP7ForEn_part2(double *virial_ptr_d){ int depth = blockIdx.x*blockDim.x; if (threadIdx.x == 0){ for (int j = 1; j < blockDim.x; ++j) *(virial_ptr_d + depth) += *(virial_ptr_d + depth + j); } __threadfence(); if (blockIdx.x == 0 && threadIdx.x == 0){ for (int j = 1; j < gridDim.x; ++j) *virial_ptr_d += *(virial_ptr_d + j*blockDim.x); } } #endif __device__ void inner_loop_d(struct varGPU *var_ptr_d, struct atom_struct *atom_ptr, struct cell_struct *first_cell_ptr_d #if defined MD || defined SLDH || defined SLDHL || defined SLDNC , double *bf_ptr_d, double *sf_ptr_d, double *pr_ptr_d , double *dbf_ptr_d, double *dsf_ptr_d, double *dpr_ptr_d #endif #if defined SDH || defined SDHL || defined SLDH || defined SLDHL ,double *Jij_ptr_d, double *dJij_ptr_d #endif #if defined SDHL || defined SLDHL ,double *LandauA_ptr_d, double *LandauB_ptr_d, double *LandauC_ptr_d, double *LandauD_ptr_d #endif #if defined SLDHL ,double *dLandauA_ptr_d, double *dLandauB_ptr_d, double *dLandauC_ptr_d, double *dLandauD_ptr_d #endif ) { #if defined SDH || defined SDHL || defined SLDH || defined SLDHL || defined SLDNC double si_sq = vec_sq_d(atom_ptr->s); #endif struct atom_struct *work_ptr; struct cell_struct *ccell_ptr; struct cell_struct *wcell_ptr; ccell_ptr = first_cell_ptr_d + atom_ptr->new_cell_index; for (int i = 0; i <= 26; ++i){ if (i == 26) wcell_ptr = ccell_ptr; else wcell_ptr = first_cell_ptr_d + (ccell_ptr->neigh_cell[i]); work_ptr = wcell_ptr->head_ptr; while (work_ptr != NULL){ vector rij = vec_sub_d(atom_ptr->r, work_ptr->r); //find image of j closest to i find_image_d(rij, var_ptr_d); double rsq = vec_sq_d(rij); if (rsq < var_ptr_d->rcut_max_sq && atom_ptr != work_ptr){ double rij0 = sqrt(rsq); #ifdef localvol if (rij0 < var_ptr_d->rcut_vol){ atom_ptr->sum_rij_m1 += 1e0/rij0; atom_ptr->sum_rij_m2 += 1e0/rsq; } #endif #if defined MD || defined SLDH || defined SLDHL || defined SLDNC double pair_enr = 0e0; double dudr = 0e0; #endif #if defined SLDH || defined SLDHL double dudr_spin = 0e0; #endif #if defined SDH || defined SDHL || defined SLDH || defined SLDHL || defined SLDNC double sj_sq = vec_sq_d(work_ptr->s); #endif #if defined MD || defined SLDH || defined SLDHL || defined SLDNC if (rij0 < var_ptr_d->rcut_pot){ double dsmallf_rij = dsmallf_d(rij0, dsf_ptr_d, var_ptr_d); dudr = (dbigf_d(atom_ptr->rho, dbf_ptr_d, var_ptr_d) + dbigf_d(work_ptr->rho, dbf_ptr_d, var_ptr_d))*dsmallf_rij + dpair_d(rij0, dpr_ptr_d, var_ptr_d); #if defined SLDHL dudr += (dLandauA_d(atom_ptr->rho, dLandauA_ptr_d, var_ptr_d)*si_sq + dLandauB_d(atom_ptr->rho, dLandauB_ptr_d, var_ptr_d)*pow(si_sq,2) + dLandauC_d(atom_ptr->rho, dLandauC_ptr_d, var_ptr_d)*pow(si_sq,3) + dLandauD_d(atom_ptr->rho, dLandauD_ptr_d, var_ptr_d)*pow(si_sq,4))*dsmallf_rij; dudr += (dLandauA_d(work_ptr->rho, dLandauA_ptr_d, var_ptr_d)*sj_sq + dLandauB_d(work_ptr->rho, dLandauB_ptr_d, var_ptr_d)*pow(sj_sq,2) + dLandauC_d(work_ptr->rho, dLandauC_ptr_d, var_ptr_d)*pow(sj_sq,3) + dLandauD_d(work_ptr->rho, dLandauD_ptr_d, var_ptr_d)*pow(sj_sq,4))*dsmallf_rij; #endif pair_enr = 
pair_d(rij0, pr_ptr_d, var_ptr_d); atom_ptr->pe += 0.5e0*pair_enr; } #endif #if defined SDH || defined SDHL || defined SLDH || defined SLDHL if (rij0 < var_ptr_d->rcut_mag){ double si_dot_sj = vec_dot_d(atom_ptr->s, work_ptr->s); //Si.Sj double si_times_sj = vec_length_d(atom_ptr->s)*vec_length_d(work_ptr->s); //|Si|.|Sj| #if defined SLDH || defined SLDHL double dJijdr = dJij_d(rij0, dJij_ptr_d, var_ptr_d); dudr_spin = -dJijdr*(si_dot_sj - si_times_sj); // -dJdr_ij(Si dot Sj - |Si||Sj|) #endif double Jij_half = Jij_d(rij0, Jij_ptr_d, var_ptr_d)/2e0; double J_times = Jij_half*si_times_sj; double J_dot = -Jij_half*si_dot_sj; atom_ptr->me0 += J_times; atom_ptr->me += J_dot; } #if defined SLDH || defined SLDHL dudr += dudr_spin; #endif #endif #if defined MD || defined SLDH || defined SLDHL || defined SLDNC double force = -dudr/rij0; vector fij = vec_times_d(force, rij); atom_ptr->f = vec_add_d(atom_ptr->f, fij); atom_ptr->stress11 += fij.x*rij.x; atom_ptr->stress22 += fij.y*rij.y; atom_ptr->stress33 += fij.z*rij.z; atom_ptr->stress12 += fij.x*rij.y; atom_ptr->stress23 += fij.y*rij.z; atom_ptr->stress31 += fij.z*rij.x; atom_ptr->vir += -force*rsq/2e0; #endif } work_ptr = work_ptr->next_atom_ptr; } } #if defined MD || defined SLDH || defined SLDHL atom_ptr->pe += bigf_d(atom_ptr->rho, bf_ptr_d, var_ptr_d); #endif #if defined SDH || defined SDHL || defined SLDH || defined SLDHL #ifdef extfield atom_ptr->me -= vec_dot_d(atom_ptr->s, atom_ptr->Hext); #endif #ifdef SLDHL atom_ptr->me += LandauA_d(atom_ptr->rho, LandauA_ptr_d, var_ptr_d)*si_sq + LandauB_d(atom_ptr->rho, LandauB_ptr_d, var_ptr_d)*pow(si_sq,2) + LandauC_d(atom_ptr->rho, LandauC_ptr_d, var_ptr_d)*pow(si_sq,3) + LandauD_d(atom_ptr->rho, LandauD_ptr_d, var_ptr_d)*pow(si_sq,4); #endif #ifdef SDHL atom_ptr->me += LandauA_d(1, LandauA_ptr_d, var_ptr_d)*si_sq + LandauB_d(1, LandauB_ptr_d, var_ptr_d)*pow(si_sq,2) + LandauC_d(1, LandauC_ptr_d, var_ptr_d)*pow(si_sq,3) + LandauD_d(1, LandauD_ptr_d, var_ptr_d)*pow(si_sq,4); #endif #endif } #endif
d4c80f35f5adfd15bdc3429c629bc66c5a78bddd.cu
/******************************************************************************** * * Copyright (C) 2015 Culham Centre for Fusion Energy, * United Kingdom Atomic Energy Authority, Oxfordshire OX14 3DB, UK * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ******************************************************************************** * * Program: SPILADY - A Spin-Lattice Dynamics Simulation Program * Version: 1.0 * Date: Aug 2015 * Author: Pui-Wai (Leo) MA * Contact: [email protected] * Address: Culham Centre for Fusion Energy, OX14 3DB, United Kingdom * ********************************************************************************/ #ifdef GPU #include "spilady.h" #include "prototype_GPU.h" /********************************************************************** * GPU prototype ***********************************************************************/ __global__ void LP1ForEn(struct varGPU *var_ptr_d, struct atom_struct *first_atom_ptr_d); __global__ void LP2ForEn(struct varGPU *var_ptr_d, struct atom_struct *first_atom_ptr_d, struct cell_struct *first_cell_ptr_d #if defined MD || defined SLDH || defined SLDHL || defined SLDNC , double *bf_ptr_d, double *sf_ptr_d, double *pr_ptr_d , double *dbf_ptr_d, double *dsf_ptr_d, double *dpr_ptr_d #endif #if defined SDH || defined SDHL || defined SLDH || defined SLDHL ,double *Jij_ptr_d, double *dJij_ptr_d #endif #if defined SDHL || defined SLDHL ,double *LandauA_ptr_d, double *LandauB_ptr_d, double *LandauC_ptr_d, double *LandauD_ptr_d #endif #if defined SLDHL ,double *dLandauA_ptr_d, double *dLandauB_ptr_d, double *dLandauC_ptr_d, double *dLandauD_ptr_d #endif ); __global__ void LP3ForEn(struct varGPU *var_ptr_d, struct atom_struct* first_atom_ptr_d); #ifdef localvol __global__ void LP4ForEn_part1(struct varGPU *var_ptr_d, struct atom_struct *first_atom_ptr_d, double *sum_volume_ptr_d); __global__ void LP4ForEn_part2(double *sum_volume_ptr_d); __global__ void LP5ForEn(struct varGPU *var_ptr_d, struct atom_struct* first_atom_ptr_d, double volume_factor); #endif #if defined MD || defined SLDH || defined SLDHL || defined SLDNC __global__ void LP6ForEn(struct varGPU *var_ptr_d, struct atom_struct* first_atom_ptr_d); __global__ void LP7ForEn_part1(struct varGPU *var_ptr_d, struct atom_struct *first_atom_ptr_d, double *virial_ptr_d); __global__ void LP7ForEn_part2(double *virial_ptr_d); #endif __device__ void inner_loop_d(struct varGPU *var_ptr_d, struct atom_struct *atom_ptr, struct cell_struct *first_cell_ptr_d #if defined MD || defined SLDH || defined SLDHL || defined SLDNC , double *bf_ptr_d, double *sf_ptr_d, double *pr_ptr_d , double *dbf_ptr_d, double *dsf_ptr_d, double *dpr_ptr_d #endif #if defined SDH || defined SDHL || defined SLDH || defined SLDHL ,double *Jij_ptr_d, double *dJij_ptr_d #endif #if defined SDHL || defined SLDHL ,double *LandauA_ptr_d, double *LandauB_ptr_d, double *LandauC_ptr_d, double *LandauD_ptr_d #endif #if defined SLDHL ,double *dLandauA_ptr_d, double *dLandauB_ptr_d, double *dLandauC_ptr_d, double *dLandauD_ptr_d #endif ); 
/********************************************************************** * CPU codes ***********************************************************************/ void calculate_force_energy_GPU(){ LP1ForEn<<<no_of_blocks, no_of_threads>>>(var_ptr_d, first_atom_ptr_d); LP2ForEn<<<no_of_blocks, no_of_threads>>>(var_ptr_d , first_atom_ptr_d, first_cell_ptr_d #if defined MD || defined SLDH || defined SLDHL || defined SLDNC , bf_ptr_d, sf_ptr_d, pr_ptr_d , dbf_ptr_d, dsf_ptr_d, dpr_ptr_d #endif #if defined SDH || defined SDHL || defined SLDH || defined SLDHL , Jij_ptr_d, dJij_ptr_d #endif #if defined SDHL || defined SLDHL , LandauA_ptr_d, LandauB_ptr_d, LandauC_ptr_d, LandauD_ptr_d #endif #if defined SLDHL , dLandauA_ptr_d, dLandauB_ptr_d, dLandauC_ptr_d, dLandauD_ptr_d #endif ); LP3ForEn<<<no_of_blocks, no_of_threads>>>(var_ptr_d, first_atom_ptr_d); #ifdef localvol double sum_volume= 0e0; double* sum_volume_ptr_d; cudaMalloc((void**)&sum_volume_ptr_d, no_of_MP*no_of_threads*sizeof(double)); LP4ForEn_part1<<<no_of_MP, no_of_threads>>>(var_ptr_d, first_atom_ptr_d, sum_volume_ptr_d); LP4ForEn_part2<<<no_of_MP, no_of_threads>>>(sum_volume_ptr_d); cudaMemcpy(&sum_volume, sum_volume_ptr_d, sizeof(double), cudaMemcpyDeviceToHost); cudaFree(sum_volume_ptr_d); cudaMemcpy(&box_volume, &(var_ptr_d->box_volume), sizeof(double), cudaMemcpyDeviceToHost); double volume_factor = box_volume/sum_volume; LP5ForEn<<<no_of_blocks, no_of_threads>>>(var_ptr_d, first_atom_ptr_d, volume_factor); #endif #if defined MD || defined SLDH || defined SLDHL || defined SLDNC LP6ForEn<<<no_of_blocks, no_of_threads>>>(var_ptr_d, first_atom_ptr_d); double *virial_ptr_d; cudaMalloc((void**)&virial_ptr_d, no_of_MP*no_of_threads*sizeof(double)); LP7ForEn_part1<<<no_of_MP, no_of_threads>>>(var_ptr_d, first_atom_ptr_d, virial_ptr_d); LP7ForEn_part2<<<no_of_MP, no_of_threads>>>(virial_ptr_d); cudaMemcpy(&virial, virial_ptr_d, sizeof(double), cudaMemcpyDeviceToHost); cudaFree(virial_ptr_d); #endif } void calculate_force_energy(){ calculate_force_energy_GPU(); } /************************************************************************** * GPU codes **************************************************************************/ __global__ void LP1ForEn(struct varGPU *var_ptr_d, struct atom_struct *first_atom_ptr_d){ int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < var_ptr_d->natom){ struct atom_struct *atom_ptr; atom_ptr = first_atom_ptr_d + i; #if defined MD || defined SLDH || defined SLDHL || defined SLDNC atom_ptr->f = vec_zero_d(); atom_ptr->pe = 0e0; atom_ptr->vir = 0e0; atom_ptr->stress11 = 0e0; atom_ptr->stress22 = 0e0; atom_ptr->stress33 = 0e0; atom_ptr->stress12 = 0e0; atom_ptr->stress23 = 0e0; atom_ptr->stress31 = 0e0; #endif #if defined SDH || defined SDHL || defined SLDH || defined SLDHL atom_ptr->me = 0e0; atom_ptr->me0 = 0e0; #endif #ifdef localvol atom_ptr->sum_rij_m1 = 0e0; //Sum rij^-1 atom_ptr->sum_rij_m2 = 0e0; //Sum rij^-2 #endif } } __global__ void LP2ForEn(struct varGPU *var_ptr_d, struct atom_struct *first_atom_ptr_d, struct cell_struct *first_cell_ptr_d #if defined MD || defined SLDH || defined SLDHL || defined SLDNC , double *bf_ptr_d, double *sf_ptr_d, double *pr_ptr_d , double *dbf_ptr_d, double *dsf_ptr_d, double *dpr_ptr_d #endif #if defined SDH || defined SDHL || defined SLDH || defined SLDHL ,double *Jij_ptr_d, double *dJij_ptr_d #endif #if defined SDHL || defined SLDHL ,double *LandauA_ptr_d, double *LandauB_ptr_d, double *LandauC_ptr_d, double *LandauD_ptr_d #endif #if defined SLDHL ,double *dLandauA_ptr_d, 
double *dLandauB_ptr_d, double *dLandauC_ptr_d, double *dLandauD_ptr_d #endif ) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < var_ptr_d->natom) inner_loop_d(var_ptr_d, first_atom_ptr_d + i, first_cell_ptr_d #if defined MD || defined SLDH || defined SLDHL || defined SLDNC , bf_ptr_d, sf_ptr_d, pr_ptr_d , dbf_ptr_d, dsf_ptr_d, dpr_ptr_d #endif #if defined SDH || defined SDHL || defined SLDH || defined SLDHL , Jij_ptr_d, dJij_ptr_d #endif #if defined SDHL || defined SLDHL , LandauA_ptr_d, LandauB_ptr_d, LandauC_ptr_d, LandauD_ptr_d #endif #if defined SLDHL , dLandauA_ptr_d, dLandauB_ptr_d, dLandauC_ptr_d, dLandauD_ptr_d #endif ); } __global__ void LP3ForEn(struct varGPU *var_ptr_d, struct atom_struct* first_atom_ptr_d) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < var_ptr_d->natom){ struct atom_struct *atom_ptr; atom_ptr = first_atom_ptr_d + i; #ifdef localvol double local_radius = 0.5e0*atom_ptr->sum_rij_m1/atom_ptr->sum_rij_m2; atom_ptr->local_volume = 4e0*Pi_num/3e0*pow(local_radius, 3e0); //it is only an estimation!!! #else atom_ptr->local_volume = var_ptr_d->box_volume/var_ptr_d->natom; #endif } } #ifdef localvol __global__ void LP4ForEn_part1(struct varGPU *var_ptr_d, struct atom_struct *first_atom_ptr_d, double *sum_volume_ptr_d){ int i = blockIdx.x*blockDim.x + threadIdx.x; *(sum_volume_ptr_d + i) = 0.0; int area = blockDim.x*gridDim.x; int k = (var_ptr_d->natom - 1)/area + 1; for (int j = 0; j < k; ++j){ int m = i + j*area; if (m < var_ptr_d->natom) { *(sum_volume_ptr_d + i) += (first_atom_ptr_d + m)->local_volume; } } __syncthreads(); } __global__ void LP4ForEn_part2(double *sum_volume_ptr_d){ int depth = blockIdx.x*blockDim.x; if (threadIdx.x == 0){ for (int j = 1; j < blockDim.x; ++j) *(sum_volume_ptr_d + depth) += *(sum_volume_ptr_d + depth + j); } __threadfence(); if (blockIdx.x == 0 && threadIdx.x == 0){ for (int j = 1; j < gridDim.x; ++j) *sum_volume_ptr_d += *(sum_volume_ptr_d + j*blockDim.x); } } __global__ void LP5ForEn(struct varGPU *var_ptr_d, struct atom_struct* first_atom_ptr_d, double volume_factor){ int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < var_ptr_d->natom) (first_atom_ptr_d + i)->local_volume *= volume_factor; } #endif #if defined MD || defined SLDH || defined SLDHL || defined SLDNC __global__ void LP6ForEn(struct varGPU *var_ptr_d, struct atom_struct* first_atom_ptr_d){ int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < var_ptr_d->natom){ struct atom_struct *atom_ptr; atom_ptr = first_atom_ptr_d + i; atom_ptr->stress11 = (pow(atom_ptr->p.x,2)/var_ptr_d->atmass + atom_ptr->stress11/2e0)/atom_ptr->local_volume; atom_ptr->stress22 = (pow(atom_ptr->p.y,2)/var_ptr_d->atmass + atom_ptr->stress22/2e0)/atom_ptr->local_volume; atom_ptr->stress33 = (pow(atom_ptr->p.z,2)/var_ptr_d->atmass + atom_ptr->stress33/2e0)/atom_ptr->local_volume; atom_ptr->stress12 = ((atom_ptr->p.x*atom_ptr->p.y)/var_ptr_d->atmass + atom_ptr->stress12/2e0)/atom_ptr->local_volume; atom_ptr->stress23 = ((atom_ptr->p.y*atom_ptr->p.z)/var_ptr_d->atmass + atom_ptr->stress23/2e0)/atom_ptr->local_volume; atom_ptr->stress31 = ((atom_ptr->p.z*atom_ptr->p.x)/var_ptr_d->atmass + atom_ptr->stress31/2e0)/atom_ptr->local_volume; } } __global__ void LP7ForEn_part1(struct varGPU *var_ptr_d, struct atom_struct *first_atom_ptr_d, double *virial_ptr_d){ int i = blockIdx.x*blockDim.x + threadIdx.x; *(virial_ptr_d + i) = 0.0; int area = blockDim.x*gridDim.x; int k = (var_ptr_d->natom - 1)/area + 1; for (int j = 0; j < k; ++j){ int m = i + j*area; if (m < var_ptr_d->natom) { *(virial_ptr_d + 
i) += (first_atom_ptr_d + m)->vir; } } __syncthreads(); } __global__ void LP7ForEn_part2(double *virial_ptr_d){ int depth = blockIdx.x*blockDim.x; if (threadIdx.x == 0){ for (int j = 1; j < blockDim.x; ++j) *(virial_ptr_d + depth) += *(virial_ptr_d + depth + j); } __threadfence(); if (blockIdx.x == 0 && threadIdx.x == 0){ for (int j = 1; j < gridDim.x; ++j) *virial_ptr_d += *(virial_ptr_d + j*blockDim.x); } } #endif __device__ void inner_loop_d(struct varGPU *var_ptr_d, struct atom_struct *atom_ptr, struct cell_struct *first_cell_ptr_d #if defined MD || defined SLDH || defined SLDHL || defined SLDNC , double *bf_ptr_d, double *sf_ptr_d, double *pr_ptr_d , double *dbf_ptr_d, double *dsf_ptr_d, double *dpr_ptr_d #endif #if defined SDH || defined SDHL || defined SLDH || defined SLDHL ,double *Jij_ptr_d, double *dJij_ptr_d #endif #if defined SDHL || defined SLDHL ,double *LandauA_ptr_d, double *LandauB_ptr_d, double *LandauC_ptr_d, double *LandauD_ptr_d #endif #if defined SLDHL ,double *dLandauA_ptr_d, double *dLandauB_ptr_d, double *dLandauC_ptr_d, double *dLandauD_ptr_d #endif ) { #if defined SDH || defined SDHL || defined SLDH || defined SLDHL || defined SLDNC double si_sq = vec_sq_d(atom_ptr->s); #endif struct atom_struct *work_ptr; struct cell_struct *ccell_ptr; struct cell_struct *wcell_ptr; ccell_ptr = first_cell_ptr_d + atom_ptr->new_cell_index; for (int i = 0; i <= 26; ++i){ if (i == 26) wcell_ptr = ccell_ptr; else wcell_ptr = first_cell_ptr_d + (ccell_ptr->neigh_cell[i]); work_ptr = wcell_ptr->head_ptr; while (work_ptr != NULL){ vector rij = vec_sub_d(atom_ptr->r, work_ptr->r); //find image of j closest to i find_image_d(rij, var_ptr_d); double rsq = vec_sq_d(rij); if (rsq < var_ptr_d->rcut_max_sq && atom_ptr != work_ptr){ double rij0 = sqrt(rsq); #ifdef localvol if (rij0 < var_ptr_d->rcut_vol){ atom_ptr->sum_rij_m1 += 1e0/rij0; atom_ptr->sum_rij_m2 += 1e0/rsq; } #endif #if defined MD || defined SLDH || defined SLDHL || defined SLDNC double pair_enr = 0e0; double dudr = 0e0; #endif #if defined SLDH || defined SLDHL double dudr_spin = 0e0; #endif #if defined SDH || defined SDHL || defined SLDH || defined SLDHL || defined SLDNC double sj_sq = vec_sq_d(work_ptr->s); #endif #if defined MD || defined SLDH || defined SLDHL || defined SLDNC if (rij0 < var_ptr_d->rcut_pot){ double dsmallf_rij = dsmallf_d(rij0, dsf_ptr_d, var_ptr_d); dudr = (dbigf_d(atom_ptr->rho, dbf_ptr_d, var_ptr_d) + dbigf_d(work_ptr->rho, dbf_ptr_d, var_ptr_d))*dsmallf_rij + dpair_d(rij0, dpr_ptr_d, var_ptr_d); #if defined SLDHL dudr += (dLandauA_d(atom_ptr->rho, dLandauA_ptr_d, var_ptr_d)*si_sq + dLandauB_d(atom_ptr->rho, dLandauB_ptr_d, var_ptr_d)*pow(si_sq,2) + dLandauC_d(atom_ptr->rho, dLandauC_ptr_d, var_ptr_d)*pow(si_sq,3) + dLandauD_d(atom_ptr->rho, dLandauD_ptr_d, var_ptr_d)*pow(si_sq,4))*dsmallf_rij; dudr += (dLandauA_d(work_ptr->rho, dLandauA_ptr_d, var_ptr_d)*sj_sq + dLandauB_d(work_ptr->rho, dLandauB_ptr_d, var_ptr_d)*pow(sj_sq,2) + dLandauC_d(work_ptr->rho, dLandauC_ptr_d, var_ptr_d)*pow(sj_sq,3) + dLandauD_d(work_ptr->rho, dLandauD_ptr_d, var_ptr_d)*pow(sj_sq,4))*dsmallf_rij; #endif pair_enr = pair_d(rij0, pr_ptr_d, var_ptr_d); atom_ptr->pe += 0.5e0*pair_enr; } #endif #if defined SDH || defined SDHL || defined SLDH || defined SLDHL if (rij0 < var_ptr_d->rcut_mag){ double si_dot_sj = vec_dot_d(atom_ptr->s, work_ptr->s); //Si.Sj double si_times_sj = vec_length_d(atom_ptr->s)*vec_length_d(work_ptr->s); //|Si|.|Sj| #if defined SLDH || defined SLDHL double dJijdr = dJij_d(rij0, dJij_ptr_d, var_ptr_d); dudr_spin 
= -dJijdr*(si_dot_sj - si_times_sj); // -dJdr_ij(Si dot Sj - |Si||Sj|) #endif double Jij_half = Jij_d(rij0, Jij_ptr_d, var_ptr_d)/2e0; double J_times = Jij_half*si_times_sj; double J_dot = -Jij_half*si_dot_sj; atom_ptr->me0 += J_times; atom_ptr->me += J_dot; } #if defined SLDH || defined SLDHL dudr += dudr_spin; #endif #endif #if defined MD || defined SLDH || defined SLDHL || defined SLDNC double force = -dudr/rij0; vector fij = vec_times_d(force, rij); atom_ptr->f = vec_add_d(atom_ptr->f, fij); atom_ptr->stress11 += fij.x*rij.x; atom_ptr->stress22 += fij.y*rij.y; atom_ptr->stress33 += fij.z*rij.z; atom_ptr->stress12 += fij.x*rij.y; atom_ptr->stress23 += fij.y*rij.z; atom_ptr->stress31 += fij.z*rij.x; atom_ptr->vir += -force*rsq/2e0; #endif } work_ptr = work_ptr->next_atom_ptr; } } #if defined MD || defined SLDH || defined SLDHL atom_ptr->pe += bigf_d(atom_ptr->rho, bf_ptr_d, var_ptr_d); #endif #if defined SDH || defined SDHL || defined SLDH || defined SLDHL #ifdef extfield atom_ptr->me -= vec_dot_d(atom_ptr->s, atom_ptr->Hext); #endif #ifdef SLDHL atom_ptr->me += LandauA_d(atom_ptr->rho, LandauA_ptr_d, var_ptr_d)*si_sq + LandauB_d(atom_ptr->rho, LandauB_ptr_d, var_ptr_d)*pow(si_sq,2) + LandauC_d(atom_ptr->rho, LandauC_ptr_d, var_ptr_d)*pow(si_sq,3) + LandauD_d(atom_ptr->rho, LandauD_ptr_d, var_ptr_d)*pow(si_sq,4); #endif #ifdef SDHL atom_ptr->me += LandauA_d(1, LandauA_ptr_d, var_ptr_d)*si_sq + LandauB_d(1, LandauB_ptr_d, var_ptr_d)*pow(si_sq,2) + LandauC_d(1, LandauC_ptr_d, var_ptr_d)*pow(si_sq,3) + LandauD_d(1, LandauD_ptr_d, var_ptr_d)*pow(si_sq,4); #endif #endif } #endif
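// The LP4ForEn_part1/part2 and LP7ForEn_part1/part2 kernels in the pair above reduce a per-atom
// quantity in two launches: part1 lets every thread accumulate a strided slice of the atoms into
// its own slot of a gridDim.x * blockDim.x buffer, and part2 folds that buffer down to element 0,
// which the host then reads back with a single cudaMemcpy (sum_volume, virial). The block below is
// a minimal standalone sketch of that pattern, not SPILADY code: the names sum_part1, sum_part2,
// values_d and partial_d are invented for the illustration, and the final fold runs in a
// single-block launch so the sketch does not depend on cross-block write visibility.
#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

// part1: each thread sums a strided slice of the input into its own slot of `partial`
__global__ void sum_part1(const double* values, int n, double* partial) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    double s = 0.0;
    for (int m = i; m < n; m += stride) s += values[m];
    partial[i] = s;
}

// part2: fold all per-thread partial sums into partial[0]; launched with a single block/thread
__global__ void sum_part2(double* partial, int len) {
    if (blockIdx.x == 0 && threadIdx.x == 0)
        for (int j = 1; j < len; ++j) partial[0] += partial[j];
}

int main() {
    const int n = 1 << 16, blocks = 8, threads = 128;
    std::vector<double> h(n, 1.0);
    double *values_d = 0, *partial_d = 0, total = 0.0;
    cudaMalloc((void**)&values_d, n * sizeof(double));
    cudaMalloc((void**)&partial_d, blocks * threads * sizeof(double));
    cudaMemcpy(values_d, h.data(), n * sizeof(double), cudaMemcpyHostToDevice);
    sum_part1<<<blocks, threads>>>(values_d, n, partial_d);
    sum_part2<<<1, 1>>>(partial_d, blocks * threads);
    cudaMemcpy(&total, partial_d, sizeof(double), cudaMemcpyDeviceToHost); // expect total == n
    printf("sum = %f\n", total);
    cudaFree(values_d);
    cudaFree(partial_d);
    return 0;
}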
7627f2a1e7afe99c3e907baab254cfb3abf3a91b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* The matrix addition example on CUDA
   This program first reads two matrices from two text files namely matA.txt and matB.txt
   Then it does matrix addition on the two matrices
   Finally it saves the answer matrix in a text file named ans.txt
   The program also measures the time taken for matrix operation on CUDA
*/

#include <stdio.h>

//define file names here
#define MATRIXA "matA.txt"
#define MATRIXB "matB.txt"
#define MATRIXANS "ans.txt"

//define the sizes of the matrices here
#define ROWS 16
#define COLS 16
#define SIZE ROWS*COLS

//kernel that does the matrix addition. Just add each element to the respective one
__global__ void addMatrix(int *ans_cuda,int *matA_cuda,int *matB_cuda){
    int row = threadIdx.y;
    int col = threadIdx.x;
    int position = row*COLS + col;
    ans_cuda[position]=matA_cuda[position]+matB_cuda[position];
}

int main(){
    //open the files
    FILE *filematA = fopen(MATRIXA, "r");
    FILE *filematB = fopen(MATRIXB, "r");
    FILE *fileans = fopen(MATRIXANS, "w");

    //allocate matrices
    int matA[SIZE];
    int matB[SIZE];
    int ans[SIZE];

    //read the input matrices from file
    int row,col;
    for(row=0;row<ROWS;row++){
        for(col=0;col<COLS;col++){
            int position = row*COLS + col;
            fscanf(filematA, "%d", &matA[position]);
            fscanf(filematB, "%d", &matB[position]);
            ans[position]=0;
        }
    }

    /*************************CUDA STUFF STARTS HERE************************/

    //variables for time measurements
    hipEvent_t start,stop;
    float elapsedtime;

    //pointers for cuda memory locations
    int *matA_cuda;
    int *matB_cuda;
    int *ans_cuda;

    //the moment at which we start measuring the time
    hipEventCreate(&start);
    hipEventRecord(start,0);

    //allocate memory in cuda
    hipMalloc((void **)&matA_cuda,sizeof(int)*SIZE);
    hipMalloc((void **)&matB_cuda,sizeof(int)*SIZE);
    hipMalloc((void **)&ans_cuda,sizeof(int)*SIZE);

    //copy contents from ram to cuda
    hipMemcpy(matA_cuda, matA, sizeof(int)*SIZE, hipMemcpyHostToDevice);
    hipMemcpy(matB_cuda, matB, sizeof(int)*SIZE, hipMemcpyHostToDevice);

    //thread configuration
    dim3 numBlocks(1,1);
    dim3 threadsPerBlock(COLS,ROWS);

    //do the matrix addition on CUDA
    hipLaunchKernelGGL(( addMatrix), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, ans_cuda,matA_cuda,matB_cuda);

    //copy the answer back
    hipMemcpy(ans, ans_cuda, sizeof(int)*SIZE, hipMemcpyDeviceToHost);

    //the moment at which we stop measuring time
    hipEventCreate(&stop);
    hipEventRecord(stop,0);
    hipEventSynchronize(stop);

    //free the memory we allocated on CUDA
    hipFree(matA_cuda);
    hipFree(matB_cuda);
    hipFree(ans_cuda);

    /*************************CUDA STUFF ENDS HERE************************/

    //write the answer to the text file
    for(row=0;row<ROWS;row++){
        for(col=0;col<COLS;col++){
            int position = row*COLS + col;
            fprintf(fileans, "%d ", ans[position]);
        }
        fprintf(fileans, "\n");
    }

    //Find and print the elapsed time
    hipEventElapsedTime(&elapsedtime,start,stop);
    printf("Time spent for operation is %.10f seconds\n",elapsedtime/(float)1000);

    fclose(filematA);
    fclose(filematB);
    fclose(fileans);
    return 0;
}
7627f2a1e7afe99c3e907baab254cfb3abf3a91b.cu
/* The matrix addition example on CUDA
   This program first reads two matrices from two text files namely matA.txt and matB.txt
   Then it does matrix addition on the two matrices
   Finally it saves the answer matrix in a text file named ans.txt
   The program also measures the time taken for matrix operation on CUDA
*/

#include <stdio.h>

//define file names here
#define MATRIXA "matA.txt"
#define MATRIXB "matB.txt"
#define MATRIXANS "ans.txt"

//define the sizes of the matrices here
#define ROWS 16
#define COLS 16
#define SIZE ROWS*COLS

//kernel that does the matrix addition. Just add each element to the respective one
__global__ void addMatrix(int *ans_cuda,int *matA_cuda,int *matB_cuda){
    int row = threadIdx.y;
    int col = threadIdx.x;
    int position = row*COLS + col;
    ans_cuda[position]=matA_cuda[position]+matB_cuda[position];
}

int main(){
    //open the files
    FILE *filematA = fopen(MATRIXA, "r");
    FILE *filematB = fopen(MATRIXB, "r");
    FILE *fileans = fopen(MATRIXANS, "w");

    //allocate matrices
    int matA[SIZE];
    int matB[SIZE];
    int ans[SIZE];

    //read the input matrices from file
    int row,col;
    for(row=0;row<ROWS;row++){
        for(col=0;col<COLS;col++){
            int position = row*COLS + col;
            fscanf(filematA, "%d", &matA[position]);
            fscanf(filematB, "%d", &matB[position]);
            ans[position]=0;
        }
    }

    /*************************CUDA STUFF STARTS HERE************************/

    //variables for time measurements
    cudaEvent_t start,stop;
    float elapsedtime;

    //pointers for cuda memory locations
    int *matA_cuda;
    int *matB_cuda;
    int *ans_cuda;

    //the moment at which we start measuring the time
    cudaEventCreate(&start);
    cudaEventRecord(start,0);

    //allocate memory in cuda
    cudaMalloc((void **)&matA_cuda,sizeof(int)*SIZE);
    cudaMalloc((void **)&matB_cuda,sizeof(int)*SIZE);
    cudaMalloc((void **)&ans_cuda,sizeof(int)*SIZE);

    //copy contents from ram to cuda
    cudaMemcpy(matA_cuda, matA, sizeof(int)*SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(matB_cuda, matB, sizeof(int)*SIZE, cudaMemcpyHostToDevice);

    //thread configuration
    dim3 numBlocks(1,1);
    dim3 threadsPerBlock(COLS,ROWS);

    //do the matrix addition on CUDA
    addMatrix<<<numBlocks,threadsPerBlock>>>(ans_cuda,matA_cuda,matB_cuda);

    //copy the answer back
    cudaMemcpy(ans, ans_cuda, sizeof(int)*SIZE, cudaMemcpyDeviceToHost);

    //the moment at which we stop measuring time
    cudaEventCreate(&stop);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);

    //free the memory we allocated on CUDA
    cudaFree(matA_cuda);
    cudaFree(matB_cuda);
    cudaFree(ans_cuda);

    /*************************CUDA STUFF ENDS HERE************************/

    //write the answer to the text file
    for(row=0;row<ROWS;row++){
        for(col=0;col<COLS;col++){
            int position = row*COLS + col;
            fprintf(fileans, "%d ", ans[position]);
        }
        fprintf(fileans, "\n");
    }

    //Find and print the elapsed time
    cudaEventElapsedTime(&elapsedtime,start,stop);
    printf("Time spent for operation is %.10f seconds\n",elapsedtime/(float)1000);

    fclose(filematA);
    fclose(filematB);
    fclose(fileans);
    return 0;
}
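// The .hip/.cu pair above is the same program; the differences are exactly the mechanical rewrites
// hipify applies: the triple-chevron launch becomes hipLaunchKernelGGL with the grid, block,
// dynamic shared-memory size and stream passed as explicit arguments, and each cudaXxx runtime
// call is renamed to its hipXxx counterpart. The block below is a minimal illustrative sketch of
// that correspondence, not part of the dataset; the kernel name scale and the sizes are invented.
#include <cuda_runtime.h>

__global__ void scale(float* v, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) v[i] *= a;
}

int main() {
    const int n = 1024;
    float* v_d = 0;
    cudaMalloc((void**)&v_d, n * sizeof(float));   // becomes hipMalloc under hipify
    cudaMemset(v_d, 0, n * sizeof(float));         // becomes hipMemset
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    scale<<<grid, block>>>(v_d, 2.0f, n);          // CUDA spelling, as in the .cu file above
    // Hipified spelling of the same launch (0 = dynamic shared memory bytes, 0 = default stream):
    //   hipLaunchKernelGGL(scale, grid, block, 0, 0, v_d, 2.0f, n);
    cudaDeviceSynchronize();                       // becomes hipDeviceSynchronize
    cudaFree(v_d);                                 // becomes hipFree
    return 0;
}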
9192471326f97ece0002427be0bdc88edcf88cae.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe/cpm/util/math_functions.hpp"

#define NUMBER_THREADS_PER_BLOCK_1D 32

namespace caffe {

__global__ void fill_image(const float* src_pointer, int w, int h, float* dst_pointer, int boxsize, const float* info, int p) {
    // get pixel location (x,y) within (boxsize, boxsize)
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    if(x < boxsize && y < boxsize){
        int xr_center = int(info[2*(p+1)] + 0.5);
        int yr_center = int(info[2*(p+1)+1] + 0.5);
        int x_src = xr_center - boxsize/2 + x;
        int y_src = yr_center - boxsize/2 + y;
        int offset_dst = boxsize * boxsize;
        int offset_src = w * h;
        if(x_src >= 0 && x_src < w && y_src >= 0 && y_src < h){
            dst_pointer[                 y * boxsize + x] = src_pointer[                 y_src * w + x_src];
            dst_pointer[offset_dst     + y * boxsize + x] = src_pointer[offset_src     + y_src * w + x_src];
            dst_pointer[offset_dst * 2 + y * boxsize + x] = src_pointer[offset_src * 2 + y_src * w + x_src];
        } else {
            dst_pointer[                 y * boxsize + x] = 0;
            dst_pointer[offset_dst     + y * boxsize + x] = 0;
            dst_pointer[offset_dst * 2 + y * boxsize + x] = 0;
        }
    }
}

__global__ void fill_gassian(float* dst_pointer, int boxsize, float sigma){
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    if(x < boxsize && y < boxsize){
        float center_x, center_y;
        center_x = center_y = boxsize / 2;
        float d2 = (x - center_x) * (x - center_x) + (y - center_y) * (y - center_y);
        float exponent = d2 / 2.0 / sigma / sigma;
        if(exponent > 4.6052){ //ln(100) = -ln(1%)
            dst_pointer[y * boxsize + x] = 0;
        } else {
            dst_pointer[y * boxsize + x] = exp(-exponent);
        }
    }
}

void fill_pose_net(const float* image, int width, int height,
                   float* dst, int boxsize,
                   const float* peak_pointer_gpu, vector<int> num_people, int limit){
    //image in width * height * 3 * N
    //dst in boxsize * boxsize * 4 * (P1+P2+...+PN)
    //peak_pointer_gpu in 2 * 11 * 1 * N
    //num_people has length P, indicating P1, ..., PN
    CHECK(0) << "FIX THIS FUNCTION";
    int N = num_people.size();
    int count = 0;
    bool full = false;
    int offset_src = width * height * 3;
    int offset_dst_2 = boxsize * boxsize;
    int offset_info = 22;

    dim3 threadsPerBlock(NUMBER_THREADS_PER_BLOCK_1D, NUMBER_THREADS_PER_BLOCK_1D);
    dim3 numBlocks(updiv(boxsize, threadsPerBlock.x), updiv(boxsize, threadsPerBlock.y));

    for(int i = 0; i < N; i++){
        //LOG(ERROR) << "copying " << num_people[i] << " people.";
        for(int p = 0; p < num_people[i]; p++){
            hipLaunchKernelGGL(( fill_image), dim3(threadsPerBlock), dim3(numBlocks), 0, 0, image + i * offset_src, width, height, dst + count * (4 * offset_dst_2), boxsize, peak_pointer_gpu + i * offset_info, p); //src, w, h, dst, boxsize, info, p
            hipLaunchKernelGGL(( fill_gassian), dim3(threadsPerBlock), dim3(numBlocks), 0, 0, dst + count * (4 * offset_dst_2) + 3 * offset_dst_2, boxsize, 21); //dst, boxsize
            count++;
            if(count >= limit){
                full = true;
                break;
            }
        }
        if(full) break;
    }
    hipDeviceSynchronize();
}

} // namespace caffe
9192471326f97ece0002427be0bdc88edcf88cae.cu
#include "caffe/cpm/util/math_functions.hpp" #define NUMBER_THREADS_PER_BLOCK_1D 32 namespace caffe { __global__ void fill_image(const float* src_pointer, int w, int h, float* dst_pointer, int boxsize, const float* info, int p) { // get pixel location (x,y) within (boxsize, boxsize) int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if(x < boxsize && y < boxsize){ int xr_center = int(info[2*(p+1)] + 0.5); int yr_center = int(info[2*(p+1)+1] + 0.5); int x_src = xr_center - boxsize/2 + x; int y_src = yr_center - boxsize/2 + y; int offset_dst = boxsize * boxsize; int offset_src = w * h; if(x_src >= 0 && x_src < w && y_src >= 0 && y_src < h){ dst_pointer[ y * boxsize + x] = src_pointer[ y_src * w + x_src]; dst_pointer[offset_dst + y * boxsize + x] = src_pointer[offset_src + y_src * w + x_src]; dst_pointer[offset_dst * 2 + y * boxsize + x] = src_pointer[offset_src * 2 + y_src * w + x_src]; } else { dst_pointer[ y * boxsize + x] = 0; dst_pointer[offset_dst + y * boxsize + x] = 0; dst_pointer[offset_dst * 2 + y * boxsize + x] = 0; } } } __global__ void fill_gassian(float* dst_pointer, int boxsize, float sigma){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if(x < boxsize && y < boxsize){ float center_x, center_y; center_x = center_y = boxsize / 2; float d2 = (x - center_x) * (x - center_x) + (y - center_y) * (y - center_y); float exponent = d2 / 2.0 / sigma / sigma; if(exponent > 4.6052){ //ln(100) = -ln(1%) dst_pointer[y * boxsize + x] = 0; } else { dst_pointer[y * boxsize + x] = exp(-exponent); } } } void fill_pose_net(const float* image, int width, int height, float* dst, int boxsize, const float* peak_pointer_gpu, vector<int> num_people, int limit){ //image in width * height * 3 * N //dst in boxsize * boxsize * 4 * (P1+P2+...+PN) //peak_pointer_gpu in 2 * 11 * 1 * N //num_people has length P, indicating P1, ..., PN CHECK(0) << "FIX THIS FUNCTION"; int N = num_people.size(); int count = 0; bool full = false; int offset_src = width * height * 3; int offset_dst_2 = boxsize * boxsize; int offset_info = 22; dim3 threadsPerBlock(NUMBER_THREADS_PER_BLOCK_1D, NUMBER_THREADS_PER_BLOCK_1D); dim3 numBlocks(updiv(boxsize, threadsPerBlock.x), updiv(boxsize, threadsPerBlock.y)); for(int i = 0; i < N; i++){ //LOG(ERROR) << "copying " << num_people[i] << " people."; for(int p = 0; p < num_people[i]; p++){ fill_image<<<threadsPerBlock, numBlocks>>>(image + i * offset_src, width, height, dst + count * (4 * offset_dst_2), boxsize, peak_pointer_gpu + i * offset_info, p); //src, w, h, dst, boxsize, info, p fill_gassian<<<threadsPerBlock, numBlocks>>>(dst + count * (4 * offset_dst_2) + 3 * offset_dst_2, boxsize, 21); //dst, boxsize count++; if(count >= limit){ full = true; break; } } if(full) break; } cudaDeviceSynchronize(); } } // namespace caffe
a847ec2d7d19381ea08b0349d8acf60a4ce3e136.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "des_kernel_encrypt.h" #include "des_kernel_salt_instances.h" #ifdef DESGPU_COMPILE_ALL_SALTS void des_25_encrypt_salt3200(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 3, 4, 3, 20, 5, 6, 23, 24, 15, 16, 17, 18, 19, 20, 19, 4, 21, 22, 7, 8, 63, 32, 33, 34, 35, 36, 35, 52, 37, 38, 55, 56, 47, 48, 49, 50, 51, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3201(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 3, 4, 3, 20, 5, 6, 23, 24, 31, 16, 17, 18, 19, 20, 19, 4, 21, 22, 7, 8, 47, 32, 33, 34, 35, 36, 35, 52, 37, 38, 55, 56, 63, 48, 49, 50, 51, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3202(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 3, 4, 3, 20, 5, 6, 23, 24, 15, 0, 17, 18, 19, 20, 19, 4, 21, 22, 7, 8, 63, 48, 33, 34, 35, 36, 35, 52, 37, 38, 55, 56, 47, 32, 49, 50, 51, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3203(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 3, 4, 3, 20, 5, 6, 23, 24, 31, 0, 17, 18, 19, 20, 19, 4, 21, 22, 7, 8, 47, 48, 33, 34, 35, 36, 35, 52, 37, 38, 55, 56, 63, 32, 49, 50, 51, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3204(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 3, 4, 3, 20, 5, 6, 23, 24, 15, 16, 1, 18, 19, 20, 19, 4, 21, 22, 7, 8, 63, 32, 49, 34, 35, 36, 35, 52, 37, 38, 55, 56, 47, 48, 33, 50, 51, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3205(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 3, 4, 3, 20, 5, 6, 23, 24, 31, 16, 1, 18, 19, 20, 19, 4, 21, 22, 7, 8, 47, 32, 49, 34, 35, 36, 35, 52, 37, 38, 55, 56, 63, 48, 33, 50, 51, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3206(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 3, 4, 3, 20, 5, 6, 23, 24, 15, 0, 1, 18, 19, 20, 19, 4, 21, 22, 7, 8, 63, 48, 49, 34, 35, 36, 35, 52, 37, 38, 55, 56, 47, 32, 33, 50, 51, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3207(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const 
vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 3, 4, 3, 20, 5, 6, 23, 24, 31, 0, 1, 18, 19, 20, 19, 4, 21, 22, 7, 8, 47, 48, 49, 34, 35, 36, 35, 52, 37, 38, 55, 56, 63, 32, 33, 50, 51, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3208(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 3, 4, 3, 20, 5, 6, 23, 24, 15, 16, 17, 2, 19, 20, 19, 4, 21, 22, 7, 8, 63, 32, 33, 50, 35, 36, 35, 52, 37, 38, 55, 56, 47, 48, 49, 34, 51, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3209(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 3, 4, 3, 20, 5, 6, 23, 24, 31, 16, 17, 2, 19, 20, 19, 4, 21, 22, 7, 8, 47, 32, 33, 50, 35, 36, 35, 52, 37, 38, 55, 56, 63, 48, 49, 34, 51, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3210(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 3, 4, 3, 20, 5, 6, 23, 24, 15, 0, 17, 2, 19, 20, 19, 4, 21, 22, 7, 8, 63, 48, 33, 50, 35, 36, 35, 52, 37, 38, 55, 56, 47, 32, 49, 34, 51, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3211(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 3, 4, 3, 20, 5, 6, 23, 24, 31, 0, 17, 2, 19, 20, 19, 4, 21, 22, 7, 8, 47, 48, 33, 50, 35, 36, 35, 52, 37, 38, 55, 56, 63, 32, 49, 34, 51, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3212(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 3, 4, 3, 20, 5, 6, 23, 24, 15, 16, 1, 2, 19, 20, 19, 4, 21, 22, 7, 8, 63, 32, 49, 50, 35, 36, 35, 52, 37, 38, 55, 56, 47, 48, 33, 34, 51, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3213(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 3, 4, 3, 20, 5, 6, 23, 24, 31, 16, 1, 2, 19, 20, 19, 4, 21, 22, 7, 8, 47, 32, 49, 50, 35, 36, 35, 52, 37, 38, 55, 56, 63, 48, 33, 34, 51, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3214(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 3, 4, 3, 20, 5, 6, 23, 24, 15, 0, 1, 2, 19, 20, 19, 4, 21, 22, 7, 8, 63, 48, 49, 50, 35, 36, 35, 52, 37, 38, 55, 56, 47, 32, 33, 34, 51, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, 
unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3215(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 3, 4, 3, 20, 5, 6, 23, 24, 31, 0, 1, 2, 19, 20, 19, 4, 21, 22, 7, 8, 47, 48, 49, 50, 35, 36, 35, 52, 37, 38, 55, 56, 63, 32, 33, 34, 51, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3216(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 19, 4, 3, 20, 5, 6, 23, 24, 15, 16, 17, 18, 3, 20, 19, 4, 21, 22, 7, 8, 63, 32, 33, 34, 51, 36, 35, 52, 37, 38, 55, 56, 47, 48, 49, 50, 35, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3217(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 19, 4, 3, 20, 5, 6, 23, 24, 31, 16, 17, 18, 3, 20, 19, 4, 21, 22, 7, 8, 47, 32, 33, 34, 51, 36, 35, 52, 37, 38, 55, 56, 63, 48, 49, 50, 35, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3218(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 19, 4, 3, 20, 5, 6, 23, 24, 15, 0, 17, 18, 3, 20, 19, 4, 21, 22, 7, 8, 63, 48, 33, 34, 51, 36, 35, 52, 37, 38, 55, 56, 47, 32, 49, 50, 35, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3219(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 19, 4, 3, 20, 5, 6, 23, 24, 31, 0, 17, 18, 3, 20, 19, 4, 21, 22, 7, 8, 47, 48, 33, 34, 51, 36, 35, 52, 37, 38, 55, 56, 63, 32, 49, 50, 35, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3220(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 19, 4, 3, 20, 5, 6, 23, 24, 15, 16, 1, 18, 3, 20, 19, 4, 21, 22, 7, 8, 63, 32, 49, 34, 51, 36, 35, 52, 37, 38, 55, 56, 47, 48, 33, 50, 35, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3221(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 19, 4, 3, 20, 5, 6, 23, 24, 31, 16, 1, 18, 3, 20, 19, 4, 21, 22, 7, 8, 47, 32, 49, 34, 51, 36, 35, 52, 37, 38, 55, 56, 63, 48, 33, 50, 35, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3222(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 19, 4, 3, 20, 5, 6, 23, 24, 15, 0, 1, 18, 3, 20, 19, 4, 21, 22, 7, 
8, 63, 48, 49, 34, 51, 36, 35, 52, 37, 38, 55, 56, 47, 32, 33, 50, 35, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3223(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 19, 4, 3, 20, 5, 6, 23, 24, 31, 0, 1, 18, 3, 20, 19, 4, 21, 22, 7, 8, 47, 48, 49, 34, 51, 36, 35, 52, 37, 38, 55, 56, 63, 32, 33, 50, 35, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3224(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 19, 4, 3, 20, 5, 6, 23, 24, 15, 16, 17, 2, 3, 20, 19, 4, 21, 22, 7, 8, 63, 32, 33, 50, 51, 36, 35, 52, 37, 38, 55, 56, 47, 48, 49, 34, 35, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3225(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 19, 4, 3, 20, 5, 6, 23, 24, 31, 16, 17, 2, 3, 20, 19, 4, 21, 22, 7, 8, 47, 32, 33, 50, 51, 36, 35, 52, 37, 38, 55, 56, 63, 48, 49, 34, 35, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3226(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 19, 4, 3, 20, 5, 6, 23, 24, 15, 0, 17, 2, 3, 20, 19, 4, 21, 22, 7, 8, 63, 48, 33, 50, 51, 36, 35, 52, 37, 38, 55, 56, 47, 32, 49, 34, 35, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3227(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 19, 4, 3, 20, 5, 6, 23, 24, 31, 0, 17, 2, 3, 20, 19, 4, 21, 22, 7, 8, 47, 48, 33, 50, 51, 36, 35, 52, 37, 38, 55, 56, 63, 32, 49, 34, 35, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3228(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 19, 4, 3, 20, 5, 6, 23, 24, 15, 16, 1, 2, 3, 20, 19, 4, 21, 22, 7, 8, 63, 32, 49, 50, 51, 36, 35, 52, 37, 38, 55, 56, 47, 48, 33, 34, 35, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3229(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 19, 4, 3, 20, 5, 6, 23, 24, 31, 16, 1, 2, 3, 20, 19, 4, 21, 22, 7, 8, 47, 32, 49, 50, 51, 36, 35, 52, 37, 38, 55, 56, 63, 48, 33, 34, 35, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3230(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const 
vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 19, 4, 3, 20, 5, 6, 23, 24, 15, 0, 1, 2, 3, 20, 19, 4, 21, 22, 7, 8, 63, 48, 49, 50, 51, 36, 35, 52, 37, 38, 55, 56, 47, 32, 33, 34, 35, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3231(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 19, 4, 3, 20, 5, 6, 23, 24, 31, 0, 1, 2, 3, 20, 19, 4, 21, 22, 7, 8, 47, 48, 49, 50, 51, 36, 35, 52, 37, 38, 55, 56, 63, 32, 33, 34, 35, 52, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3232(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 3, 20, 3, 20, 5, 6, 23, 24, 15, 16, 17, 18, 19, 4, 19, 4, 21, 22, 7, 8, 63, 32, 33, 34, 35, 52, 35, 52, 37, 38, 55, 56, 47, 48, 49, 50, 51, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3233(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 3, 20, 3, 20, 5, 6, 23, 24, 31, 16, 17, 18, 19, 4, 19, 4, 21, 22, 7, 8, 47, 32, 33, 34, 35, 52, 35, 52, 37, 38, 55, 56, 63, 48, 49, 50, 51, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3234(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 3, 20, 3, 20, 5, 6, 23, 24, 15, 0, 17, 18, 19, 4, 19, 4, 21, 22, 7, 8, 63, 48, 33, 34, 35, 52, 35, 52, 37, 38, 55, 56, 47, 32, 49, 50, 51, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3235(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 3, 20, 3, 20, 5, 6, 23, 24, 31, 0, 17, 18, 19, 4, 19, 4, 21, 22, 7, 8, 47, 48, 33, 34, 35, 52, 35, 52, 37, 38, 55, 56, 63, 32, 49, 50, 51, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3236(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 3, 20, 3, 20, 5, 6, 23, 24, 15, 16, 1, 18, 19, 4, 19, 4, 21, 22, 7, 8, 63, 32, 49, 34, 35, 52, 35, 52, 37, 38, 55, 56, 47, 48, 33, 50, 51, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3237(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 3, 20, 3, 20, 5, 6, 23, 24, 31, 16, 1, 18, 19, 4, 19, 4, 21, 22, 7, 8, 47, 32, 49, 34, 35, 52, 35, 52, 37, 38, 55, 56, 63, 48, 33, 50, 51, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, 
unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3238(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 3, 20, 3, 20, 5, 6, 23, 24, 15, 0, 1, 18, 19, 4, 19, 4, 21, 22, 7, 8, 63, 48, 49, 34, 35, 52, 35, 52, 37, 38, 55, 56, 47, 32, 33, 50, 51, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3239(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 3, 20, 3, 20, 5, 6, 23, 24, 31, 0, 1, 18, 19, 4, 19, 4, 21, 22, 7, 8, 47, 48, 49, 34, 35, 52, 35, 52, 37, 38, 55, 56, 63, 32, 33, 50, 51, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3240(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 3, 20, 3, 20, 5, 6, 23, 24, 15, 16, 17, 2, 19, 4, 19, 4, 21, 22, 7, 8, 63, 32, 33, 50, 35, 52, 35, 52, 37, 38, 55, 56, 47, 48, 49, 34, 51, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3241(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 3, 20, 3, 20, 5, 6, 23, 24, 31, 16, 17, 2, 19, 4, 19, 4, 21, 22, 7, 8, 47, 32, 33, 50, 35, 52, 35, 52, 37, 38, 55, 56, 63, 48, 49, 34, 51, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3242(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 3, 20, 3, 20, 5, 6, 23, 24, 15, 0, 17, 2, 19, 4, 19, 4, 21, 22, 7, 8, 63, 48, 33, 50, 35, 52, 35, 52, 37, 38, 55, 56, 47, 32, 49, 34, 51, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3243(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 3, 20, 3, 20, 5, 6, 23, 24, 31, 0, 17, 2, 19, 4, 19, 4, 21, 22, 7, 8, 47, 48, 33, 50, 35, 52, 35, 52, 37, 38, 55, 56, 63, 32, 49, 34, 51, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3244(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 3, 20, 3, 20, 5, 6, 23, 24, 15, 16, 1, 2, 19, 4, 19, 4, 21, 22, 7, 8, 63, 32, 49, 50, 35, 52, 35, 52, 37, 38, 55, 56, 47, 48, 33, 34, 51, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3245(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 3, 20, 3, 20, 5, 6, 23, 24, 31, 16, 1, 2, 19, 4, 19, 4, 21, 22, 7, 
8, 47, 32, 49, 50, 35, 52, 35, 52, 37, 38, 55, 56, 63, 48, 33, 34, 51, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3246(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 3, 20, 3, 20, 5, 6, 23, 24, 15, 0, 1, 2, 19, 4, 19, 4, 21, 22, 7, 8, 63, 48, 49, 50, 35, 52, 35, 52, 37, 38, 55, 56, 47, 32, 33, 34, 51, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3247(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 3, 20, 3, 20, 5, 6, 23, 24, 31, 0, 1, 2, 19, 4, 19, 4, 21, 22, 7, 8, 47, 48, 49, 50, 35, 52, 35, 52, 37, 38, 55, 56, 63, 32, 33, 34, 51, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3248(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 19, 20, 3, 20, 5, 6, 23, 24, 15, 16, 17, 18, 3, 4, 19, 4, 21, 22, 7, 8, 63, 32, 33, 34, 51, 52, 35, 52, 37, 38, 55, 56, 47, 48, 49, 50, 35, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3249(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 19, 20, 3, 20, 5, 6, 23, 24, 31, 16, 17, 18, 3, 4, 19, 4, 21, 22, 7, 8, 47, 32, 33, 34, 51, 52, 35, 52, 37, 38, 55, 56, 63, 48, 49, 50, 35, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3250(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 19, 20, 3, 20, 5, 6, 23, 24, 15, 0, 17, 18, 3, 4, 19, 4, 21, 22, 7, 8, 63, 48, 33, 34, 51, 52, 35, 52, 37, 38, 55, 56, 47, 32, 49, 50, 35, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3251(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 19, 20, 3, 20, 5, 6, 23, 24, 31, 0, 17, 18, 3, 4, 19, 4, 21, 22, 7, 8, 47, 48, 33, 34, 51, 52, 35, 52, 37, 38, 55, 56, 63, 32, 49, 50, 35, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3252(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 19, 20, 3, 20, 5, 6, 23, 24, 15, 16, 1, 18, 3, 4, 19, 4, 21, 22, 7, 8, 63, 32, 49, 34, 51, 52, 35, 52, 37, 38, 55, 56, 47, 48, 33, 50, 35, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3253(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const 
vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 19, 20, 3, 20, 5, 6, 23, 24, 31, 16, 1, 18, 3, 4, 19, 4, 21, 22, 7, 8, 47, 32, 49, 34, 51, 52, 35, 52, 37, 38, 55, 56, 63, 48, 33, 50, 35, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3254(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 19, 20, 3, 20, 5, 6, 23, 24, 15, 0, 1, 18, 3, 4, 19, 4, 21, 22, 7, 8, 63, 48, 49, 34, 51, 52, 35, 52, 37, 38, 55, 56, 47, 32, 33, 50, 35, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3255(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 19, 20, 3, 20, 5, 6, 23, 24, 31, 0, 1, 18, 3, 4, 19, 4, 21, 22, 7, 8, 47, 48, 49, 34, 51, 52, 35, 52, 37, 38, 55, 56, 63, 32, 33, 50, 35, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3256(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 19, 20, 3, 20, 5, 6, 23, 24, 15, 16, 17, 2, 3, 4, 19, 4, 21, 22, 7, 8, 63, 32, 33, 50, 51, 52, 35, 52, 37, 38, 55, 56, 47, 48, 49, 34, 35, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3257(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 19, 20, 3, 20, 5, 6, 23, 24, 31, 16, 17, 2, 3, 4, 19, 4, 21, 22, 7, 8, 47, 32, 33, 50, 51, 52, 35, 52, 37, 38, 55, 56, 63, 48, 49, 34, 35, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3258(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 19, 20, 3, 20, 5, 6, 23, 24, 15, 0, 17, 2, 3, 4, 19, 4, 21, 22, 7, 8, 63, 48, 33, 50, 51, 52, 35, 52, 37, 38, 55, 56, 47, 32, 49, 34, 35, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3259(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 19, 20, 3, 20, 5, 6, 23, 24, 31, 0, 17, 2, 3, 4, 19, 4, 21, 22, 7, 8, 47, 48, 33, 50, 51, 52, 35, 52, 37, 38, 55, 56, 63, 32, 49, 34, 35, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3260(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 19, 20, 3, 20, 5, 6, 23, 24, 15, 16, 1, 2, 3, 4, 19, 4, 21, 22, 7, 8, 63, 32, 49, 50, 51, 52, 35, 52, 37, 38, 55, 56, 47, 48, 33, 34, 35, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, 
unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3261(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 19, 20, 3, 20, 5, 6, 23, 24, 31, 16, 1, 2, 3, 4, 19, 4, 21, 22, 7, 8, 47, 32, 49, 50, 51, 52, 35, 52, 37, 38, 55, 56, 63, 48, 33, 34, 35, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3262(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 19, 20, 3, 20, 5, 6, 23, 24, 15, 0, 1, 2, 3, 4, 19, 4, 21, 22, 7, 8, 63, 48, 49, 50, 51, 52, 35, 52, 37, 38, 55, 56, 47, 32, 33, 34, 35, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3263(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 19, 20, 3, 20, 5, 6, 23, 24, 31, 0, 1, 2, 3, 4, 19, 4, 21, 22, 7, 8, 47, 48, 49, 50, 51, 52, 35, 52, 37, 38, 55, 56, 63, 32, 33, 34, 35, 36, 51, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3264(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 3, 4, 19, 20, 5, 6, 23, 24, 15, 16, 17, 18, 19, 20, 3, 4, 21, 22, 7, 8, 63, 32, 33, 34, 35, 36, 51, 52, 37, 38, 55, 56, 47, 48, 49, 50, 51, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3265(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 3, 4, 19, 20, 5, 6, 23, 24, 31, 16, 17, 18, 19, 20, 3, 4, 21, 22, 7, 8, 47, 32, 33, 34, 35, 36, 51, 52, 37, 38, 55, 56, 63, 48, 49, 50, 51, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3266(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 3, 4, 19, 20, 5, 6, 23, 24, 15, 0, 17, 18, 19, 20, 3, 4, 21, 22, 7, 8, 63, 48, 33, 34, 35, 36, 51, 52, 37, 38, 55, 56, 47, 32, 49, 50, 51, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3267(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 3, 4, 19, 20, 5, 6, 23, 24, 31, 0, 17, 18, 19, 20, 3, 4, 21, 22, 7, 8, 47, 48, 33, 34, 35, 36, 51, 52, 37, 38, 55, 56, 63, 32, 49, 50, 51, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3268(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 3, 4, 19, 20, 5, 6, 23, 24, 15, 16, 1, 18, 19, 20, 3, 4, 21, 22, 7, 
8, 63, 32, 49, 34, 35, 36, 51, 52, 37, 38, 55, 56, 47, 48, 33, 50, 51, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3269(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 3, 4, 19, 20, 5, 6, 23, 24, 31, 16, 1, 18, 19, 20, 3, 4, 21, 22, 7, 8, 47, 32, 49, 34, 35, 36, 51, 52, 37, 38, 55, 56, 63, 48, 33, 50, 51, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3270(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 3, 4, 19, 20, 5, 6, 23, 24, 15, 0, 1, 18, 19, 20, 3, 4, 21, 22, 7, 8, 63, 48, 49, 34, 35, 36, 51, 52, 37, 38, 55, 56, 47, 32, 33, 50, 51, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3271(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 3, 4, 19, 20, 5, 6, 23, 24, 31, 0, 1, 18, 19, 20, 3, 4, 21, 22, 7, 8, 47, 48, 49, 34, 35, 36, 51, 52, 37, 38, 55, 56, 63, 32, 33, 50, 51, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3272(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 3, 4, 19, 20, 5, 6, 23, 24, 15, 16, 17, 2, 19, 20, 3, 4, 21, 22, 7, 8, 63, 32, 33, 50, 35, 36, 51, 52, 37, 38, 55, 56, 47, 48, 49, 34, 51, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3273(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 3, 4, 19, 20, 5, 6, 23, 24, 31, 16, 17, 2, 19, 20, 3, 4, 21, 22, 7, 8, 47, 32, 33, 50, 35, 36, 51, 52, 37, 38, 55, 56, 63, 48, 49, 34, 51, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3274(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 3, 4, 19, 20, 5, 6, 23, 24, 15, 0, 17, 2, 19, 20, 3, 4, 21, 22, 7, 8, 63, 48, 33, 50, 35, 36, 51, 52, 37, 38, 55, 56, 47, 32, 49, 34, 51, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3275(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 3, 4, 19, 20, 5, 6, 23, 24, 31, 0, 17, 2, 19, 20, 3, 4, 21, 22, 7, 8, 47, 48, 33, 50, 35, 36, 51, 52, 37, 38, 55, 56, 63, 32, 49, 34, 51, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3276(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const 
vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 3, 4, 19, 20, 5, 6, 23, 24, 15, 16, 1, 2, 19, 20, 3, 4, 21, 22, 7, 8, 63, 32, 49, 50, 35, 36, 51, 52, 37, 38, 55, 56, 47, 48, 33, 34, 51, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3277(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 3, 4, 19, 20, 5, 6, 23, 24, 31, 16, 1, 2, 19, 20, 3, 4, 21, 22, 7, 8, 47, 32, 49, 50, 35, 36, 51, 52, 37, 38, 55, 56, 63, 48, 33, 34, 51, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3278(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 3, 4, 19, 20, 5, 6, 23, 24, 15, 0, 1, 2, 19, 20, 3, 4, 21, 22, 7, 8, 63, 48, 49, 50, 35, 36, 51, 52, 37, 38, 55, 56, 47, 32, 33, 34, 51, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3279(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 3, 4, 19, 20, 5, 6, 23, 24, 31, 0, 1, 2, 19, 20, 3, 4, 21, 22, 7, 8, 47, 48, 49, 50, 35, 36, 51, 52, 37, 38, 55, 56, 63, 32, 33, 34, 51, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3280(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 19, 4, 19, 20, 5, 6, 23, 24, 15, 16, 17, 18, 3, 20, 3, 4, 21, 22, 7, 8, 63, 32, 33, 34, 51, 36, 51, 52, 37, 38, 55, 56, 47, 48, 49, 50, 35, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3281(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 19, 4, 19, 20, 5, 6, 23, 24, 31, 16, 17, 18, 3, 20, 3, 4, 21, 22, 7, 8, 47, 32, 33, 34, 51, 36, 51, 52, 37, 38, 55, 56, 63, 48, 49, 50, 35, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3282(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 19, 4, 19, 20, 5, 6, 23, 24, 15, 0, 17, 18, 3, 20, 3, 4, 21, 22, 7, 8, 63, 48, 33, 34, 51, 36, 51, 52, 37, 38, 55, 56, 47, 32, 49, 50, 35, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3283(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 19, 4, 19, 20, 5, 6, 23, 24, 31, 0, 17, 18, 3, 20, 3, 4, 21, 22, 7, 8, 47, 48, 33, 34, 51, 36, 51, 52, 37, 38, 55, 56, 63, 32, 49, 50, 35, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, 
unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3284(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 19, 4, 19, 20, 5, 6, 23, 24, 15, 16, 1, 18, 3, 20, 3, 4, 21, 22, 7, 8, 63, 32, 49, 34, 51, 36, 51, 52, 37, 38, 55, 56, 47, 48, 33, 50, 35, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3285(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 19, 4, 19, 20, 5, 6, 23, 24, 31, 16, 1, 18, 3, 20, 3, 4, 21, 22, 7, 8, 47, 32, 49, 34, 51, 36, 51, 52, 37, 38, 55, 56, 63, 48, 33, 50, 35, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3286(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 19, 4, 19, 20, 5, 6, 23, 24, 15, 0, 1, 18, 3, 20, 3, 4, 21, 22, 7, 8, 63, 48, 49, 34, 51, 36, 51, 52, 37, 38, 55, 56, 47, 32, 33, 50, 35, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3287(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 19, 4, 19, 20, 5, 6, 23, 24, 31, 0, 1, 18, 3, 20, 3, 4, 21, 22, 7, 8, 47, 48, 49, 34, 51, 36, 51, 52, 37, 38, 55, 56, 63, 32, 33, 50, 35, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3288(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 19, 4, 19, 20, 5, 6, 23, 24, 15, 16, 17, 2, 3, 20, 3, 4, 21, 22, 7, 8, 63, 32, 33, 50, 51, 36, 51, 52, 37, 38, 55, 56, 47, 48, 49, 34, 35, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3289(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 19, 4, 19, 20, 5, 6, 23, 24, 31, 16, 17, 2, 3, 20, 3, 4, 21, 22, 7, 8, 47, 32, 33, 50, 51, 36, 51, 52, 37, 38, 55, 56, 63, 48, 49, 34, 35, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3290(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 19, 4, 19, 20, 5, 6, 23, 24, 15, 0, 17, 2, 3, 20, 3, 4, 21, 22, 7, 8, 63, 48, 33, 50, 51, 36, 51, 52, 37, 38, 55, 56, 47, 32, 49, 34, 35, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3291(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 19, 4, 19, 20, 5, 6, 23, 24, 31, 0, 17, 2, 3, 20, 3, 4, 21, 22, 7, 
8, 47, 48, 33, 50, 51, 36, 51, 52, 37, 38, 55, 56, 63, 32, 49, 34, 35, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3292(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 19, 4, 19, 20, 5, 6, 23, 24, 15, 16, 1, 2, 3, 20, 3, 4, 21, 22, 7, 8, 63, 32, 49, 50, 51, 36, 51, 52, 37, 38, 55, 56, 47, 48, 33, 34, 35, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3293(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 19, 4, 19, 20, 5, 6, 23, 24, 31, 16, 1, 2, 3, 20, 3, 4, 21, 22, 7, 8, 47, 32, 49, 50, 51, 36, 51, 52, 37, 38, 55, 56, 63, 48, 33, 34, 35, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3294(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 19, 4, 19, 20, 5, 6, 23, 24, 15, 0, 1, 2, 3, 20, 3, 4, 21, 22, 7, 8, 63, 48, 49, 50, 51, 36, 51, 52, 37, 38, 55, 56, 47, 32, 33, 34, 35, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3295(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 19, 4, 19, 20, 5, 6, 23, 24, 31, 0, 1, 2, 3, 20, 3, 4, 21, 22, 7, 8, 47, 48, 49, 50, 51, 36, 51, 52, 37, 38, 55, 56, 63, 32, 33, 34, 35, 52, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3296(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 3, 20, 19, 20, 5, 6, 23, 24, 15, 16, 17, 18, 19, 4, 3, 4, 21, 22, 7, 8, 63, 32, 33, 34, 35, 52, 51, 52, 37, 38, 55, 56, 47, 48, 49, 50, 51, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3297(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 3, 20, 19, 20, 5, 6, 23, 24, 31, 16, 17, 18, 19, 4, 3, 4, 21, 22, 7, 8, 47, 32, 33, 34, 35, 52, 51, 52, 37, 38, 55, 56, 63, 48, 49, 50, 51, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3298(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 3, 20, 19, 20, 5, 6, 23, 24, 15, 0, 17, 18, 19, 4, 3, 4, 21, 22, 7, 8, 63, 48, 33, 34, 35, 52, 51, 52, 37, 38, 55, 56, 47, 32, 49, 50, 51, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3299(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const 
vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 3, 20, 19, 20, 5, 6, 23, 24, 31, 0, 17, 18, 19, 4, 3, 4, 21, 22, 7, 8, 47, 48, 33, 34, 35, 52, 51, 52, 37, 38, 55, 56, 63, 32, 49, 50, 51, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3300(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 3, 20, 19, 20, 5, 6, 23, 24, 15, 16, 1, 18, 19, 4, 3, 4, 21, 22, 7, 8, 63, 32, 49, 34, 35, 52, 51, 52, 37, 38, 55, 56, 47, 48, 33, 50, 51, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3301(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 3, 20, 19, 20, 5, 6, 23, 24, 31, 16, 1, 18, 19, 4, 3, 4, 21, 22, 7, 8, 47, 32, 49, 34, 35, 52, 51, 52, 37, 38, 55, 56, 63, 48, 33, 50, 51, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3302(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 3, 20, 19, 20, 5, 6, 23, 24, 15, 0, 1, 18, 19, 4, 3, 4, 21, 22, 7, 8, 63, 48, 49, 34, 35, 52, 51, 52, 37, 38, 55, 56, 47, 32, 33, 50, 51, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3303(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 3, 20, 19, 20, 5, 6, 23, 24, 31, 0, 1, 18, 19, 4, 3, 4, 21, 22, 7, 8, 47, 48, 49, 34, 35, 52, 51, 52, 37, 38, 55, 56, 63, 32, 33, 50, 51, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3304(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 3, 20, 19, 20, 5, 6, 23, 24, 15, 16, 17, 2, 19, 4, 3, 4, 21, 22, 7, 8, 63, 32, 33, 50, 35, 52, 51, 52, 37, 38, 55, 56, 47, 48, 49, 34, 51, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3305(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 3, 20, 19, 20, 5, 6, 23, 24, 31, 16, 17, 2, 19, 4, 3, 4, 21, 22, 7, 8, 47, 32, 33, 50, 35, 52, 51, 52, 37, 38, 55, 56, 63, 48, 49, 34, 51, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3306(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 3, 20, 19, 20, 5, 6, 23, 24, 15, 0, 17, 2, 19, 4, 3, 4, 21, 22, 7, 8, 63, 48, 33, 50, 35, 52, 51, 52, 37, 38, 55, 56, 47, 32, 49, 34, 51, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, 
unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3307(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 3, 20, 19, 20, 5, 6, 23, 24, 31, 0, 17, 2, 19, 4, 3, 4, 21, 22, 7, 8, 47, 48, 33, 50, 35, 52, 51, 52, 37, 38, 55, 56, 63, 32, 49, 34, 51, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3308(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 3, 20, 19, 20, 5, 6, 23, 24, 15, 16, 1, 2, 19, 4, 3, 4, 21, 22, 7, 8, 63, 32, 49, 50, 35, 52, 51, 52, 37, 38, 55, 56, 47, 48, 33, 34, 51, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3309(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 3, 20, 19, 20, 5, 6, 23, 24, 31, 16, 1, 2, 19, 4, 3, 4, 21, 22, 7, 8, 47, 32, 49, 50, 35, 52, 51, 52, 37, 38, 55, 56, 63, 48, 33, 34, 51, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3310(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 3, 20, 19, 20, 5, 6, 23, 24, 15, 0, 1, 2, 19, 4, 3, 4, 21, 22, 7, 8, 63, 48, 49, 50, 35, 52, 51, 52, 37, 38, 55, 56, 47, 32, 33, 34, 51, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3311(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 3, 20, 19, 20, 5, 6, 23, 24, 31, 0, 1, 2, 19, 4, 3, 4, 21, 22, 7, 8, 47, 48, 49, 50, 35, 52, 51, 52, 37, 38, 55, 56, 63, 32, 33, 34, 51, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3312(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 19, 20, 19, 20, 5, 6, 23, 24, 15, 16, 17, 18, 3, 4, 3, 4, 21, 22, 7, 8, 63, 32, 33, 34, 51, 52, 51, 52, 37, 38, 55, 56, 47, 48, 49, 50, 35, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3313(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 19, 20, 19, 20, 5, 6, 23, 24, 31, 16, 17, 18, 3, 4, 3, 4, 21, 22, 7, 8, 47, 32, 33, 34, 51, 52, 51, 52, 37, 38, 55, 56, 63, 48, 49, 50, 35, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3314(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 19, 20, 19, 20, 5, 6, 23, 24, 15, 0, 17, 18, 3, 4, 3, 4, 21, 22, 7, 
8, 63, 48, 33, 34, 51, 52, 51, 52, 37, 38, 55, 56, 47, 32, 49, 50, 35, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3315(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 19, 20, 19, 20, 5, 6, 23, 24, 31, 0, 17, 18, 3, 4, 3, 4, 21, 22, 7, 8, 47, 48, 33, 34, 51, 52, 51, 52, 37, 38, 55, 56, 63, 32, 49, 50, 35, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3316(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 19, 20, 19, 20, 5, 6, 23, 24, 15, 16, 1, 18, 3, 4, 3, 4, 21, 22, 7, 8, 63, 32, 49, 34, 51, 52, 51, 52, 37, 38, 55, 56, 47, 48, 33, 50, 35, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3317(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 19, 20, 19, 20, 5, 6, 23, 24, 31, 16, 1, 18, 3, 4, 3, 4, 21, 22, 7, 8, 47, 32, 49, 34, 51, 52, 51, 52, 37, 38, 55, 56, 63, 48, 33, 50, 35, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3318(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 19, 20, 19, 20, 5, 6, 23, 24, 15, 0, 1, 18, 3, 4, 3, 4, 21, 22, 7, 8, 63, 48, 49, 34, 51, 52, 51, 52, 37, 38, 55, 56, 47, 32, 33, 50, 35, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3319(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 19, 20, 19, 20, 5, 6, 23, 24, 31, 0, 1, 18, 3, 4, 3, 4, 21, 22, 7, 8, 47, 48, 49, 34, 51, 52, 51, 52, 37, 38, 55, 56, 63, 32, 33, 50, 35, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3320(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 19, 20, 19, 20, 5, 6, 23, 24, 15, 16, 17, 2, 3, 4, 3, 4, 21, 22, 7, 8, 63, 32, 33, 50, 51, 52, 51, 52, 37, 38, 55, 56, 47, 48, 49, 34, 35, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3321(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 19, 20, 19, 20, 5, 6, 23, 24, 31, 16, 17, 2, 3, 4, 3, 4, 21, 22, 7, 8, 47, 32, 33, 50, 51, 52, 51, 52, 37, 38, 55, 56, 63, 48, 49, 34, 35, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3322(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const 
vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 19, 20, 19, 20, 5, 6, 23, 24, 15, 0, 17, 2, 3, 4, 3, 4, 21, 22, 7, 8, 63, 48, 33, 50, 51, 52, 51, 52, 37, 38, 55, 56, 47, 32, 49, 34, 35, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3323(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 19, 20, 19, 20, 5, 6, 23, 24, 31, 0, 17, 2, 3, 4, 3, 4, 21, 22, 7, 8, 47, 48, 33, 50, 51, 52, 51, 52, 37, 38, 55, 56, 63, 32, 49, 34, 35, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3324(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 19, 20, 19, 20, 5, 6, 23, 24, 15, 16, 1, 2, 3, 4, 3, 4, 21, 22, 7, 8, 63, 32, 49, 50, 51, 52, 51, 52, 37, 38, 55, 56, 47, 48, 33, 34, 35, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3325(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 19, 20, 19, 20, 5, 6, 23, 24, 31, 16, 1, 2, 3, 4, 3, 4, 21, 22, 7, 8, 47, 32, 49, 50, 51, 52, 51, 52, 37, 38, 55, 56, 63, 48, 33, 34, 35, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3326(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 19, 20, 19, 20, 5, 6, 23, 24, 15, 0, 1, 2, 3, 4, 3, 4, 21, 22, 7, 8, 63, 48, 49, 50, 51, 52, 51, 52, 37, 38, 55, 56, 47, 32, 33, 34, 35, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3327(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 19, 20, 19, 20, 5, 6, 23, 24, 31, 0, 1, 2, 3, 4, 3, 4, 21, 22, 7, 8, 47, 48, 49, 50, 51, 52, 51, 52, 37, 38, 55, 56, 63, 32, 33, 34, 35, 36, 35, 36, 53, 54, 39, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } #endif // DESGPU_COMPILE_ALL_SALTS
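Each generated `des_25_encrypt_saltNNNN` wrapper above instantiates the `des_25_encrypt` kernel template with that salt's expansion-box index permutation baked in as template arguments, then launches it with the caller-supplied grid and block dimensions. As a rough illustration of how a host-side caller might pick one of these wrappers, the sketch below builds a tiny function-pointer table over three of the launchers defined above. The table and the `launch_example_salt` helper are illustrative assumptions only, not part of the generated sources; it is also assumed that `des_kernel_encrypt.h` defines `vtype` and that `des_kernel_salt_instances.h` declares the per-salt wrappers (which are only compiled when `DESGPU_COMPILE_ALL_SALTS` is defined).

```cpp
// Illustrative sketch only: a hypothetical host-side dispatcher over the
// generated per-salt launchers. The real DESGPU sources build their own
// dispatch table; the names below are assumptions for illustration.
#include <cstddef>
#include "des_kernel_encrypt.h"         // assumed to define vtype
#include "des_kernel_salt_instances.h"  // assumed to declare des_25_encrypt_saltNNNN(...)

// All generated wrappers share this signature.
typedef void (*des_salt_launcher)(const size_t num_blocks,
                                  const size_t threads_per_block,
                                  vtype* const unchecked_hashes,
                                  const vtype* const bitsplitted_keys);

// A few consecutive entries of a hypothetical salt -> launcher table
// (requires building with -DDESGPU_COMPILE_ALL_SALTS so the wrappers exist).
static const des_salt_launcher example_launchers[] = {
    des_25_encrypt_salt3223,
    des_25_encrypt_salt3224,
    des_25_encrypt_salt3225,
};

// Dispatch is just an indexed call; the salt value itself never reaches the
// kernel at run time because each wrapper encodes it in template arguments.
static void launch_example_salt(size_t idx, size_t num_blocks,
                                size_t threads_per_block,
                                vtype* unchecked_hashes,
                                const vtype* bitsplitted_keys)
{
    example_launchers[idx](num_blocks, threads_per_block,
                           unchecked_hashes, bitsplitted_keys);
}
```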
a847ec2d7d19381ea08b0349d8acf60a4ce3e136.cu
#include "des_kernel_encrypt.h" #include "des_kernel_salt_instances.h" #ifdef DESGPU_COMPILE_ALL_SALTS void des_25_encrypt_salt3200(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 3, 4, 3, 20, 5, 6, 23, 24, 15, 16, 17, 18, 19, 20, 19, 4, 21, 22, 7, 8, 63, 32, 33, 34, 35, 36, 35, 52, 37, 38, 55, 56, 47, 48, 49, 50, 51, 52, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3201(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 3, 4, 3, 20, 5, 6, 23, 24, 31, 16, 17, 18, 19, 20, 19, 4, 21, 22, 7, 8, 47, 32, 33, 34, 35, 36, 35, 52, 37, 38, 55, 56, 63, 48, 49, 50, 51, 52, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3202(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 3, 4, 3, 20, 5, 6, 23, 24, 15, 0, 17, 18, 19, 20, 19, 4, 21, 22, 7, 8, 63, 48, 33, 34, 35, 36, 35, 52, 37, 38, 55, 56, 47, 32, 49, 50, 51, 52, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3203(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 3, 4, 3, 20, 5, 6, 23, 24, 31, 0, 17, 18, 19, 20, 19, 4, 21, 22, 7, 8, 47, 48, 33, 34, 35, 36, 35, 52, 37, 38, 55, 56, 63, 32, 49, 50, 51, 52, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3204(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 3, 4, 3, 20, 5, 6, 23, 24, 15, 16, 1, 18, 19, 20, 19, 4, 21, 22, 7, 8, 63, 32, 49, 34, 35, 36, 35, 52, 37, 38, 55, 56, 47, 48, 33, 50, 51, 52, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3205(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 3, 4, 3, 20, 5, 6, 23, 24, 31, 16, 1, 18, 19, 20, 19, 4, 21, 22, 7, 8, 47, 32, 49, 34, 35, 36, 35, 52, 37, 38, 55, 56, 63, 48, 33, 50, 51, 52, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3206(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 3, 4, 3, 20, 5, 6, 23, 24, 15, 0, 1, 18, 19, 20, 19, 4, 21, 22, 7, 8, 63, 48, 49, 34, 35, 36, 35, 52, 37, 38, 55, 56, 47, 32, 33, 50, 51, 52, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3207(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 3, 4, 3, 20, 5, 6, 23, 24, 31, 0, 1, 18, 19, 20, 19, 4, 21, 22, 7, 8, 47, 48, 49, 34, 35, 36, 35, 52, 37, 38, 55, 56, 63, 32, 33, 50, 51, 52, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3208(const size_t 
num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 3, 4, 3, 20, 5, 6, 23, 24, 15, 16, 17, 2, 19, 20, 19, 4, 21, 22, 7, 8, 63, 32, 33, 50, 35, 36, 35, 52, 37, 38, 55, 56, 47, 48, 49, 34, 51, 52, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3209(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 3, 4, 3, 20, 5, 6, 23, 24, 31, 16, 17, 2, 19, 20, 19, 4, 21, 22, 7, 8, 47, 32, 33, 50, 35, 36, 35, 52, 37, 38, 55, 56, 63, 48, 49, 34, 51, 52, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3210(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 3, 4, 3, 20, 5, 6, 23, 24, 15, 0, 17, 2, 19, 20, 19, 4, 21, 22, 7, 8, 63, 48, 33, 50, 35, 36, 35, 52, 37, 38, 55, 56, 47, 32, 49, 34, 51, 52, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3211(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 3, 4, 3, 20, 5, 6, 23, 24, 31, 0, 17, 2, 19, 20, 19, 4, 21, 22, 7, 8, 47, 48, 33, 50, 35, 36, 35, 52, 37, 38, 55, 56, 63, 32, 49, 34, 51, 52, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3212(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 3, 4, 3, 20, 5, 6, 23, 24, 15, 16, 1, 2, 19, 20, 19, 4, 21, 22, 7, 8, 63, 32, 49, 50, 35, 36, 35, 52, 37, 38, 55, 56, 47, 48, 33, 34, 51, 52, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3213(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 3, 4, 3, 20, 5, 6, 23, 24, 31, 16, 1, 2, 19, 20, 19, 4, 21, 22, 7, 8, 47, 32, 49, 50, 35, 36, 35, 52, 37, 38, 55, 56, 63, 48, 33, 34, 51, 52, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3214(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 3, 4, 3, 20, 5, 6, 23, 24, 15, 0, 1, 2, 19, 20, 19, 4, 21, 22, 7, 8, 63, 48, 49, 50, 35, 36, 35, 52, 37, 38, 55, 56, 47, 32, 33, 34, 51, 52, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3215(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 3, 4, 3, 20, 5, 6, 23, 24, 31, 0, 1, 2, 19, 20, 19, 4, 21, 22, 7, 8, 47, 48, 49, 50, 35, 36, 35, 52, 37, 38, 55, 56, 63, 32, 33, 34, 51, 52, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3216(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 19, 
4, 3, 20, 5, 6, 23, 24, 15, 16, 17, 18, 3, 20, 19, 4, 21, 22, 7, 8, 63, 32, 33, 34, 51, 36, 35, 52, 37, 38, 55, 56, 47, 48, 49, 50, 35, 52, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3217(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 19, 4, 3, 20, 5, 6, 23, 24, 31, 16, 17, 18, 3, 20, 19, 4, 21, 22, 7, 8, 47, 32, 33, 34, 51, 36, 35, 52, 37, 38, 55, 56, 63, 48, 49, 50, 35, 52, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3218(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 19, 4, 3, 20, 5, 6, 23, 24, 15, 0, 17, 18, 3, 20, 19, 4, 21, 22, 7, 8, 63, 48, 33, 34, 51, 36, 35, 52, 37, 38, 55, 56, 47, 32, 49, 50, 35, 52, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3219(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 19, 4, 3, 20, 5, 6, 23, 24, 31, 0, 17, 18, 3, 20, 19, 4, 21, 22, 7, 8, 47, 48, 33, 34, 51, 36, 35, 52, 37, 38, 55, 56, 63, 32, 49, 50, 35, 52, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3220(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 19, 4, 3, 20, 5, 6, 23, 24, 15, 16, 1, 18, 3, 20, 19, 4, 21, 22, 7, 8, 63, 32, 49, 34, 51, 36, 35, 52, 37, 38, 55, 56, 47, 48, 33, 50, 35, 52, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3221(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 19, 4, 3, 20, 5, 6, 23, 24, 31, 16, 1, 18, 3, 20, 19, 4, 21, 22, 7, 8, 47, 32, 49, 34, 51, 36, 35, 52, 37, 38, 55, 56, 63, 48, 33, 50, 35, 52, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3222(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 19, 4, 3, 20, 5, 6, 23, 24, 15, 0, 1, 18, 3, 20, 19, 4, 21, 22, 7, 8, 63, 48, 49, 34, 51, 36, 35, 52, 37, 38, 55, 56, 47, 32, 33, 50, 35, 52, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3223(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 19, 4, 3, 20, 5, 6, 23, 24, 31, 0, 1, 18, 3, 20, 19, 4, 21, 22, 7, 8, 47, 48, 49, 34, 51, 36, 35, 52, 37, 38, 55, 56, 63, 32, 33, 50, 35, 52, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3224(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 19, 4, 3, 20, 5, 6, 23, 24, 15, 16, 17, 2, 3, 20, 19, 4, 21, 22, 7, 8, 63, 32, 33, 50, 51, 36, 35, 52, 37, 38, 55, 56, 47, 48, 49, 34, 35, 52, 51, 
36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3225(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 19, 4, 3, 20, 5, 6, 23, 24, 31, 16, 17, 2, 3, 20, 19, 4, 21, 22, 7, 8, 47, 32, 33, 50, 51, 36, 35, 52, 37, 38, 55, 56, 63, 48, 49, 34, 35, 52, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3226(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 19, 4, 3, 20, 5, 6, 23, 24, 15, 0, 17, 2, 3, 20, 19, 4, 21, 22, 7, 8, 63, 48, 33, 50, 51, 36, 35, 52, 37, 38, 55, 56, 47, 32, 49, 34, 35, 52, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3227(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 19, 4, 3, 20, 5, 6, 23, 24, 31, 0, 17, 2, 3, 20, 19, 4, 21, 22, 7, 8, 47, 48, 33, 50, 51, 36, 35, 52, 37, 38, 55, 56, 63, 32, 49, 34, 35, 52, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3228(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 19, 4, 3, 20, 5, 6, 23, 24, 15, 16, 1, 2, 3, 20, 19, 4, 21, 22, 7, 8, 63, 32, 49, 50, 51, 36, 35, 52, 37, 38, 55, 56, 47, 48, 33, 34, 35, 52, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3229(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 19, 4, 3, 20, 5, 6, 23, 24, 31, 16, 1, 2, 3, 20, 19, 4, 21, 22, 7, 8, 47, 32, 49, 50, 51, 36, 35, 52, 37, 38, 55, 56, 63, 48, 33, 34, 35, 52, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3230(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 19, 4, 3, 20, 5, 6, 23, 24, 15, 0, 1, 2, 3, 20, 19, 4, 21, 22, 7, 8, 63, 48, 49, 50, 51, 36, 35, 52, 37, 38, 55, 56, 47, 32, 33, 34, 35, 52, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3231(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 19, 4, 3, 20, 5, 6, 23, 24, 31, 0, 1, 2, 3, 20, 19, 4, 21, 22, 7, 8, 47, 48, 49, 50, 51, 36, 35, 52, 37, 38, 55, 56, 63, 32, 33, 34, 35, 52, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3232(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 3, 20, 3, 20, 5, 6, 23, 24, 15, 16, 17, 18, 19, 4, 19, 4, 21, 22, 7, 8, 63, 32, 33, 34, 35, 52, 35, 52, 37, 38, 55, 56, 47, 48, 49, 50, 51, 36, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3233(const size_t 
num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 3, 20, 3, 20, 5, 6, 23, 24, 31, 16, 17, 18, 19, 4, 19, 4, 21, 22, 7, 8, 47, 32, 33, 34, 35, 52, 35, 52, 37, 38, 55, 56, 63, 48, 49, 50, 51, 36, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3234(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 3, 20, 3, 20, 5, 6, 23, 24, 15, 0, 17, 18, 19, 4, 19, 4, 21, 22, 7, 8, 63, 48, 33, 34, 35, 52, 35, 52, 37, 38, 55, 56, 47, 32, 49, 50, 51, 36, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3235(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 3, 20, 3, 20, 5, 6, 23, 24, 31, 0, 17, 18, 19, 4, 19, 4, 21, 22, 7, 8, 47, 48, 33, 34, 35, 52, 35, 52, 37, 38, 55, 56, 63, 32, 49, 50, 51, 36, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3236(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 3, 20, 3, 20, 5, 6, 23, 24, 15, 16, 1, 18, 19, 4, 19, 4, 21, 22, 7, 8, 63, 32, 49, 34, 35, 52, 35, 52, 37, 38, 55, 56, 47, 48, 33, 50, 51, 36, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3237(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 3, 20, 3, 20, 5, 6, 23, 24, 31, 16, 1, 18, 19, 4, 19, 4, 21, 22, 7, 8, 47, 32, 49, 34, 35, 52, 35, 52, 37, 38, 55, 56, 63, 48, 33, 50, 51, 36, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3238(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 3, 20, 3, 20, 5, 6, 23, 24, 15, 0, 1, 18, 19, 4, 19, 4, 21, 22, 7, 8, 63, 48, 49, 34, 35, 52, 35, 52, 37, 38, 55, 56, 47, 32, 33, 50, 51, 36, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3239(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 3, 20, 3, 20, 5, 6, 23, 24, 31, 0, 1, 18, 19, 4, 19, 4, 21, 22, 7, 8, 47, 48, 49, 34, 35, 52, 35, 52, 37, 38, 55, 56, 63, 32, 33, 50, 51, 36, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3240(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 3, 20, 3, 20, 5, 6, 23, 24, 15, 16, 17, 2, 19, 4, 19, 4, 21, 22, 7, 8, 63, 32, 33, 50, 35, 52, 35, 52, 37, 38, 55, 56, 47, 48, 49, 34, 51, 36, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3241(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 3, 
20, 3, 20, 5, 6, 23, 24, 31, 16, 17, 2, 19, 4, 19, 4, 21, 22, 7, 8, 47, 32, 33, 50, 35, 52, 35, 52, 37, 38, 55, 56, 63, 48, 49, 34, 51, 36, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3242(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 3, 20, 3, 20, 5, 6, 23, 24, 15, 0, 17, 2, 19, 4, 19, 4, 21, 22, 7, 8, 63, 48, 33, 50, 35, 52, 35, 52, 37, 38, 55, 56, 47, 32, 49, 34, 51, 36, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3243(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 3, 20, 3, 20, 5, 6, 23, 24, 31, 0, 17, 2, 19, 4, 19, 4, 21, 22, 7, 8, 47, 48, 33, 50, 35, 52, 35, 52, 37, 38, 55, 56, 63, 32, 49, 34, 51, 36, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3244(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 3, 20, 3, 20, 5, 6, 23, 24, 15, 16, 1, 2, 19, 4, 19, 4, 21, 22, 7, 8, 63, 32, 49, 50, 35, 52, 35, 52, 37, 38, 55, 56, 47, 48, 33, 34, 51, 36, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3245(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 3, 20, 3, 20, 5, 6, 23, 24, 31, 16, 1, 2, 19, 4, 19, 4, 21, 22, 7, 8, 47, 32, 49, 50, 35, 52, 35, 52, 37, 38, 55, 56, 63, 48, 33, 34, 51, 36, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3246(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 3, 20, 3, 20, 5, 6, 23, 24, 15, 0, 1, 2, 19, 4, 19, 4, 21, 22, 7, 8, 63, 48, 49, 50, 35, 52, 35, 52, 37, 38, 55, 56, 47, 32, 33, 34, 51, 36, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3247(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 3, 20, 3, 20, 5, 6, 23, 24, 31, 0, 1, 2, 19, 4, 19, 4, 21, 22, 7, 8, 47, 48, 49, 50, 35, 52, 35, 52, 37, 38, 55, 56, 63, 32, 33, 34, 51, 36, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3248(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 19, 20, 3, 20, 5, 6, 23, 24, 15, 16, 17, 18, 3, 4, 19, 4, 21, 22, 7, 8, 63, 32, 33, 34, 51, 52, 35, 52, 37, 38, 55, 56, 47, 48, 49, 50, 35, 36, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3249(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 19, 20, 3, 20, 5, 6, 23, 24, 31, 16, 17, 18, 3, 4, 19, 4, 21, 22, 7, 8, 47, 32, 33, 34, 51, 52, 35, 52, 37, 38, 55, 56, 63, 48, 49, 50, 35, 36, 51, 
36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3250(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 19, 20, 3, 20, 5, 6, 23, 24, 15, 0, 17, 18, 3, 4, 19, 4, 21, 22, 7, 8, 63, 48, 33, 34, 51, 52, 35, 52, 37, 38, 55, 56, 47, 32, 49, 50, 35, 36, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3251(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 19, 20, 3, 20, 5, 6, 23, 24, 31, 0, 17, 18, 3, 4, 19, 4, 21, 22, 7, 8, 47, 48, 33, 34, 51, 52, 35, 52, 37, 38, 55, 56, 63, 32, 49, 50, 35, 36, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3252(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 19, 20, 3, 20, 5, 6, 23, 24, 15, 16, 1, 18, 3, 4, 19, 4, 21, 22, 7, 8, 63, 32, 49, 34, 51, 52, 35, 52, 37, 38, 55, 56, 47, 48, 33, 50, 35, 36, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3253(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 19, 20, 3, 20, 5, 6, 23, 24, 31, 16, 1, 18, 3, 4, 19, 4, 21, 22, 7, 8, 47, 32, 49, 34, 51, 52, 35, 52, 37, 38, 55, 56, 63, 48, 33, 50, 35, 36, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3254(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 19, 20, 3, 20, 5, 6, 23, 24, 15, 0, 1, 18, 3, 4, 19, 4, 21, 22, 7, 8, 63, 48, 49, 34, 51, 52, 35, 52, 37, 38, 55, 56, 47, 32, 33, 50, 35, 36, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3255(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 19, 20, 3, 20, 5, 6, 23, 24, 31, 0, 1, 18, 3, 4, 19, 4, 21, 22, 7, 8, 47, 48, 49, 34, 51, 52, 35, 52, 37, 38, 55, 56, 63, 32, 33, 50, 35, 36, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3256(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 19, 20, 3, 20, 5, 6, 23, 24, 15, 16, 17, 2, 3, 4, 19, 4, 21, 22, 7, 8, 63, 32, 33, 50, 51, 52, 35, 52, 37, 38, 55, 56, 47, 48, 49, 34, 35, 36, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3257(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 19, 20, 3, 20, 5, 6, 23, 24, 31, 16, 17, 2, 3, 4, 19, 4, 21, 22, 7, 8, 47, 32, 33, 50, 51, 52, 35, 52, 37, 38, 55, 56, 63, 48, 49, 34, 35, 36, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3258(const size_t 
num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 19, 20, 3, 20, 5, 6, 23, 24, 15, 0, 17, 2, 3, 4, 19, 4, 21, 22, 7, 8, 63, 48, 33, 50, 51, 52, 35, 52, 37, 38, 55, 56, 47, 32, 49, 34, 35, 36, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3259(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 19, 20, 3, 20, 5, 6, 23, 24, 31, 0, 17, 2, 3, 4, 19, 4, 21, 22, 7, 8, 47, 48, 33, 50, 51, 52, 35, 52, 37, 38, 55, 56, 63, 32, 49, 34, 35, 36, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3260(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 19, 20, 3, 20, 5, 6, 23, 24, 15, 16, 1, 2, 3, 4, 19, 4, 21, 22, 7, 8, 63, 32, 49, 50, 51, 52, 35, 52, 37, 38, 55, 56, 47, 48, 33, 34, 35, 36, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3261(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 19, 20, 3, 20, 5, 6, 23, 24, 31, 16, 1, 2, 3, 4, 19, 4, 21, 22, 7, 8, 47, 32, 49, 50, 51, 52, 35, 52, 37, 38, 55, 56, 63, 48, 33, 34, 35, 36, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3262(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 19, 20, 3, 20, 5, 6, 23, 24, 15, 0, 1, 2, 3, 4, 19, 4, 21, 22, 7, 8, 63, 48, 49, 50, 51, 52, 35, 52, 37, 38, 55, 56, 47, 32, 33, 34, 35, 36, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3263(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 19, 20, 3, 20, 5, 6, 23, 24, 31, 0, 1, 2, 3, 4, 19, 4, 21, 22, 7, 8, 47, 48, 49, 50, 51, 52, 35, 52, 37, 38, 55, 56, 63, 32, 33, 34, 35, 36, 51, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3264(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 3, 4, 19, 20, 5, 6, 23, 24, 15, 16, 17, 18, 19, 20, 3, 4, 21, 22, 7, 8, 63, 32, 33, 34, 35, 36, 51, 52, 37, 38, 55, 56, 47, 48, 49, 50, 51, 52, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3265(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 3, 4, 19, 20, 5, 6, 23, 24, 31, 16, 17, 18, 19, 20, 3, 4, 21, 22, 7, 8, 47, 32, 33, 34, 35, 36, 51, 52, 37, 38, 55, 56, 63, 48, 49, 50, 51, 52, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3266(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 3, 
4, 19, 20, 5, 6, 23, 24, 15, 0, 17, 18, 19, 20, 3, 4, 21, 22, 7, 8, 63, 48, 33, 34, 35, 36, 51, 52, 37, 38, 55, 56, 47, 32, 49, 50, 51, 52, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3267(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 3, 4, 19, 20, 5, 6, 23, 24, 31, 0, 17, 18, 19, 20, 3, 4, 21, 22, 7, 8, 47, 48, 33, 34, 35, 36, 51, 52, 37, 38, 55, 56, 63, 32, 49, 50, 51, 52, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3268(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 3, 4, 19, 20, 5, 6, 23, 24, 15, 16, 1, 18, 19, 20, 3, 4, 21, 22, 7, 8, 63, 32, 49, 34, 35, 36, 51, 52, 37, 38, 55, 56, 47, 48, 33, 50, 51, 52, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3269(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 3, 4, 19, 20, 5, 6, 23, 24, 31, 16, 1, 18, 19, 20, 3, 4, 21, 22, 7, 8, 47, 32, 49, 34, 35, 36, 51, 52, 37, 38, 55, 56, 63, 48, 33, 50, 51, 52, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3270(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 3, 4, 19, 20, 5, 6, 23, 24, 15, 0, 1, 18, 19, 20, 3, 4, 21, 22, 7, 8, 63, 48, 49, 34, 35, 36, 51, 52, 37, 38, 55, 56, 47, 32, 33, 50, 51, 52, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3271(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 3, 4, 19, 20, 5, 6, 23, 24, 31, 0, 1, 18, 19, 20, 3, 4, 21, 22, 7, 8, 47, 48, 49, 34, 35, 36, 51, 52, 37, 38, 55, 56, 63, 32, 33, 50, 51, 52, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3272(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 3, 4, 19, 20, 5, 6, 23, 24, 15, 16, 17, 2, 19, 20, 3, 4, 21, 22, 7, 8, 63, 32, 33, 50, 35, 36, 51, 52, 37, 38, 55, 56, 47, 48, 49, 34, 51, 52, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3273(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 3, 4, 19, 20, 5, 6, 23, 24, 31, 16, 17, 2, 19, 20, 3, 4, 21, 22, 7, 8, 47, 32, 33, 50, 35, 36, 51, 52, 37, 38, 55, 56, 63, 48, 49, 34, 51, 52, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3274(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 3, 4, 19, 20, 5, 6, 23, 24, 15, 0, 17, 2, 19, 20, 3, 4, 21, 22, 7, 8, 63, 48, 33, 50, 35, 36, 51, 52, 37, 38, 55, 56, 47, 32, 49, 34, 51, 52, 35, 
36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3275(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 3, 4, 19, 20, 5, 6, 23, 24, 31, 0, 17, 2, 19, 20, 3, 4, 21, 22, 7, 8, 47, 48, 33, 50, 35, 36, 51, 52, 37, 38, 55, 56, 63, 32, 49, 34, 51, 52, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3276(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 3, 4, 19, 20, 5, 6, 23, 24, 15, 16, 1, 2, 19, 20, 3, 4, 21, 22, 7, 8, 63, 32, 49, 50, 35, 36, 51, 52, 37, 38, 55, 56, 47, 48, 33, 34, 51, 52, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3277(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 3, 4, 19, 20, 5, 6, 23, 24, 31, 16, 1, 2, 19, 20, 3, 4, 21, 22, 7, 8, 47, 32, 49, 50, 35, 36, 51, 52, 37, 38, 55, 56, 63, 48, 33, 34, 51, 52, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3278(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 3, 4, 19, 20, 5, 6, 23, 24, 15, 0, 1, 2, 19, 20, 3, 4, 21, 22, 7, 8, 63, 48, 49, 50, 35, 36, 51, 52, 37, 38, 55, 56, 47, 32, 33, 34, 51, 52, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3279(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 3, 4, 19, 20, 5, 6, 23, 24, 31, 0, 1, 2, 19, 20, 3, 4, 21, 22, 7, 8, 47, 48, 49, 50, 35, 36, 51, 52, 37, 38, 55, 56, 63, 32, 33, 34, 51, 52, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3280(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 19, 4, 19, 20, 5, 6, 23, 24, 15, 16, 17, 18, 3, 20, 3, 4, 21, 22, 7, 8, 63, 32, 33, 34, 51, 36, 51, 52, 37, 38, 55, 56, 47, 48, 49, 50, 35, 52, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3281(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 19, 4, 19, 20, 5, 6, 23, 24, 31, 16, 17, 18, 3, 20, 3, 4, 21, 22, 7, 8, 47, 32, 33, 34, 51, 36, 51, 52, 37, 38, 55, 56, 63, 48, 49, 50, 35, 52, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3282(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 19, 4, 19, 20, 5, 6, 23, 24, 15, 0, 17, 18, 3, 20, 3, 4, 21, 22, 7, 8, 63, 48, 33, 34, 51, 36, 51, 52, 37, 38, 55, 56, 47, 32, 49, 50, 35, 52, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3283(const size_t 
num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 19, 4, 19, 20, 5, 6, 23, 24, 31, 0, 17, 18, 3, 20, 3, 4, 21, 22, 7, 8, 47, 48, 33, 34, 51, 36, 51, 52, 37, 38, 55, 56, 63, 32, 49, 50, 35, 52, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3284(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 19, 4, 19, 20, 5, 6, 23, 24, 15, 16, 1, 18, 3, 20, 3, 4, 21, 22, 7, 8, 63, 32, 49, 34, 51, 36, 51, 52, 37, 38, 55, 56, 47, 48, 33, 50, 35, 52, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3285(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 19, 4, 19, 20, 5, 6, 23, 24, 31, 16, 1, 18, 3, 20, 3, 4, 21, 22, 7, 8, 47, 32, 49, 34, 51, 36, 51, 52, 37, 38, 55, 56, 63, 48, 33, 50, 35, 52, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3286(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 19, 4, 19, 20, 5, 6, 23, 24, 15, 0, 1, 18, 3, 20, 3, 4, 21, 22, 7, 8, 63, 48, 49, 34, 51, 36, 51, 52, 37, 38, 55, 56, 47, 32, 33, 50, 35, 52, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3287(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 19, 4, 19, 20, 5, 6, 23, 24, 31, 0, 1, 18, 3, 20, 3, 4, 21, 22, 7, 8, 47, 48, 49, 34, 51, 36, 51, 52, 37, 38, 55, 56, 63, 32, 33, 50, 35, 52, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3288(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 19, 4, 19, 20, 5, 6, 23, 24, 15, 16, 17, 2, 3, 20, 3, 4, 21, 22, 7, 8, 63, 32, 33, 50, 51, 36, 51, 52, 37, 38, 55, 56, 47, 48, 49, 34, 35, 52, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3289(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 19, 4, 19, 20, 5, 6, 23, 24, 31, 16, 17, 2, 3, 20, 3, 4, 21, 22, 7, 8, 47, 32, 33, 50, 51, 36, 51, 52, 37, 38, 55, 56, 63, 48, 49, 34, 35, 52, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3290(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 19, 4, 19, 20, 5, 6, 23, 24, 15, 0, 17, 2, 3, 20, 3, 4, 21, 22, 7, 8, 63, 48, 33, 50, 51, 36, 51, 52, 37, 38, 55, 56, 47, 32, 49, 34, 35, 52, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3291(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 
19, 4, 19, 20, 5, 6, 23, 24, 31, 0, 17, 2, 3, 20, 3, 4, 21, 22, 7, 8, 47, 48, 33, 50, 51, 36, 51, 52, 37, 38, 55, 56, 63, 32, 49, 34, 35, 52, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3292(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 19, 4, 19, 20, 5, 6, 23, 24, 15, 16, 1, 2, 3, 20, 3, 4, 21, 22, 7, 8, 63, 32, 49, 50, 51, 36, 51, 52, 37, 38, 55, 56, 47, 48, 33, 34, 35, 52, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3293(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 19, 4, 19, 20, 5, 6, 23, 24, 31, 16, 1, 2, 3, 20, 3, 4, 21, 22, 7, 8, 47, 32, 49, 50, 51, 36, 51, 52, 37, 38, 55, 56, 63, 48, 33, 34, 35, 52, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3294(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 19, 4, 19, 20, 5, 6, 23, 24, 15, 0, 1, 2, 3, 20, 3, 4, 21, 22, 7, 8, 63, 48, 49, 50, 51, 36, 51, 52, 37, 38, 55, 56, 47, 32, 33, 34, 35, 52, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3295(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 19, 4, 19, 20, 5, 6, 23, 24, 31, 0, 1, 2, 3, 20, 3, 4, 21, 22, 7, 8, 47, 48, 49, 50, 51, 36, 51, 52, 37, 38, 55, 56, 63, 32, 33, 34, 35, 52, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3296(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 3, 20, 19, 20, 5, 6, 23, 24, 15, 16, 17, 18, 19, 4, 3, 4, 21, 22, 7, 8, 63, 32, 33, 34, 35, 52, 51, 52, 37, 38, 55, 56, 47, 48, 49, 50, 51, 36, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3297(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 3, 20, 19, 20, 5, 6, 23, 24, 31, 16, 17, 18, 19, 4, 3, 4, 21, 22, 7, 8, 47, 32, 33, 34, 35, 52, 51, 52, 37, 38, 55, 56, 63, 48, 49, 50, 51, 36, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3298(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 3, 20, 19, 20, 5, 6, 23, 24, 15, 0, 17, 18, 19, 4, 3, 4, 21, 22, 7, 8, 63, 48, 33, 34, 35, 52, 51, 52, 37, 38, 55, 56, 47, 32, 49, 50, 51, 36, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3299(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 3, 20, 19, 20, 5, 6, 23, 24, 31, 0, 17, 18, 19, 4, 3, 4, 21, 22, 7, 8, 47, 48, 33, 34, 35, 52, 51, 52, 37, 38, 55, 56, 63, 32, 49, 50, 51, 36, 35, 
36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3300(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 3, 20, 19, 20, 5, 6, 23, 24, 15, 16, 1, 18, 19, 4, 3, 4, 21, 22, 7, 8, 63, 32, 49, 34, 35, 52, 51, 52, 37, 38, 55, 56, 47, 48, 33, 50, 51, 36, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3301(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 3, 20, 19, 20, 5, 6, 23, 24, 31, 16, 1, 18, 19, 4, 3, 4, 21, 22, 7, 8, 47, 32, 49, 34, 35, 52, 51, 52, 37, 38, 55, 56, 63, 48, 33, 50, 51, 36, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3302(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 3, 20, 19, 20, 5, 6, 23, 24, 15, 0, 1, 18, 19, 4, 3, 4, 21, 22, 7, 8, 63, 48, 49, 34, 35, 52, 51, 52, 37, 38, 55, 56, 47, 32, 33, 50, 51, 36, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3303(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 3, 20, 19, 20, 5, 6, 23, 24, 31, 0, 1, 18, 19, 4, 3, 4, 21, 22, 7, 8, 47, 48, 49, 34, 35, 52, 51, 52, 37, 38, 55, 56, 63, 32, 33, 50, 51, 36, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3304(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 3, 20, 19, 20, 5, 6, 23, 24, 15, 16, 17, 2, 19, 4, 3, 4, 21, 22, 7, 8, 63, 32, 33, 50, 35, 52, 51, 52, 37, 38, 55, 56, 47, 48, 49, 34, 51, 36, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3305(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 3, 20, 19, 20, 5, 6, 23, 24, 31, 16, 17, 2, 19, 4, 3, 4, 21, 22, 7, 8, 47, 32, 33, 50, 35, 52, 51, 52, 37, 38, 55, 56, 63, 48, 49, 34, 51, 36, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3306(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 3, 20, 19, 20, 5, 6, 23, 24, 15, 0, 17, 2, 19, 4, 3, 4, 21, 22, 7, 8, 63, 48, 33, 50, 35, 52, 51, 52, 37, 38, 55, 56, 47, 32, 49, 34, 51, 36, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3307(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 3, 20, 19, 20, 5, 6, 23, 24, 31, 0, 17, 2, 19, 4, 3, 4, 21, 22, 7, 8, 47, 48, 33, 50, 35, 52, 51, 52, 37, 38, 55, 56, 63, 32, 49, 34, 51, 36, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3308(const size_t 
num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 3, 20, 19, 20, 5, 6, 23, 24, 15, 16, 1, 2, 19, 4, 3, 4, 21, 22, 7, 8, 63, 32, 49, 50, 35, 52, 51, 52, 37, 38, 55, 56, 47, 48, 33, 34, 51, 36, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3309(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 3, 20, 19, 20, 5, 6, 23, 24, 31, 16, 1, 2, 19, 4, 3, 4, 21, 22, 7, 8, 47, 32, 49, 50, 35, 52, 51, 52, 37, 38, 55, 56, 63, 48, 33, 34, 51, 36, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3310(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 3, 20, 19, 20, 5, 6, 23, 24, 15, 0, 1, 2, 19, 4, 3, 4, 21, 22, 7, 8, 63, 48, 49, 50, 35, 52, 51, 52, 37, 38, 55, 56, 47, 32, 33, 34, 51, 36, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3311(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 3, 20, 19, 20, 5, 6, 23, 24, 31, 0, 1, 2, 19, 4, 3, 4, 21, 22, 7, 8, 47, 48, 49, 50, 35, 52, 51, 52, 37, 38, 55, 56, 63, 32, 33, 34, 51, 36, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3312(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 19, 20, 19, 20, 5, 6, 23, 24, 15, 16, 17, 18, 3, 4, 3, 4, 21, 22, 7, 8, 63, 32, 33, 34, 51, 52, 51, 52, 37, 38, 55, 56, 47, 48, 49, 50, 35, 36, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3313(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 19, 20, 19, 20, 5, 6, 23, 24, 31, 16, 17, 18, 3, 4, 3, 4, 21, 22, 7, 8, 47, 32, 33, 34, 51, 52, 51, 52, 37, 38, 55, 56, 63, 48, 49, 50, 35, 36, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3314(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 19, 20, 19, 20, 5, 6, 23, 24, 15, 0, 17, 18, 3, 4, 3, 4, 21, 22, 7, 8, 63, 48, 33, 34, 51, 52, 51, 52, 37, 38, 55, 56, 47, 32, 49, 50, 35, 36, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3315(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 19, 20, 19, 20, 5, 6, 23, 24, 31, 0, 17, 18, 3, 4, 3, 4, 21, 22, 7, 8, 47, 48, 33, 34, 51, 52, 51, 52, 37, 38, 55, 56, 63, 32, 49, 50, 35, 36, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3316(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 19, 
20, 19, 20, 5, 6, 23, 24, 15, 16, 1, 18, 3, 4, 3, 4, 21, 22, 7, 8, 63, 32, 49, 34, 51, 52, 51, 52, 37, 38, 55, 56, 47, 48, 33, 50, 35, 36, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3317(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 19, 20, 19, 20, 5, 6, 23, 24, 31, 16, 1, 18, 3, 4, 3, 4, 21, 22, 7, 8, 47, 32, 49, 34, 51, 52, 51, 52, 37, 38, 55, 56, 63, 48, 33, 50, 35, 36, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3318(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 19, 20, 19, 20, 5, 6, 23, 24, 15, 0, 1, 18, 3, 4, 3, 4, 21, 22, 7, 8, 63, 48, 49, 34, 51, 52, 51, 52, 37, 38, 55, 56, 47, 32, 33, 50, 35, 36, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3319(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 19, 20, 19, 20, 5, 6, 23, 24, 31, 0, 1, 18, 3, 4, 3, 4, 21, 22, 7, 8, 47, 48, 49, 34, 51, 52, 51, 52, 37, 38, 55, 56, 63, 32, 33, 50, 35, 36, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3320(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 19, 20, 19, 20, 5, 6, 23, 24, 15, 16, 17, 2, 3, 4, 3, 4, 21, 22, 7, 8, 63, 32, 33, 50, 51, 52, 51, 52, 37, 38, 55, 56, 47, 48, 49, 34, 35, 36, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3321(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 19, 20, 19, 20, 5, 6, 23, 24, 31, 16, 17, 2, 3, 4, 3, 4, 21, 22, 7, 8, 47, 32, 33, 50, 51, 52, 51, 52, 37, 38, 55, 56, 63, 48, 49, 34, 35, 36, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3322(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 19, 20, 19, 20, 5, 6, 23, 24, 15, 0, 17, 2, 3, 4, 3, 4, 21, 22, 7, 8, 63, 48, 33, 50, 51, 52, 51, 52, 37, 38, 55, 56, 47, 32, 49, 34, 35, 36, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3323(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 19, 20, 19, 20, 5, 6, 23, 24, 31, 0, 17, 2, 3, 4, 3, 4, 21, 22, 7, 8, 47, 48, 33, 50, 51, 52, 51, 52, 37, 38, 55, 56, 63, 32, 49, 34, 35, 36, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3324(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 19, 20, 19, 20, 5, 6, 23, 24, 15, 16, 1, 2, 3, 4, 3, 4, 21, 22, 7, 8, 63, 32, 49, 50, 51, 52, 51, 52, 37, 38, 55, 56, 47, 48, 33, 34, 35, 36, 35, 36, 
53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3325(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 19, 20, 19, 20, 5, 6, 23, 24, 31, 16, 1, 2, 3, 4, 3, 4, 21, 22, 7, 8, 47, 32, 49, 50, 51, 52, 51, 52, 37, 38, 55, 56, 63, 48, 33, 34, 35, 36, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3326(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 19, 20, 19, 20, 5, 6, 23, 24, 15, 0, 1, 2, 3, 4, 3, 4, 21, 22, 7, 8, 63, 48, 49, 50, 51, 52, 51, 52, 37, 38, 55, 56, 47, 32, 33, 34, 35, 36, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt3327(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 19, 20, 19, 20, 5, 6, 23, 24, 31, 0, 1, 2, 3, 4, 3, 4, 21, 22, 7, 8, 47, 48, 49, 50, 51, 52, 51, 52, 37, 38, 55, 56, 63, 32, 33, 34, 35, 36, 35, 36, 53, 54, 39, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } #endif // DESGPU_COMPILE_ALL_SALTS
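// A hedged sketch (not from this source): every salt-specialized wrapper above
// shares the same signature, so host code can dispatch on the salt value through
// a plain lookup table. The table name, its contents, and the salt-to-index
// mapping below are illustrative assumptions only.
typedef void (*des_25_encrypt_fn)(const size_t num_blocks,
                                  const size_t threads_per_block,
                                  vtype* const unchecked_hashes,
                                  const vtype* const bitsplitted_keys);

// Hypothetical table: entry j points at the wrapper for salt value 3234 + j.
// Only a few entries are shown; a full table would list every compiled salt.
static const des_25_encrypt_fn des_25_encrypt_by_salt_example[] = {
    des_25_encrypt_salt3234, des_25_encrypt_salt3235,
    des_25_encrypt_salt3236, des_25_encrypt_salt3237,
    /* ... */
};

// Usage sketch:
//   des_25_encrypt_by_salt_example[salt - 3234](num_blocks, threads_per_block,
//                                               unchecked_hashes, bitsplitted_keys);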
10bd842737580c7408fb5f4dce6ffee0f788a319.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void gpuIt(float *tNew, float *tOld, float *tOrig, int x, int y, int z, float k, float st)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    // may want an if(i < x*y*z) to prevent overflowing, like this
    if(i < x*y*z){
        if(i == 0){ // top left corner
            tNew[i] = tOld[i] + k*(tOld[i+1] + tOld[i] + tOld[i] + tOld[i+x] - 4*tOld[i]);
            //tNew[i] = 1;
        }
        else if(i == x-1){ // top right corner
            tNew[i] = tOld[i] + k*(tOld[i] + tOld[i-1] + tOld[i] + tOld[i+x] - 4*tOld[i]);
            //tNew[i] = 3;
        }
        else if(i == x*y - 1){ // bottom right corner
            tNew[i] = tOld[i] + k*(tOld[i] + tOld[i-1] + tOld[i-x] + tOld[i] - 4*tOld[i]);
            //tNew[i] = 5;
        }
        else if(i == x*y - x){ // bottom left corner
            tNew[i] = tOld[i] + k*(tOld[i+1] + tOld[i] + tOld[i-x] + tOld[i] - 4*tOld[i]);
            //tNew[i] = 7;
        }
        else if(i%x == 0){ // left side
            tNew[i] = tOld[i] + k*(tOld[i+1] + tOld[i] + tOld[i-x] + tOld[i+x] - 4*tOld[i]);
            //tNew[i] = 8;
        }
        else if(i%x == x-1){ // right side
            tNew[i] = tOld[i] + k*(tOld[i] + tOld[i-1] + tOld[i-x] + tOld[i+x] - 4*tOld[i]);
            //tNew[i] = 4;
        }
        else if(i - x < 0){ // top row
            tNew[i] = tOld[i] + k*(tOld[i+1] + tOld[i-1] + tOld[i] + tOld[i+x] - 4*tOld[i]);
            //tNew[i] = 2;
        }
        else if(i + x > x*y){ // bottom row
            tNew[i] = tOld[i] + k*(tOld[i+1] + tOld[i-1] + tOld[i-x] + tOld[i] - 4*tOld[i]);
            //tNew[i] = 6;
        }
        else{ // interior point
            tNew[i] = tOld[i] + k*(tOld[i+1] + tOld[i-1] + tOld[i-x] + tOld[i+x] - 4*tOld[i]);
            //tNew[i] = 9;
        }
        //tNew[i] = i; // for debugging

        // replace heaters
        if(tOrig[i] != st){
            tNew[i] = tOrig[i];
        }
        //tNew[i] = i%x;
    }
}
10bd842737580c7408fb5f4dce6ffee0f788a319.cu
#include "includes.h" __global__ void gpuIt(float *tNew,float *tOld,float *tOrig,int x,int y,int z,float k,float st) { int i = threadIdx.x + blockIdx.x * blockDim.x; // may want an if(i < x*y*z) to prevent overflowing, likea thisa if(i < x*y*z){ if(i == 0){ // top left corner tNew[i] = tOld[i] + k*(tOld[i+1] + tOld[i] + tOld[i] + tOld[i+x] - 4*tOld[i]); //tNew[i] = 1; } else if(i == x-1){ // top right corner tNew[i] = tOld[i] + k*(tOld[i] + tOld[i-1] + tOld[i] + tOld[i+x] - 4*tOld[i]); //tNew[i] = 3; } else if(i == x*y - 1){ // bottom right corner tNew[i] = tOld[i] + k*(tOld[i] + tOld[i-1] + tOld[i-x] + tOld[i] - 4*tOld[i]); //tNew[i] = 5; } else if(i == x*y - x){ // bottom left corner tNew[i] = tOld[i] + k*(tOld[i+1] + tOld[i] + tOld[i-x] + tOld[i] - 4*tOld[i]); //tNew[i] = 7; } else if(i%x == 0){ // left side tNew[i] = tOld[i] + k*(tOld[i+1] + tOld[i] + tOld[i-x] + tOld[i+x] - 4*tOld[i]); //tNew[i] = 8; } else if(i%x == x-1){ // right side tNew[i] = tOld[i] + k*(tOld[i] + tOld[i-1] + tOld[i-x] + tOld[i+x] - 4*tOld[i]); //tNew[i] = 4; } else if(i - x < 0){ // top row tNew[i] = tOld[i] + k*(tOld[i+1] + tOld[i-1] + tOld[i] + tOld[i+x] - 4*tOld[i]); //tNew[i] = 2; } else if(i + x > x*y){ // bottom row tNew[i] = tOld[i] + k*(tOld[i+1] + tOld[i-1] + tOld[i-x] + tOld[i] - 4*tOld[i]); //tNew[i] = 6; } else{ tNew[i] = tOld[i] + k*(tOld[i+1] + tOld[i-1] + tOld[i-x] + tOld[i+x] - 4*tOld[i]); //tNew[i] = 9; } //tNew[i] = i; // for debugging // replace heaters if(tOrig[i] != st){ tNew[i] = tOrig[i]; } //tNew[i] = i%x; } }
6c543e92115808d8ffebdaf1389471125559fb6e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** File name: bfs_gpu_status_array_flag.cu Author: Yuede Ji Last update: 13:30 10-10-2015 Description: Using status array to implent GPU version of bfs. Calculate the shortest distance from 0 to others **/ #include <stdio.h> #include <stdlib.h> #include <string.h> //Using arrays to implement queue char filein[] = "/home/yuede/dataset/kron_16_16.dat";// no need char fileout[] = "/home/yuede/dataset/kron_16_16.gpu.as.flag.result"; char file_v_e[] = "/home/yuede/dataset/kron_16_16.v_e"; char file_beg_pos[] = "/home/yuede/dataset/kron_16_16.beg.pos"; char file_csr[] = "/home/yuede/dataset/kron_16_16.csr"; /** char filein[] = "/home/yuede/dataset/kron_10_4.dat";// no need char fileout[] = "/home/yuede/dataset/kron_10_4.gpu.as.result"; char file_v_e[] = "/home/yuede/dataset/kron_10_4.v_e"; char file_beg_pos[] = "/home/yuede/dataset/kron_10_4.beg.pos"; char file_csr[] = "/home/yuede/dataset/kron_10_4.csr"; **/ const int v_num = 65535; const int e_num = 2097152; const int INF = 0x7FFFFFFF; const int threads_num = 256; int beg_pos[v_num+1]; int csr[e_num]; int sa[v_num]; //load from .dat files, and store in array csr[N*N], beg_pos[N] int csr_begin(int v, int e); void bfs_sa(int root, int v, int e); __global__ void traverse_one(int level, int * dev_sa, int * dev_beg_pos, int * dev_csr) { int id = threadIdx.x + blockIdx.x * blockDim.x; if(dev_sa[id] == level)///node i belongs to current level { //int j = dev_beg_pos[id]; for(int j=dev_beg_pos[id]; j<dev_beg_pos[id+1]; ++j) { if(dev_sa[dev_csr[j]] > level + 1) { dev_sa[dev_csr[j]] = level + 1; printf("%d\n", dev_csr[j]); } } } } int main() { csr_begin(v_num, e_num); bfs_sa(0, v_num, e_num); FILE * fp_out = fopen(fileout, "w"); for(int i=0; i<v_num; ++i) fprintf(fp_out, "%d\n", sa[i]); fclose(fp_out); return 0; } void bfs_sa(int root, int v, int e) { for(int i=0; i<v; ++i) sa[i] = INF; int level = 0; sa[0] = 0; bool flag = true; //flag whether current level has nodes int *dev_sa; int *dev_beg_pos; int *dev_csr; hipMalloc( (void **) &dev_sa, v*sizeof(int)); hipMalloc( (void **) &dev_beg_pos, (v+1)*sizeof(int)); hipMalloc( (void **) &dev_csr, e*sizeof(int)); hipMemcpy(dev_sa, sa, v*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_beg_pos, beg_pos, (v+1)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_csr, csr, e*sizeof(int), hipMemcpyHostToDevice); while(flag) { flag = false; hipLaunchKernelGGL(( traverse_one), dim3(threads_num), dim3(threads_num), 0, 0, level, dev_sa, dev_beg_pos, dev_csr); ++level; hipMemcpy(sa, dev_sa, v*sizeof(int), hipMemcpyDeviceToHost); for(int i=0; i<v; ++i) if(sa[i] == level) { flag = true; break; } } hipMemcpy(sa, dev_sa, v*sizeof(int), hipMemcpyDeviceToHost); hipFree(dev_sa); hipFree(dev_beg_pos); hipFree(dev_csr); } int csr_begin(int v, int e) { FILE * fp_beg = fopen(file_beg_pos, "r"); int i = 0; int p; while(fscanf(fp_beg, "%d", &p) != EOF) { beg_pos[i] = p; ++i; } fclose(fp_beg); i = 0; FILE * fp_csr = fopen(file_csr, "r"); while(fscanf(fp_csr, "%d", &p) != EOF) { csr[i] = p; ++i; } fclose(fp_csr); return v; }
6c543e92115808d8ffebdaf1389471125559fb6e.cu
/** File name: bfs_gpu_status_array_flag.cu Author: Yuede Ji Last update: 13:30 10-10-2015 Description: Using status array to implent GPU version of bfs. Calculate the shortest distance from 0 to others **/ #include <stdio.h> #include <stdlib.h> #include <string.h> //Using arrays to implement queue char filein[] = "/home/yuede/dataset/kron_16_16.dat";// no need char fileout[] = "/home/yuede/dataset/kron_16_16.gpu.as.flag.result"; char file_v_e[] = "/home/yuede/dataset/kron_16_16.v_e"; char file_beg_pos[] = "/home/yuede/dataset/kron_16_16.beg.pos"; char file_csr[] = "/home/yuede/dataset/kron_16_16.csr"; /** char filein[] = "/home/yuede/dataset/kron_10_4.dat";// no need char fileout[] = "/home/yuede/dataset/kron_10_4.gpu.as.result"; char file_v_e[] = "/home/yuede/dataset/kron_10_4.v_e"; char file_beg_pos[] = "/home/yuede/dataset/kron_10_4.beg.pos"; char file_csr[] = "/home/yuede/dataset/kron_10_4.csr"; **/ const int v_num = 65535; const int e_num = 2097152; const int INF = 0x7FFFFFFF; const int threads_num = 256; int beg_pos[v_num+1]; int csr[e_num]; int sa[v_num]; //load from .dat files, and store in array csr[N*N], beg_pos[N] int csr_begin(int v, int e); void bfs_sa(int root, int v, int e); __global__ void traverse_one(int level, int * dev_sa, int * dev_beg_pos, int * dev_csr) { int id = threadIdx.x + blockIdx.x * blockDim.x; if(dev_sa[id] == level)///node i belongs to current level { //int j = dev_beg_pos[id]; for(int j=dev_beg_pos[id]; j<dev_beg_pos[id+1]; ++j) { if(dev_sa[dev_csr[j]] > level + 1) { dev_sa[dev_csr[j]] = level + 1; printf("%d\n", dev_csr[j]); } } } } int main() { csr_begin(v_num, e_num); bfs_sa(0, v_num, e_num); FILE * fp_out = fopen(fileout, "w"); for(int i=0; i<v_num; ++i) fprintf(fp_out, "%d\n", sa[i]); fclose(fp_out); return 0; } void bfs_sa(int root, int v, int e) { for(int i=0; i<v; ++i) sa[i] = INF; int level = 0; sa[0] = 0; bool flag = true; //flag whether current level has nodes int *dev_sa; int *dev_beg_pos; int *dev_csr; cudaMalloc( (void **) &dev_sa, v*sizeof(int)); cudaMalloc( (void **) &dev_beg_pos, (v+1)*sizeof(int)); cudaMalloc( (void **) &dev_csr, e*sizeof(int)); cudaMemcpy(dev_sa, sa, v*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_beg_pos, beg_pos, (v+1)*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_csr, csr, e*sizeof(int), cudaMemcpyHostToDevice); while(flag) { flag = false; traverse_one<<<threads_num, threads_num>>>(level, dev_sa, dev_beg_pos, dev_csr); ++level; cudaMemcpy(sa, dev_sa, v*sizeof(int), cudaMemcpyDeviceToHost); for(int i=0; i<v; ++i) if(sa[i] == level) { flag = true; break; } } cudaMemcpy(sa, dev_sa, v*sizeof(int), cudaMemcpyDeviceToHost); cudaFree(dev_sa); cudaFree(dev_beg_pos); cudaFree(dev_csr); } int csr_begin(int v, int e) { FILE * fp_beg = fopen(file_beg_pos, "r"); int i = 0; int p; while(fscanf(fp_beg, "%d", &p) != EOF) { beg_pos[i] = p; ++i; } fclose(fp_beg); i = 0; FILE * fp_csr = fopen(file_csr, "r"); while(fscanf(fp_csr, "%d", &p) != EOF) { csr[i] = p; ++i; } fclose(fp_csr); return v; }
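// A hedged sketch (not from the original file): the launch above creates
// threads_num * threads_num = 65536 threads while sa/dev_sa hold v_num = 65535
// entries, so the highest thread index reads one element past the end of the
// status array. A guarded variant of the kernel avoids that; passing the vertex
// count as a parameter is an assumption of this sketch.
__global__ void traverse_one_guarded(int level, int vertex_count, int * dev_sa,
                                     int * dev_beg_pos, int * dev_csr)
{
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    if (id >= vertex_count) return;    // bounds guard for the surplus threads
    if (dev_sa[id] == level)           // node id belongs to the current level
    {
        for (int j = dev_beg_pos[id]; j < dev_beg_pos[id+1]; ++j)
        {
            if (dev_sa[dev_csr[j]] > level + 1)
                dev_sa[dev_csr[j]] = level + 1;
        }
    }
}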
e57540b91e54f8050e9bafcb0143152a636c20ca.hip
// !!! This is a file automatically generated by hipify!!! #include "variable_constraint_kernel.hpp" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "yuzu/foundation/memory/RelativePointer.hpp" #include "yuzu/foundation/memory/pointer.hpp" #include "yuzu/domain/boundary_conditions/BoundaryConditionData.hpp" #include "yuzu/common/gpu.hpp" #include "yuzu/utils/kernel_utils.hpp" #include "stdio.h" #define DOF_STATUS_FREE 0 namespace ay = axis::yuzu; namespace ayfm = axis::yuzu::foundation::memory; namespace aydbc = axis::yuzu::domain::boundary_conditions; struct VariableConstraintData { const real *CurveDataPtr; real ScalingFactor; real ReleaseTime; }; __global__ void __launch_bounds__(AXIS_YUZU_MAX_THREADS_PER_BLOCK) UpdateConstraintOnGPUKernel(uint64 numThreads, uint64 startIndex, void *baseMemoryAddressOnGPU, real time, axis::yuzu::foundation::memory::RelativePointer vectorMaskPtr) { uint64 index = ay::GetThreadIndex(gridDim, blockIdx, blockDim, threadIdx, startIndex); if (!ay::IsActiveThread(index, numThreads)) return; aydbc::BoundaryConditionData bcData(baseMemoryAddressOnGPU, index, sizeof(VariableConstraintData)); real *bucket = bcData.GetOutputBucket(); VariableConstraintData *data = (VariableConstraintData *)bcData.GetCustomData(); real curveCurVal = *data->CurveDataPtr; real scaleFactor = data->ScalingFactor; real releaseTime = data->ReleaseTime; uint64 dofId = bcData.GetDofId(); *bucket = curveCurVal * scaleFactor; if (releaseTime >= 0 && time > releaseTime) { char *vectorMask = axis::yabsptr<char>(vectorMaskPtr); vectorMask[dofId] = DOF_STATUS_FREE; } } void axis::domain::boundary_conditions::UpdateConstraintOnGPU( uint64 numThreadsToUse, uint64 startIndex, void *baseMemoryAddressOnGPU, const axis::Dimension3D& gridDim, const axis::Dimension3D& blockDim, void * streamPtr, real time, axis::foundation::memory::RelativePointer vectorMaskPtr ) { dim3 grid, block; grid.x = gridDim.X; grid.y = gridDim.Y; grid.z = gridDim.Z; block.x = blockDim.X; block.y = blockDim.Y; block.z = blockDim.Z; hipLaunchKernelGGL(( UpdateConstraintOnGPUKernel), dim3(grid), dim3(block), 0, (hipStream_t)streamPtr, numThreadsToUse, startIndex, baseMemoryAddressOnGPU, time, reinterpret_cast<ayfm::RelativePointer&>(vectorMaskPtr)); }
e57540b91e54f8050e9bafcb0143152a636c20ca.cu
#include "variable_constraint_kernel.hpp" #include <cuda.h> #include <cuda_runtime.h> #include "yuzu/foundation/memory/RelativePointer.hpp" #include "yuzu/foundation/memory/pointer.hpp" #include "yuzu/domain/boundary_conditions/BoundaryConditionData.hpp" #include "yuzu/common/gpu.hpp" #include "yuzu/utils/kernel_utils.hpp" #include "stdio.h" #define DOF_STATUS_FREE 0 namespace ay = axis::yuzu; namespace ayfm = axis::yuzu::foundation::memory; namespace aydbc = axis::yuzu::domain::boundary_conditions; struct VariableConstraintData { const real *CurveDataPtr; real ScalingFactor; real ReleaseTime; }; __global__ void __launch_bounds__(AXIS_YUZU_MAX_THREADS_PER_BLOCK) UpdateConstraintOnGPUKernel(uint64 numThreads, uint64 startIndex, void *baseMemoryAddressOnGPU, real time, axis::yuzu::foundation::memory::RelativePointer vectorMaskPtr) { uint64 index = ay::GetThreadIndex(gridDim, blockIdx, blockDim, threadIdx, startIndex); if (!ay::IsActiveThread(index, numThreads)) return; aydbc::BoundaryConditionData bcData(baseMemoryAddressOnGPU, index, sizeof(VariableConstraintData)); real *bucket = bcData.GetOutputBucket(); VariableConstraintData *data = (VariableConstraintData *)bcData.GetCustomData(); real curveCurVal = *data->CurveDataPtr; real scaleFactor = data->ScalingFactor; real releaseTime = data->ReleaseTime; uint64 dofId = bcData.GetDofId(); *bucket = curveCurVal * scaleFactor; if (releaseTime >= 0 && time > releaseTime) { char *vectorMask = axis::yabsptr<char>(vectorMaskPtr); vectorMask[dofId] = DOF_STATUS_FREE; } } void axis::domain::boundary_conditions::UpdateConstraintOnGPU( uint64 numThreadsToUse, uint64 startIndex, void *baseMemoryAddressOnGPU, const axis::Dimension3D& gridDim, const axis::Dimension3D& blockDim, void * streamPtr, real time, axis::foundation::memory::RelativePointer vectorMaskPtr ) { dim3 grid, block; grid.x = gridDim.X; grid.y = gridDim.Y; grid.z = gridDim.Z; block.x = blockDim.X; block.y = blockDim.Y; block.z = blockDim.Z; UpdateConstraintOnGPUKernel<<<grid, block, 0, (cudaStream_t)streamPtr>>>( numThreadsToUse, startIndex, baseMemoryAddressOnGPU, time, reinterpret_cast<ayfm::RelativePointer&>(vectorMaskPtr)); }
e8ddb2fe3e5336c1fffeb3d92fec7b8e8bd87b3d.hip
// !!! This is a file automatically generated by hipify!!! #include <algorithm> #include <fstream> #include <iostream> #include <cmath> #include <map> #include <stdio.h> #include <stdlib.h> #include <string> #include <cusp/io/matrix_market.h> #include <cusp/csr_matrix.h> #include <cusp/multiply.h> #include <cusp/blas.h> #include <cusp/detail/format_utils.h> #include <cusp/detail/host/convert.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <cusparse/common.h> #include <cusparse/timer.h> #include "hipsparse.h" using std::endl; using std::cerr; using std::cout; // ----------------------------------------------------------------------------- // ----------------------------------------------------------------------------- #ifdef WIN32 # define ISNAN(A) (_isnan(A)) #else # define ISNAN(A) (isnan(A)) #endif enum TestColor {COLOR_NO = 0, COLOR_RED, COLOR_GREEN} ; class OutputItem { public: OutputItem(std::ostream &o): m_o(o), m_additional_item_count(19) {} int m_additional_item_count; template <typename T> void operator() (T item, TestColor c = COLOR_NO) { m_o << "<td style=\"border-style: inset;\">\n"; switch (c) { case COLOR_RED: m_o << "<p> <FONT COLOR=\"Red\">" << item << " </FONT> </p>\n"; break; case COLOR_GREEN: m_o << "<p> <FONT COLOR=\"Green\">" << item << " </FONT> </p>\n"; break; default: m_o << "<p> " << item << " </p>\n"; break; } m_o << "</td>\n"; } private: std::ostream &m_o; }; int main(int argc, char **argv) { if (argc < 2) { cerr << "Usage: ./example MATRIX_MARKET_FILE_NAME" << endl; return 1; } cusp::csr_matrix<int, double, cusp::device_memory> Ad_cusp; cusp::io::read_matrix_market_file(Ad_cusp, argv[1]); cusparse::CuSparseCsrMatrixD A(Ad_cusp.row_offsets, Ad_cusp.column_indices, Ad_cusp.values); cusparse::CuSparseCsrMatrixD Abak(Ad_cusp.row_offsets, Ad_cusp.column_indices, Ad_cusp.values); thrust::device_vector<double> x(A.m_n, 1.0); thrust::device_vector<double> y; thrust::device_vector<double> x_new(A.m_n); // Name of matrix OutputItem outputItem(cout); cout << "<tr valign=top>" << endl; // Name of matrix { std::string fileMat = argv[1]; int i; for (i = fileMat.size()-1; i>=0 && fileMat[i] != '/' && fileMat[i] != '\\'; i--); i++; fileMat = fileMat.substr(i); size_t j = fileMat.rfind(".mtx"); if (j != std::string::npos) outputItem( fileMat.substr(0, j)); else outputItem( fileMat); } // Dimension outputItem( A.m_n); //aNNZ outputItem( A.m_nnz); cusparse::GPUTimer local_timer; local_timer.Start(); cusolverStatus_t status = A.QRSolve(x, y); local_timer.Stop(); int code = status; // Error code if (status != CUSOLVER_STATUS_SUCCESS) { outputItem(code, COLOR_RED); outputItem(""); outputItem(""); cout << "</tr>" << endl; return 1; } else outputItem(int(0)); Abak.spmv(y, x_new); // The relative infinity norm of solution double nrm_target = cusp::blas::nrmmax(x); cusp::blas::axpy(x, x_new, (double)(-1)); double rel_err = fabs(cusp::blas::nrmmax(x_new))/ nrm_target; if (isnan(cusp::blas::nrm1(x_new))) outputItem("NaN", COLOR_RED); else if (rel_err >= 1) outputItem(rel_err, COLOR_RED); else outputItem(rel_err); outputItem( local_timer.getElapsed()); cout << "</tr>" << endl; return 0; }
e8ddb2fe3e5336c1fffeb3d92fec7b8e8bd87b3d.cu
#include <algorithm> #include <fstream> #include <iostream> #include <cmath> #include <map> #include <stdio.h> #include <stdlib.h> #include <string> #include <cusp/io/matrix_market.h> #include <cusp/csr_matrix.h> #include <cusp/multiply.h> #include <cusp/blas.h> #include <cusp/detail/format_utils.h> #include <cusp/detail/host/convert.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <cusparse/common.h> #include <cusparse/timer.h> #include "cusparse.h" using std::endl; using std::cerr; using std::cout; // ----------------------------------------------------------------------------- // ----------------------------------------------------------------------------- #ifdef WIN32 # define ISNAN(A) (_isnan(A)) #else # define ISNAN(A) (isnan(A)) #endif enum TestColor {COLOR_NO = 0, COLOR_RED, COLOR_GREEN} ; class OutputItem { public: OutputItem(std::ostream &o): m_o(o), m_additional_item_count(19) {} int m_additional_item_count; template <typename T> void operator() (T item, TestColor c = COLOR_NO) { m_o << "<td style=\"border-style: inset;\">\n"; switch (c) { case COLOR_RED: m_o << "<p> <FONT COLOR=\"Red\">" << item << " </FONT> </p>\n"; break; case COLOR_GREEN: m_o << "<p> <FONT COLOR=\"Green\">" << item << " </FONT> </p>\n"; break; default: m_o << "<p> " << item << " </p>\n"; break; } m_o << "</td>\n"; } private: std::ostream &m_o; }; int main(int argc, char **argv) { if (argc < 2) { cerr << "Usage: ./example MATRIX_MARKET_FILE_NAME" << endl; return 1; } cusp::csr_matrix<int, double, cusp::device_memory> Ad_cusp; cusp::io::read_matrix_market_file(Ad_cusp, argv[1]); cusparse::CuSparseCsrMatrixD A(Ad_cusp.row_offsets, Ad_cusp.column_indices, Ad_cusp.values); cusparse::CuSparseCsrMatrixD Abak(Ad_cusp.row_offsets, Ad_cusp.column_indices, Ad_cusp.values); thrust::device_vector<double> x(A.m_n, 1.0); thrust::device_vector<double> y; thrust::device_vector<double> x_new(A.m_n); // Name of matrix OutputItem outputItem(cout); cout << "<tr valign=top>" << endl; // Name of matrix { std::string fileMat = argv[1]; int i; for (i = fileMat.size()-1; i>=0 && fileMat[i] != '/' && fileMat[i] != '\\'; i--); i++; fileMat = fileMat.substr(i); size_t j = fileMat.rfind(".mtx"); if (j != std::string::npos) outputItem( fileMat.substr(0, j)); else outputItem( fileMat); } // Dimension outputItem( A.m_n); //aNNZ outputItem( A.m_nnz); cusparse::GPUTimer local_timer; local_timer.Start(); cusolverStatus_t status = A.QRSolve(x, y); local_timer.Stop(); int code = status; // Error code if (status != CUSOLVER_STATUS_SUCCESS) { outputItem(code, COLOR_RED); outputItem(""); outputItem(""); cout << "</tr>" << endl; return 1; } else outputItem(int(0)); Abak.spmv(y, x_new); // The relative infinity norm of solution double nrm_target = cusp::blas::nrmmax(x); cusp::blas::axpy(x, x_new, (double)(-1)); double rel_err = fabs(cusp::blas::nrmmax(x_new))/ nrm_target; if (isnan(cusp::blas::nrm1(x_new))) outputItem("NaN", COLOR_RED); else if (rel_err >= 1) outputItem(rel_err, COLOR_RED); else outputItem(rel_err); outputItem( local_timer.getElapsed()); cout << "</tr>" << endl; return 0; }
a49d3e8da2c2b7254deb2b0f5459a3744e88b979.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void addSingleThread(int n, float *x, float *y)
{
    for (int i = 0; i < n; i++)
        y[i] = x[i] + y[i];
}
a49d3e8da2c2b7254deb2b0f5459a3744e88b979.cu
#include "includes.h" __global__ void addSingleThread(int n, float *x, float *y) { for (int i = 0; i < n; i++) y[i] = x[i] + y[i]; }
227dfcb06724692a8366e37e9fec1db64bbebe9e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @generated from zgeqr2x_gpu-v4.cu normal z -> c, Fri Jul 18 17:34:13 2014 */ #include "common_magma.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 __global__ void magma_ctrmv_kernel2(const magmaFloatComplex *T, int ldt, magmaFloatComplex *v, magmaFloatComplex *y, magmaFloatComplex *tau); __global__ void magma_cgemv_kernel3(int m, const magmaFloatComplex * __restrict__ V, int ldv, magmaFloatComplex *c, magmaFloatComplex *dwork, magmaFloatComplex *tau); ////////////////////////////////////////////////////////////////////////////// __global__ void magma_cgemv_kernel1(int m, const magmaFloatComplex * __restrict__ V, int ldv, const magmaFloatComplex * __restrict__ c, magmaFloatComplex *dwork); __global__ void magma_cgemv_kernel2(int m, int n, const magmaFloatComplex * __restrict__ V, int ldv, const magmaFloatComplex * __restrict__ x, magmaFloatComplex *c); __global__ void magma_ctrmv_tkernel(magmaFloatComplex *T, int ldt, magmaFloatComplex *v, magmaFloatComplex *y); __global__ void magma_scnrm2_adjust_kernel(float *xnorm, magmaFloatComplex *c); /** Purpose ------- CGEQR2 computes a QR factorization of a complex m by n matrix A: A = Q * R. This expert routine requires two more arguments than the standard cgeqr2, namely, dT and ddA, explained below. The storage for A is also not as in the LAPACK's cgeqr2 routine (see below). The first is used to output the triangular n x n factor T of the block reflector used in the factorization. The second holds the diagonal nxn blocks of A, i.e., the diagonal submatrices of R. This routine implements the left looking QR. This version adds internal blocking. Arguments --------- @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. @param[in,out] dA COMPLEX array, dimension (LDA,N) On entry, the m by n matrix A. On exit, the unitary matrix Q as a product of elementary reflectors (see Further Details). \n the elements on and above the diagonal of the array contain the min(m,n) by n upper trapezoidal matrix R (R is upper triangular if m >= n); the elements below the diagonal, with the array TAU, represent the unitary matrix Q as a product of elementary reflectors (see Further Details). @param[in] ldda INTEGER The leading dimension of the array A. LDA >= max(1,M). @param[out] dtau COMPLEX array, dimension (min(M,N)) The scalar factors of the elementary reflectors (see Further Details). @param[out] dT COMPLEX array, dimension N x N. Stores the triangular N x N factor T of the block reflector used in the factorization. The lower triangular part is 0. @param[out] ddA COMPLEX array, dimension N x N. Stores the elements of the upper N x N diagonal block of A. LAPACK stores this array in A. There are 0s below the diagonal. @param dwork (workspace) DOUBLE_PRECISION array, dimension (3 N) @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value Further Details --------------- The matrix Q is represented as a product of elementary reflectors Q = H(1) H(2) . . . H(k), where k = min(m,n). Each H(i) has the form H(i) = I - tau * v * v' where tau is a complex scalar, and v is a complex vector with v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), and tau in TAU(i). 
@ingroup magma_cgeqrf_comp ********************************************************************/ extern "C" magma_int_t magma_cgeqr2x4_gpu(magma_int_t *m, magma_int_t *n, magmaFloatComplex *dA, magma_int_t *ldda, magmaFloatComplex *dtau, magmaFloatComplex *dT, magmaFloatComplex *ddA, float *dwork, magma_int_t *info, magma_queue_t stream) { #define da_ref(a_1,a_2) ( dA+(a_2)*(*ldda) + (a_1)) #define dt_ref(a_1,a_2) ( dT+(a_2)*(k) + (a_1)) #define BS 32 magma_int_t i, k; float *dnorm = (float *)dwork; magmaFloatComplex *work = (magmaFloatComplex *)(dwork+2*(*n)); magma_queue_t cstream; magmablasGetKernelStream(&cstream); magmablasSetKernelStream(stream); *info = 0; if (*m < 0) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*ldda < max(1,*m)) { *info = -4; } if (*info != 0) { magma_xerbla( __func__, -(*info) ); return *info; } /* Compute the norms of the trailing columns */ k = min(*m,*n); magmablas_scnrm2_cols(*m, k, da_ref(0,0), *ldda, dnorm); for (magma_int_t b=0; b < k; b += BS) { for (i = b; i < min(k, b+BS); ++i) { /* Apply H' to A(:,i) from the left */ if ( i-b > 0){ hipLaunchKernelGGL(( magma_cgemv_kernel3), dim3(i-1), dim3(BLOCK_SIZE), 0, magma_stream , *m-i+1, da_ref(i-1,0), *ldda, da_ref(i-1, i-1), work, dtau+i-1); hipLaunchKernelGGL(( magma_ctrmv_kernel2), dim3(i-1), dim3(i-1), 0, magma_stream , dt_ref(0,0), k, work, dt_ref(0,i-1), dtau+i-1); /* dwork = V' c */ hipLaunchKernelGGL(( magma_cgemv_kernel1), dim3(i-b), dim3(BLOCK_SIZE), 0, magma_stream , *m-b, da_ref(b, b), *ldda, da_ref(b,i), work); /* dwork = T' work */ hipLaunchKernelGGL(( magma_ctrmv_tkernel), dim3(i-b), dim3(i-b), 0, magma_stream , dt_ref(b,b), k, work, work+i-b); /* c = c - V work */ dim3 blocks3( (*m-b + BLOCK_SIZE-1) / BLOCK_SIZE ); dim3 threads3( BLOCK_SIZE ); hipLaunchKernelGGL(( magma_cgemv_kernel2), dim3(blocks3), dim3(threads3), 0, magma_stream , *m-b, i-b, da_ref(b,b), *ldda, work+i-b, da_ref(b, i)); } /* Adjust the dnorm[i] to hold the norm of A(i:m,i) */ if ( i > 0 ) hipLaunchKernelGGL(( magma_scnrm2_adjust_kernel), dim3(1), dim3(i), 0, magma_stream , dnorm+i, da_ref(0, i)); /* Generate elementary reflector H(i) to annihilate A(i+1:m,i) 1. 1 is not yet put on the diagonal of A 2. Elements above the diagonal are copied in ddA and the ones in A are set to zero 3. update T */ magma_clarfgx_gpu(*m-i, da_ref(i, i), da_ref(min(i+1,*m),i), dtau+i, dnorm+i, ddA + i + i*(*n), i); if (i==0){ magmaFloatComplex tt = MAGMA_C_ONE; magmablas_clacpy(MagmaUpperLower, 1, 1, dtau, 1, dt_ref(0,0), 1); magma_csetmatrix(1,1, &tt,1, da_ref(i, i),1); } /* else { // Compute the i-th column of T. // Set da_ref(i, i) = 1. magma_cgemv_kernel3<<< i, BLOCK_SIZE, 0, magma_stream >>>( *m-i, da_ref(i,0), *ldda, da_ref(i, i), work, dtau+i); magma_ctrmv_kernel2<<< i, i, 0, magma_stream >>>( dt_ref(0,0), k, work, dt_ref(0,i), dtau+i); } */ } hipLaunchKernelGGL(( magma_cgemv_kernel3), dim3(i-1), dim3(BLOCK_SIZE), 0, magma_stream , *m-i+1, da_ref(i-1,0), *ldda, da_ref(i-1, i-1), work, dtau+i-1); hipLaunchKernelGGL(( magma_ctrmv_kernel2), dim3(i-1), dim3(i-1), 0, magma_stream , dt_ref(0,0), k, work, dt_ref(0,i-1), dtau+i-1); /* Apply the transformations to the trailing matrix. */ //magma_clarfb2_gpu( MagmaLeft, MagmaConjTrans, MagmaForward, MagmaColumnwise, magma_clarfb2_gpu( *m-b, k-i, BS, da_ref(b, b), *ldda, dT+b+b*k, k, da_ref(b, i), *ldda, work, k-i); } magmablasSetKernelStream(cstream); return *info; } /* magma_cgeqr2 */
227dfcb06724692a8366e37e9fec1db64bbebe9e.cu
/* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @generated from zgeqr2x_gpu-v4.cu normal z -> c, Fri Jul 18 17:34:13 2014 */ #include "common_magma.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 __global__ void magma_ctrmv_kernel2(const magmaFloatComplex *T, int ldt, magmaFloatComplex *v, magmaFloatComplex *y, magmaFloatComplex *tau); __global__ void magma_cgemv_kernel3(int m, const magmaFloatComplex * __restrict__ V, int ldv, magmaFloatComplex *c, magmaFloatComplex *dwork, magmaFloatComplex *tau); ////////////////////////////////////////////////////////////////////////////// __global__ void magma_cgemv_kernel1(int m, const magmaFloatComplex * __restrict__ V, int ldv, const magmaFloatComplex * __restrict__ c, magmaFloatComplex *dwork); __global__ void magma_cgemv_kernel2(int m, int n, const magmaFloatComplex * __restrict__ V, int ldv, const magmaFloatComplex * __restrict__ x, magmaFloatComplex *c); __global__ void magma_ctrmv_tkernel(magmaFloatComplex *T, int ldt, magmaFloatComplex *v, magmaFloatComplex *y); __global__ void magma_scnrm2_adjust_kernel(float *xnorm, magmaFloatComplex *c); /** Purpose ------- CGEQR2 computes a QR factorization of a complex m by n matrix A: A = Q * R. This expert routine requires two more arguments than the standard cgeqr2, namely, dT and ddA, explained below. The storage for A is also not as in the LAPACK's cgeqr2 routine (see below). The first is used to output the triangular n x n factor T of the block reflector used in the factorization. The second holds the diagonal nxn blocks of A, i.e., the diagonal submatrices of R. This routine implements the left looking QR. This version adds internal blocking. Arguments --------- @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. @param[in,out] dA COMPLEX array, dimension (LDA,N) On entry, the m by n matrix A. On exit, the unitary matrix Q as a product of elementary reflectors (see Further Details). \n the elements on and above the diagonal of the array contain the min(m,n) by n upper trapezoidal matrix R (R is upper triangular if m >= n); the elements below the diagonal, with the array TAU, represent the unitary matrix Q as a product of elementary reflectors (see Further Details). @param[in] ldda INTEGER The leading dimension of the array A. LDA >= max(1,M). @param[out] dtau COMPLEX array, dimension (min(M,N)) The scalar factors of the elementary reflectors (see Further Details). @param[out] dT COMPLEX array, dimension N x N. Stores the triangular N x N factor T of the block reflector used in the factorization. The lower triangular part is 0. @param[out] ddA COMPLEX array, dimension N x N. Stores the elements of the upper N x N diagonal block of A. LAPACK stores this array in A. There are 0s below the diagonal. @param dwork (workspace) DOUBLE_PRECISION array, dimension (3 N) @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value Further Details --------------- The matrix Q is represented as a product of elementary reflectors Q = H(1) H(2) . . . H(k), where k = min(m,n). Each H(i) has the form H(i) = I - tau * v * v' where tau is a complex scalar, and v is a complex vector with v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), and tau in TAU(i). 
@ingroup magma_cgeqrf_comp ********************************************************************/ extern "C" magma_int_t magma_cgeqr2x4_gpu(magma_int_t *m, magma_int_t *n, magmaFloatComplex *dA, magma_int_t *ldda, magmaFloatComplex *dtau, magmaFloatComplex *dT, magmaFloatComplex *ddA, float *dwork, magma_int_t *info, magma_queue_t stream) { #define da_ref(a_1,a_2) ( dA+(a_2)*(*ldda) + (a_1)) #define dt_ref(a_1,a_2) ( dT+(a_2)*(k) + (a_1)) #define BS 32 magma_int_t i, k; float *dnorm = (float *)dwork; magmaFloatComplex *work = (magmaFloatComplex *)(dwork+2*(*n)); magma_queue_t cstream; magmablasGetKernelStream(&cstream); magmablasSetKernelStream(stream); *info = 0; if (*m < 0) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*ldda < max(1,*m)) { *info = -4; } if (*info != 0) { magma_xerbla( __func__, -(*info) ); return *info; } /* Compute the norms of the trailing columns */ k = min(*m,*n); magmablas_scnrm2_cols(*m, k, da_ref(0,0), *ldda, dnorm); for (magma_int_t b=0; b < k; b += BS) { for (i = b; i < min(k, b+BS); ++i) { /* Apply H' to A(:,i) from the left */ if ( i-b > 0){ magma_cgemv_kernel3<<< i-1, BLOCK_SIZE, 0, magma_stream >>>( *m-i+1, da_ref(i-1,0), *ldda, da_ref(i-1, i-1), work, dtau+i-1); magma_ctrmv_kernel2<<< i-1, i-1, 0, magma_stream >>>( dt_ref(0,0), k, work, dt_ref(0,i-1), dtau+i-1); /* dwork = V' c */ magma_cgemv_kernel1<<< i-b, BLOCK_SIZE, 0, magma_stream >>>(*m-b, da_ref(b, b), *ldda, da_ref(b,i), work); /* dwork = T' work */ magma_ctrmv_tkernel<<< i-b, i-b, 0, magma_stream >>>(dt_ref(b,b), k, work, work+i-b); /* c = c - V work */ dim3 blocks3( (*m-b + BLOCK_SIZE-1) / BLOCK_SIZE ); dim3 threads3( BLOCK_SIZE ); magma_cgemv_kernel2<<< blocks3, threads3, 0, magma_stream >>>(*m-b, i-b, da_ref(b,b), *ldda, work+i-b, da_ref(b, i)); } /* Adjust the dnorm[i] to hold the norm of A(i:m,i) */ if ( i > 0 ) magma_scnrm2_adjust_kernel<<< 1, i, 0, magma_stream >>> (dnorm+i, da_ref(0, i)); /* Generate elementary reflector H(i) to annihilate A(i+1:m,i) 1. 1 is not yet put on the diagonal of A 2. Elements above the diagonal are copied in ddA and the ones in A are set to zero 3. update T */ magma_clarfgx_gpu(*m-i, da_ref(i, i), da_ref(min(i+1,*m),i), dtau+i, dnorm+i, ddA + i + i*(*n), i); if (i==0){ magmaFloatComplex tt = MAGMA_C_ONE; magmablas_clacpy(MagmaUpperLower, 1, 1, dtau, 1, dt_ref(0,0), 1); magma_csetmatrix(1,1, &tt,1, da_ref(i, i),1); } /* else { // Compute the i-th column of T. // Set da_ref(i, i) = 1. magma_cgemv_kernel3<<< i, BLOCK_SIZE, 0, magma_stream >>>( *m-i, da_ref(i,0), *ldda, da_ref(i, i), work, dtau+i); magma_ctrmv_kernel2<<< i, i, 0, magma_stream >>>( dt_ref(0,0), k, work, dt_ref(0,i), dtau+i); } */ } magma_cgemv_kernel3<<< i-1, BLOCK_SIZE, 0, magma_stream >>>( *m-i+1, da_ref(i-1,0), *ldda, da_ref(i-1, i-1), work, dtau+i-1); magma_ctrmv_kernel2<<< i-1, i-1, 0, magma_stream >>>( dt_ref(0,0), k, work, dt_ref(0,i-1), dtau+i-1); /* Apply the transformations to the trailing matrix. */ //magma_clarfb2_gpu( MagmaLeft, MagmaConjTrans, MagmaForward, MagmaColumnwise, magma_clarfb2_gpu( *m-b, k-i, BS, da_ref(b, b), *ldda, dT+b+b*k, k, da_ref(b, i), *ldda, work, k-i); } magmablasSetKernelStream(cstream); return *info; } /* magma_cgeqr2 */
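The most visible rewrite in this pair is the launch syntax: every kernel<<<grid, block, shmem, stream>>>(args) in the .cu file becomes hipLaunchKernelGGL((kernel), dim3(grid), dim3(block), shmem, stream, args) in the .hip file, with the original argument list appended unchanged. A minimal sketch of that mapping follows; scale_kernel is a toy kernel for illustration, not part of MAGMA.

#include <cuda_runtime.h>

__global__ void scale_kernel(int n, float alpha, float *x) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= alpha;
}

void scale(int n, float alpha, float *d_x, cudaStream_t stream) {
    const int threads = 256;
    const int blocks  = (n + threads - 1) / threads;

    // CUDA form, as written throughout the .cu file above:
    scale_kernel<<<blocks, threads, 0, stream>>>(n, alpha, d_x);

    // HIP form emitted by hipify, matching the pattern in the .hip file above
    // (with hipStream_t in place of cudaStream_t):
    //   hipLaunchKernelGGL((scale_kernel), dim3(blocks), dim3(threads),
    //                      0, stream, n, alpha, d_x);
}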
ed36a17e7256e2e8932f1adcf753fd494cf3cea2.hip
// !!! This is a file automatically generated by hipify!!! #include "check.h" #include "Devices.h" #include "interpolator.h" #include "process.h" #include <iostream> #include <memory> #include <mutex> using namespace NAMESPACE; using namespace std; Devices::Devices() { MPI_Process* process; MPI_ERR_CHECK(MPI_Process_get(&process)); // Check for available CUDA GPU(s) int ngpus = 0; CUDA_ERR_CHECK(hipGetLastError()); hipError_t hipError_t = hipGetDeviceCount(&ngpus); if (hipError_t == hipErrorNoDevice) { if (process->isMaster()) cout << hipGetErrorString(hipError_t) << endl; ngpus = 0; } else { CUDA_ERR_CHECK(hipError_t); CUDA_ERR_CHECK(hipDeviceReset()); } devices.resize(ngpus); for (int igpu = 0; igpu < ngpus; igpu++) { struct hipDeviceProp_t props; CUDA_ERR_CHECK(hipGetDeviceProperties(&props, igpu)); int id[2]; id[0] = props.pciBusID; id[1] = props.pciDeviceID; devices[igpu].id = *(long long*)id; devices[igpu].warpSize = props.warpSize; devices[igpu].cc = props.major * 10 + props.minor; devices[igpu].sm = SM(props.multiProcessorCount, props.sharedMemPerMultiprocessor); } } int Devices::getCount() { return devices.size(); } const Device* Devices::getDevice(int index) const { return &devices[index]; } Device* Devices::tryAcquire() { Device* device = NULL; for (int i = 0; i < devices.size(); i++) { { static std::mutex mutex; std::lock_guard<std::mutex> lock(mutex); if (devices[i].available && !device) { devices[i].available = 0; device = &devices[i]; } } if (device) break; } return device; } void Devices::release(Device* device) { if (!device) return; { static std::mutex mutex; std::lock_guard<std::mutex> lock(mutex); device->available++; } } namespace NAMESPACE { unique_ptr<Devices> devices; NAMESPACE::Device* tryAcquireDevice() { if (!devices) devices.reset(new Devices()); return devices->tryAcquire(); } void releaseDevice(NAMESPACE::Device* device) { if (!devices) devices.reset(new Devices()); devices->release(device); } } extern "C" Device* tryAcquireDevice() { return NAMESPACE::tryAcquireDevice(); } extern "C" void releaseDevice(Device* device) { NAMESPACE::releaseDevice(device); }
ed36a17e7256e2e8932f1adcf753fd494cf3cea2.cu
#include "check.h" #include "Devices.h" #include "interpolator.h" #include "process.h" #include <iostream> #include <memory> #include <mutex> using namespace NAMESPACE; using namespace std; Devices::Devices() { MPI_Process* process; MPI_ERR_CHECK(MPI_Process_get(&process)); // Check for available CUDA GPU(s) int ngpus = 0; CUDA_ERR_CHECK(cudaGetLastError()); cudaError_t cudaError = cudaGetDeviceCount(&ngpus); if (cudaError == cudaErrorNoDevice) { if (process->isMaster()) cout << cudaGetErrorString(cudaError) << endl; ngpus = 0; } else { CUDA_ERR_CHECK(cudaError); CUDA_ERR_CHECK(cudaDeviceReset()); } devices.resize(ngpus); for (int igpu = 0; igpu < ngpus; igpu++) { struct cudaDeviceProp props; CUDA_ERR_CHECK(cudaGetDeviceProperties(&props, igpu)); int id[2]; id[0] = props.pciBusID; id[1] = props.pciDeviceID; devices[igpu].id = *(long long*)id; devices[igpu].warpSize = props.warpSize; devices[igpu].cc = props.major * 10 + props.minor; devices[igpu].sm = SM(props.multiProcessorCount, props.sharedMemPerMultiprocessor); } } int Devices::getCount() { return devices.size(); } const Device* Devices::getDevice(int index) const { return &devices[index]; } Device* Devices::tryAcquire() { Device* device = NULL; for (int i = 0; i < devices.size(); i++) { { static std::mutex mutex; std::lock_guard<std::mutex> lock(mutex); if (devices[i].available && !device) { devices[i].available = 0; device = &devices[i]; } } if (device) break; } return device; } void Devices::release(Device* device) { if (!device) return; { static std::mutex mutex; std::lock_guard<std::mutex> lock(mutex); device->available++; } } namespace NAMESPACE { unique_ptr<Devices> devices; NAMESPACE::Device* tryAcquireDevice() { if (!devices) devices.reset(new Devices()); return devices->tryAcquire(); } void releaseDevice(NAMESPACE::Device* device) { if (!devices) devices.reset(new Devices()); devices->release(device); } } extern "C" Device* tryAcquireDevice() { return NAMESPACE::tryAcquireDevice(); } extern "C" void releaseDevice(Device* device) { NAMESPACE::releaseDevice(device); }
bb82e2173231c647f8016330918db9be5b4003cc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright 2013 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <assert.h> #include <iostream> #include <math.h> #include <pthread.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <hip/hip_cooperative_groups.h> // https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api #define GPU_ERRCHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) { exit(code); }; } } typedef float real; // Precision of float numbers using namespace cooperative_groups; #define MAX_STRING 100 #define EXP_TABLE_SIZE 1000 #define MAX_EXP 6 // HACK: THIS IS 1000 #define MAX_SENTENCE_LENGTH 128 #define MAX_CODE_LENGTH 40 const int vocab_hash_size = 30000000; // Maximum 30 * 0.7 = 21M words in the vocabulary struct vocab_word { long long cn; int *point; char *word, *code, codelen; }; struct Vec { int len; real *v; public: inline void freemem() { free(v); } // return the allocation size needed for a vector of dimension len static long int alloc_size_for_dim(int d) { return d * sizeof(real); } inline void alloc(int len) { this->len = len; // int a = hipMalloc((void **)&v, 128 * (long long)len * sizeof(real)); int a = posix_memalign((void **)&v, 128, (long long)len * sizeof(real)); assert(v != nullptr && "memory allocation failed"); (void)a; } inline void alloczero(int len) { this->len = len; this->v = (real *)calloc(len, sizeof(real)); } inline void fillzero() const { for (int i = 0; i < len; ++i) { v[i] = 0; } } }; char train_file[MAX_STRING], output_file[MAX_STRING]; char save_vocab_file[MAX_STRING], read_vocab_file[MAX_STRING]; struct vocab_word *vocab; int binary = 0, cbow = 1, debug_mode = 2, window = 5, min_count = 5, num_threads = 12, min_reduce = 1; int *vocab_hash; long long vocab_max_size = 1000, vocab_size = 0, layer1_size = 100; long long train_words = 0, word_count_actual = 0, iter = 5, file_size = 0, classes = 0; real alpha = 0.025, starting_alpha, sample = 1e-3; Vec *syn0, *syn1, *syn1neg; real *quadform; real *dev_syn0, *dev_syn1neg, *dev_quadform; real *dev_gsyn0, *dev_gsyn1neg; // mask for whether gradient has been updated bool *dev_mask_syn0; bool *dev_mask_syn1neg; real *dev_dots; real *dev_dots_scratch; const int NSAMPLES_PER_KERNEL_LAUNCH = 1e5; // const int NSAMPLES_PER_KERNEL_LAUNCH = 1e5; int *dev_labels; char *dev_codes; unsigned long long *dev_focuses, *dev_ctxes; unsigned long long *dev_uniq_focuses, *dev_uniq_ctxes; real *expTable; clock_t start; int hs = 0, negative = 5; const int table_size = 1e8; int *table; real *dev_total_loss; unsigned long long calcBlockSize(unsigned long long total, unsigned long long thread) { if (total / thread == 0) return 1; return (total / thread) + ((total % 
thread) != 0); } __global__ void zeroRealKernel(const unsigned long long size, real *r) { const unsigned long long x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= size) return; r[x] = 0; } void InitUnigramTable() { int a, i; double train_words_pow = 0; double d1, power = 0.75; table = (int *)malloc(table_size * sizeof(int)); for (a = 0; a < vocab_size; a++) train_words_pow += pow(vocab[a].cn, power); i = 0; d1 = pow(vocab[i].cn, power) / train_words_pow; for (a = 0; a < table_size; a++) { table[a] = i; if (a / (double)table_size > d1) { i++; d1 += pow(vocab[i].cn, power) / train_words_pow; } if (i >= vocab_size) i = vocab_size - 1; } } // Reads a single word from a file, assuming space + tab + EOL to be word // boundaries void ReadWord(char *word, FILE *fin, char *eof) { int a = 0, ch; while (1) { ch = fgetc_unlocked(fin); if (ch == EOF) { *eof = 1; break; } if (ch == 13) continue; if ((ch == ' ') || (ch == '\t') || (ch == '\n')) { if (a > 0) { if (ch == '\n') ungetc(ch, fin); break; } if (ch == '\n') { strcpy(word, (char *)"</s>"); return; } else continue; } word[a] = ch; a++; if (a >= MAX_STRING - 1) a--; // Truncate too long words } word[a] = 0; } // Returns hash value of a word int GetWordHash(char *word) { unsigned long long a, hash = 0; for (a = 0; a < strlen(word); a++) hash = hash * 257 + word[a]; hash = hash % vocab_hash_size; return hash; } // Returns position of a word in the vocabulary; if the word is not found, // returns -1 int SearchVocab(char *word) { unsigned int hash = GetWordHash(word); while (1) { if (vocab_hash[hash] == -1) return -1; if (!strcmp(word, vocab[vocab_hash[hash]].word)) return vocab_hash[hash]; hash = (hash + 1) % vocab_hash_size; } assert(false && "unreachable"); // return -1; } // Reads a word and returns its index in the vocabulary int ReadWordIndex(FILE *fin, char *eof) { char word[MAX_STRING], eof_l = 0; ReadWord(word, fin, &eof_l); if (eof_l) { *eof = 1; return -1; } return SearchVocab(word); } // Adds a word to the vocabulary int AddWordToVocab(char *word) { unsigned int hash, length = strlen(word) + 1; if (length > MAX_STRING) length = MAX_STRING; vocab[vocab_size].word = (char *)calloc(length, sizeof(char)); strcpy(vocab[vocab_size].word, word); vocab[vocab_size].cn = 0; vocab_size++; // Reallocate memory if needed if (vocab_size + 2 >= vocab_max_size) { vocab_max_size += 1000; vocab = (struct vocab_word *)realloc( vocab, vocab_max_size * sizeof(struct vocab_word)); } hash = GetWordHash(word); while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size; vocab_hash[hash] = vocab_size - 1; return vocab_size - 1; } // Used later for sorting by word counts int VocabCompare(const void *a, const void *b) { long long l = ((struct vocab_word *)b)->cn - ((struct vocab_word *)a)->cn; if (l > 0) return 1; if (l < 0) return -1; return 0; } // Sorts the vocabulary by frequency using word counts void SortVocab() { int a, size; unsigned int hash; // Sort the vocabulary and keep </s> at the first position qsort(&vocab[1], vocab_size - 1, sizeof(struct vocab_word), VocabCompare); for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1; size = vocab_size; train_words = 0; for (a = 0; a < size; a++) { // Words occuring less than min_count times will be discarded from // the vocab if ((vocab[a].cn < min_count) && (a != 0)) { vocab_size--; free(vocab[a].word); } else { // Hash will be re-computed, as after the sorting it is not // actual hash = GetWordHash(vocab[a].word); while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size; vocab_hash[hash] 
= a; train_words += vocab[a].cn; } } vocab = (struct vocab_word *)realloc( vocab, (vocab_size + 1) * sizeof(struct vocab_word)); // Allocate memory for the binary tree construction for (a = 0; a < vocab_size; a++) { vocab[a].code = (char *)calloc(MAX_CODE_LENGTH, sizeof(char)); vocab[a].point = (int *)calloc(MAX_CODE_LENGTH, sizeof(int)); } } // Reduces the vocabulary by removing infrequent tokens void ReduceVocab() { int a, b = 0; unsigned int hash; for (a = 0; a < vocab_size; a++) if (vocab[a].cn > min_reduce) { vocab[b].cn = vocab[a].cn; vocab[b].word = vocab[a].word; b++; } else free(vocab[a].word); vocab_size = b; for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1; for (a = 0; a < vocab_size; a++) { // Hash will be re-computed, as it is not actual hash = GetWordHash(vocab[a].word); while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size; vocab_hash[hash] = a; } fflush(stdout); min_reduce++; } // Create binary Huffman tree using the word counts // Frequent words will have short uniqe binary codes void CreateBinaryTree() { long long a, b, i, min1i, min2i, pos1, pos2, point[MAX_CODE_LENGTH]; char code[MAX_CODE_LENGTH]; long long *count = (long long *)calloc(vocab_size * 2 + 1, sizeof(long long)); long long *binary = (long long *)calloc(vocab_size * 2 + 1, sizeof(long long)); long long *parent_node = (long long *)calloc(vocab_size * 2 + 1, sizeof(long long)); for (a = 0; a < vocab_size; a++) count[a] = vocab[a].cn; for (a = vocab_size; a < vocab_size * 2; a++) count[a] = 1e15; pos1 = vocab_size - 1; pos2 = vocab_size; // Following algorithm constructs the Huffman tree by adding one node at // a time for (a = 0; a < vocab_size - 1; a++) { // First, find two smallest nodes 'min1, min2' if (pos1 >= 0) { if (count[pos1] < count[pos2]) { min1i = pos1; pos1--; } else { min1i = pos2; pos2++; } } else { min1i = pos2; pos2++; } if (pos1 >= 0) { if (count[pos1] < count[pos2]) { min2i = pos1; pos1--; } else { min2i = pos2; pos2++; } } else { min2i = pos2; pos2++; } count[vocab_size + a] = count[min1i] + count[min2i]; parent_node[min1i] = vocab_size + a; parent_node[min2i] = vocab_size + a; binary[min2i] = 1; } // Now assign binary code to each vocabulary word for (a = 0; a < vocab_size; a++) { b = a; i = 0; while (1) { code[i] = binary[b]; point[i] = b; i++; b = parent_node[b]; if (b == vocab_size * 2 - 2) break; } vocab[a].codelen = i; vocab[a].point[0] = vocab_size - 2; for (b = 0; b < i; b++) { vocab[a].code[i - b - 1] = code[b]; vocab[a].point[i - b] = point[b] - vocab_size; } } free(count); free(binary); free(parent_node); } void LearnVocabFromTrainFile() { char word[MAX_STRING], eof = 0; FILE *fin; long long a, i, wc = 0; for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1; fin = fopen(train_file, "rb"); if (fin == NULL) { printf("ERROR: training data file not found!\n"); exit(1); } vocab_size = 0; AddWordToVocab((char *)"</s>"); while (1) { ReadWord(word, fin, &eof); if (eof) break; train_words++; wc++; if ((debug_mode > 1) && (wc >= 1000000)) { printf("%lldM%c", train_words / 1000000, 13); fflush(stdout); wc = 0; } i = SearchVocab(word); if (i == -1) { a = AddWordToVocab(word); vocab[a].cn = 1; } else vocab[i].cn++; if (vocab_size > vocab_hash_size * 0.7) ReduceVocab(); } SortVocab(); if (debug_mode > 0) { printf("Vocab size: %lld\n", vocab_size); printf("Words in train file: %lld\n", train_words); } file_size = ftell(fin); fclose(fin); } void SaveVocab() { long long i; FILE *fo = fopen(save_vocab_file, "wb"); for (i = 0; i < vocab_size; i++) fprintf(fo, "%s %lld\n", 
vocab[i].word, vocab[i].cn); fclose(fo); } void ReadVocab() { long long a, i = 0; char c, eof = 0; char word[MAX_STRING]; FILE *fin = fopen(read_vocab_file, "rb"); if (fin == NULL) { printf("Vocabulary file not found\n"); exit(1); } for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1; vocab_size = 0; while (1) { ReadWord(word, fin, &eof); if (eof) break; a = AddWordToVocab(word); fscanf(fin, "%lld%c", &vocab[a].cn, &c); i++; } SortVocab(); if (debug_mode > 0) { printf("Vocab size: %lld\n", vocab_size); printf("Words in train file: %lld\n", train_words); } fin = fopen(train_file, "rb"); if (fin == NULL) { printf("ERROR: training data file not found!\n"); exit(1); } fseek(fin, 0, SEEK_END); file_size = ftell(fin); fclose(fin); } void InitNet() { long long a, b; unsigned long long next_random = 1; a = posix_memalign((void **)&syn0, 128, (long long)vocab_size * sizeof(Vec)); if (syn0 == NULL) { printf("%d: Memory allocation failed\n", __LINE__); exit(1); } GPU_ERRCHECK(hipMalloc((void **)&dev_syn0, (long long) vocab_size * layer1_size * sizeof(real))); GPU_ERRCHECK(hipMalloc((void **)&dev_gsyn0, (long long) vocab_size * layer1_size * sizeof(real))); hipLaunchKernelGGL(( zeroRealKernel), dim3(dim3(calcBlockSize(vocab_size * layer1_size, 512))), dim3(dim3(512)), 0, 0, (long long)vocab_size * layer1_size, dev_gsyn0); printf("allocating syn0..."); for (a = 0; a < vocab_size; ++a) { new (Vec)(syn0[a]); syn0[a].alloc(layer1_size); for (b = 0; b < layer1_size; b++) { next_random = next_random * (unsigned long long)25214903917 + 11; syn0[a].v[b] = (((next_random & 0xFFFF) / (real)65536) - 0.5) / (layer1_size); } // copy vector to host hipMemcpy(dev_syn0 + layer1_size * a, syn0[a].v, layer1_size * sizeof(real), hipMemcpyHostToDevice); } printf("%callocated syn0.\t\t\t\t\n", 13); // if (hs) { // a = posix_memalign((void **)&syn1, 128, // (long long)vocab_size * layer1_size * // sizeof(real)); // if (syn1 == NULL) { // printf("Memory allocation failed\n"); // exit(1); // } // for (a = 0; a < vocab_size; a++) // for (b = 0; b < layer1_size; b++) syn1[a * layer1_size + b] = 0; // } printf("allocating syn1neg..."); GPU_ERRCHECK(hipMalloc((void **)&dev_syn1neg, (long long) vocab_size * layer1_size * sizeof(real))); if (negative > 0) { a = posix_memalign((void **)&syn1neg, 128, (long long)vocab_size * sizeof(Vec)); if (syn1neg == NULL) { printf("%d: Memory allocation failed\n", __LINE__); exit(1); } printf("%d: Randomly initializing syn1neg...", __LINE__); for (a = 0; a < vocab_size; a++) { new (Vec)(syn1neg[a]); syn1neg[a].alloc(layer1_size); // for (b = 0; b < layer1_size; b++) syn1neg[a].v[b] = 0; for (b = 0; b < layer1_size; b++) { next_random = next_random * (unsigned long long)25214903917 + 11; syn1neg[a].v[b] = (((next_random & 0xFFFF) / (real)65536) - 0.5) / (layer1_size); } // copy vector to device hipMemcpy(dev_syn1neg + layer1_size * a, syn1neg[a].v, layer1_size * sizeof(real), hipMemcpyHostToDevice); } } GPU_ERRCHECK(hipMalloc((void **)&dev_gsyn1neg, (long long) vocab_size * layer1_size * sizeof(real))); hipLaunchKernelGGL(( zeroRealKernel), dim3(dim3(calcBlockSize((long long)vocab_size * layer1_size, 512))), dim3(dim3(512)), 0, 0, (long long)vocab_size * layer1_size, dev_gsyn1neg); printf("%callocated syn1neg.\t\t\t\t\n", 13); a = posix_memalign((void **)&quadform, 128, (long long)layer1_size * layer1_size * sizeof(real)); if (quadform == NULL) { printf("%d: Memory allocation failed\n", __LINE__); exit(1); } for(int i = 0; i < layer1_size; ++i){ for(int j = 0; j < layer1_size; ++j) { // dot 
product quadform[i * layer1_size + j] = i == j ? 1 : 0; } } hipMalloc((void **)&dev_quadform, (long long) layer1_size * layer1_size * sizeof(real)); hipMemcpy(dev_quadform, quadform, layer1_size * layer1_size * sizeof(real), hipMemcpyHostToDevice); hipMalloc((void **)&dev_dots, (long long) NSAMPLES_PER_KERNEL_LAUNCH * sizeof(real)); hipMalloc((void **)&dev_labels, (long long) NSAMPLES_PER_KERNEL_LAUNCH * sizeof(int)); hipMalloc((void **)&dev_codes, (long long) NSAMPLES_PER_KERNEL_LAUNCH * sizeof(char)); hipMalloc((void **)&dev_focuses, (long long) NSAMPLES_PER_KERNEL_LAUNCH * sizeof(unsigned long long)); hipMalloc((void **)&dev_ctxes, (long long) NSAMPLES_PER_KERNEL_LAUNCH * sizeof(unsigned long long)); hipMalloc((void **)&dev_uniq_focuses, (long long) NSAMPLES_PER_KERNEL_LAUNCH * sizeof(unsigned long long)); hipMalloc((void **)&dev_uniq_ctxes, (long long) NSAMPLES_PER_KERNEL_LAUNCH * sizeof(unsigned long long)); hipMalloc((void **)&dev_total_loss, (long long) sizeof(real)); hipMalloc((void **)&dev_mask_syn0, (long long) vocab_size * sizeof(bool)); hipMalloc((void **)&dev_mask_syn1neg, (long long) vocab_size * sizeof(bool)); hipMalloc((void **)&dev_dots_scratch, (long long) NSAMPLES_PER_KERNEL_LAUNCH * layer1_size * sizeof(real)); CreateBinaryTree(); } inline real sigmoid(real x) { // we are trying to calculate sigmoid(127) if (x > 5) { return 1; } if (x < -5) { return 0; } real exp = powf(2, x); return exp / (1 + exp); } // location y contains lensq of ixs[y] __global__ void lensqKernel(const int size, const int nsamples, const real * __restrict__ vecs, // SIZE x NSAMPLES real * __restrict__ lensqs, // NSAMPLES real * __restrict__ dotsScratch, // dotScratch: NSAMPLES_PER_KERNEL_LAUNCH * SIZE const unsigned long long *ixs // NSAMPLES ) { const unsigned long long x = blockIdx.x * blockDim.x + threadIdx.x; const unsigned long long y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size || y >= nsamples) return; const real dot = vecs[ixs[y] * size + x] * vecs[ixs[y]* size + x]; atomicAdd(&lensqs[y], dot); // dotsScratch[y*size + x] = dot; // int partition = size / 2; // while (partition > 0) { // if (x < partition) { // __syncthreads(); // dotsScratch[y*size+x] += dotsScratch[y*size+partition+x]; // } // partition = partition / 2; // } // __syncthreads(); // if (x == 0) { // // atomicAdd(&lensqs[y], dotsScratch[y * size + 0]); // lensqs[y] = dotsScratch[y * size]; // } } __global__ void normalizeVecKernel(const int size, const int nsamples, real * __restrict__ vecs, // SIZE x NSAMPLES const real * __restrict__ lensqs, // NSAMPLES const unsigned long long *ixs //NSAMPLES ) { const unsigned long long x = blockIdx.x * blockDim.x + threadIdx.x; const unsigned long long y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size || y >= nsamples) return; vecs[ixs[y]*size+x] /= sqrt(lensqs[y]); } __global__ void dotsKernel(const int size, const int nsamples, const real * __restrict__ syn0, // LAYER1_SIZE * VOCAB_SIZE const real * __restrict__ syn1neg, // LAYER1_SIZE * VOCAB_SIZE __restrict__ real *dots, // dots: [y] NSAMPLES_PER_KERNEL_LAUNCH __restrict__ real *dotsScratch, // dotScratch: NSAMPLES_PER_KERNEL_LAUNCH * LAYER1_SIZE const unsigned long long *focuses, // NSAMPLER_PER_KERNEL_LAUNCH const unsigned long long *ctxes) { const unsigned long long x = blockIdx.x * blockDim.x + threadIdx.x; const unsigned long long y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size || y >= nsamples) return; // dot product of (aT Q b)_xy for sample z const real dot = syn0[focuses[y] * size + x] * 
syn1neg[ctxes[y] * size + x]; atomicAdd(&dots[y], dot); // dotsScratch[y*size + x] = dot; // unsigned long long curix = x; // int partition = size / 2; // while (partition > 0) { // if (curix < partition) { // __syncthreads(); // atomicAdd(&dotsScratch[y*size+curix], dotsScratch[y*size+partition+curix]); // } // partition = partition / 2; // } // __syncthreads(); // if (curix == 0) { // // atomicAdd(&dots[y], dotsScratch[y * size + 0]); // dots[y] = dotsScratch[y * size]; // } } __device__ real sigmoidGPU(real x) { // we are trying to calculate sigmoid(127) // if (x > 5) { return 1; } // if (x < -5) { return 0; } return tanh(x); // real e = powf(2, x); // return e / (1 + e); } __device__ real gradSigmoidGPU(real x) { return 1 - tanh(x) * tanh(x); // return sigmoidGPU(x) * (1 - sigmoidGPU(x)); } #define FULL_MASK 0xffffffff __global__ void HSGradSyn0(const int size, const int nsamples, const real *__restrict__ dots, const char *__restrict__ codes, real *gsyn0, const real * __restrict__ syn1neg, const real alpha, const unsigned long long *__restrict__ focuses, const unsigned long long *__restrict__ ctxes) { const unsigned long long x = blockIdx.x * blockDim.x + threadIdx.x; const unsigned long long y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size || y >= nsamples) { return; } if (dots[y] > 5 || dots[y] < -5) {return; } // error const real err = 1 - codes[y] - sigmoidGPU(dots[y]); const real g = err * alpha * gradSigmoidGPU(dots[y]); // all threads that write into the same array index atomicAdd(&gsyn0[focuses[y] * size + x], g*syn1neg[ctxes[y]*size + x]); // atomicAdd(&syn1neg[ctxes[y] * size + x], g*syn0[focuses[y] * size + x]); } __global__ void HSGradSyn1Neg(const int size, const int nsamples, const real * __restrict__ dots, const char * __restrict__ codes, const real * __restrict__ syn0, __restrict__ real *syn1neg, const real alpha, const unsigned long long *__restrict__ focuses, const unsigned long long *__restrict__ ctxes) { const unsigned long long x = blockIdx.x * blockDim.x + threadIdx.x; const unsigned long long y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size || y >= nsamples) { return; } if (dots[y] > 5 || dots[y] < -5) { return; } // error const real err = 1 - codes[y] - sigmoidGPU(dots[y]); const real g = err * alpha * gradSigmoidGPU(dots[y]); // all threads that write into the same array index // atomicAdd(&gsyn0[focuses[y] * size + x], g*syn1neg[ctxes[y]*size + x]); atomicAdd(&syn1neg[ctxes[y] * size + x], g*syn0[focuses[y] * size + x]); } // 2D kernel: layer1_size x nsamples __global__ void backpropGradIndirect(const int size, const int nseen, __restrict__ real *vec, __restrict__ real *grad, const unsigned long long *seen) { const unsigned long long x = blockIdx.x * blockDim.x + threadIdx.x; const unsigned long long y = blockIdx.y * blockDim.y + threadIdx.y; // const int z = blockIdx.z * blockDim.z + threadIdx.z; if (x >= size || y >= nseen) { return; } // atomicAdd(&vec[seen[y]*size + x], grad[seen[y] * size + x]); vec[seen[y]*size + x] += grad[seen[y] * size + x]; grad[seen[y] * size + x] = 0; } const int TX = 128, TY = 8; void runHSKernel(int nsamples, unsigned long long *focuses, unsigned long long *ctxes, char *codes, const unsigned long long num_uniq_focuses, const unsigned long long *uniq_focuses, const unsigned long long num_uniq_ctxes, const unsigned long long *uniq_ctxes) { // printf("running HS kernel...\n"); // zeroRealKernel<<<dim3(calcBlockSize(vocab_size * layer1_size, 1024)), dim3(1024)>>>(vocab_size * layer1_size, dev_gsyn0); // 
zeroRealKernel<<<dim3(calcBlockSize(vocab_size * layer1_size, 1024)), dim3(1024)>>>(vocab_size * layer1_size, dev_gsyn1neg); hipMemcpy(dev_focuses, focuses, nsamples * sizeof(unsigned long long), hipMemcpyHostToDevice); hipMemcpy(dev_ctxes, ctxes, nsamples * sizeof(unsigned long long), hipMemcpyHostToDevice); hipMemcpy(dev_codes, codes, nsamples * sizeof(char), hipMemcpyHostToDevice); hipMemcpy(dev_uniq_focuses, uniq_focuses, num_uniq_focuses * sizeof(unsigned long long), hipMemcpyHostToDevice); hipMemcpy(dev_uniq_ctxes, uniq_ctxes, num_uniq_ctxes * sizeof(unsigned long long), hipMemcpyHostToDevice); dim3 threadDims3(TX, TY); dim3 blockDims3(calcBlockSize(layer1_size, TX), calcBlockSize(nsamples, TY)); hipLaunchKernelGGL(( zeroRealKernel), dim3(dim3(calcBlockSize(NSAMPLES_PER_KERNEL_LAUNCH, 1024))), dim3(dim3(1024)), 0, 0, NSAMPLES_PER_KERNEL_LAUNCH, dev_dots); hipLaunchKernelGGL(( dotsKernel), dim3(blockDims3), dim3(threadDims3), 0, 0, layer1_size, nsamples, dev_syn0, dev_syn1neg, dev_dots, dev_dots_scratch, dev_focuses, dev_ctxes); if (0) { real dots[nsamples]; hipMemcpy(dots, dev_dots, nsamples * sizeof(real), hipMemcpyDeviceToHost); printf("DOTS: "); for(int i = 0; i < nsamples; ++i) printf("%4.2f ", dots[i]); getchar(); } // printf("launching graDHS kernel...\n"); hipLaunchKernelGGL(( HSGradSyn0), dim3(blockDims3), dim3(threadDims3), 0, 0, layer1_size, nsamples, dev_dots, dev_codes, dev_gsyn0, dev_syn1neg, alpha, dev_focuses, dev_ctxes); hipLaunchKernelGGL(( HSGradSyn1Neg), dim3(blockDims3), dim3(threadDims3), 0, 0, layer1_size, nsamples, dev_dots, dev_codes, dev_syn0, dev_syn1neg, alpha, dev_focuses, dev_ctxes); if (0) { real gsyn0[vocab_size * layer1_size]; hipMemcpy(gsyn0, dev_gsyn0, vocab_size * layer1_size * sizeof(real), hipMemcpyDeviceToHost); printf("gsyn0: "); for(int i = 0; i < nsamples; ++i) { for(int j = 0; j < layer1_size; ++j) { printf("%4.2f ", gsyn0[focuses[i] * layer1_size + j]); } printf("\n"); } getchar(); } if (0) { real dbg_syn0[vocab_size * layer1_size]; hipMemcpy(dbg_syn0, dev_syn0, vocab_size * layer1_size * sizeof(real), hipMemcpyDeviceToHost); printf("(BEFORE)dbg_syn0: "); for(int i = 0; i < nsamples; ++i) { for(int j = 0; j < layer1_size; ++j) { printf("%4.2f ", dbg_syn0[focuses[i] * layer1_size + j]); } printf("\n"); } getchar(); } { assert(num_uniq_focuses < NSAMPLES_PER_KERNEL_LAUNCH); dim3 threadDims2(TX, TY); dim3 blockDims2(calcBlockSize(layer1_size, TX), calcBlockSize(num_uniq_focuses, TY)); if (0) { std::cout << "blockDims2: (" << blockDims2.x << ", " << blockDims2.y << ", " << blockDims2.z << ")\n"; std::cout << "threadDims2: (" << threadDims2.x << ", " << threadDims2.y << ", " << threadDims2.z << ")\n"; } // printf("launching backprophs kernel...\n"); hipLaunchKernelGGL(( backpropGradIndirect), dim3(blockDims2), dim3(threadDims2), 0, 0, layer1_size, num_uniq_focuses, dev_syn0, dev_gsyn0, dev_uniq_focuses); // printf("ran backprophs kernel...\n"); if (0) { real dbg_syn0[vocab_size * layer1_size]; printf("(AFTER)dbg_syn0: "); hipMemcpy(dbg_syn0, dev_syn0, vocab_size * layer1_size * sizeof(real), hipMemcpyDeviceToHost); for(int i = 0; i < nsamples; ++i) { for(int j = 0; j < layer1_size; ++j) { printf("%4.2f ", dbg_syn0[focuses[i] * layer1_size + j]); } printf("\n"); } getchar(); } } // { // assert(num_uniq_ctxes <= NSAMPLES_PER_KERNEL_LAUNCH); // dim3 threadDims2(TX, TY); // dim3 blockDims2(calcBlockSize(layer1_size, TX), calcBlockSize(num_uniq_ctxes, TY)); // hipLaunchKernelGGL(( backpropGradIndirect), dim3(blockDims2), dim3(threadDims2), 0, 0, 
layer1_size, num_uniq_ctxes, // dev_syn1neg, dev_gsyn1neg, dev_uniq_ctxes); // } } __global__ void NegSamplingGradSyn0(const int size, const int nsamples, const real *__restrict__ dots, const int *__restrict__ labels, real *gsyn0, const real * __restrict__ syn1neg, const real alpha, const unsigned long long *__restrict__ focuses, const unsigned long long *__restrict__ ctxes) { const unsigned long long x = blockIdx.x * blockDim.x + threadIdx.x; const unsigned long long y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size || y >= nsamples) { return; } // error const real err = labels[y] - sigmoidGPU(dots[y]); const real g = err * alpha * gradSigmoidGPU(dots[y]); // all threads that write into the same array index atomicAdd(&gsyn0[focuses[y] * size + x], g*syn1neg[ctxes[y]*size + x]); // atomicAdd(&syn1neg[ctxes[y] * size + x], g*syn0[focuses[y] * size + x]); } __global__ void NegSamplingGradSyn1Neg(const int size, const int nsamples, const real * __restrict__ dots, const int * __restrict__ labels, const real * __restrict__ syn0, __restrict__ real *syn1neg, const real alpha, const unsigned long long *__restrict__ focuses, const unsigned long long *__restrict__ ctxes) { const unsigned long long x = blockIdx.x * blockDim.x + threadIdx.x; const unsigned long long y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size || y >= nsamples) { return; } // error const real err = labels[y] - sigmoidGPU(dots[y]); const real g = err * alpha * gradSigmoidGPU(dots[y]); // all threads that write into the same array index // atomicAdd(&gsyn0[focuses[y] * size + x], g*syn1neg[ctxes[y]*size + x]); atomicAdd(&syn1neg[ctxes[y] * size + x], g*syn0[focuses[y] * size + x]); } void runNegSamplingKernel(int nsamples, int *labels, unsigned long long *focuses, unsigned long long *ctxes, int num_uniq_focuses, int num_uniq_ctxes, unsigned long long *uniq_focuses, unsigned long long *uniq_ctxes) { hipMemcpy(dev_focuses, focuses, nsamples * sizeof(unsigned long long), hipMemcpyHostToDevice); hipMemcpy(dev_ctxes, ctxes, nsamples * sizeof(unsigned long long), hipMemcpyHostToDevice); hipMemcpy(dev_labels, labels, nsamples * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_uniq_focuses, uniq_focuses, num_uniq_focuses * sizeof(unsigned long long), hipMemcpyHostToDevice); hipMemcpy(dev_uniq_ctxes, uniq_ctxes, num_uniq_ctxes * sizeof(unsigned long long), hipMemcpyHostToDevice); dim3 threadDims3(TX, TY); dim3 blockDims3(calcBlockSize(layer1_size, TX), calcBlockSize(nsamples, TY)); hipLaunchKernelGGL(( zeroRealKernel), dim3(dim3(calcBlockSize(nsamples, 1024))), dim3(dim3(1024)), 0, 0, nsamples, dev_dots); hipLaunchKernelGGL(( dotsKernel), dim3(blockDims3), dim3(threadDims3), 0, 0, layer1_size, nsamples, dev_syn0, dev_syn1neg, dev_dots, dev_dots_scratch, dev_focuses, dev_ctxes); hipLaunchKernelGGL(( NegSamplingGradSyn0), dim3(blockDims3), dim3(threadDims3), 0, 0, layer1_size, nsamples, dev_dots, dev_labels, dev_gsyn0, dev_syn1neg, alpha, dev_focuses, dev_ctxes); hipLaunchKernelGGL(( NegSamplingGradSyn1Neg), dim3(blockDims3), dim3(threadDims3), 0, 0, layer1_size, nsamples, dev_dots, dev_labels, dev_syn0, dev_syn1neg, alpha, dev_focuses, dev_ctxes); { assert(num_uniq_focuses < NSAMPLES_PER_KERNEL_LAUNCH); dim3 threadDims2(TX, TY); dim3 blockDims2(calcBlockSize(layer1_size, TX), calcBlockSize(num_uniq_focuses, TY)); hipLaunchKernelGGL(( backpropGradIndirect), dim3(blockDims2), dim3(threadDims2), 0, 0, layer1_size, num_uniq_focuses, dev_syn0, dev_gsyn0, dev_uniq_focuses); } /* // normalize syn0 { assert(num_uniq_focuses <= 
nsamples); dim3 threadDims2(TX, TY); dim3 blockDims2(calcBlockSize(layer1_size, TX), calcBlockSize(num_uniq_focuses, TY)); // lensq of syn0 zeroRealKernel<<<dim3(calcBlockSize(num_uniq_focuses, 1024)), dim3(1024)>>>( num_uniq_focuses, dev_dots); lensqKernel<<<blockDims2, threadDims2>>>(layer1_size, num_uniq_focuses, dev_syn0, dev_dots, dev_dots_scratch, dev_uniq_focuses); normalizeVecKernel<<<blockDims2, threadDims2>>>(layer1_size, num_uniq_focuses, dev_syn0, dev_dots, dev_uniq_focuses); if (1) { real dots[num_uniq_focuses]; hipMemcpy(dots, dev_dots, num_uniq_focuses * sizeof(real), hipMemcpyDeviceToHost); for(int i = 0; i < num_uniq_focuses; ++i) { real dbg_syn0[layer1_size]; printf("i: %d | nsamples: %lld | uniq_focuses[i]: %lld | vocab_size: %lld\n", i, nsamples, uniq_focuses[i], vocab_size); fflush(stdout); hipMemcpy(&dbg_syn0, dev_syn0 + uniq_focuses[i] * layer1_size, layer1_size * sizeof(real), hipMemcpyDeviceToHost); float curlen = 0; printf("syn0[%d]: ", i); for(int j = 0; j < layer1_size; ++j) { curlen += dbg_syn0[j] * dbg_syn0[j]; printf("%4.2f ", dbg_syn0[j]); } printf(" | curlensq: %4.4f", curlen); printf(" | dots[i]: %4.4f ", dots[i]); printf("\n"); } } // normalize syn0 using lensq // normalizeVecKernel<<<blockDims3, threadDims3>>>(layer1_size, nsamples, // dev_syn0, dev_dots); } { zeroRealKernel<<<dim3(calcBlockSize(num_uniq_focuses, 1024)), dim3(1024)>>>( num_uniq_focuses, dev_dots); lensqKernel<<<blockDims2, threadDims2>>>(layer1_size, num_uniq_ctxes, dev_syn1neg, dev_dots, dev_dots_scratch, dev_uniq_ctxes); normalizeVecKernel<<<blockDims2, threadDims2>>>(layer1_size, num_uniq_ctxes, dev_syn0, dev_dots, dev_uniq_ctxes); } */ } void TrainModelThread(void *id) { long long a, b, d, word, last_word, sentence_length = 0, sentence_position = 0; long long word_count = 0, last_word_count = 0, sen[MAX_SENTENCE_LENGTH + 1]; // long long l1, l2; long long c, target, label, local_iter = iter; unsigned long long next_random = (long long)id; char eof = 0; clock_t now; real total_loss = 0; // real *neu1 = (real *)calloc(layer1_size, sizeof(real)); // real *neu1e = (real *)calloc(layer1_size, sizeof(real)); // buffer to store gradient of syn0 in one round // real *gsyn0 = (real *)calloc(layer1_size, sizeof(real)); // buffer to accumulate gradient of syn0 real *gsyn0_accum = (real *)calloc(layer1_size, sizeof(real)); // buffer to store gradient of syn1neg real *gsyn1neg = (real *)calloc(layer1_size, sizeof(real)); // buffer to store gradient of syn0 in a train step. 
real *gsyn0 = (real *)calloc(layer1_size, sizeof(real)); Vec neu1e; neu1e.alloczero(layer1_size); int ix = 0; int labels[NSAMPLES_PER_KERNEL_LAUNCH]; char codes[NSAMPLES_PER_KERNEL_LAUNCH]; unsigned long long ctxes[NSAMPLES_PER_KERNEL_LAUNCH], focuses[NSAMPLES_PER_KERNEL_LAUNCH], uniq_focuses[NSAMPLES_PER_KERNEL_LAUNCH], uniq_ctxes[NSAMPLES_PER_KERNEL_LAUNCH]; int n_uniq_focuses = 0; int n_uniq_ctxes = 0; bool focus_seen[vocab_size], ctx_seen[vocab_size]; for(int i = 0; i < vocab_size; ++i) focus_seen[i] = false; for(int i = 0; i < vocab_size; ++i) ctx_seen[i] = false; FILE *fi = fopen(train_file, "rb"); fseek(fi, file_size / (long long)num_threads * (long long)id, SEEK_SET); while (1) { if (word_count - last_word_count > 1) { word_count_actual += word_count - last_word_count; last_word_count = word_count; if ((debug_mode > 1)) { now = clock(); printf( "%cAlpha: %f Progress: %.2f%% Words/thread/sec: " "%.2fk Total loss: %4.2f", 13, alpha, word_count_actual / (real)(iter * train_words + 1) * 100, word_count_actual / ((real)(now - start + 1) / (real)CLOCKS_PER_SEC * 1000), total_loss); fflush(stdout); total_loss = 0; } // alpha = starting_alpha * // (1 - word_count_actual / (real)(iter * train_words + 1)); // if (alpha < starting_alpha * 0.0001) // alpha = starting_alpha * 0.0001; } if (sentence_length == 0) { while (1) { word = ReadWordIndex(fi, &eof); if (eof) break; if (word == -1) continue; word_count++; if (word == 0) break; // The subsampling randomly discards frequent words while // keeping the ranking same if (sample > 0) { real ran = (sqrt(vocab[word].cn / (sample * train_words)) + 1) * (sample * train_words) / vocab[word].cn; next_random = next_random * (unsigned long long)25214903917 + 11; if (ran < (next_random & 0xFFFF) / (real)65536) continue; } sen[sentence_length] = word; sentence_length++; if (sentence_length >= MAX_SENTENCE_LENGTH) break; } sentence_position = 0; } if (eof || (word_count > train_words / num_threads)) { eof = 0; word_count_actual += word_count - last_word_count; local_iter--; if (local_iter == 0) break; word_count = 0; last_word_count = 0; sentence_length = 0; fseek(fi, file_size / (long long)num_threads * (long long)id, SEEK_SET); // alpha = starting_alpha * // (1 - word_count_actual / (real)(iter * train_words + 1)); // if (alpha < starting_alpha * 0.0001) // alpha = starting_alpha * 0.0001; continue; } word = sen[sentence_position]; if (word == -1) continue; // for (c = 0; c < layer1_size; c++) neu1[c] = 0; neu1e.fillzero(); // for (c = 0; c < layer1_size; c++) neu1e[c] = 0; next_random = next_random * (unsigned long long)25214903917 + 11; b = next_random % window; /* if (cbow) { // train the cbow architecture // in -> hidden cw = 0; for (a = b; a < window * 2 + 1 - b; a++) if (a != window) { c = sentence_position - window + a; if (c < 0) continue; if (c >= sentence_length) continue; last_word = sen[c]; if (last_word == -1) continue; neu1e.accumadd(syn0.ix(last_word)); // for (c = 0; c < layer1_size; c++) // neu1[c] += syn0[c + last_word * layer1_size]; cw++; } if (cw) { for (c = 0; c < layer1_size; c++) neu1[c] /= cw; if (hs) for (d = 0; d < vocab[word].codelen; d++) { f = 0; l2 = vocab[word].point[d] * layer1_size; // Propagate hidden -> output f += neu1.dot(syn1); // for (c = 0; c < layer1_size; c++) // f += neu1[c] * syn1[c + l2]; if (f <= -MAX_EXP) continue; else if (f >= MAX_EXP) continue; else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]; // 'g' is the gradient multiplied by the learning rate g = (1 - vocab[word].code[d] - f) 
* alpha; // Propagate errors output -> hidden for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1[c + l2]; // Learn weights hidden -> output for (c = 0; c < layer1_size; c++) syn1[c + l2] += g * neu1[c]; } // NEGATIVE SAMPLING if (negative > 0) for (d = 0; d < negative + 1; d++) { if (d == 0) { target = word; label = 1; } else { next_random = next_random * (unsigned long long)25214903917 + 11; target = table[(next_random >> 16) % table_size]; if (target == 0) target = next_random % (vocab_size - 1) + 1; if (target == word) continue; label = 0; } l2 = target * layer1_size; f = 0; for (c = 0; c < layer1_size; c++) f += neu1[c] * syn1neg[c + l2]; if (f > MAX_EXP) g = (label - 1) * alpha; else if (f < -MAX_EXP) g = (label - 0) * alpha; else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha; for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1neg[c + l2]; for (c = 0; c < layer1_size; c++) syn1neg[c + l2] += g * neu1[c]; } // hidden -> in for (a = b; a < window * 2 + 1 - b; a++) if (a != window) { c = sentence_position - window + a; if (c < 0) continue; if (c >= sentence_length) continue; last_word = sen[c]; if (last_word == -1) continue; for (c = 0; c < layer1_size; c++) syn0[c + last_word * layer1_size] += neu1e[c]; } } } else { // train skip-gram */ for (a = b; a < window * 2 + 1 - b; a++) if (a != window) { c = sentence_position - window + a; if (c < 0) continue; if (c >= sentence_length) continue; last_word = sen[c]; if (last_word == -1) continue; // l1 = last_word * layer1_size; // Vec *syn0v = &syn0[last_word]; // neu1e.fillzero(); // for (c = 0; c < layer1_size; c++) neu1e[c] = 0; // HIERARCHICAL SOFTMAX if (hs) { for (d = 0; d < vocab[word].codelen; d++) { // float f = 0; // l2 = vocab[word].point[d] * layer1_size; const unsigned long long target = vocab[word].point[d]; focuses[ix] = last_word; ctxes[ix] = target; codes[ix] = vocab[word].code[d]; if (!focus_seen[last_word]) { uniq_focuses[n_uniq_focuses] = last_word; n_uniq_focuses++; focus_seen[last_word] = true; } if (!ctx_seen[target]) { uniq_ctxes[n_uniq_ctxes] = target; n_uniq_ctxes++; ctx_seen[target] = true; } if (ix == NSAMPLES_PER_KERNEL_LAUNCH - 1) { runHSKernel(NSAMPLES_PER_KERNEL_LAUNCH, focuses, ctxes, codes, n_uniq_focuses, uniq_focuses, n_uniq_ctxes, uniq_ctxes); ix = 0; for(int i = 0; i < n_uniq_ctxes; ++i) { ctx_seen[uniq_ctxes[i]] = false; } for(int i = 0; i < n_uniq_focuses; ++i) { focus_seen[uniq_focuses[i]] = false; } n_uniq_ctxes = 0; n_uniq_focuses = 0; } else { ix++; } /* // Propagate hidden -> output float f = 0; for (c = 0; c < layer1_size; c++) f += syn0[c + l1] * syn1[c + l2]; if (f <= -MAX_EXP) continue; else if (f >= MAX_EXP) continue; else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]; // 'g' is the gradient multiplied by the learning // rate float g = (1 - vocab[word].code[d] - f) * alpha; // Propagate errors output -> hidden for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1[c + l2]; // Learn weights hidden -> output for (c = 0; c < layer1_size; c++) syn1[c + l2] += g * syn0[c + l1]; */ } } // NEGATIVE SAMPLING if (!hs && negative > 0) { for (d = 0; d < negative + 1; d++) { if (d == 0) { target = word; label = 1; } else { next_random = next_random * (unsigned long long)25214903917 + 11; target = table[(next_random >> 16) % table_size]; if (target == 0) target = next_random % (vocab_size - 1) + 1; if (target == word) continue; label = 0; } labels[ix] = label; focuses[ix] = last_word; ctxes[ix] = target; if (!focus_seen[last_word]) { 
uniq_focuses[n_uniq_focuses] = last_word; n_uniq_focuses++; focus_seen[last_word] = true; } if (!ctx_seen[target]) { uniq_ctxes[n_uniq_ctxes] = target; n_uniq_ctxes++; ctx_seen[target] = true; } if (ix == NSAMPLES_PER_KERNEL_LAUNCH - 1) { runNegSamplingKernel(NSAMPLES_PER_KERNEL_LAUNCH, labels, focuses, ctxes, n_uniq_focuses, n_uniq_ctxes, uniq_focuses, uniq_ctxes); ix = 0; n_uniq_ctxes = 0; n_uniq_focuses = 0; for(int i = 0; i < vocab_size; ++i) { ctx_seen[i] = false; } for(int i = 0; i < vocab_size; ++i) { focus_seen[i] = false; } } else { ix++; } } // end for loop for negative sampling } // end condition around negative samples // Learn weights input -> hidden // for (c = 0; c < layer1_size; c++) syn0[c + l1] += neu1e[c]; } // end a != window sentence_position++; if (sentence_position >= sentence_length) { sentence_length = 0; continue; } } assert(ix < NSAMPLES_PER_KERNEL_LAUNCH); // consume leftover data. if (hs > 0) { runHSKernel(ix, focuses, ctxes, codes, n_uniq_focuses, uniq_focuses, n_uniq_ctxes, uniq_ctxes); } else if (negative > 0) { runNegSamplingKernel(ix, labels, focuses, ctxes, n_uniq_focuses, n_uniq_ctxes, uniq_focuses, uniq_ctxes); } fclose(fi); // free(neu1); neu1e.freemem(); // pthread_exit(NULL); } void TrainModel() { long a, b, c, d; FILE *fo; pthread_t *pt = (pthread_t *)malloc(num_threads * sizeof(pthread_t)); printf("Starting training using file %s\n", train_file); starting_alpha = alpha; if (read_vocab_file[0] != 0) ReadVocab(); else LearnVocabFromTrainFile(); if (save_vocab_file[0] != 0) SaveVocab(); if (output_file[0] == 0) return; InitNet(); if (negative > 0) InitUnigramTable(); start = clock(); if (iter > 0) { for(int i = 0; i < num_threads; ++i) { TrainModelThread((void *)i); // alpha = starting_alpha * // (1 - word_count_actual / (real)(iter * train_words + 1)); // if (alpha < starting_alpha * 0.0001) // alpha = starting_alpha * 0.0001; } // for (a = 0; a < num_threads; a++) // pthread_create(&pt[a], NULL, TrainModelThread, (void *)a); // for (a = 0; a < num_threads; a++) pthread_join(pt[a], NULL); } fo = fopen(output_file, "wb"); if (classes == 0) { real *syn0_out = (real*) malloc(sizeof(real) * layer1_size * vocab_size); // [vocab_size * layer1_size]; hipMemcpy(syn0_out, dev_syn0, vocab_size * layer1_size * sizeof(real), hipMemcpyDeviceToHost); fprintf(fo, "%lld %lld\n", vocab_size, layer1_size); // printf("%lld %lld\n", vocab_size, layer1_size); for (a = 0; a < vocab_size; a++) { fprintf(fo, "%s ", vocab[a].word); // printf("\n%s ", vocab[a].word); if (binary) { for (b = 0; b < layer1_size; b++) { fwrite(syn0_out + a *layer1_size + b, sizeof(real), 1, fo); // printf("%f ", *(syn0_out + a *layer1_size + b)); } } else { for (b = 0; b < layer1_size; b++) { fprintf(fo, "%lf ", syn0[a].v[b]); } } fprintf(fo, "\n"); } } else { // Run K-means on the word vectors int clcn = classes, iter = 10, closeid; int *centcn = (int *)malloc(classes * sizeof(int)); int *cl = (int *)calloc(vocab_size, sizeof(int)); real closev, x; real *cent = (real *)calloc(classes * layer1_size, sizeof(real)); for (a = 0; a < vocab_size; a++) cl[a] = a % clcn; for (a = 0; a < iter; a++) { for (b = 0; b < clcn * layer1_size; b++) cent[b] = 0; for (b = 0; b < clcn; b++) centcn[b] = 1; for (c = 0; c < vocab_size; c++) { for (d = 0; d < layer1_size; d++) cent[layer1_size * cl[c] + d] += syn0[c].v[d]; centcn[cl[c]]++; } for (b = 0; b < clcn; b++) { closev = 0; for (c = 0; c < layer1_size; c++) { cent[layer1_size * b + c] /= centcn[b]; closev += cent[layer1_size * b + c] * cent[layer1_size * b + 
c]; } closev = sqrt(closev); for (c = 0; c < layer1_size; c++) cent[layer1_size * b + c] /= closev; } for (c = 0; c < vocab_size; c++) { closev = -10; closeid = 0; for (d = 0; d < clcn; d++) { x = 0; for (b = 0; b < layer1_size; b++) x += cent[layer1_size * d + b] * syn0[c].v[b]; if (x > closev) { closev = x; closeid = d; } } cl[c] = closeid; } } // Save the K-means classes for (a = 0; a < vocab_size; a++) fprintf(fo, "%s %d\n", vocab[a].word, cl[a]); free(centcn); free(cent); free(cl); } fclose(fo); } int ArgPos(char *str, int argc, char **argv) { int a; for (a = 1; a < argc; a++) if (!strcmp(str, argv[a])) { if (a == argc - 1) { printf("Argument missing for %s\n", str); exit(1); } return a; } return -1; } int mainw2v(int argc, char **argv) { int i; if (argc == 1) { printf("WORD VECTOR estimation toolkit v 0.1c\n\n"); printf("Options:\n"); printf("Parameters for training:\n"); printf("\t-train <file>\n"); printf("\t\tUse text data from <file> to train the model\n"); printf("\t-output <file>\n"); printf( "\t\tUse <file> to save the resulting word vectors / word " "clusters\n"); printf("\t-size <int>\n"); printf("\t\tSet size of word vectors; default is 100\n"); printf("\t-window <int>\n"); printf("\t\tSet max skip length between words; default is 5\n"); printf("\t-sample <float>\n"); printf( "\t\tSet threshold for occurrence of words. Those that appear " "with " "higher frequency in the training data\n"); printf( "\t\twill be randomly down-sampled; default is 1e-3, useful " "range " "is (0, 1e-5)\n"); printf("\t-hs <int>\n"); printf("\t\tUse Hierarchical Softmax; default is 0 (not used)\n"); printf("\t-negative <int>\n"); printf( "\t\tNumber of negative examples; default is 5, common values " "are " "3 - 10 (0 = not used)\n"); printf("\t-threads <int>\n"); printf("\t\tUse <int> threads (default 12)\n"); printf("\t-iter <int>\n"); printf("\t\tRun more training iterations (default 5)\n"); printf("\t-min-count <int>\n"); printf( "\t\tThis will discard words that appear less than <int> " "times; " "default is 5\n"); printf("\t-alpha <float>\n"); printf( "\t\tSet the starting learning rate; default is 0.025 for " "skip-gram and 0.05 for CBOW\n"); printf("\t-classes <int>\n"); printf( "\t\tOutput word classes rather than word vectors; default " "number " "of classes is 0 (vectors are written)\n"); printf("\t-debug <int>\n"); printf( "\t\tSet the debug mode (default = 2 = more info during " "training)\n"); printf("\t-binary <int>\n"); printf( "\t\tSave the resulting vectors in binary moded; default is 0 " "(off)\n"); printf("\t-save-vocab <file>\n"); printf("\t\tThe vocabulary will be saved to <file>\n"); printf("\t-read-vocab <file>\n"); printf( "\t\tThe vocabulary will be read from <file>, not constructed " "from " "the training data\n"); printf("\t-cbow <int>\n"); printf( "\t\tUse the continuous bag of words model; default is 1 (use " "0 " "for skip-gram model)\n"); printf("\nExamples:\n"); printf( "./word2vec -train data.txt -output vec.txt -size 200 -window " "5 " "-sample 1e-4 -negative 5 -hs 0 -binary 0 -cbow 1 -iter 3\n\n"); return 0; } output_file[0] = 0; save_vocab_file[0] = 0; read_vocab_file[0] = 0; if ((i = ArgPos((char *)"-size", argc, argv)) > 0) layer1_size = atoi(argv[i + 1]); fprintf(stdout, "size: %lld\n", layer1_size); if ((i = ArgPos((char *)"-train", argc, argv)) > 0) strcpy(train_file, argv[i + 1]); if ((i = ArgPos((char *)"-save-vocab", argc, argv)) > 0) strcpy(save_vocab_file, argv[i + 1]); if ((i = ArgPos((char *)"-read-vocab", argc, argv)) > 0) strcpy(read_vocab_file, 
argv[i + 1]); if ((i = ArgPos((char *)"-debug", argc, argv)) > 0) debug_mode = atoi(argv[i + 1]); if ((i = ArgPos((char *)"-binary", argc, argv)) > 0) binary = atoi(argv[i + 1]); if ((i = ArgPos((char *)"-cbow", argc, argv)) > 0) cbow = atoi(argv[i + 1]); fprintf(stdout, "cbow: %d\n", cbow); if (cbow) alpha = 0.05; if ((i = ArgPos((char *)"-alpha", argc, argv)) > 0) alpha = atof(argv[i + 1]); fprintf(stdout, "alpha: %f\n", alpha); if ((i = ArgPos((char *)"-output", argc, argv)) > 0) strcpy(output_file, argv[i + 1]); if ((i = ArgPos((char *)"-window", argc, argv)) > 0) window = atoi(argv[i + 1]); fprintf(stdout, "window: %d\n", window); if ((i = ArgPos((char *)"-sample", argc, argv)) > 0) sample = atof(argv[i + 1]); if ((i = ArgPos((char *)"-hs", argc, argv)) > 0) hs = atoi(argv[i + 1]); fprintf(stdout, "hs: %d\n", hs); if ((i = ArgPos((char *)"-negative", argc, argv)) > 0) negative = atoi(argv[i + 1]); fprintf(stdout, "negative: %d\n", negative); if ((i = ArgPos((char *)"-threads", argc, argv)) > 0) num_threads = atoi(argv[i + 1]); if ((i = ArgPos((char *)"-iter", argc, argv)) > 0) iter = atoi(argv[i + 1]); if ((i = ArgPos((char *)"-min-count", argc, argv)) > 0) min_count = atoi(argv[i + 1]); if ((i = ArgPos((char *)"-classes", argc, argv)) > 0) classes = atoi(argv[i + 1]); vocab = (struct vocab_word *)calloc(vocab_max_size, sizeof(struct vocab_word)); vocab_hash = (int *)calloc(vocab_hash_size, sizeof(int)); expTable = (real *)malloc((EXP_TABLE_SIZE + 1) * sizeof(real)); for (i = 0; i < EXP_TABLE_SIZE; i++) { expTable[i] = exp((i / (real)EXP_TABLE_SIZE * 2 - 1) * MAX_EXP); // Precompute the exp() table expTable[i] = expTable[i] / (expTable[i] + 1); // Precompute f(x) = x / (x + 1) } TrainModel(); return 0; } int main(int argc, char *argv[]) { mainw2v(argc, argv); return 0; }
bb82e2173231c647f8016330918db9be5b4003cc.cu
// Copyright 2013 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <assert.h> #include <iostream> #include <math.h> #include <pthread.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <cooperative_groups.h> // https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api #define GPU_ERRCHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) { exit(code); }; } } typedef float real; // Precision of float numbers using namespace cooperative_groups; #define MAX_STRING 100 #define EXP_TABLE_SIZE 1000 #define MAX_EXP 6 // HACK: THIS IS 1000 #define MAX_SENTENCE_LENGTH 128 #define MAX_CODE_LENGTH 40 const int vocab_hash_size = 30000000; // Maximum 30 * 0.7 = 21M words in the vocabulary struct vocab_word { long long cn; int *point; char *word, *code, codelen; }; struct Vec { int len; real *v; public: inline void freemem() { free(v); } // return the allocation size needed for a vector of dimension len static long int alloc_size_for_dim(int d) { return d * sizeof(real); } inline void alloc(int len) { this->len = len; // int a = cudaMalloc((void **)&v, 128 * (long long)len * sizeof(real)); int a = posix_memalign((void **)&v, 128, (long long)len * sizeof(real)); assert(v != nullptr && "memory allocation failed"); (void)a; } inline void alloczero(int len) { this->len = len; this->v = (real *)calloc(len, sizeof(real)); } inline void fillzero() const { for (int i = 0; i < len; ++i) { v[i] = 0; } } }; char train_file[MAX_STRING], output_file[MAX_STRING]; char save_vocab_file[MAX_STRING], read_vocab_file[MAX_STRING]; struct vocab_word *vocab; int binary = 0, cbow = 1, debug_mode = 2, window = 5, min_count = 5, num_threads = 12, min_reduce = 1; int *vocab_hash; long long vocab_max_size = 1000, vocab_size = 0, layer1_size = 100; long long train_words = 0, word_count_actual = 0, iter = 5, file_size = 0, classes = 0; real alpha = 0.025, starting_alpha, sample = 1e-3; Vec *syn0, *syn1, *syn1neg; real *quadform; real *dev_syn0, *dev_syn1neg, *dev_quadform; real *dev_gsyn0, *dev_gsyn1neg; // mask for whether gradient has been updated bool *dev_mask_syn0; bool *dev_mask_syn1neg; real *dev_dots; real *dev_dots_scratch; const int NSAMPLES_PER_KERNEL_LAUNCH = 1e5; // const int NSAMPLES_PER_KERNEL_LAUNCH = 1e5; int *dev_labels; char *dev_codes; unsigned long long *dev_focuses, *dev_ctxes; unsigned long long *dev_uniq_focuses, *dev_uniq_ctxes; real *expTable; clock_t start; int hs = 0, negative = 5; const int table_size = 1e8; int *table; real *dev_total_loss; unsigned long long calcBlockSize(unsigned long long total, unsigned long long thread) { if (total / thread == 0) return 1; return (total / thread) + ((total % thread) != 0); } __global__ void zeroRealKernel(const unsigned long long size, real *r) { const 
unsigned long long x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= size) return; r[x] = 0; } void InitUnigramTable() { int a, i; double train_words_pow = 0; double d1, power = 0.75; table = (int *)malloc(table_size * sizeof(int)); for (a = 0; a < vocab_size; a++) train_words_pow += pow(vocab[a].cn, power); i = 0; d1 = pow(vocab[i].cn, power) / train_words_pow; for (a = 0; a < table_size; a++) { table[a] = i; if (a / (double)table_size > d1) { i++; d1 += pow(vocab[i].cn, power) / train_words_pow; } if (i >= vocab_size) i = vocab_size - 1; } } // Reads a single word from a file, assuming space + tab + EOL to be word // boundaries void ReadWord(char *word, FILE *fin, char *eof) { int a = 0, ch; while (1) { ch = fgetc_unlocked(fin); if (ch == EOF) { *eof = 1; break; } if (ch == 13) continue; if ((ch == ' ') || (ch == '\t') || (ch == '\n')) { if (a > 0) { if (ch == '\n') ungetc(ch, fin); break; } if (ch == '\n') { strcpy(word, (char *)"</s>"); return; } else continue; } word[a] = ch; a++; if (a >= MAX_STRING - 1) a--; // Truncate too long words } word[a] = 0; } // Returns hash value of a word int GetWordHash(char *word) { unsigned long long a, hash = 0; for (a = 0; a < strlen(word); a++) hash = hash * 257 + word[a]; hash = hash % vocab_hash_size; return hash; } // Returns position of a word in the vocabulary; if the word is not found, // returns -1 int SearchVocab(char *word) { unsigned int hash = GetWordHash(word); while (1) { if (vocab_hash[hash] == -1) return -1; if (!strcmp(word, vocab[vocab_hash[hash]].word)) return vocab_hash[hash]; hash = (hash + 1) % vocab_hash_size; } assert(false && "unreachable"); // return -1; } // Reads a word and returns its index in the vocabulary int ReadWordIndex(FILE *fin, char *eof) { char word[MAX_STRING], eof_l = 0; ReadWord(word, fin, &eof_l); if (eof_l) { *eof = 1; return -1; } return SearchVocab(word); } // Adds a word to the vocabulary int AddWordToVocab(char *word) { unsigned int hash, length = strlen(word) + 1; if (length > MAX_STRING) length = MAX_STRING; vocab[vocab_size].word = (char *)calloc(length, sizeof(char)); strcpy(vocab[vocab_size].word, word); vocab[vocab_size].cn = 0; vocab_size++; // Reallocate memory if needed if (vocab_size + 2 >= vocab_max_size) { vocab_max_size += 1000; vocab = (struct vocab_word *)realloc( vocab, vocab_max_size * sizeof(struct vocab_word)); } hash = GetWordHash(word); while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size; vocab_hash[hash] = vocab_size - 1; return vocab_size - 1; } // Used later for sorting by word counts int VocabCompare(const void *a, const void *b) { long long l = ((struct vocab_word *)b)->cn - ((struct vocab_word *)a)->cn; if (l > 0) return 1; if (l < 0) return -1; return 0; } // Sorts the vocabulary by frequency using word counts void SortVocab() { int a, size; unsigned int hash; // Sort the vocabulary and keep </s> at the first position qsort(&vocab[1], vocab_size - 1, sizeof(struct vocab_word), VocabCompare); for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1; size = vocab_size; train_words = 0; for (a = 0; a < size; a++) { // Words occuring less than min_count times will be discarded from // the vocab if ((vocab[a].cn < min_count) && (a != 0)) { vocab_size--; free(vocab[a].word); } else { // Hash will be re-computed, as after the sorting it is not // actual hash = GetWordHash(vocab[a].word); while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size; vocab_hash[hash] = a; train_words += vocab[a].cn; } } vocab = (struct vocab_word *)realloc( vocab, (vocab_size + 
1) * sizeof(struct vocab_word)); // Allocate memory for the binary tree construction for (a = 0; a < vocab_size; a++) { vocab[a].code = (char *)calloc(MAX_CODE_LENGTH, sizeof(char)); vocab[a].point = (int *)calloc(MAX_CODE_LENGTH, sizeof(int)); } } // Reduces the vocabulary by removing infrequent tokens void ReduceVocab() { int a, b = 0; unsigned int hash; for (a = 0; a < vocab_size; a++) if (vocab[a].cn > min_reduce) { vocab[b].cn = vocab[a].cn; vocab[b].word = vocab[a].word; b++; } else free(vocab[a].word); vocab_size = b; for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1; for (a = 0; a < vocab_size; a++) { // Hash will be re-computed, as it is not actual hash = GetWordHash(vocab[a].word); while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size; vocab_hash[hash] = a; } fflush(stdout); min_reduce++; } // Create binary Huffman tree using the word counts // Frequent words will have short uniqe binary codes void CreateBinaryTree() { long long a, b, i, min1i, min2i, pos1, pos2, point[MAX_CODE_LENGTH]; char code[MAX_CODE_LENGTH]; long long *count = (long long *)calloc(vocab_size * 2 + 1, sizeof(long long)); long long *binary = (long long *)calloc(vocab_size * 2 + 1, sizeof(long long)); long long *parent_node = (long long *)calloc(vocab_size * 2 + 1, sizeof(long long)); for (a = 0; a < vocab_size; a++) count[a] = vocab[a].cn; for (a = vocab_size; a < vocab_size * 2; a++) count[a] = 1e15; pos1 = vocab_size - 1; pos2 = vocab_size; // Following algorithm constructs the Huffman tree by adding one node at // a time for (a = 0; a < vocab_size - 1; a++) { // First, find two smallest nodes 'min1, min2' if (pos1 >= 0) { if (count[pos1] < count[pos2]) { min1i = pos1; pos1--; } else { min1i = pos2; pos2++; } } else { min1i = pos2; pos2++; } if (pos1 >= 0) { if (count[pos1] < count[pos2]) { min2i = pos1; pos1--; } else { min2i = pos2; pos2++; } } else { min2i = pos2; pos2++; } count[vocab_size + a] = count[min1i] + count[min2i]; parent_node[min1i] = vocab_size + a; parent_node[min2i] = vocab_size + a; binary[min2i] = 1; } // Now assign binary code to each vocabulary word for (a = 0; a < vocab_size; a++) { b = a; i = 0; while (1) { code[i] = binary[b]; point[i] = b; i++; b = parent_node[b]; if (b == vocab_size * 2 - 2) break; } vocab[a].codelen = i; vocab[a].point[0] = vocab_size - 2; for (b = 0; b < i; b++) { vocab[a].code[i - b - 1] = code[b]; vocab[a].point[i - b] = point[b] - vocab_size; } } free(count); free(binary); free(parent_node); } void LearnVocabFromTrainFile() { char word[MAX_STRING], eof = 0; FILE *fin; long long a, i, wc = 0; for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1; fin = fopen(train_file, "rb"); if (fin == NULL) { printf("ERROR: training data file not found!\n"); exit(1); } vocab_size = 0; AddWordToVocab((char *)"</s>"); while (1) { ReadWord(word, fin, &eof); if (eof) break; train_words++; wc++; if ((debug_mode > 1) && (wc >= 1000000)) { printf("%lldM%c", train_words / 1000000, 13); fflush(stdout); wc = 0; } i = SearchVocab(word); if (i == -1) { a = AddWordToVocab(word); vocab[a].cn = 1; } else vocab[i].cn++; if (vocab_size > vocab_hash_size * 0.7) ReduceVocab(); } SortVocab(); if (debug_mode > 0) { printf("Vocab size: %lld\n", vocab_size); printf("Words in train file: %lld\n", train_words); } file_size = ftell(fin); fclose(fin); } void SaveVocab() { long long i; FILE *fo = fopen(save_vocab_file, "wb"); for (i = 0; i < vocab_size; i++) fprintf(fo, "%s %lld\n", vocab[i].word, vocab[i].cn); fclose(fo); } void ReadVocab() { long long a, i = 0; char c, eof = 0; 
char word[MAX_STRING]; FILE *fin = fopen(read_vocab_file, "rb"); if (fin == NULL) { printf("Vocabulary file not found\n"); exit(1); } for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1; vocab_size = 0; while (1) { ReadWord(word, fin, &eof); if (eof) break; a = AddWordToVocab(word); fscanf(fin, "%lld%c", &vocab[a].cn, &c); i++; } SortVocab(); if (debug_mode > 0) { printf("Vocab size: %lld\n", vocab_size); printf("Words in train file: %lld\n", train_words); } fin = fopen(train_file, "rb"); if (fin == NULL) { printf("ERROR: training data file not found!\n"); exit(1); } fseek(fin, 0, SEEK_END); file_size = ftell(fin); fclose(fin); } void InitNet() { long long a, b; unsigned long long next_random = 1; a = posix_memalign((void **)&syn0, 128, (long long)vocab_size * sizeof(Vec)); if (syn0 == NULL) { printf("%d: Memory allocation failed\n", __LINE__); exit(1); } GPU_ERRCHECK(cudaMalloc((void **)&dev_syn0, (long long) vocab_size * layer1_size * sizeof(real))); GPU_ERRCHECK(cudaMalloc((void **)&dev_gsyn0, (long long) vocab_size * layer1_size * sizeof(real))); zeroRealKernel<<<dim3(calcBlockSize(vocab_size * layer1_size, 512)), dim3(512)>>>((long long)vocab_size * layer1_size, dev_gsyn0); printf("allocating syn0..."); for (a = 0; a < vocab_size; ++a) { new (Vec)(syn0[a]); syn0[a].alloc(layer1_size); for (b = 0; b < layer1_size; b++) { next_random = next_random * (unsigned long long)25214903917 + 11; syn0[a].v[b] = (((next_random & 0xFFFF) / (real)65536) - 0.5) / (layer1_size); } // copy vector to host cudaMemcpy(dev_syn0 + layer1_size * a, syn0[a].v, layer1_size * sizeof(real), cudaMemcpyHostToDevice); } printf("%callocated syn0.\t\t\t\t\n", 13); // if (hs) { // a = posix_memalign((void **)&syn1, 128, // (long long)vocab_size * layer1_size * // sizeof(real)); // if (syn1 == NULL) { // printf("Memory allocation failed\n"); // exit(1); // } // for (a = 0; a < vocab_size; a++) // for (b = 0; b < layer1_size; b++) syn1[a * layer1_size + b] = 0; // } printf("allocating syn1neg..."); GPU_ERRCHECK(cudaMalloc((void **)&dev_syn1neg, (long long) vocab_size * layer1_size * sizeof(real))); if (negative > 0) { a = posix_memalign((void **)&syn1neg, 128, (long long)vocab_size * sizeof(Vec)); if (syn1neg == NULL) { printf("%d: Memory allocation failed\n", __LINE__); exit(1); } printf("%d: Randomly initializing syn1neg...", __LINE__); for (a = 0; a < vocab_size; a++) { new (Vec)(syn1neg[a]); syn1neg[a].alloc(layer1_size); // for (b = 0; b < layer1_size; b++) syn1neg[a].v[b] = 0; for (b = 0; b < layer1_size; b++) { next_random = next_random * (unsigned long long)25214903917 + 11; syn1neg[a].v[b] = (((next_random & 0xFFFF) / (real)65536) - 0.5) / (layer1_size); } // copy vector to device cudaMemcpy(dev_syn1neg + layer1_size * a, syn1neg[a].v, layer1_size * sizeof(real), cudaMemcpyHostToDevice); } } GPU_ERRCHECK(cudaMalloc((void **)&dev_gsyn1neg, (long long) vocab_size * layer1_size * sizeof(real))); zeroRealKernel<<<dim3(calcBlockSize((long long)vocab_size * layer1_size, 512)), dim3(512)>>>((long long)vocab_size * layer1_size, dev_gsyn1neg); printf("%callocated syn1neg.\t\t\t\t\n", 13); a = posix_memalign((void **)&quadform, 128, (long long)layer1_size * layer1_size * sizeof(real)); if (quadform == NULL) { printf("%d: Memory allocation failed\n", __LINE__); exit(1); } for(int i = 0; i < layer1_size; ++i){ for(int j = 0; j < layer1_size; ++j) { // dot product quadform[i * layer1_size + j] = i == j ? 
1 : 0; } } cudaMalloc((void **)&dev_quadform, (long long) layer1_size * layer1_size * sizeof(real)); cudaMemcpy(dev_quadform, quadform, layer1_size * layer1_size * sizeof(real), cudaMemcpyHostToDevice); cudaMalloc((void **)&dev_dots, (long long) NSAMPLES_PER_KERNEL_LAUNCH * sizeof(real)); cudaMalloc((void **)&dev_labels, (long long) NSAMPLES_PER_KERNEL_LAUNCH * sizeof(int)); cudaMalloc((void **)&dev_codes, (long long) NSAMPLES_PER_KERNEL_LAUNCH * sizeof(char)); cudaMalloc((void **)&dev_focuses, (long long) NSAMPLES_PER_KERNEL_LAUNCH * sizeof(unsigned long long)); cudaMalloc((void **)&dev_ctxes, (long long) NSAMPLES_PER_KERNEL_LAUNCH * sizeof(unsigned long long)); cudaMalloc((void **)&dev_uniq_focuses, (long long) NSAMPLES_PER_KERNEL_LAUNCH * sizeof(unsigned long long)); cudaMalloc((void **)&dev_uniq_ctxes, (long long) NSAMPLES_PER_KERNEL_LAUNCH * sizeof(unsigned long long)); cudaMalloc((void **)&dev_total_loss, (long long) sizeof(real)); cudaMalloc((void **)&dev_mask_syn0, (long long) vocab_size * sizeof(bool)); cudaMalloc((void **)&dev_mask_syn1neg, (long long) vocab_size * sizeof(bool)); cudaMalloc((void **)&dev_dots_scratch, (long long) NSAMPLES_PER_KERNEL_LAUNCH * layer1_size * sizeof(real)); CreateBinaryTree(); } inline real sigmoid(real x) { // we are trying to calculate sigmoid(127) if (x > 5) { return 1; } if (x < -5) { return 0; } real exp = powf(2, x); return exp / (1 + exp); } // location y contains lensq of ixs[y] __global__ void lensqKernel(const int size, const int nsamples, const real * __restrict__ vecs, // SIZE x NSAMPLES real * __restrict__ lensqs, // NSAMPLES real * __restrict__ dotsScratch, // dotScratch: NSAMPLES_PER_KERNEL_LAUNCH * SIZE const unsigned long long *ixs // NSAMPLES ) { const unsigned long long x = blockIdx.x * blockDim.x + threadIdx.x; const unsigned long long y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size || y >= nsamples) return; const real dot = vecs[ixs[y] * size + x] * vecs[ixs[y]* size + x]; atomicAdd(&lensqs[y], dot); // dotsScratch[y*size + x] = dot; // int partition = size / 2; // while (partition > 0) { // if (x < partition) { // __syncthreads(); // dotsScratch[y*size+x] += dotsScratch[y*size+partition+x]; // } // partition = partition / 2; // } // __syncthreads(); // if (x == 0) { // // atomicAdd(&lensqs[y], dotsScratch[y * size + 0]); // lensqs[y] = dotsScratch[y * size]; // } } __global__ void normalizeVecKernel(const int size, const int nsamples, real * __restrict__ vecs, // SIZE x NSAMPLES const real * __restrict__ lensqs, // NSAMPLES const unsigned long long *ixs //NSAMPLES ) { const unsigned long long x = blockIdx.x * blockDim.x + threadIdx.x; const unsigned long long y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size || y >= nsamples) return; vecs[ixs[y]*size+x] /= sqrt(lensqs[y]); } __global__ void dotsKernel(const int size, const int nsamples, const real * __restrict__ syn0, // LAYER1_SIZE * VOCAB_SIZE const real * __restrict__ syn1neg, // LAYER1_SIZE * VOCAB_SIZE __restrict__ real *dots, // dots: [y] NSAMPLES_PER_KERNEL_LAUNCH __restrict__ real *dotsScratch, // dotScratch: NSAMPLES_PER_KERNEL_LAUNCH * LAYER1_SIZE const unsigned long long *focuses, // NSAMPLER_PER_KERNEL_LAUNCH const unsigned long long *ctxes) { const unsigned long long x = blockIdx.x * blockDim.x + threadIdx.x; const unsigned long long y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size || y >= nsamples) return; // dot product of (aT Q b)_xy for sample z const real dot = syn0[focuses[y] * size + x] * syn1neg[ctxes[y] * size + x]; 
atomicAdd(&dots[y], dot); // dotsScratch[y*size + x] = dot; // unsigned long long curix = x; // int partition = size / 2; // while (partition > 0) { // if (curix < partition) { // __syncthreads(); // atomicAdd(&dotsScratch[y*size+curix], dotsScratch[y*size+partition+curix]); // } // partition = partition / 2; // } // __syncthreads(); // if (curix == 0) { // // atomicAdd(&dots[y], dotsScratch[y * size + 0]); // dots[y] = dotsScratch[y * size]; // } } __device__ real sigmoidGPU(real x) { // we are trying to calculate sigmoid(127) // if (x > 5) { return 1; } // if (x < -5) { return 0; } return tanh(x); // real e = powf(2, x); // return e / (1 + e); } __device__ real gradSigmoidGPU(real x) { return 1 - tanh(x) * tanh(x); // return sigmoidGPU(x) * (1 - sigmoidGPU(x)); } #define FULL_MASK 0xffffffff __global__ void HSGradSyn0(const int size, const int nsamples, const real *__restrict__ dots, const char *__restrict__ codes, real *gsyn0, const real * __restrict__ syn1neg, const real alpha, const unsigned long long *__restrict__ focuses, const unsigned long long *__restrict__ ctxes) { const unsigned long long x = blockIdx.x * blockDim.x + threadIdx.x; const unsigned long long y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size || y >= nsamples) { return; } if (dots[y] > 5 || dots[y] < -5) {return; } // error const real err = 1 - codes[y] - sigmoidGPU(dots[y]); const real g = err * alpha * gradSigmoidGPU(dots[y]); // all threads that write into the same array index atomicAdd(&gsyn0[focuses[y] * size + x], g*syn1neg[ctxes[y]*size + x]); // atomicAdd(&syn1neg[ctxes[y] * size + x], g*syn0[focuses[y] * size + x]); } __global__ void HSGradSyn1Neg(const int size, const int nsamples, const real * __restrict__ dots, const char * __restrict__ codes, const real * __restrict__ syn0, __restrict__ real *syn1neg, const real alpha, const unsigned long long *__restrict__ focuses, const unsigned long long *__restrict__ ctxes) { const unsigned long long x = blockIdx.x * blockDim.x + threadIdx.x; const unsigned long long y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size || y >= nsamples) { return; } if (dots[y] > 5 || dots[y] < -5) { return; } // error const real err = 1 - codes[y] - sigmoidGPU(dots[y]); const real g = err * alpha * gradSigmoidGPU(dots[y]); // all threads that write into the same array index // atomicAdd(&gsyn0[focuses[y] * size + x], g*syn1neg[ctxes[y]*size + x]); atomicAdd(&syn1neg[ctxes[y] * size + x], g*syn0[focuses[y] * size + x]); } // 2D kernel: layer1_size x nsamples __global__ void backpropGradIndirect(const int size, const int nseen, __restrict__ real *vec, __restrict__ real *grad, const unsigned long long *seen) { const unsigned long long x = blockIdx.x * blockDim.x + threadIdx.x; const unsigned long long y = blockIdx.y * blockDim.y + threadIdx.y; // const int z = blockIdx.z * blockDim.z + threadIdx.z; if (x >= size || y >= nseen) { return; } // atomicAdd(&vec[seen[y]*size + x], grad[seen[y] * size + x]); vec[seen[y]*size + x] += grad[seen[y] * size + x]; grad[seen[y] * size + x] = 0; } const int TX = 128, TY = 8; void runHSKernel(int nsamples, unsigned long long *focuses, unsigned long long *ctxes, char *codes, const unsigned long long num_uniq_focuses, const unsigned long long *uniq_focuses, const unsigned long long num_uniq_ctxes, const unsigned long long *uniq_ctxes) { // printf("running HS kernel...\n"); // zeroRealKernel<<<dim3(calcBlockSize(vocab_size * layer1_size, 1024)), dim3(1024)>>>(vocab_size * layer1_size, dev_gsyn0); // 
zeroRealKernel<<<dim3(calcBlockSize(vocab_size * layer1_size, 1024)), dim3(1024)>>>(vocab_size * layer1_size, dev_gsyn1neg); cudaMemcpy(dev_focuses, focuses, nsamples * sizeof(unsigned long long), cudaMemcpyHostToDevice); cudaMemcpy(dev_ctxes, ctxes, nsamples * sizeof(unsigned long long), cudaMemcpyHostToDevice); cudaMemcpy(dev_codes, codes, nsamples * sizeof(char), cudaMemcpyHostToDevice); cudaMemcpy(dev_uniq_focuses, uniq_focuses, num_uniq_focuses * sizeof(unsigned long long), cudaMemcpyHostToDevice); cudaMemcpy(dev_uniq_ctxes, uniq_ctxes, num_uniq_ctxes * sizeof(unsigned long long), cudaMemcpyHostToDevice); dim3 threadDims3(TX, TY); dim3 blockDims3(calcBlockSize(layer1_size, TX), calcBlockSize(nsamples, TY)); zeroRealKernel<<<dim3(calcBlockSize(NSAMPLES_PER_KERNEL_LAUNCH, 1024)), dim3(1024)>>>(NSAMPLES_PER_KERNEL_LAUNCH, dev_dots); dotsKernel<<<blockDims3, threadDims3>>>(layer1_size, nsamples, dev_syn0, dev_syn1neg, dev_dots, dev_dots_scratch, dev_focuses, dev_ctxes); if (0) { real dots[nsamples]; cudaMemcpy(dots, dev_dots, nsamples * sizeof(real), cudaMemcpyDeviceToHost); printf("DOTS: "); for(int i = 0; i < nsamples; ++i) printf("%4.2f ", dots[i]); getchar(); } // printf("launching graDHS kernel...\n"); HSGradSyn0<<<blockDims3, threadDims3>>>(layer1_size, nsamples, dev_dots, dev_codes, dev_gsyn0, dev_syn1neg, alpha, dev_focuses, dev_ctxes); HSGradSyn1Neg<<<blockDims3, threadDims3>>>(layer1_size, nsamples, dev_dots, dev_codes, dev_syn0, dev_syn1neg, alpha, dev_focuses, dev_ctxes); if (0) { real gsyn0[vocab_size * layer1_size]; cudaMemcpy(gsyn0, dev_gsyn0, vocab_size * layer1_size * sizeof(real), cudaMemcpyDeviceToHost); printf("gsyn0: "); for(int i = 0; i < nsamples; ++i) { for(int j = 0; j < layer1_size; ++j) { printf("%4.2f ", gsyn0[focuses[i] * layer1_size + j]); } printf("\n"); } getchar(); } if (0) { real dbg_syn0[vocab_size * layer1_size]; cudaMemcpy(dbg_syn0, dev_syn0, vocab_size * layer1_size * sizeof(real), cudaMemcpyDeviceToHost); printf("(BEFORE)dbg_syn0: "); for(int i = 0; i < nsamples; ++i) { for(int j = 0; j < layer1_size; ++j) { printf("%4.2f ", dbg_syn0[focuses[i] * layer1_size + j]); } printf("\n"); } getchar(); } { assert(num_uniq_focuses < NSAMPLES_PER_KERNEL_LAUNCH); dim3 threadDims2(TX, TY); dim3 blockDims2(calcBlockSize(layer1_size, TX), calcBlockSize(num_uniq_focuses, TY)); if (0) { std::cout << "blockDims2: (" << blockDims2.x << ", " << blockDims2.y << ", " << blockDims2.z << ")\n"; std::cout << "threadDims2: (" << threadDims2.x << ", " << threadDims2.y << ", " << threadDims2.z << ")\n"; } // printf("launching backprophs kernel...\n"); backpropGradIndirect<<<blockDims2, threadDims2>>>(layer1_size, num_uniq_focuses, dev_syn0, dev_gsyn0, dev_uniq_focuses); // printf("ran backprophs kernel...\n"); if (0) { real dbg_syn0[vocab_size * layer1_size]; printf("(AFTER)dbg_syn0: "); cudaMemcpy(dbg_syn0, dev_syn0, vocab_size * layer1_size * sizeof(real), cudaMemcpyDeviceToHost); for(int i = 0; i < nsamples; ++i) { for(int j = 0; j < layer1_size; ++j) { printf("%4.2f ", dbg_syn0[focuses[i] * layer1_size + j]); } printf("\n"); } getchar(); } } // { // assert(num_uniq_ctxes <= NSAMPLES_PER_KERNEL_LAUNCH); // dim3 threadDims2(TX, TY); // dim3 blockDims2(calcBlockSize(layer1_size, TX), calcBlockSize(num_uniq_ctxes, TY)); // backpropGradIndirect<<<blockDims2, threadDims2>>>(layer1_size, num_uniq_ctxes, // dev_syn1neg, dev_gsyn1neg, dev_uniq_ctxes); // } } __global__ void NegSamplingGradSyn0(const int size, const int nsamples, const real *__restrict__ dots, const int *__restrict__ 
labels, real *gsyn0, const real * __restrict__ syn1neg, const real alpha, const unsigned long long *__restrict__ focuses, const unsigned long long *__restrict__ ctxes) { const unsigned long long x = blockIdx.x * blockDim.x + threadIdx.x; const unsigned long long y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size || y >= nsamples) { return; } // error const real err = labels[y] - sigmoidGPU(dots[y]); const real g = err * alpha * gradSigmoidGPU(dots[y]); // all threads that write into the same array index atomicAdd(&gsyn0[focuses[y] * size + x], g*syn1neg[ctxes[y]*size + x]); // atomicAdd(&syn1neg[ctxes[y] * size + x], g*syn0[focuses[y] * size + x]); } __global__ void NegSamplingGradSyn1Neg(const int size, const int nsamples, const real * __restrict__ dots, const int * __restrict__ labels, const real * __restrict__ syn0, __restrict__ real *syn1neg, const real alpha, const unsigned long long *__restrict__ focuses, const unsigned long long *__restrict__ ctxes) { const unsigned long long x = blockIdx.x * blockDim.x + threadIdx.x; const unsigned long long y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size || y >= nsamples) { return; } // error const real err = labels[y] - sigmoidGPU(dots[y]); const real g = err * alpha * gradSigmoidGPU(dots[y]); // all threads that write into the same array index // atomicAdd(&gsyn0[focuses[y] * size + x], g*syn1neg[ctxes[y]*size + x]); atomicAdd(&syn1neg[ctxes[y] * size + x], g*syn0[focuses[y] * size + x]); } void runNegSamplingKernel(int nsamples, int *labels, unsigned long long *focuses, unsigned long long *ctxes, int num_uniq_focuses, int num_uniq_ctxes, unsigned long long *uniq_focuses, unsigned long long *uniq_ctxes) { cudaMemcpy(dev_focuses, focuses, nsamples * sizeof(unsigned long long), cudaMemcpyHostToDevice); cudaMemcpy(dev_ctxes, ctxes, nsamples * sizeof(unsigned long long), cudaMemcpyHostToDevice); cudaMemcpy(dev_labels, labels, nsamples * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_uniq_focuses, uniq_focuses, num_uniq_focuses * sizeof(unsigned long long), cudaMemcpyHostToDevice); cudaMemcpy(dev_uniq_ctxes, uniq_ctxes, num_uniq_ctxes * sizeof(unsigned long long), cudaMemcpyHostToDevice); dim3 threadDims3(TX, TY); dim3 blockDims3(calcBlockSize(layer1_size, TX), calcBlockSize(nsamples, TY)); zeroRealKernel<<<dim3(calcBlockSize(nsamples, 1024)), dim3(1024)>>>(nsamples, dev_dots); dotsKernel<<<blockDims3, threadDims3>>>(layer1_size, nsamples, dev_syn0, dev_syn1neg, dev_dots, dev_dots_scratch, dev_focuses, dev_ctxes); NegSamplingGradSyn0<<<blockDims3, threadDims3>>>(layer1_size, nsamples, dev_dots, dev_labels, dev_gsyn0, dev_syn1neg, alpha, dev_focuses, dev_ctxes); NegSamplingGradSyn1Neg<<<blockDims3, threadDims3>>>(layer1_size, nsamples, dev_dots, dev_labels, dev_syn0, dev_syn1neg, alpha, dev_focuses, dev_ctxes); { assert(num_uniq_focuses < NSAMPLES_PER_KERNEL_LAUNCH); dim3 threadDims2(TX, TY); dim3 blockDims2(calcBlockSize(layer1_size, TX), calcBlockSize(num_uniq_focuses, TY)); backpropGradIndirect<<<blockDims2, threadDims2>>>(layer1_size, num_uniq_focuses, dev_syn0, dev_gsyn0, dev_uniq_focuses); } /* // normalize syn0 { assert(num_uniq_focuses <= nsamples); dim3 threadDims2(TX, TY); dim3 blockDims2(calcBlockSize(layer1_size, TX), calcBlockSize(num_uniq_focuses, TY)); // lensq of syn0 zeroRealKernel<<<dim3(calcBlockSize(num_uniq_focuses, 1024)), dim3(1024)>>>( num_uniq_focuses, dev_dots); lensqKernel<<<blockDims2, threadDims2>>>(layer1_size, num_uniq_focuses, dev_syn0, dev_dots, dev_dots_scratch, dev_uniq_focuses); 
normalizeVecKernel<<<blockDims2, threadDims2>>>(layer1_size, num_uniq_focuses, dev_syn0, dev_dots, dev_uniq_focuses); if (1) { real dots[num_uniq_focuses]; cudaMemcpy(dots, dev_dots, num_uniq_focuses * sizeof(real), cudaMemcpyDeviceToHost); for(int i = 0; i < num_uniq_focuses; ++i) { real dbg_syn0[layer1_size]; printf("i: %d | nsamples: %lld | uniq_focuses[i]: %lld | vocab_size: %lld\n", i, nsamples, uniq_focuses[i], vocab_size); fflush(stdout); cudaMemcpy(&dbg_syn0, dev_syn0 + uniq_focuses[i] * layer1_size, layer1_size * sizeof(real), cudaMemcpyDeviceToHost); float curlen = 0; printf("syn0[%d]: ", i); for(int j = 0; j < layer1_size; ++j) { curlen += dbg_syn0[j] * dbg_syn0[j]; printf("%4.2f ", dbg_syn0[j]); } printf(" | curlensq: %4.4f", curlen); printf(" | dots[i]: %4.4f ", dots[i]); printf("\n"); } } // normalize syn0 using lensq // normalizeVecKernel<<<blockDims3, threadDims3>>>(layer1_size, nsamples, // dev_syn0, dev_dots); } { zeroRealKernel<<<dim3(calcBlockSize(num_uniq_focuses, 1024)), dim3(1024)>>>( num_uniq_focuses, dev_dots); lensqKernel<<<blockDims2, threadDims2>>>(layer1_size, num_uniq_ctxes, dev_syn1neg, dev_dots, dev_dots_scratch, dev_uniq_ctxes); normalizeVecKernel<<<blockDims2, threadDims2>>>(layer1_size, num_uniq_ctxes, dev_syn0, dev_dots, dev_uniq_ctxes); } */ } void TrainModelThread(void *id) { long long a, b, d, word, last_word, sentence_length = 0, sentence_position = 0; long long word_count = 0, last_word_count = 0, sen[MAX_SENTENCE_LENGTH + 1]; // long long l1, l2; long long c, target, label, local_iter = iter; unsigned long long next_random = (long long)id; char eof = 0; clock_t now; real total_loss = 0; // real *neu1 = (real *)calloc(layer1_size, sizeof(real)); // real *neu1e = (real *)calloc(layer1_size, sizeof(real)); // buffer to store gradient of syn0 in one round // real *gsyn0 = (real *)calloc(layer1_size, sizeof(real)); // buffer to accumulate gradient of syn0 real *gsyn0_accum = (real *)calloc(layer1_size, sizeof(real)); // buffer to store gradient of syn1neg real *gsyn1neg = (real *)calloc(layer1_size, sizeof(real)); // buffer to store gradient of syn0 in a train step. 
real *gsyn0 = (real *)calloc(layer1_size, sizeof(real)); Vec neu1e; neu1e.alloczero(layer1_size); int ix = 0; int labels[NSAMPLES_PER_KERNEL_LAUNCH]; char codes[NSAMPLES_PER_KERNEL_LAUNCH]; unsigned long long ctxes[NSAMPLES_PER_KERNEL_LAUNCH], focuses[NSAMPLES_PER_KERNEL_LAUNCH], uniq_focuses[NSAMPLES_PER_KERNEL_LAUNCH], uniq_ctxes[NSAMPLES_PER_KERNEL_LAUNCH]; int n_uniq_focuses = 0; int n_uniq_ctxes = 0; bool focus_seen[vocab_size], ctx_seen[vocab_size]; for(int i = 0; i < vocab_size; ++i) focus_seen[i] = false; for(int i = 0; i < vocab_size; ++i) ctx_seen[i] = false; FILE *fi = fopen(train_file, "rb"); fseek(fi, file_size / (long long)num_threads * (long long)id, SEEK_SET); while (1) { if (word_count - last_word_count > 1) { word_count_actual += word_count - last_word_count; last_word_count = word_count; if ((debug_mode > 1)) { now = clock(); printf( "%cAlpha: %f Progress: %.2f%% Words/thread/sec: " "%.2fk Total loss: %4.2f", 13, alpha, word_count_actual / (real)(iter * train_words + 1) * 100, word_count_actual / ((real)(now - start + 1) / (real)CLOCKS_PER_SEC * 1000), total_loss); fflush(stdout); total_loss = 0; } // alpha = starting_alpha * // (1 - word_count_actual / (real)(iter * train_words + 1)); // if (alpha < starting_alpha * 0.0001) // alpha = starting_alpha * 0.0001; } if (sentence_length == 0) { while (1) { word = ReadWordIndex(fi, &eof); if (eof) break; if (word == -1) continue; word_count++; if (word == 0) break; // The subsampling randomly discards frequent words while // keeping the ranking same if (sample > 0) { real ran = (sqrt(vocab[word].cn / (sample * train_words)) + 1) * (sample * train_words) / vocab[word].cn; next_random = next_random * (unsigned long long)25214903917 + 11; if (ran < (next_random & 0xFFFF) / (real)65536) continue; } sen[sentence_length] = word; sentence_length++; if (sentence_length >= MAX_SENTENCE_LENGTH) break; } sentence_position = 0; } if (eof || (word_count > train_words / num_threads)) { eof = 0; word_count_actual += word_count - last_word_count; local_iter--; if (local_iter == 0) break; word_count = 0; last_word_count = 0; sentence_length = 0; fseek(fi, file_size / (long long)num_threads * (long long)id, SEEK_SET); // alpha = starting_alpha * // (1 - word_count_actual / (real)(iter * train_words + 1)); // if (alpha < starting_alpha * 0.0001) // alpha = starting_alpha * 0.0001; continue; } word = sen[sentence_position]; if (word == -1) continue; // for (c = 0; c < layer1_size; c++) neu1[c] = 0; neu1e.fillzero(); // for (c = 0; c < layer1_size; c++) neu1e[c] = 0; next_random = next_random * (unsigned long long)25214903917 + 11; b = next_random % window; /* if (cbow) { // train the cbow architecture // in -> hidden cw = 0; for (a = b; a < window * 2 + 1 - b; a++) if (a != window) { c = sentence_position - window + a; if (c < 0) continue; if (c >= sentence_length) continue; last_word = sen[c]; if (last_word == -1) continue; neu1e.accumadd(syn0.ix(last_word)); // for (c = 0; c < layer1_size; c++) // neu1[c] += syn0[c + last_word * layer1_size]; cw++; } if (cw) { for (c = 0; c < layer1_size; c++) neu1[c] /= cw; if (hs) for (d = 0; d < vocab[word].codelen; d++) { f = 0; l2 = vocab[word].point[d] * layer1_size; // Propagate hidden -> output f += neu1.dot(syn1); // for (c = 0; c < layer1_size; c++) // f += neu1[c] * syn1[c + l2]; if (f <= -MAX_EXP) continue; else if (f >= MAX_EXP) continue; else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]; // 'g' is the gradient multiplied by the learning rate g = (1 - vocab[word].code[d] - f) 
* alpha; // Propagate errors output -> hidden for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1[c + l2]; // Learn weights hidden -> output for (c = 0; c < layer1_size; c++) syn1[c + l2] += g * neu1[c]; } // NEGATIVE SAMPLING if (negative > 0) for (d = 0; d < negative + 1; d++) { if (d == 0) { target = word; label = 1; } else { next_random = next_random * (unsigned long long)25214903917 + 11; target = table[(next_random >> 16) % table_size]; if (target == 0) target = next_random % (vocab_size - 1) + 1; if (target == word) continue; label = 0; } l2 = target * layer1_size; f = 0; for (c = 0; c < layer1_size; c++) f += neu1[c] * syn1neg[c + l2]; if (f > MAX_EXP) g = (label - 1) * alpha; else if (f < -MAX_EXP) g = (label - 0) * alpha; else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha; for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1neg[c + l2]; for (c = 0; c < layer1_size; c++) syn1neg[c + l2] += g * neu1[c]; } // hidden -> in for (a = b; a < window * 2 + 1 - b; a++) if (a != window) { c = sentence_position - window + a; if (c < 0) continue; if (c >= sentence_length) continue; last_word = sen[c]; if (last_word == -1) continue; for (c = 0; c < layer1_size; c++) syn0[c + last_word * layer1_size] += neu1e[c]; } } } else { // train skip-gram */ for (a = b; a < window * 2 + 1 - b; a++) if (a != window) { c = sentence_position - window + a; if (c < 0) continue; if (c >= sentence_length) continue; last_word = sen[c]; if (last_word == -1) continue; // l1 = last_word * layer1_size; // Vec *syn0v = &syn0[last_word]; // neu1e.fillzero(); // for (c = 0; c < layer1_size; c++) neu1e[c] = 0; // HIERARCHICAL SOFTMAX if (hs) { for (d = 0; d < vocab[word].codelen; d++) { // float f = 0; // l2 = vocab[word].point[d] * layer1_size; const unsigned long long target = vocab[word].point[d]; focuses[ix] = last_word; ctxes[ix] = target; codes[ix] = vocab[word].code[d]; if (!focus_seen[last_word]) { uniq_focuses[n_uniq_focuses] = last_word; n_uniq_focuses++; focus_seen[last_word] = true; } if (!ctx_seen[target]) { uniq_ctxes[n_uniq_ctxes] = target; n_uniq_ctxes++; ctx_seen[target] = true; } if (ix == NSAMPLES_PER_KERNEL_LAUNCH - 1) { runHSKernel(NSAMPLES_PER_KERNEL_LAUNCH, focuses, ctxes, codes, n_uniq_focuses, uniq_focuses, n_uniq_ctxes, uniq_ctxes); ix = 0; for(int i = 0; i < n_uniq_ctxes; ++i) { ctx_seen[uniq_ctxes[i]] = false; } for(int i = 0; i < n_uniq_focuses; ++i) { focus_seen[uniq_focuses[i]] = false; } n_uniq_ctxes = 0; n_uniq_focuses = 0; } else { ix++; } /* // Propagate hidden -> output float f = 0; for (c = 0; c < layer1_size; c++) f += syn0[c + l1] * syn1[c + l2]; if (f <= -MAX_EXP) continue; else if (f >= MAX_EXP) continue; else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]; // 'g' is the gradient multiplied by the learning // rate float g = (1 - vocab[word].code[d] - f) * alpha; // Propagate errors output -> hidden for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1[c + l2]; // Learn weights hidden -> output for (c = 0; c < layer1_size; c++) syn1[c + l2] += g * syn0[c + l1]; */ } } // NEGATIVE SAMPLING if (!hs && negative > 0) { for (d = 0; d < negative + 1; d++) { if (d == 0) { target = word; label = 1; } else { next_random = next_random * (unsigned long long)25214903917 + 11; target = table[(next_random >> 16) % table_size]; if (target == 0) target = next_random % (vocab_size - 1) + 1; if (target == word) continue; label = 0; } labels[ix] = label; focuses[ix] = last_word; ctxes[ix] = target; if (!focus_seen[last_word]) { 
uniq_focuses[n_uniq_focuses] = last_word; n_uniq_focuses++; focus_seen[last_word] = true; } if (!ctx_seen[target]) { uniq_ctxes[n_uniq_ctxes] = target; n_uniq_ctxes++; ctx_seen[target] = true; } if (ix == NSAMPLES_PER_KERNEL_LAUNCH - 1) { runNegSamplingKernel(NSAMPLES_PER_KERNEL_LAUNCH, labels, focuses, ctxes, n_uniq_focuses, n_uniq_ctxes, uniq_focuses, uniq_ctxes); ix = 0; n_uniq_ctxes = 0; n_uniq_focuses = 0; for(int i = 0; i < vocab_size; ++i) { ctx_seen[i] = false; } for(int i = 0; i < vocab_size; ++i) { focus_seen[i] = false; } } else { ix++; } } // end for loop for negative sampling } // end condition around negative samples // Learn weights input -> hidden // for (c = 0; c < layer1_size; c++) syn0[c + l1] += neu1e[c]; } // end a != window sentence_position++; if (sentence_position >= sentence_length) { sentence_length = 0; continue; } } assert(ix < NSAMPLES_PER_KERNEL_LAUNCH); // consume leftover data. if (hs > 0) { runHSKernel(ix, focuses, ctxes, codes, n_uniq_focuses, uniq_focuses, n_uniq_ctxes, uniq_ctxes); } else if (negative > 0) { runNegSamplingKernel(ix, labels, focuses, ctxes, n_uniq_focuses, n_uniq_ctxes, uniq_focuses, uniq_ctxes); } fclose(fi); // free(neu1); neu1e.freemem(); // pthread_exit(NULL); } void TrainModel() { long a, b, c, d; FILE *fo; pthread_t *pt = (pthread_t *)malloc(num_threads * sizeof(pthread_t)); printf("Starting training using file %s\n", train_file); starting_alpha = alpha; if (read_vocab_file[0] != 0) ReadVocab(); else LearnVocabFromTrainFile(); if (save_vocab_file[0] != 0) SaveVocab(); if (output_file[0] == 0) return; InitNet(); if (negative > 0) InitUnigramTable(); start = clock(); if (iter > 0) { for(int i = 0; i < num_threads; ++i) { TrainModelThread((void *)i); // alpha = starting_alpha * // (1 - word_count_actual / (real)(iter * train_words + 1)); // if (alpha < starting_alpha * 0.0001) // alpha = starting_alpha * 0.0001; } // for (a = 0; a < num_threads; a++) // pthread_create(&pt[a], NULL, TrainModelThread, (void *)a); // for (a = 0; a < num_threads; a++) pthread_join(pt[a], NULL); } fo = fopen(output_file, "wb"); if (classes == 0) { real *syn0_out = (real*) malloc(sizeof(real) * layer1_size * vocab_size); // [vocab_size * layer1_size]; cudaMemcpy(syn0_out, dev_syn0, vocab_size * layer1_size * sizeof(real), cudaMemcpyDeviceToHost); fprintf(fo, "%lld %lld\n", vocab_size, layer1_size); // printf("%lld %lld\n", vocab_size, layer1_size); for (a = 0; a < vocab_size; a++) { fprintf(fo, "%s ", vocab[a].word); // printf("\n%s ", vocab[a].word); if (binary) { for (b = 0; b < layer1_size; b++) { fwrite(syn0_out + a *layer1_size + b, sizeof(real), 1, fo); // printf("%f ", *(syn0_out + a *layer1_size + b)); } } else { for (b = 0; b < layer1_size; b++) { fprintf(fo, "%lf ", syn0[a].v[b]); } } fprintf(fo, "\n"); } } else { // Run K-means on the word vectors int clcn = classes, iter = 10, closeid; int *centcn = (int *)malloc(classes * sizeof(int)); int *cl = (int *)calloc(vocab_size, sizeof(int)); real closev, x; real *cent = (real *)calloc(classes * layer1_size, sizeof(real)); for (a = 0; a < vocab_size; a++) cl[a] = a % clcn; for (a = 0; a < iter; a++) { for (b = 0; b < clcn * layer1_size; b++) cent[b] = 0; for (b = 0; b < clcn; b++) centcn[b] = 1; for (c = 0; c < vocab_size; c++) { for (d = 0; d < layer1_size; d++) cent[layer1_size * cl[c] + d] += syn0[c].v[d]; centcn[cl[c]]++; } for (b = 0; b < clcn; b++) { closev = 0; for (c = 0; c < layer1_size; c++) { cent[layer1_size * b + c] /= centcn[b]; closev += cent[layer1_size * b + c] * cent[layer1_size * b + 
c]; } closev = sqrt(closev); for (c = 0; c < layer1_size; c++) cent[layer1_size * b + c] /= closev; } for (c = 0; c < vocab_size; c++) { closev = -10; closeid = 0; for (d = 0; d < clcn; d++) { x = 0; for (b = 0; b < layer1_size; b++) x += cent[layer1_size * d + b] * syn0[c].v[b]; if (x > closev) { closev = x; closeid = d; } } cl[c] = closeid; } } // Save the K-means classes for (a = 0; a < vocab_size; a++) fprintf(fo, "%s %d\n", vocab[a].word, cl[a]); free(centcn); free(cent); free(cl); } fclose(fo); } int ArgPos(char *str, int argc, char **argv) { int a; for (a = 1; a < argc; a++) if (!strcmp(str, argv[a])) { if (a == argc - 1) { printf("Argument missing for %s\n", str); exit(1); } return a; } return -1; } int mainw2v(int argc, char **argv) { int i; if (argc == 1) { printf("WORD VECTOR estimation toolkit v 0.1c\n\n"); printf("Options:\n"); printf("Parameters for training:\n"); printf("\t-train <file>\n"); printf("\t\tUse text data from <file> to train the model\n"); printf("\t-output <file>\n"); printf( "\t\tUse <file> to save the resulting word vectors / word " "clusters\n"); printf("\t-size <int>\n"); printf("\t\tSet size of word vectors; default is 100\n"); printf("\t-window <int>\n"); printf("\t\tSet max skip length between words; default is 5\n"); printf("\t-sample <float>\n"); printf( "\t\tSet threshold for occurrence of words. Those that appear " "with " "higher frequency in the training data\n"); printf( "\t\twill be randomly down-sampled; default is 1e-3, useful " "range " "is (0, 1e-5)\n"); printf("\t-hs <int>\n"); printf("\t\tUse Hierarchical Softmax; default is 0 (not used)\n"); printf("\t-negative <int>\n"); printf( "\t\tNumber of negative examples; default is 5, common values " "are " "3 - 10 (0 = not used)\n"); printf("\t-threads <int>\n"); printf("\t\tUse <int> threads (default 12)\n"); printf("\t-iter <int>\n"); printf("\t\tRun more training iterations (default 5)\n"); printf("\t-min-count <int>\n"); printf( "\t\tThis will discard words that appear less than <int> " "times; " "default is 5\n"); printf("\t-alpha <float>\n"); printf( "\t\tSet the starting learning rate; default is 0.025 for " "skip-gram and 0.05 for CBOW\n"); printf("\t-classes <int>\n"); printf( "\t\tOutput word classes rather than word vectors; default " "number " "of classes is 0 (vectors are written)\n"); printf("\t-debug <int>\n"); printf( "\t\tSet the debug mode (default = 2 = more info during " "training)\n"); printf("\t-binary <int>\n"); printf( "\t\tSave the resulting vectors in binary moded; default is 0 " "(off)\n"); printf("\t-save-vocab <file>\n"); printf("\t\tThe vocabulary will be saved to <file>\n"); printf("\t-read-vocab <file>\n"); printf( "\t\tThe vocabulary will be read from <file>, not constructed " "from " "the training data\n"); printf("\t-cbow <int>\n"); printf( "\t\tUse the continuous bag of words model; default is 1 (use " "0 " "for skip-gram model)\n"); printf("\nExamples:\n"); printf( "./word2vec -train data.txt -output vec.txt -size 200 -window " "5 " "-sample 1e-4 -negative 5 -hs 0 -binary 0 -cbow 1 -iter 3\n\n"); return 0; } output_file[0] = 0; save_vocab_file[0] = 0; read_vocab_file[0] = 0; if ((i = ArgPos((char *)"-size", argc, argv)) > 0) layer1_size = atoi(argv[i + 1]); fprintf(stdout, "size: %lld\n", layer1_size); if ((i = ArgPos((char *)"-train", argc, argv)) > 0) strcpy(train_file, argv[i + 1]); if ((i = ArgPos((char *)"-save-vocab", argc, argv)) > 0) strcpy(save_vocab_file, argv[i + 1]); if ((i = ArgPos((char *)"-read-vocab", argc, argv)) > 0) strcpy(read_vocab_file, 
argv[i + 1]); if ((i = ArgPos((char *)"-debug", argc, argv)) > 0) debug_mode = atoi(argv[i + 1]); if ((i = ArgPos((char *)"-binary", argc, argv)) > 0) binary = atoi(argv[i + 1]); if ((i = ArgPos((char *)"-cbow", argc, argv)) > 0) cbow = atoi(argv[i + 1]); fprintf(stdout, "cbow: %d\n", cbow); if (cbow) alpha = 0.05; if ((i = ArgPos((char *)"-alpha", argc, argv)) > 0) alpha = atof(argv[i + 1]); fprintf(stdout, "alpha: %f\n", alpha); if ((i = ArgPos((char *)"-output", argc, argv)) > 0) strcpy(output_file, argv[i + 1]); if ((i = ArgPos((char *)"-window", argc, argv)) > 0) window = atoi(argv[i + 1]); fprintf(stdout, "window: %d\n", window); if ((i = ArgPos((char *)"-sample", argc, argv)) > 0) sample = atof(argv[i + 1]); if ((i = ArgPos((char *)"-hs", argc, argv)) > 0) hs = atoi(argv[i + 1]); fprintf(stdout, "hs: %d\n", hs); if ((i = ArgPos((char *)"-negative", argc, argv)) > 0) negative = atoi(argv[i + 1]); fprintf(stdout, "negative: %d\n", negative); if ((i = ArgPos((char *)"-threads", argc, argv)) > 0) num_threads = atoi(argv[i + 1]); if ((i = ArgPos((char *)"-iter", argc, argv)) > 0) iter = atoi(argv[i + 1]); if ((i = ArgPos((char *)"-min-count", argc, argv)) > 0) min_count = atoi(argv[i + 1]); if ((i = ArgPos((char *)"-classes", argc, argv)) > 0) classes = atoi(argv[i + 1]); vocab = (struct vocab_word *)calloc(vocab_max_size, sizeof(struct vocab_word)); vocab_hash = (int *)calloc(vocab_hash_size, sizeof(int)); expTable = (real *)malloc((EXP_TABLE_SIZE + 1) * sizeof(real)); for (i = 0; i < EXP_TABLE_SIZE; i++) { expTable[i] = exp((i / (real)EXP_TABLE_SIZE * 2 - 1) * MAX_EXP); // Precompute the exp() table expTable[i] = expTable[i] / (expTable[i] + 1); // Precompute f(x) = x / (x + 1) } TrainModel(); return 0; } int main(int argc, char *argv[]) { mainw2v(argc, argv); return 0; }
33edcade959f6132208aa261151d26d52c3f1d0b.hip
// !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/CUDAGenerator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/DistributionTemplates.h>

#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand_kernel.h>
#include <utility>
#include <functional>

#include <ATen/native/Distributions.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <THH/THHGeneral.h>
#include <THH/THHApply.cuh>
#include <THH/THHDeviceUtils.cuh>

#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>

namespace at { namespace native {

void normal_kernel(Tensor& self, double mean, double std, Generator gen) {
  auto generator = get_generator_or_default<CUDAGenerator>(gen, cuda::detail::getDefaultCUDAGenerator());
  at::native::templates::cuda::normal_kernel(self, mean, std, generator);
}

REGISTER_DISPATCH(normal_stub, &normal_kernel);

}} // namespace at::native
33edcade959f6132208aa261151d26d52c3f1d0b.cu
#include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/CUDAGenerator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/DistributionTemplates.h>

#include <curand.h>
#include <curand_kernel.h>
#include <curand_philox4x32_x.h>
#include <utility>
#include <functional>

#include <ATen/native/Distributions.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <THC/THCGeneral.h>
#include <THC/THCApply.cuh>
#include <THC/THCDeviceUtils.cuh>

#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>

namespace at { namespace native {

void normal_kernel(Tensor& self, double mean, double std, Generator gen) {
  auto generator = get_generator_or_default<CUDAGenerator>(gen, cuda::detail::getDefaultCUDAGenerator());
  at::native::templates::cuda::normal_kernel(self, mean, std, generator);
}

REGISTER_DISPATCH(normal_stub, &normal_kernel);

}} // namespace at::native
0592f42d2293e1037a1fca7d23603db790e5bbf2.hip
// !!! This is a file automatically generated by hipify!!!
/**
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <faiss/IndexBinaryFlat.h>
#include <faiss/utils/random.h>
#include <faiss/gpu/GpuIndexBinaryFlat.h>
#include <faiss/gpu/StandardGpuResources.h>
#include <faiss/gpu/test/TestUtils.h>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/HostTensor.cuh>
#include <faiss/gpu/utils/Timer.h>
#include <gflags/gflags.h>
#include <map>
#include <memory>
#include <vector>

#include <hip/hip_runtime_api.h>

DEFINE_int32(k, 3, "final number of closest results returned");
DEFINE_int32(num, 128, "# of vecs");
DEFINE_int32(dim, 128, "# of dimensions");
DEFINE_int32(num_queries, 3, "number of query vectors");
DEFINE_int64(seed, -1, "specify random seed");
DEFINE_int64(pinned_mem, 0, "pinned memory allocation to use");
DEFINE_bool(cpu, true, "run the CPU code for timing and comparison");
DEFINE_bool(use_unified_mem, false, "use Pascal unified memory for the index");

using namespace faiss::gpu;

int main(int argc, char** argv) {
  gflags::ParseCommandLineFlags(&argc, &argv, true);

  hipProfilerStop();

  auto seed = FLAGS_seed != -1L ? FLAGS_seed : time(nullptr);
  printf("using seed %" PRId64 "\n", seed);

  auto numQueries = FLAGS_num_queries;

  auto index = std::unique_ptr<faiss::IndexBinaryFlat>(
      new faiss::IndexBinaryFlat(FLAGS_dim));

  HostTensor<unsigned char, 2, true> vecs({FLAGS_num, FLAGS_dim / 8});
  faiss::byte_rand(vecs.data(), vecs.numElements(), seed);

  index->add(FLAGS_num, vecs.data());

  printf("Database: dim %d num vecs %d\n", FLAGS_dim, FLAGS_num);
  printf("Hamming lookup: %d queries, total k %d\n", numQueries, FLAGS_k);

  // Convert to GPU index
  printf("Copying index to GPU...\n");

  GpuIndexBinaryFlatConfig config;
  config.memorySpace = FLAGS_use_unified_mem ?
      MemorySpace::Unified : MemorySpace::Device;

  faiss::gpu::StandardGpuResources res;
  faiss::gpu::GpuIndexBinaryFlat gpuIndex(&res, index.get(), config);
  printf("copy done\n");

  // Build query vectors
  HostTensor<unsigned char, 2, true> cpuQuery({numQueries, FLAGS_dim / 8});
  faiss::byte_rand(cpuQuery.data(), cpuQuery.numElements(), seed);

  // Time faiss CPU
  HostTensor<int, 2, true> cpuDistances({numQueries, FLAGS_k});
  HostTensor<faiss::IndexBinary::idx_t, 2, true> cpuIndices({numQueries, FLAGS_k});

  if (FLAGS_cpu) {
    float cpuTime = 0.0f;

    CpuTimer timer;
    index->search(numQueries,
                  cpuQuery.data(),
                  FLAGS_k,
                  cpuDistances.data(),
                  cpuIndices.data());

    cpuTime = timer.elapsedMilliseconds();
    printf("CPU time %.3f ms\n", cpuTime);
  }

  HostTensor<int, 2, true> gpuDistances({numQueries, FLAGS_k});
  HostTensor<faiss::Index::idx_t, 2, true> gpuIndices({numQueries, FLAGS_k});

  CUDA_VERIFY(hipProfilerStart());
  faiss::gpu::synchronizeAllDevices();

  float gpuTime = 0.0f;

  // Time GPU
  {
    CpuTimer timer;

    gpuIndex.search(cpuQuery.getSize(0),
                    cpuQuery.data(),
                    FLAGS_k,
                    gpuDistances.data(),
                    gpuIndices.data());

    // There is a device -> host copy above, so no need to time
    // additional synchronization with the GPU
    gpuTime = timer.elapsedMilliseconds();
  }

  CUDA_VERIFY(hipProfilerStop());
  printf("GPU time %.3f ms\n", gpuTime);

  CUDA_VERIFY(hipDeviceSynchronize());

  return 0;
}
0592f42d2293e1037a1fca7d23603db790e5bbf2.cu
/** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include <faiss/IndexBinaryFlat.h> #include <faiss/utils/random.h> #include <faiss/gpu/GpuIndexBinaryFlat.h> #include <faiss/gpu/StandardGpuResources.h> #include <faiss/gpu/test/TestUtils.h> #include <faiss/gpu/utils/DeviceTensor.cuh> #include <faiss/gpu/utils/DeviceUtils.h> #include <faiss/gpu/utils/HostTensor.cuh> #include <faiss/gpu/utils/Timer.h> #include <gflags/gflags.h> #include <map> #include <memory> #include <vector> #include <cuda_profiler_api.h> DEFINE_int32(k, 3, "final number of closest results returned"); DEFINE_int32(num, 128, "# of vecs"); DEFINE_int32(dim, 128, "# of dimensions"); DEFINE_int32(num_queries, 3, "number of query vectors"); DEFINE_int64(seed, -1, "specify random seed"); DEFINE_int64(pinned_mem, 0, "pinned memory allocation to use"); DEFINE_bool(cpu, true, "run the CPU code for timing and comparison"); DEFINE_bool(use_unified_mem, false, "use Pascal unified memory for the index"); using namespace faiss::gpu; int main(int argc, char** argv) { gflags::ParseCommandLineFlags(&argc, &argv, true); cudaProfilerStop(); auto seed = FLAGS_seed != -1L ? FLAGS_seed : time(nullptr); printf("using seed %" PRId64 "\n", seed); auto numQueries = FLAGS_num_queries; auto index = std::unique_ptr<faiss::IndexBinaryFlat>( new faiss::IndexBinaryFlat(FLAGS_dim)); HostTensor<unsigned char, 2, true> vecs({FLAGS_num, FLAGS_dim / 8}); faiss::byte_rand(vecs.data(), vecs.numElements(), seed); index->add(FLAGS_num, vecs.data()); printf("Database: dim %d num vecs %d\n", FLAGS_dim, FLAGS_num); printf("Hamming lookup: %d queries, total k %d\n", numQueries, FLAGS_k); // Convert to GPU index printf("Copying index to GPU...\n"); GpuIndexBinaryFlatConfig config; config.memorySpace = FLAGS_use_unified_mem ? MemorySpace::Unified : MemorySpace::Device; faiss::gpu::StandardGpuResources res; faiss::gpu::GpuIndexBinaryFlat gpuIndex(&res, index.get(), config); printf("copy done\n"); // Build query vectors HostTensor<unsigned char, 2, true> cpuQuery({numQueries, FLAGS_dim / 8}); faiss::byte_rand(cpuQuery.data(), cpuQuery.numElements(), seed); // Time faiss CPU HostTensor<int, 2, true> cpuDistances({numQueries, FLAGS_k}); HostTensor<faiss::IndexBinary::idx_t, 2, true> cpuIndices({numQueries, FLAGS_k}); if (FLAGS_cpu) { float cpuTime = 0.0f; CpuTimer timer; index->search(numQueries, cpuQuery.data(), FLAGS_k, cpuDistances.data(), cpuIndices.data()); cpuTime = timer.elapsedMilliseconds(); printf("CPU time %.3f ms\n", cpuTime); } HostTensor<int, 2, true> gpuDistances({numQueries, FLAGS_k}); HostTensor<faiss::Index::idx_t, 2, true> gpuIndices({numQueries, FLAGS_k}); CUDA_VERIFY(cudaProfilerStart()); faiss::gpu::synchronizeAllDevices(); float gpuTime = 0.0f; // Time GPU { CpuTimer timer; gpuIndex.search(cpuQuery.getSize(0), cpuQuery.data(), FLAGS_k, gpuDistances.data(), gpuIndices.data()); // There is a device -> host copy above, so no need to time // additional synchronization with the GPU gpuTime = timer.elapsedMilliseconds(); } CUDA_VERIFY(cudaProfilerStop()); printf("GPU time %.3f ms\n", gpuTime); CUDA_VERIFY(cudaDeviceSynchronize()); return 0; }
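
// A minimal sketch of the core operation benchmarked above (not faiss's
// kernel): brute-force Hamming distance between packed binary queries and a
// packed binary database via XOR + popcount; a flat binary index does this
// work and then selects the top-k per query. All names here are illustrative.
#include <cuda_runtime.h>
#include <cstdint>
#include <cstdio>

__global__ void hammingAll(const uint8_t* db, int numDb,
                           const uint8_t* queries, int numQ,
                           int bytesPerVec, int* distances /* numQ x numDb */) {
  int q = blockIdx.y;
  int d = blockIdx.x * blockDim.x + threadIdx.x;
  if (q < numQ && d < numDb) {
    int dist = 0;
    for (int b = 0; b < bytesPerVec; ++b) {
      // XOR marks differing bits; __popc counts them.
      dist += __popc((unsigned int)(db[d * bytesPerVec + b] ^
                                    queries[q * bytesPerVec + b]));
    }
    distances[q * numDb + d] = dist;
  }
}

int main() {
  const int numDb = 1024, numQ = 4, dim = 128, bytesPerVec = dim / 8;
  uint8_t *d_db = nullptr, *d_q = nullptr;
  int* d_dist = nullptr;
  cudaMalloc(&d_db, numDb * bytesPerVec);
  cudaMalloc(&d_q, numQ * bytesPerVec);
  cudaMalloc(&d_dist, numQ * numDb * sizeof(int));
  cudaMemset(d_db, 0xAB, numDb * bytesPerVec);  // arbitrary bit patterns
  cudaMemset(d_q, 0xCD, numQ * bytesPerVec);
  dim3 grid((numDb + 255) / 256, numQ);
  hammingAll<<<grid, 256>>>(d_db, numDb, d_q, numQ, bytesPerVec, d_dist);
  cudaDeviceSynchronize();
  int h0 = 0;
  cudaMemcpy(&h0, d_dist, sizeof(int), cudaMemcpyDeviceToHost);
  printf("distance(q0, db0) = %d\n", h0);
  cudaFree(d_db);
  cudaFree(d_q);
  cudaFree(d_dist);
  return 0;
}
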
d2733def081e0a58a6faba2e36170245c2918de8.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

/*
 * Refactor `loop` to be a CUDA Kernel. The new kernel should
 * only do the work of 1 iteration of the original loop.
 */

__global__ void loop()
{
  /*
  for (int i = 0; i < N; ++i)
  {
    printf("This is iteration number %d\n", i);
  }
  */
  printf("This is iteration number %d\n", threadIdx.x);
}

int main()
{
  /*
   * When refactoring `loop` to launch as a kernel, be sure
   * to use the execution configuration to control how many
   * "iterations" to perform.
   *
   * For this exercise, only use 1 block of threads.
   */

  hipLaunchKernelGGL(( loop), dim3(1), dim3(10), 0, 0, );
  hipDeviceSynchronize();
}
d2733def081e0a58a6faba2e36170245c2918de8.cu
#include <stdio.h>

/*
 * Refactor `loop` to be a CUDA Kernel. The new kernel should
 * only do the work of 1 iteration of the original loop.
 */

__global__ void loop()
{
  /*
  for (int i = 0; i < N; ++i)
  {
    printf("This is iteration number %d\n", i);
  }
  */
  printf("This is iteration number %d\n", threadIdx.x);
}

int main()
{
  /*
   * When refactoring `loop` to launch as a kernel, be sure
   * to use the execution configuration to control how many
   * "iterations" to perform.
   *
   * For this exercise, only use 1 block of threads.
   */

  loop<<<1, 10>>>();
  cudaDeviceSynchronize();
}
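
// A hand-written HIP equivalent of the pair above (a sketch, not hipify
// output): the kernel body is unchanged between CUDA and HIP; only the launch
// and synchronization calls change spelling. hipcc also accepts the CUDA
// triple-chevron launch syntax directly.
#include <hip/hip_runtime.h>
#include <stdio.h>

__global__ void loop()
{
  printf("This is iteration number %d\n", threadIdx.x);
}

int main()
{
  // grid = 1 block, block = 10 threads, 0 bytes of dynamic shared memory,
  // default (null) stream, then the kernel arguments (none here).
  hipLaunchKernelGGL(loop, dim3(1), dim3(10), 0, 0);
  hipDeviceSynchronize();
}
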
9238f5e8b2201a5aca213edc99a530787af8ade6.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <hip/hip_runtime.h> #include <cmath> #include <chrono> #include <thrust/execution_policy.h> #include <thrust/random.h> #include <thrust/remove.h> #include <thrust/device_ptr.h> #include <thrust/sort.h> #include "sceneStructs.h" #include "scene.h" #include "glm/glm.hpp" #include "glm/gtx/norm.hpp" #include "utilities.h" #include "pathtrace.h" #include "intersections.h" #include "interactions.h" #define ERRORCHECK 1 #define INSTRUMENT 0 #define STREAM_COMPACTION 1 #define MATERIAL_SORT 0 #define FIRST_BOUNCE_CACHE 0 #define ANTI_ALIASING 1 #define DEPTH_OF_FIELD 1 #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) void checkCUDAErrorFn(const char* msg, const char* file, int line) { #if ERRORCHECK hipDeviceSynchronize(); hipError_t err = hipGetLastError(); if (hipSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err)); # ifdef _WIN32 getchar(); # endif exit(EXIT_FAILURE); #endif } __host__ __device__ thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) { int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index); return thrust::default_random_engine(h); } //Kernel that writes the image to the OpenGL PBO directly. __global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution, int iter, glm::vec3* image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < resolution.x && y < resolution.y) { int index = x + (y * resolution.x); glm::vec3 pix = image[index]; glm::ivec3 color; color.x = glm::clamp((int)(pix.x / iter * 255.0), 0, 255); color.y = glm::clamp((int)(pix.y / iter * 255.0), 0, 255); color.z = glm::clamp((int)(pix.z / iter * 255.0), 0, 255); // Each thread writes one pixel location in the texture (textel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } static Scene* hst_scene = NULL; static glm::vec3* dev_image = NULL; static Geom* dev_geoms = NULL; static Material* dev_materials = NULL; static PathSegment* dev_paths = NULL; static ShadeableIntersection* dev_intersections = NULL; // TODO: static variables for device memory, any extra info you need, etc #if FIRST_BOUNCE_CACHE static ShadeableIntersection* dev_firstBounceIntersections = NULL; #endif #if MESH_CULL static Mesh* dev_meshes = NULL; static Geom* dev_triangles = NULL; #endif void pathtraceInit(Scene* scene) { hst_scene = scene; const Camera& cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3)); hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3)); hipMalloc(&dev_paths, pixelcount * sizeof(PathSegment)); hipMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom)); hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice); hipMalloc(&dev_materials, scene->materials.size() * sizeof(Material)); hipMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice); hipMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection)); hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // TODO: initialize any extra device memeory you need #if FIRST_BOUNCE_CACHE 
hipMalloc(&dev_firstBounceIntersections, pixelcount * sizeof(ShadeableIntersection)); hipMemset(dev_firstBounceIntersections, 0, pixelcount * sizeof(ShadeableIntersection)); #endif #if MESH_CULL hipMalloc(&dev_meshes, scene->meshes.size() * sizeof(Mesh)); hipMemcpy(dev_meshes, scene->meshes.data(), scene->meshes.size() * sizeof(Mesh), hipMemcpyHostToDevice); hipMalloc(&dev_triangles, scene->triangles.size() * sizeof(Geom)); hipMemcpy(dev_triangles, scene->triangles.data(), scene->triangles.size() * sizeof(Geom), hipMemcpyHostToDevice); #endif checkCUDAError("pathtraceInit"); } void pathtraceFree() { hipFree(dev_image); // no-op if dev_image is null hipFree(dev_paths); hipFree(dev_geoms); hipFree(dev_materials); hipFree(dev_intersections); // TODO: clean up any extra device memory you created #if FIRST_BOUNCE_CACHE hipFree(dev_firstBounceIntersections); #endif #if MESH_CULL hipFree(dev_meshes); hipFree(dev_triangles); #endif checkCUDAError("pathtraceFree"); } /** * Maps a random point to a sample on a unit disk */ __host__ __device__ glm::vec3 concentricSampleDisk(glm::vec2 u) { // Map input to -1 to 1 range // glm::vec2 uOffset = 2.f * u - glm::vec2(1.f); glm::vec2 uOffset = u; // Handle degeneracy at origin if (uOffset.x == 0.f && uOffset.y == 0.f) { return glm::vec3(0.f); } // Apply concentric mapping to point float theta, r; if (glm::abs(uOffset.x) > glm::abs(uOffset.y)) { r = uOffset.x; theta = PI / 4.f * (uOffset.y / uOffset.x); } else { r = uOffset.y; theta = PI / 2.f - PI / 4.f * (uOffset.x / uOffset.y); } return r * glm::vec3(glm::cos(theta), glm::sin(theta), 0); } /** * Generate PathSegments with rays from the camera through the screen into the * scene, which is the first bounce of rays. * * Antialiasing - add rays for sub-pixel sampling * motion blur - jitter rays "in time" * lens effect - jitter ray origin positions based on a lens */ __global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < cam.resolution.x && y < cam.resolution.y) { int index = x + (y * cam.resolution.x); PathSegment& segment = pathSegments[index]; segment.ray.origin = cam.position; segment.color = glm::vec3(1.0f, 1.0f, 1.0f); float xOffset = ((float)x - (float)cam.resolution.x * 0.5f); float yOffset = ((float)y - (float)cam.resolution.y * 0.5f); thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, traceDepth); thrust::uniform_real_distribution<float> disp(-1.f, 1.f); #if ANTI_ALIASING xOffset += 0.5 * disp(rng); yOffset += 0.5 * disp(rng); #endif // DONE: implement antialiasing by jittering the ray segment.ray.direction = glm::normalize(cam.view - cam.right * cam.pixelLength.x * xOffset - cam.up * cam.pixelLength.y * yOffset ); #if DEPTH_OF_FIELD float lensRadius = cam.lensRadius; float focalDistance = cam.focalLength; float sample1 = disp(rng); float sample2 = disp(rng); glm::vec3 pLens = lensRadius * concentricSampleDisk(glm::vec2(sample1, sample2)); float ft = focalDistance / glm::dot(cam.view, segment.ray.direction); glm::vec3 pFocus = cam.position + ft * segment.ray.direction; segment.ray.origin += cam.right * pLens.x + cam.up + pLens.y; segment.ray.direction = glm::normalize(pFocus - segment.ray.origin); #if ANTI_ALIASING glm::vec3 aaOffset = glm::vec3(0.001 * disp(rng), 0.001 * disp(rng), 0.f); segment.ray.direction = glm::normalize(segment.ray.direction + aaOffset); #endif #endif segment.pixelIndex = index; segment.remainingBounces 
= traceDepth; } } // TODO: // computeIntersections handles generating ray intersections ONLY. // Generating new rays is handled in your shader(s). // Feel free to modify the code below. __global__ void computeIntersections( int depth , int num_paths , PathSegment* pathSegments , Geom* geoms , int geoms_size , ShadeableIntersection* intersections #if MESH_CULL , Mesh* meshes , int meshes_size , Geom* triangles , int triangles_size #endif ) { int path_index = blockIdx.x * blockDim.x + threadIdx.x; if (path_index < num_paths) { PathSegment pathSegment = pathSegments[path_index]; float t; glm::vec3 intersect_point; glm::vec3 normal; float t_min = FLT_MAX; int hit_geom_index = -1; bool outside = true; bool triangleHit = false; int triangleHitMaterialId; glm::vec3 tmp_intersect; glm::vec3 tmp_normal; // naive parse through global geoms // If MESH_CULL is disabled, all mesh triangles will be in the geoms array for (int i = 0; i < geoms_size; i++) { Geom& geom = geoms[i]; if (geom.type == CUBE) { t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } else if (geom.type == SPHERE) { t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } #if !MESH_CULL else if (geom.type == TRIANGLE) { t = triangleIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } #endif // Compute the minimum t from the intersection tests to determine what // scene geometry object was hit first. if (t > 0.0f && t_min > t) { t_min = t; hit_geom_index = i; intersect_point = tmp_intersect; normal = tmp_normal; } } #if MESH_CULL // If MESH_CULL is enabled, mesh triangle checking will happen on a per mesh basis for (int i = 0; i < meshes_size; i++) { Mesh& mesh = meshes[i]; float b = boxIntersectionTest(mesh.boundingBox, pathSegment.ray, tmp_intersect, tmp_normal, outside); if (b > 0.0f) // Mesh bounding box was hit by ray, so check all triangles inside { for (int j = mesh.triangleDataStartIndex; j < mesh.triangleDataStartIndex + mesh.numTriangles; j++) { Geom& triangle = triangles[j]; t = triangleIntersectionTest(triangle, pathSegment.ray, tmp_intersect, tmp_normal, outside); if (t > 0.0f && t_min > t) { t_min = t; triangleHitMaterialId = triangle.materialid; intersect_point = tmp_intersect; normal = tmp_normal; triangleHit = true; } } } } #endif if (hit_geom_index == -1 && !triangleHit) { intersections[path_index].t = -1.0f; } #if MESH_CULL else if (triangleHit) { //The ray hits a triangle from a mesh intersections[path_index].t = t_min; intersections[path_index].materialId = triangleHitMaterialId; intersections[path_index].surfaceNormal = normal; } #endif else { //The ray hits something intersections[path_index].t = t_min; intersections[path_index].materialId = geoms[hit_geom_index].materialid; intersections[path_index].surfaceNormal = normal; } } } // LOOK: "fake" shader demonstrating what you might do with the info in // a ShadeableIntersection, as well as how to use thrust's random number // generator. Observe that since the thrust random number generator basically // adds "noise" to the iteration, the image should start off noisy and get // cleaner as more iterations are computed. // // Note that this shader does NOT do a BSDF evaluation! // Your shaders should handle that - this can allow techniques such as // bump mapping. 
__global__ void shadeFakeMaterial( int iter , int num_paths , ShadeableIntersection* shadeableIntersections , PathSegment* pathSegments , Material* materials ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_paths) { ShadeableIntersection intersection = shadeableIntersections[idx]; if (intersection.t > 0.0f) { // if the intersection exists... // Set up the RNG // LOOK: this is how you use thrust's RNG! Please look at // makeSeededRandomEngine as well. thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0); thrust::uniform_real_distribution<float> u01(0, 1); Material material = materials[intersection.materialId]; glm::vec3 materialColor = material.color; // If the material indicates that the object was a light, "light" the ray if (material.emittance > 0.0f) { pathSegments[idx].color *= (materialColor * material.emittance); } // Otherwise, do some pseudo-lighting computation. This is actually more // like what you would expect from shading in a rasterizer like OpenGL. // TODO: replace this! you should be able to start with basically a one-liner else { float lightTerm = glm::dot(intersection.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f)); pathSegments[idx].color *= (materialColor * lightTerm) * 0.3f + ((1.0f - intersection.t * 0.02f) * materialColor) * 0.7f; pathSegments[idx].color *= u01(rng); // apply some noise because why not } // If there was no intersection, color the ray black. // Lots of renderers use 4 channel color, RGBA, where A = alpha, often // used for opacity, in which case they can indicate "no opacity". // This can be useful for post-processing and image compositing. } else { pathSegments[idx].color = glm::vec3(0.0f); } } } /** * Implementation of shader that uses BSDF algorithm */ __global__ void shadeMaterialBSDF( int iter, int num_paths, ShadeableIntersection* shadeableIntersections, PathSegment* pathSegments, Material* materials, int depth ) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= num_paths) { return; } PathSegment* segment = &pathSegments[index]; if (segment->remainingBounces == 0) // // No light was hit, so pixel is black { return; } // Check for existence of intersection ShadeableIntersection intersection = shadeableIntersections[index]; if (intersection.t > 0.f) { // Set up the RNG thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, depth); thrust::uniform_real_distribution<float> u01(0, 1); Material material = materials[intersection.materialId]; // If the material indicates that the object was a light, "light" the ray if (material.emittance > 0.0f) // Light has been hit { pathSegments[index].color *= (material.color * material.emittance); segment->remainingBounces = 0; } else // Perform pseudo-lighting computation { glm::vec3 intersectionPoint = getPointOnRay(segment->ray, intersection.t); scatterRay(pathSegments[index], intersectionPoint, intersection.surfaceNormal, material, rng); segment->remainingBounces--; } } else { // Terminate the ray and set color to black segment->color = glm::vec3(0.f); segment->remainingBounces = 0; } } /** * Predicate for thrust::remove_if to remove rays that have reached the end of their life */ struct hasNoBounces { __host__ __device__ bool operator()(const PathSegment &p) { return glm::length(p.color) < 0.0001f; // return p.remainingBounces <= 0; // Testing whether remaining bounces exist did not work, so instead I Had to check the color being black. 
// This does result in rays being filtered, as seen by observing the number of remaining paths in the pathtrace function. } }; /** * Predicate for thrust::sort to sort intersections by material type */ struct sortByMaterialType { __host__ __device__ bool operator()(const ShadeableIntersection &l, const ShadeableIntersection &r) { return l.materialId < r.materialId; } }; // Add the current iteration's output to the overall image __global__ void finalGather(int nPaths, glm::vec3* image, PathSegment* iterationPaths) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < nPaths) { PathSegment iterationPath = iterationPaths[index]; image[iterationPath.pixelIndex] += iterationPath.color; } } /** * Wrapper for the __global__ call that sets up the kernel calls and does a ton * of memory management */ void pathtrace(uchar4* pbo, int frame, int iter) { const int traceDepth = hst_scene->state.traceDepth; const Camera& cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; // 2D block for generating ray from camera const dim3 blockSize2d(8, 8); const dim3 blocksPerGrid2d( (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x, (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y); // 1D block for path tracing const int blockSize1d = 128; /////////////////////////////////////////////////////////////////////////// // Recap: // * Initialize array of path rays (using rays that come out of the camera) // * You can pass the Camera object to that kernel. // * Each path ray must carry at minimum a (ray, color) pair, // * where color starts as the multiplicative identity, white = (1, 1, 1). // * This has already been done for you. // * For each depth: // * Compute an intersection in the scene for each path ray. // A very naive version of this has been implemented for you, but feel // free to add more primitives and/or a better algorithm. // Currently, intersection distance is recorded as a parametric distance, // t, or a "distance along the ray." t = -1.0 indicates no intersection. // * Color is attenuated (multiplied) by reflections off of any object // * TODO: Stream compact away all of the terminated paths. // You may use either your implementation or `thrust::remove_if` or its // cousins. // * Note that you can't really use a 2D kernel launch any more - switch // to 1D. // * TODO: Shade the rays that intersected something or didn't bottom out. // That is, color the ray by performing a color computation according // to the shader, then generate a new ray to continue the ray path. // We recommend just updating the ray's PathSegment in place. // Note that this step may come before or after stream compaction, // since some shaders you write may also cause a path to terminate. // * Finally, add this iteration's results to the image. This has been done // for you. 
// TODO: perform one iteration of path tracing hipLaunchKernelGGL(( generateRayFromCamera), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, cam, iter, traceDepth, dev_paths); checkCUDAError("generate camera ray"); int depth = 0; PathSegment* dev_path_end = dev_paths + pixelcount; int num_paths = dev_path_end - dev_paths; // --- PathSegment Tracing Stage --- // Shoot ray into scene, bounce between objects, push shading chunks #if INSTRUMENT auto startTime = std::chrono::high_resolution_clock::now(); double intersectTime = 0.0; double shadeTime = 0.0; #endif bool iterationComplete = false; while (!iterationComplete) { // clean shading chunks hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // tracing dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d; #if INSTRUMENT auto startIntersectTime = std::chrono::high_resolution_clock::now(); #endif #if FIRST_BOUNCE_CACHE if (depth == 0 && iter <= 1) { hipLaunchKernelGGL(( computeIntersections), dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0, depth , num_paths , dev_paths , dev_geoms , hst_scene->geoms.size() , dev_firstBounceIntersections #if MESH_CULL , dev_meshes , hst_scene->meshes.size() , dev_triangles , hst_scene->triangles.size() #endif ); checkCUDAError("trace first bounce"); hipDeviceSynchronize(); hipMemcpy(dev_intersections, dev_firstBounceIntersections, pixelcount * sizeof(ShadeableIntersection), hipMemcpyDeviceToDevice); } else if (depth == 0) { hipDeviceSynchronize(); hipMemcpy(dev_intersections, dev_firstBounceIntersections, pixelcount * sizeof(ShadeableIntersection), hipMemcpyDeviceToDevice); } else if (depth > 0) { hipLaunchKernelGGL(( computeIntersections), dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0, depth , num_paths , dev_paths , dev_geoms , hst_scene->geoms.size() , dev_intersections #if MESH_CULL , dev_meshes , hst_scene->meshes.size() , dev_triangles , hst_scene->triangles.size() #endif ); checkCUDAError("trace one bounce"); hipDeviceSynchronize(); } #else hipLaunchKernelGGL(( computeIntersections), dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0, depth , num_paths , dev_paths , dev_geoms , hst_scene->geoms.size() , dev_intersections #if MESH_CULL , dev_meshes , hst_scene->meshes.size() , dev_triangles , hst_scene->triangles.size() #endif ); checkCUDAError("trace one bounce"); hipDeviceSynchronize(); #endif #if INSTRUMENT auto stopIntersectTime = std::chrono::high_resolution_clock::now(); #endif depth++; // DONE: // --- Shading Stage --- // Shade path segments based on intersections and generate new rays by // evaluating the BSDF. // Start off with just a big kernel that handles all the different // materials you have in the scenefile. // TODO: compare between directly shading the path segments and shading // path segments that have been reshuffled to be contiguous in memory. 
#if MATERIAL_SORT thrust::sort_by_key(thrust::device, dev_intersections, dev_intersections + num_paths, dev_paths, sortByMaterialType()); #endif #if INSTRUMENT auto startShadeTime = std::chrono::high_resolution_clock::now(); #endif hipLaunchKernelGGL(( shadeMaterialBSDF), dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0, iter, num_paths, dev_intersections, dev_paths, dev_materials, depth ); #if INSTRUMENT auto stopShadeTime = std::chrono::high_resolution_clock::now(); #endif #if STREAM_COMPACTION // Remove terminated rays thrust::device_ptr<PathSegment> thrust_path_start = thrust::device_pointer_cast(dev_paths); thrust::device_ptr<PathSegment> thrust_path_end = thrust::device_pointer_cast(dev_path_end); thrust_path_end = thrust::remove_if(thrust::device, thrust_path_start, thrust_path_end, hasNoBounces()); // Determine how many paths are remaining dev_paths = thrust::raw_pointer_cast(thrust_path_start); dev_path_end = thrust::raw_pointer_cast(thrust_path_end); #endif num_paths = dev_path_end - dev_paths; iterationComplete = (depth >= traceDepth) || (num_paths == 0); #if INSTRUMENT intersectTime += std::chrono::duration_cast<std::chrono::microseconds>(stopIntersectTime - startIntersectTime).count(); shadeTime += std::chrono::duration_cast<std::chrono::microseconds>(stopShadeTime - startShadeTime).count(); #endif } #if INSTRUMENT auto stopTime = std::chrono::high_resolution_clock::now(); double totalTime = std::chrono::duration_cast<std::chrono::microseconds>(stopTime - startTime).count(); std::cout << totalTime << "," << intersectTime << "," << shadeTime << std::endl; #endif // Assemble this iteration and apply it to the image dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d; hipLaunchKernelGGL(( finalGather), dim3(numBlocksPixels), dim3(blockSize1d), 0, 0, num_paths, dev_image, dev_paths); /////////////////////////////////////////////////////////////////////////// // Send results to OpenGL buffer for rendering hipLaunchKernelGGL(( sendImageToPBO), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, pbo, cam.resolution, iter, dev_image); // Retrieve image from GPU hipMemcpy(hst_scene->state.image.data(), dev_image, pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost); checkCUDAError("pathtrace"); }
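
// An isolated sketch of the stream-compaction step used in the path tracer
// above: thrust::remove_if moves the surviving elements to the front and
// returns the new logical end, so the live count is a pointer difference.
// The Path struct and isDead predicate are simplified stand-ins, not the
// project's PathSegment/hasNoBounces.
#include <thrust/device_vector.h>
#include <thrust/remove.h>
#include <thrust/execution_policy.h>
#include <cstdio>

struct Path {
  int remainingBounces;
};

struct isDead {
  __host__ __device__ bool operator()(const Path& p) const {
    return p.remainingBounces <= 0;
  }
};

int main() {
  thrust::device_vector<Path> paths(8);
  for (int i = 0; i < 8; ++i) {
    Path p;
    p.remainingBounces = (i % 2 == 0) ? 0 : 3;  // even indices are terminated
    paths[i] = p;
  }
  // Same pattern as the path tracer: compact in place through raw pointers.
  Path* begin = thrust::raw_pointer_cast(paths.data());
  Path* end = begin + paths.size();
  Path* newEnd = thrust::remove_if(thrust::device, begin, end, isDead());
  printf("live paths: %d of %d\n", (int)(newEnd - begin), (int)paths.size());
  return 0;
}
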
9238f5e8b2201a5aca213edc99a530787af8ade6.cu
#include <cstdio> #include <cuda.h> #include <cmath> #include <chrono> #include <thrust/execution_policy.h> #include <thrust/random.h> #include <thrust/remove.h> #include <thrust/device_ptr.h> #include <thrust/sort.h> #include "sceneStructs.h" #include "scene.h" #include "glm/glm.hpp" #include "glm/gtx/norm.hpp" #include "utilities.h" #include "pathtrace.h" #include "intersections.h" #include "interactions.h" #define ERRORCHECK 1 #define INSTRUMENT 0 #define STREAM_COMPACTION 1 #define MATERIAL_SORT 0 #define FIRST_BOUNCE_CACHE 0 #define ANTI_ALIASING 1 #define DEPTH_OF_FIELD 1 #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) void checkCUDAErrorFn(const char* msg, const char* file, int line) { #if ERRORCHECK cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); if (cudaSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err)); # ifdef _WIN32 getchar(); # endif exit(EXIT_FAILURE); #endif } __host__ __device__ thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) { int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index); return thrust::default_random_engine(h); } //Kernel that writes the image to the OpenGL PBO directly. __global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution, int iter, glm::vec3* image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < resolution.x && y < resolution.y) { int index = x + (y * resolution.x); glm::vec3 pix = image[index]; glm::ivec3 color; color.x = glm::clamp((int)(pix.x / iter * 255.0), 0, 255); color.y = glm::clamp((int)(pix.y / iter * 255.0), 0, 255); color.z = glm::clamp((int)(pix.z / iter * 255.0), 0, 255); // Each thread writes one pixel location in the texture (textel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } static Scene* hst_scene = NULL; static glm::vec3* dev_image = NULL; static Geom* dev_geoms = NULL; static Material* dev_materials = NULL; static PathSegment* dev_paths = NULL; static ShadeableIntersection* dev_intersections = NULL; // TODO: static variables for device memory, any extra info you need, etc #if FIRST_BOUNCE_CACHE static ShadeableIntersection* dev_firstBounceIntersections = NULL; #endif #if MESH_CULL static Mesh* dev_meshes = NULL; static Geom* dev_triangles = NULL; #endif void pathtraceInit(Scene* scene) { hst_scene = scene; const Camera& cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3)); cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3)); cudaMalloc(&dev_paths, pixelcount * sizeof(PathSegment)); cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom)); cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice); cudaMalloc(&dev_materials, scene->materials.size() * sizeof(Material)); cudaMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice); cudaMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection)); cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // TODO: initialize any extra device memeory you need #if FIRST_BOUNCE_CACHE cudaMalloc(&dev_firstBounceIntersections, pixelcount * 
sizeof(ShadeableIntersection)); cudaMemset(dev_firstBounceIntersections, 0, pixelcount * sizeof(ShadeableIntersection)); #endif #if MESH_CULL cudaMalloc(&dev_meshes, scene->meshes.size() * sizeof(Mesh)); cudaMemcpy(dev_meshes, scene->meshes.data(), scene->meshes.size() * sizeof(Mesh), cudaMemcpyHostToDevice); cudaMalloc(&dev_triangles, scene->triangles.size() * sizeof(Geom)); cudaMemcpy(dev_triangles, scene->triangles.data(), scene->triangles.size() * sizeof(Geom), cudaMemcpyHostToDevice); #endif checkCUDAError("pathtraceInit"); } void pathtraceFree() { cudaFree(dev_image); // no-op if dev_image is null cudaFree(dev_paths); cudaFree(dev_geoms); cudaFree(dev_materials); cudaFree(dev_intersections); // TODO: clean up any extra device memory you created #if FIRST_BOUNCE_CACHE cudaFree(dev_firstBounceIntersections); #endif #if MESH_CULL cudaFree(dev_meshes); cudaFree(dev_triangles); #endif checkCUDAError("pathtraceFree"); } /** * Maps a random point to a sample on a unit disk */ __host__ __device__ glm::vec3 concentricSampleDisk(glm::vec2 u) { // Map input to -1 to 1 range // glm::vec2 uOffset = 2.f * u - glm::vec2(1.f); glm::vec2 uOffset = u; // Handle degeneracy at origin if (uOffset.x == 0.f && uOffset.y == 0.f) { return glm::vec3(0.f); } // Apply concentric mapping to point float theta, r; if (glm::abs(uOffset.x) > glm::abs(uOffset.y)) { r = uOffset.x; theta = PI / 4.f * (uOffset.y / uOffset.x); } else { r = uOffset.y; theta = PI / 2.f - PI / 4.f * (uOffset.x / uOffset.y); } return r * glm::vec3(glm::cos(theta), glm::sin(theta), 0); } /** * Generate PathSegments with rays from the camera through the screen into the * scene, which is the first bounce of rays. * * Antialiasing - add rays for sub-pixel sampling * motion blur - jitter rays "in time" * lens effect - jitter ray origin positions based on a lens */ __global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < cam.resolution.x && y < cam.resolution.y) { int index = x + (y * cam.resolution.x); PathSegment& segment = pathSegments[index]; segment.ray.origin = cam.position; segment.color = glm::vec3(1.0f, 1.0f, 1.0f); float xOffset = ((float)x - (float)cam.resolution.x * 0.5f); float yOffset = ((float)y - (float)cam.resolution.y * 0.5f); thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, traceDepth); thrust::uniform_real_distribution<float> disp(-1.f, 1.f); #if ANTI_ALIASING xOffset += 0.5 * disp(rng); yOffset += 0.5 * disp(rng); #endif // DONE: implement antialiasing by jittering the ray segment.ray.direction = glm::normalize(cam.view - cam.right * cam.pixelLength.x * xOffset - cam.up * cam.pixelLength.y * yOffset ); #if DEPTH_OF_FIELD float lensRadius = cam.lensRadius; float focalDistance = cam.focalLength; float sample1 = disp(rng); float sample2 = disp(rng); glm::vec3 pLens = lensRadius * concentricSampleDisk(glm::vec2(sample1, sample2)); float ft = focalDistance / glm::dot(cam.view, segment.ray.direction); glm::vec3 pFocus = cam.position + ft * segment.ray.direction; segment.ray.origin += cam.right * pLens.x + cam.up + pLens.y; segment.ray.direction = glm::normalize(pFocus - segment.ray.origin); #if ANTI_ALIASING glm::vec3 aaOffset = glm::vec3(0.001 * disp(rng), 0.001 * disp(rng), 0.f); segment.ray.direction = glm::normalize(segment.ray.direction + aaOffset); #endif #endif segment.pixelIndex = index; segment.remainingBounces = traceDepth; } } // TODO: // 
computeIntersections handles generating ray intersections ONLY. // Generating new rays is handled in your shader(s). // Feel free to modify the code below. __global__ void computeIntersections( int depth , int num_paths , PathSegment* pathSegments , Geom* geoms , int geoms_size , ShadeableIntersection* intersections #if MESH_CULL , Mesh* meshes , int meshes_size , Geom* triangles , int triangles_size #endif ) { int path_index = blockIdx.x * blockDim.x + threadIdx.x; if (path_index < num_paths) { PathSegment pathSegment = pathSegments[path_index]; float t; glm::vec3 intersect_point; glm::vec3 normal; float t_min = FLT_MAX; int hit_geom_index = -1; bool outside = true; bool triangleHit = false; int triangleHitMaterialId; glm::vec3 tmp_intersect; glm::vec3 tmp_normal; // naive parse through global geoms // If MESH_CULL is disabled, all mesh triangles will be in the geoms array for (int i = 0; i < geoms_size; i++) { Geom& geom = geoms[i]; if (geom.type == CUBE) { t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } else if (geom.type == SPHERE) { t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } #if !MESH_CULL else if (geom.type == TRIANGLE) { t = triangleIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } #endif // Compute the minimum t from the intersection tests to determine what // scene geometry object was hit first. if (t > 0.0f && t_min > t) { t_min = t; hit_geom_index = i; intersect_point = tmp_intersect; normal = tmp_normal; } } #if MESH_CULL // If MESH_CULL is enabled, mesh triangle checking will happen on a per mesh basis for (int i = 0; i < meshes_size; i++) { Mesh& mesh = meshes[i]; float b = boxIntersectionTest(mesh.boundingBox, pathSegment.ray, tmp_intersect, tmp_normal, outside); if (b > 0.0f) // Mesh bounding box was hit by ray, so check all triangles inside { for (int j = mesh.triangleDataStartIndex; j < mesh.triangleDataStartIndex + mesh.numTriangles; j++) { Geom& triangle = triangles[j]; t = triangleIntersectionTest(triangle, pathSegment.ray, tmp_intersect, tmp_normal, outside); if (t > 0.0f && t_min > t) { t_min = t; triangleHitMaterialId = triangle.materialid; intersect_point = tmp_intersect; normal = tmp_normal; triangleHit = true; } } } } #endif if (hit_geom_index == -1 && !triangleHit) { intersections[path_index].t = -1.0f; } #if MESH_CULL else if (triangleHit) { //The ray hits a triangle from a mesh intersections[path_index].t = t_min; intersections[path_index].materialId = triangleHitMaterialId; intersections[path_index].surfaceNormal = normal; } #endif else { //The ray hits something intersections[path_index].t = t_min; intersections[path_index].materialId = geoms[hit_geom_index].materialid; intersections[path_index].surfaceNormal = normal; } } } // LOOK: "fake" shader demonstrating what you might do with the info in // a ShadeableIntersection, as well as how to use thrust's random number // generator. Observe that since the thrust random number generator basically // adds "noise" to the iteration, the image should start off noisy and get // cleaner as more iterations are computed. // // Note that this shader does NOT do a BSDF evaluation! // Your shaders should handle that - this can allow techniques such as // bump mapping. 
__global__ void shadeFakeMaterial( int iter , int num_paths , ShadeableIntersection* shadeableIntersections , PathSegment* pathSegments , Material* materials ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_paths) { ShadeableIntersection intersection = shadeableIntersections[idx]; if (intersection.t > 0.0f) { // if the intersection exists... // Set up the RNG // LOOK: this is how you use thrust's RNG! Please look at // makeSeededRandomEngine as well. thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0); thrust::uniform_real_distribution<float> u01(0, 1); Material material = materials[intersection.materialId]; glm::vec3 materialColor = material.color; // If the material indicates that the object was a light, "light" the ray if (material.emittance > 0.0f) { pathSegments[idx].color *= (materialColor * material.emittance); } // Otherwise, do some pseudo-lighting computation. This is actually more // like what you would expect from shading in a rasterizer like OpenGL. // TODO: replace this! you should be able to start with basically a one-liner else { float lightTerm = glm::dot(intersection.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f)); pathSegments[idx].color *= (materialColor * lightTerm) * 0.3f + ((1.0f - intersection.t * 0.02f) * materialColor) * 0.7f; pathSegments[idx].color *= u01(rng); // apply some noise because why not } // If there was no intersection, color the ray black. // Lots of renderers use 4 channel color, RGBA, where A = alpha, often // used for opacity, in which case they can indicate "no opacity". // This can be useful for post-processing and image compositing. } else { pathSegments[idx].color = glm::vec3(0.0f); } } } /** * Implementation of shader that uses BSDF algorithm */ __global__ void shadeMaterialBSDF( int iter, int num_paths, ShadeableIntersection* shadeableIntersections, PathSegment* pathSegments, Material* materials, int depth ) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= num_paths) { return; } PathSegment* segment = &pathSegments[index]; if (segment->remainingBounces == 0) // // No light was hit, so pixel is black { return; } // Check for existence of intersection ShadeableIntersection intersection = shadeableIntersections[index]; if (intersection.t > 0.f) { // Set up the RNG thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, depth); thrust::uniform_real_distribution<float> u01(0, 1); Material material = materials[intersection.materialId]; // If the material indicates that the object was a light, "light" the ray if (material.emittance > 0.0f) // Light has been hit { pathSegments[index].color *= (material.color * material.emittance); segment->remainingBounces = 0; } else // Perform pseudo-lighting computation { glm::vec3 intersectionPoint = getPointOnRay(segment->ray, intersection.t); scatterRay(pathSegments[index], intersectionPoint, intersection.surfaceNormal, material, rng); segment->remainingBounces--; } } else { // Terminate the ray and set color to black segment->color = glm::vec3(0.f); segment->remainingBounces = 0; } } /** * Predicate for thrust::remove_if to remove rays that have reached the end of their life */ struct hasNoBounces { __host__ __device__ bool operator()(const PathSegment &p) { return glm::length(p.color) < 0.0001f; // return p.remainingBounces <= 0; // Testing whether remaining bounces exist did not work, so instead I Had to check the color being black. 
// This does result in rays being filtered, as seen by observing the number of remaining paths in the pathtrace function. } }; /** * Predicate for thrust::sort to sort intersections by material type */ struct sortByMaterialType { __host__ __device__ bool operator()(const ShadeableIntersection &l, const ShadeableIntersection &r) { return l.materialId < r.materialId; } }; // Add the current iteration's output to the overall image __global__ void finalGather(int nPaths, glm::vec3* image, PathSegment* iterationPaths) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < nPaths) { PathSegment iterationPath = iterationPaths[index]; image[iterationPath.pixelIndex] += iterationPath.color; } } /** * Wrapper for the __global__ call that sets up the kernel calls and does a ton * of memory management */ void pathtrace(uchar4* pbo, int frame, int iter) { const int traceDepth = hst_scene->state.traceDepth; const Camera& cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; // 2D block for generating ray from camera const dim3 blockSize2d(8, 8); const dim3 blocksPerGrid2d( (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x, (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y); // 1D block for path tracing const int blockSize1d = 128; /////////////////////////////////////////////////////////////////////////// // Recap: // * Initialize array of path rays (using rays that come out of the camera) // * You can pass the Camera object to that kernel. // * Each path ray must carry at minimum a (ray, color) pair, // * where color starts as the multiplicative identity, white = (1, 1, 1). // * This has already been done for you. // * For each depth: // * Compute an intersection in the scene for each path ray. // A very naive version of this has been implemented for you, but feel // free to add more primitives and/or a better algorithm. // Currently, intersection distance is recorded as a parametric distance, // t, or a "distance along the ray." t = -1.0 indicates no intersection. // * Color is attenuated (multiplied) by reflections off of any object // * TODO: Stream compact away all of the terminated paths. // You may use either your implementation or `thrust::remove_if` or its // cousins. // * Note that you can't really use a 2D kernel launch any more - switch // to 1D. // * TODO: Shade the rays that intersected something or didn't bottom out. // That is, color the ray by performing a color computation according // to the shader, then generate a new ray to continue the ray path. // We recommend just updating the ray's PathSegment in place. // Note that this step may come before or after stream compaction, // since some shaders you write may also cause a path to terminate. // * Finally, add this iteration's results to the image. This has been done // for you. 
// TODO: perform one iteration of path tracing generateRayFromCamera<<<blocksPerGrid2d, blockSize2d>>>(cam, iter, traceDepth, dev_paths); checkCUDAError("generate camera ray"); int depth = 0; PathSegment* dev_path_end = dev_paths + pixelcount; int num_paths = dev_path_end - dev_paths; // --- PathSegment Tracing Stage --- // Shoot ray into scene, bounce between objects, push shading chunks #if INSTRUMENT auto startTime = std::chrono::high_resolution_clock::now(); double intersectTime = 0.0; double shadeTime = 0.0; #endif bool iterationComplete = false; while (!iterationComplete) { // clean shading chunks cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // tracing dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d; #if INSTRUMENT auto startIntersectTime = std::chrono::high_resolution_clock::now(); #endif #if FIRST_BOUNCE_CACHE if (depth == 0 && iter <= 1) { computeIntersections<<<numblocksPathSegmentTracing, blockSize1d>>>( depth , num_paths , dev_paths , dev_geoms , hst_scene->geoms.size() , dev_firstBounceIntersections #if MESH_CULL , dev_meshes , hst_scene->meshes.size() , dev_triangles , hst_scene->triangles.size() #endif ); checkCUDAError("trace first bounce"); cudaDeviceSynchronize(); cudaMemcpy(dev_intersections, dev_firstBounceIntersections, pixelcount * sizeof(ShadeableIntersection), cudaMemcpyDeviceToDevice); } else if (depth == 0) { cudaDeviceSynchronize(); cudaMemcpy(dev_intersections, dev_firstBounceIntersections, pixelcount * sizeof(ShadeableIntersection), cudaMemcpyDeviceToDevice); } else if (depth > 0) { computeIntersections<<<numblocksPathSegmentTracing, blockSize1d>>>( depth , num_paths , dev_paths , dev_geoms , hst_scene->geoms.size() , dev_intersections #if MESH_CULL , dev_meshes , hst_scene->meshes.size() , dev_triangles , hst_scene->triangles.size() #endif ); checkCUDAError("trace one bounce"); cudaDeviceSynchronize(); } #else computeIntersections<<<numblocksPathSegmentTracing, blockSize1d>>>( depth , num_paths , dev_paths , dev_geoms , hst_scene->geoms.size() , dev_intersections #if MESH_CULL , dev_meshes , hst_scene->meshes.size() , dev_triangles , hst_scene->triangles.size() #endif ); checkCUDAError("trace one bounce"); cudaDeviceSynchronize(); #endif #if INSTRUMENT auto stopIntersectTime = std::chrono::high_resolution_clock::now(); #endif depth++; // DONE: // --- Shading Stage --- // Shade path segments based on intersections and generate new rays by // evaluating the BSDF. // Start off with just a big kernel that handles all the different // materials you have in the scenefile. // TODO: compare between directly shading the path segments and shading // path segments that have been reshuffled to be contiguous in memory. 
#if MATERIAL_SORT thrust::sort_by_key(thrust::device, dev_intersections, dev_intersections + num_paths, dev_paths, sortByMaterialType()); #endif #if INSTRUMENT auto startShadeTime = std::chrono::high_resolution_clock::now(); #endif shadeMaterialBSDF<<<numblocksPathSegmentTracing, blockSize1d>>>( iter, num_paths, dev_intersections, dev_paths, dev_materials, depth ); #if INSTRUMENT auto stopShadeTime = std::chrono::high_resolution_clock::now(); #endif #if STREAM_COMPACTION // Remove terminated rays thrust::device_ptr<PathSegment> thrust_path_start = thrust::device_pointer_cast(dev_paths); thrust::device_ptr<PathSegment> thrust_path_end = thrust::device_pointer_cast(dev_path_end); thrust_path_end = thrust::remove_if(thrust::device, thrust_path_start, thrust_path_end, hasNoBounces()); // Determine how many paths are remaining dev_paths = thrust::raw_pointer_cast(thrust_path_start); dev_path_end = thrust::raw_pointer_cast(thrust_path_end); #endif num_paths = dev_path_end - dev_paths; iterationComplete = (depth >= traceDepth) || (num_paths == 0); #if INSTRUMENT intersectTime += std::chrono::duration_cast<std::chrono::microseconds>(stopIntersectTime - startIntersectTime).count(); shadeTime += std::chrono::duration_cast<std::chrono::microseconds>(stopShadeTime - startShadeTime).count(); #endif } #if INSTRUMENT auto stopTime = std::chrono::high_resolution_clock::now(); double totalTime = std::chrono::duration_cast<std::chrono::microseconds>(stopTime - startTime).count(); std::cout << totalTime << "," << intersectTime << "," << shadeTime << std::endl; #endif // Assemble this iteration and apply it to the image dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d; finalGather<<<numBlocksPixels, blockSize1d>>>(num_paths, dev_image, dev_paths); /////////////////////////////////////////////////////////////////////////// // Send results to OpenGL buffer for rendering sendImageToPBO<<<blocksPerGrid2d, blockSize2d>>>(pbo, cam.resolution, iter, dev_image); // Retrieve image from GPU cudaMemcpy(hst_scene->state.image.data(), dev_image, pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost); checkCUDAError("pathtrace"); }
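
// An isolated sketch of the MATERIAL_SORT step above: sorting keys
// (intersections) and values (path segments) together makes segments that hit
// the same material contiguous, reducing divergence in the shading kernel.
// Hit and Seg are simplified stand-ins for the project's structs.
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <cstdio>

struct Hit { int materialId; };
struct Seg { int pixelIndex; };

struct byMaterial {
  __host__ __device__ bool operator()(const Hit& a, const Hit& b) const {
    return a.materialId < b.materialId;
  }
};

int main() {
  const int n = 6;
  Hit hHits[n] = {{2}, {0}, {1}, {0}, {2}, {1}};
  Seg hSegs[n] = {{0}, {1}, {2}, {3}, {4}, {5}};
  thrust::device_vector<Hit> hits(hHits, hHits + n);
  thrust::device_vector<Seg> segs(hSegs, hSegs + n);

  thrust::sort_by_key(thrust::device, hits.begin(), hits.end(),
                      segs.begin(), byMaterial());

  thrust::host_vector<Hit> outHits = hits;
  thrust::host_vector<Seg> outSegs = segs;
  for (int i = 0; i < n; ++i) {
    printf("material %d -> pixel %d\n", outHits[i].materialId, outSegs[i].pixelIndex);
  }
  return 0;
}
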
8eef7bdab7fa5e7eda2710e2c9013f21bac365c8.hip
// !!! This is a file automatically generated by hipify!!! // This is used to Test my Spmspv performance in applications. #include <iostream> #include <string> #include <float.h> #include <typeinfo> #include <limits> #include <algorithm> #include <vector> #include <random> #include <omp.h> #include <hipsparse.h> #include <cusparse_v2.h> #include <hip/hip_runtime.h> #include <thrust/execution_policy.h> #include <thrust/device_vector.h> #include <thrust/scan.h> #include <thrust/sort.h> #include "spmspv/csc-spmspv/spmspv.h" #include "spmspv/class.hpp" #include "spmspv/mtx.hpp" #include "spmspv/readsvmdata.hpp" #include "spmspv/config.h" #ifndef VALUE_TYPE #define VALUE_TYPE float #endif #ifndef NUM_RUN #define NUM_RUN 10 #endif #define IS_DOUBLE_ZERO(d) (abs(d) < DBL_EPSILON) #define IS_FLOAT_ZERO(d) (abs(d) < FLT_EPSILON) template<typename T> int readSVMToCoo(char* file_name, SvmData<T>* svmdata); template<typename T> int freeSVMToCoo(SvmData<T>* svmdata); template<typename T> int ConvertSVMDataToCSR(SvmData<T> svmdata, int* csr_row, int* csr_col, T* csr_val, int numVects, int dimVects, int numNonZeros); void TestCoo2Csr(int m, int mat_nnz, int* coo_row, int* csr_row) { int* d_csr_row = NULL; int* d_coo_row = NULL; cudaErrCheck(hipMalloc((void** )&d_csr_row, (m + 1) * sizeof(int))); cudaErrCheck(hipMalloc((void** )&d_coo_row, mat_nnz * sizeof(int))); cudaErrCheck(hipMemcpy(d_coo_row, coo_row, mat_nnz * sizeof(int), hipMemcpyHostToDevice)); hipsparseHandle_t sparse_handle; CUSP_CALL(hipsparseCreate(&sparse_handle)); hipsparseMatDescr_t descr = 0; CUSP_CALL(hipsparseCreateMatDescr(&descr)); hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO); cudaErrCheck(hipDeviceSynchronize()); hipEvent_t st, ed; float tm = 0; hipEventCreate(&st); hipEventCreate(&ed); hipEventRecord(st, 0); CUSP_CALL(hipsparseXcoo2csr(sparse_handle, d_coo_row, mat_nnz, m, d_csr_row, HIPSPARSE_INDEX_BASE_ZERO)); hipEventRecord(ed, 0); hipEventSynchronize(ed); //unit: ms. hipEventElapsedTime(&tm, st, ed); hipEventDestroy(st); hipEventDestroy(ed); std::cout << "csr2coo time = " << tm <<" ms." << std::endl; cudaErrCheck(hipMemcpy(csr_row, d_csr_row, (m + 1) * sizeof(int), hipMemcpyDeviceToHost)); cudaErrCheck(hipDeviceSynchronize()); cudaErrCheck(hipFree(d_csr_row)); cudaErrCheck(hipFree(d_coo_row)); } int TestSparse2Dense(int x_nnz, int* x_key, VALUE_TYPE* x_val, int m, int n, int nnz, VALUE_TYPE* x_dense){ int err = 0; //hipError_t err_cuda = hipSuccess; int device_id = 0; hipSetDevice(device_id); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, device_id); std::cout << "Device [" << device_id << "] " << deviceProp.name << ", " << " @ " << deviceProp.clockRate * 1e-3f << "MHz. " << std::endl; // double gb = SpmvGetBytes<int, VALUE_TYPE>(m, mat_nnz); // double gflop = SpmvGetFlops<int>(mat_nnz); std::cout << "DEBUG: n = " << n << std::endl; std::cout << "DEBUG: xnnz = " << x_nnz << std::endl; VALUE_TYPE* d_x = NULL; cudaErrCheck(hipMalloc((void** )&d_x, n * sizeof(VALUE_TYPE))); int* d_x_key = NULL; VALUE_TYPE* d_x_val = NULL; cudaErrCheck(hipMalloc((void** )&d_x_key, (x_nnz) * sizeof(int))); cudaErrCheck(hipMalloc((void** )&d_x_val, (x_nnz) * sizeof(VALUE_TYPE))); cudaErrCheck(hipMemcpy(d_x_key, x_key, x_nnz * sizeof(int), hipMemcpyHostToDevice)); cudaErrCheck(hipMemcpy(d_x_val, x_val, x_nnz * sizeof(VALUE_TYPE), hipMemcpyHostToDevice)); SpmspvHandle<int, unsigned int, VALUE_TYPE> A(m, n, nnz); //current vector type is sparse type. 
err = A.set_vector_type(0); err = A.set_sparse_x(d_x_key, d_x_val, x_nnz); err = A.set_x(d_x); A.allocPreBuffer(); SpmspvTimer s2d_timer; s2d_timer.Start(); //sparse vect to dense vec. err = A.sparse2dense(); double time = s2d_timer.Stop(); std::cout << "sparse2dense time = " << time << " ms." << std::endl; cudaErrCheck(hipFree(d_x)); cudaErrCheck(hipFree(d_x_key)); cudaErrCheck(hipFree(d_x_val)); A.deallocPreBuffer(); A.Destroy(); return err; } int TestDense2Sparse(int x_nnz, int* x_key, VALUE_TYPE* x_val, int m, int n, int nnz, VALUE_TYPE* x_dense){ int err = 0; //hipError_t err_cuda = hipSuccess; int device_id = 0; hipSetDevice(device_id); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, device_id); std::cout << "Device [" << device_id << "] " << deviceProp.name << ", " << " @ " << deviceProp.clockRate * 1e-3f << "MHz. " << std::endl; // double gb = SpmvGetBytes<int, VALUE_TYPE>(m, mat_nnz); // double gflop = SpmvGetFlops<int>(mat_nnz); std::cout << "DEBUG: n = " << n << std::endl; std::cout << "DEBUG: xnnz = " << x_nnz << std::endl; VALUE_TYPE* d_x = NULL; cudaErrCheck(hipMalloc((void** )&d_x, n * sizeof(VALUE_TYPE))); cudaErrCheck(hipMemcpy(d_x, x_dense, n * sizeof(VALUE_TYPE), hipMemcpyHostToDevice)); int* d_x_key = NULL; VALUE_TYPE* d_x_val = NULL; cudaErrCheck(hipMalloc((void** )&d_x_key, (x_nnz) * sizeof(int))); cudaErrCheck(hipMalloc((void** )&d_x_val, (x_nnz) * sizeof(VALUE_TYPE))); //TODO: check d_x_key and d_x_val 0 SpmspvHandle<int, unsigned int, VALUE_TYPE> A(m, n, nnz); //only assignment. Don not have memory alloc. //current vector type is dense type. err = A.set_vector_type(1); err = A.set_sparse_x(d_x_key, d_x_val, x_nnz); err = A.set_x(d_x); A.allocPreBuffer(); SpmspvTimer d2s_timer; d2s_timer.Start(); //dense vect to sparse vec. err = A.dense2sparse(); double time = d2s_timer.Stop(); std::cout << "dense2sparse time = " << time << " ms." << std::endl; cudaErrCheck(hipFree(d_x)); cudaErrCheck(hipFree(d_x_key)); cudaErrCheck(hipFree(d_x_val)); A.deallocPreBuffer(); A.Destroy(); return err; } int Testsparse2bitarray(int x_nnz, int* x_key, VALUE_TYPE* x_val, int m, int n, int nnz, VALUE_TYPE* x_dense){ int err = 0; //hipError_t err_cuda = hipSuccess; int device_id = 0; hipSetDevice(device_id); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, device_id); std::cout << "Device [" << device_id << "] " << deviceProp.name << ", " << " @ " << deviceProp.clockRate * 1e-3f << "MHz. " << std::endl; // double gb = SpmvGetBytes<int, VALUE_TYPE>(m, mat_nnz); // double gflop = SpmvGetFlops<int>(mat_nnz); std::cout << "DEBUG: n = " << n << std::endl; std::cout << "DEBUG: xnnz = " << x_nnz << std::endl; VALUE_TYPE* d_x = NULL; cudaErrCheck(hipMalloc((void** )&d_x, n * sizeof(VALUE_TYPE))); int* d_x_key = NULL; VALUE_TYPE* d_x_val = NULL; cudaErrCheck(hipMalloc((void** )&d_x_key, (x_nnz) * sizeof(int))); cudaErrCheck(hipMalloc((void** )&d_x_val, (x_nnz) * sizeof(VALUE_TYPE))); cudaErrCheck(hipMemcpy(d_x_key, x_key, x_nnz * sizeof(int), hipMemcpyHostToDevice)); cudaErrCheck(hipMemcpy(d_x_val, x_val, x_nnz * sizeof(VALUE_TYPE), hipMemcpyHostToDevice)); SpmspvHandle<int, unsigned int, VALUE_TYPE> A(m, n, nnz); //current vector type is sparse type. err = A.set_vector_type(0); err = A.set_sparse_x(d_x_key, d_x_val, x_nnz); err = A.set_x(d_x); //alloc for bitarray in this kernel. A.allocPreBuffer(); SpmspvTimer s2b_timer; s2b_timer.Start(); //sparse vect to dense vec. 
err = A.sparse2bitarray(); double time = s2b_timer.Stop(); std::cout << "sparse2bitarray time = " << time << " ms." << std::endl; cudaErrCheck(hipFree(d_x)); cudaErrCheck(hipFree(d_x_key)); cudaErrCheck(hipFree(d_x_val)); A.deallocPreBuffer(); A.Destroy(); return err; } int Testdense2bitarray(int x_nnz, int* x_key, VALUE_TYPE* x_val, int m, int n, int nnz, VALUE_TYPE* x_dense){ int err = 0; //hipError_t err_cuda = hipSuccess; int device_id = 0; hipSetDevice(device_id); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, device_id); std::cout << "Device [" << device_id << "] " << deviceProp.name << ", " << " @ " << deviceProp.clockRate * 1e-3f << "MHz. " << std::endl; // double gb = SpmvGetBytes<int, VALUE_TYPE>(m, mat_nnz); // double gflop = SpmvGetFlops<int>(mat_nnz); std::cout << "DEBUG: n = " << n << std::endl; std::cout << "DEBUG: xnnz = " << x_nnz << std::endl; VALUE_TYPE* d_x = NULL; cudaErrCheck(hipMalloc((void** )&d_x, n * sizeof(VALUE_TYPE))); cudaErrCheck(hipMemcpy(d_x, x_dense, n * sizeof(VALUE_TYPE), hipMemcpyHostToDevice)); int* d_x_key = NULL; VALUE_TYPE* d_x_val = NULL; cudaErrCheck(hipMalloc((void** )&d_x_key, (x_nnz) * sizeof(int))); cudaErrCheck(hipMalloc((void** )&d_x_val, (x_nnz) * sizeof(VALUE_TYPE))); //TODO: check d_x_key and d_x_val SpmspvHandle<int, unsigned int, VALUE_TYPE> A(m, n, nnz); //only assignment. Don not have memory alloc. //current vector type is dense type. err = A.set_vector_type(1); err = A.set_sparse_x(d_x_key, d_x_val, x_nnz); err = A.set_x(d_x); A.allocPreBuffer(); SpmspvTimer d2b_timer; d2b_timer.Start(); //dense vect to sparse vec. err = A.dense2bitarray(); double time = d2b_timer.Stop(); std::cout << "dense2bitarray time = " << time << " ms." << std::endl; cudaErrCheck(hipFree(d_x)); cudaErrCheck(hipFree(d_x_key)); cudaErrCheck(hipFree(d_x_val)); A.deallocPreBuffer(); A.Destroy(); return err; } int SelectSpMV(int m, int n, int mat_nnz, int* csr_row, int* csr_col, VALUE_TYPE* csr_val, VALUE_TYPE* x_dense, VALUE_TYPE* y_dense, VALUE_TYPE alpha) { int err = 0; hipError_t err_cuda = hipSuccess; int device_id = 0; hipSetDevice(device_id); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, device_id); std::cout << "Device [" << device_id << "] " << deviceProp.name << ", " << " @ " << deviceProp.clockRate * 1e-3f << "MHz. " << std::endl; double gb = SpmvGetBytes<int, VALUE_TYPE>(m, mat_nnz); double gflop = SpmvGetFlops<int>(mat_nnz); int* d_csr_row = NULL; int* d_csr_col = NULL; VALUE_TYPE* d_csr_val = NULL; cudaErrCheck(hipMalloc((void **)&d_csr_row, (m+1) * sizeof(int))); cudaErrCheck(hipMalloc((void **)&d_csr_col, mat_nnz * sizeof(int))); cudaErrCheck(hipMalloc((void **)&d_csr_val, mat_nnz * sizeof(VALUE_TYPE))); cudaErrCheck(hipMemcpy(d_csr_row, csr_row, (m+1) * sizeof(int), hipMemcpyHostToDevice)); cudaErrCheck(hipMemcpy(d_csr_col, csr_col, mat_nnz * sizeof(int), hipMemcpyHostToDevice)); cudaErrCheck(hipMemcpy(d_csr_val, csr_val, mat_nnz * sizeof(VALUE_TYPE), hipMemcpyHostToDevice)); SpmspvHandle<int, unsigned int, VALUE_TYPE> A(m, n, mat_nnz); err = A.InputCSR(mat_nnz, d_csr_row, d_csr_col, d_csr_val); VALUE_TYPE* d_x = NULL; cudaErrCheck(hipMalloc((void** )&d_x, n * sizeof(VALUE_TYPE))); cudaErrCheck(hipMemcpy(d_x, x_dense, n * sizeof(VALUE_TYPE), hipMemcpyHostToDevice)); VALUE_TYPE* d_y = NULL; cudaErrCheck(hipMalloc((void** )&d_y, m * sizeof(VALUE_TYPE))); cudaErrCheck(hipMemset(d_y, 0, m * sizeof(VALUE_TYPE))); //initialized to zero. err = A.set_vector_type(1);//current vector type is sparse type. 
err = A.setX(d_x); A.setSigma(ANONYMOUSLIB_AUTO_TUNED_SIGMA); A.warmup(); SpmspvTimer timer; timer.Start(); err = A.CSR5Preprocess(); std::cout << "CSR->CSR5 time = " << timer.Stop() << " ms." << std::endl; //check correctness by running 1 time err = A.csr5spmv(alpha, d_y); cudaErrCheck(hipMemcpy(y_dense, d_y, (m) * sizeof(VALUE_TYPE), hipMemcpyDeviceToHost)); // warm up by running 50 times if (NUM_RUN) { for (int i = 0; i < 50; i++) err = A.csr5spmv(alpha, d_y); } err_cuda = hipDeviceSynchronize(); // test timer.Start(); for (int i = 0; i < NUM_RUN; i++) err = A.csr5spmv(alpha, d_y); err_cuda = hipDeviceSynchronize(); double time = timer.Stop() / (double)NUM_RUN; timer.Start(); A.CSR5Postprocess(); std::cout << "CSR5->CSR time = " << timer.Stop() << " ms." << std::endl; if (NUM_RUN) std::cout << "CSR5-based SpMV time = " << time << " ms. Bandwidth = " << gb/(1.0e+6 * time) << " GB/s. GFlops = " << gflop/(1.0e+6 * time) << " GFlops." << std::endl; if (d_csr_row) cudaErrCheck(hipFree(d_csr_row)); if (d_csr_col) cudaErrCheck(hipFree(d_csr_col)); if (d_csr_val) cudaErrCheck(hipFree(d_csr_val)); if (d_x) hipFree(d_x); if (d_y) hipFree(d_y); A.Destroy(); return err; } int TestCscNoSortOrBucketSpmspv(int m, int n, int mat_nnz, int* csr_row, int* csr_col, VALUE_TYPE* csr_val, int x_nnz, int* x_key, VALUE_TYPE* x_val, VALUE_TYPE* x_dense, int* y_nnz, int* y_inx, VALUE_TYPE* y_val, VALUE_TYPE* y_dense, VALUE_TYPE alpha) { /****step-1: format conversion****/ int err = 0; hipError_t err_cuda = hipSuccess; int device_id = 0; hipSetDevice(device_id); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, device_id); std::cout << "Device [" << device_id << "] " << deviceProp.name << ", " << " @ " << deviceProp.clockRate * 1e-3f << "MHz. " << std::endl; // double gb = SpmvGetBytes<int, VALUE_TYPE>(m, mat_nnz); // double gflop = SpmvGetFlops<int>(mat_nnz); //std::cout << "DEBUG: xnnz = " << x_nnz << std::endl; int* d_csr_row = NULL; int* d_csr_col = NULL; VALUE_TYPE* d_csr_val = NULL; cudaErrCheck(hipMalloc((void **)&d_csr_row, (m+1) * sizeof(int))); cudaErrCheck(hipMalloc((void **)&d_csr_col, mat_nnz * sizeof(int))); cudaErrCheck(hipMalloc((void **)&d_csr_val, mat_nnz * sizeof(VALUE_TYPE))); cudaErrCheck(hipMemcpy(d_csr_row, csr_row, (m+1) * sizeof(int), hipMemcpyHostToDevice)); cudaErrCheck(hipMemcpy(d_csr_col, csr_col, mat_nnz * sizeof(int), hipMemcpyHostToDevice)); cudaErrCheck(hipMemcpy(d_csr_val, csr_val, mat_nnz * sizeof(VALUE_TYPE), hipMemcpyHostToDevice)); VALUE_TYPE* d_x = NULL; cudaErrCheck(hipMalloc((void** )&d_x, n * sizeof(VALUE_TYPE))); cudaErrCheck(hipMemcpy(d_x, x_dense, n * sizeof(VALUE_TYPE), hipMemcpyHostToDevice)); int* d_x_key = NULL; VALUE_TYPE* d_x_val = NULL; cudaErrCheck(hipMalloc((void** )&d_x_key, (x_nnz) * sizeof(int))); cudaErrCheck(hipMalloc((void** )&d_x_val, (x_nnz) * sizeof(VALUE_TYPE))); cudaErrCheck(hipMemcpy(d_x_key, x_key, x_nnz * sizeof(int), hipMemcpyHostToDevice)); cudaErrCheck(hipMemcpy(d_x_val, x_val, x_nnz * sizeof(VALUE_TYPE), hipMemcpyHostToDevice)); //result vec. VALUE_TYPE* d_y = NULL; cudaErrCheck(hipMalloc((void** )&d_y, m * sizeof(VALUE_TYPE))); cudaErrCheck(hipMemset(d_y, 0, m * sizeof(VALUE_TYPE))); //initialized to zero. SpmspvHandle<int, unsigned int, VALUE_TYPE> A(m, n, mat_nnz); err = A.InputCSR(mat_nnz, d_csr_row, d_csr_col, d_csr_val); err = A.ToCSC(); err = A.set_vector_type(0);//current vector type is sparse type. 
err = A.set_sparse_x(d_x_key, d_x_val, x_nnz); err = A.set_x(d_x); /*********select spmv.***********/ SpmspvTimer timer; err = A.holaPreprocess(); timer.Start(); for (int i = 0; i < NUM_RUN; i++) err = A.holaspmv(alpha, d_y); err_cuda = hipDeviceSynchronize(); double holaspmv_time = timer.Stop()/ (double)NUM_RUN; err = A.CSR5Preprocess(); timer.Start(); for (int i = 0; i < NUM_RUN; i++) err = A.csr5spmv(alpha, d_y); err_cuda = hipDeviceSynchronize(); double csr5spmv_time = timer.Stop() / (double)NUM_RUN; if (csr5spmv_time /holaspmv_time > 1.2) { A.set_spmv_type(0);// A.CSR5Postprocess(); }else{ A.set_spmv_type(1);// A.holaPostprocess(); } A.allocPreBuffer(); //generate x: iterate begin. timer.Start(); int bin_len = A.computeBinlen(); double time = timer.Stop(); std::cout << "DEBUG: compute bin_len time = " << time << "ms." << std::endl; std::cout << "DEBUG: TestCscNoSortOrBucketSpmspv: bin_len = " << bin_len << std::endl; int GM1 = A.computeGM1(); std::cout << "DEBUG: TestCscNoSortOrBucketSpmspv: GM1 = " << GM1 << std::endl; int GM2 = A.computeGM2(); std::cout << "DEBUG: TestCscNoSortOrBucketSpmspv: GM2 = " << GM2 << std::endl; int GM3 = A.computeGM3(); std::cout << "DEBUG: TestCscNoSortOrBucketSpmspv: GM3 = " << GM3 << std::endl; timer.Start(); err = A.CscBasedNoSortSpmspv(alpha, y_nnz, d_y); time = timer.Stop(); std::cout << "csc spmspv time = " << time << "ms." << std::endl; //cudaErrCheck(hipMemcpy(y_dense, d_y, (m) * sizeof(VALUE_TYPE), hipMemcpyDeviceToHost)); if (d_csr_row) cudaErrCheck(hipFree(d_csr_row)); if (d_csr_col) cudaErrCheck(hipFree(d_csr_col)); if (d_csr_val) cudaErrCheck(hipFree(d_csr_val)); if (d_x) hipFree(d_x); if (d_x_key) hipFree(d_x_key); if (d_x_val) hipFree(d_x_val); if (d_y) hipFree(d_y); A.deallocPreBuffer(); A.Destroy(); return err; } bool key_present(int dst, const int* key, const int start, const int end) { bool retval = false; for (int i = start; i < end; i++) { if (key[i] == dst) { retval = true; break; } } return retval; } template <typename vT=int> void generate_random_sparse_vector(int n, int nnz, int* key, vT* value) { if (nnz > n) { nnz = n; } std::random_device rd; std::mt19937 gen(rd()); std::uniform_int_distribution<> dist(1, n); for (int j = 0; j < nnz; j++) { int dst; do { dst = dist(gen); } while(key_present(dst, key, 0, j)); key[j] = dst; value[j] = 1; } } int main(int argc, char** argv) { int m, n, mat_nnz; int* csr_row; int* csr_col; VALUE_TYPE* csr_val; // report precision of floating-point std::cout << "-------------------------" << std::endl; std::string precision; if (sizeof(VALUE_TYPE) == 4) { precision = "32-bit Single Precision (float)"; } else if (sizeof(VALUE_TYPE) == 8) { precision = "64-bit Double Precision (double)"; } else { std::cout << "Wrong precision. Program exit!" << std::endl; return 0; } std::cout << "PRECISION = " << precision << std::endl; std::cout << "-------------------------" << std::endl; char* file_name; if (argc == 2) { file_name = argv[1]; } else { std::cout << "Usage: matrix_file_name" << std::endl; exit(1); } std::cout << "-------" << file_name << "---------" << std::endl; #ifndef READ_SVM_DATA MTX<VALUE_TYPE> mtx; fileToMtxCoo<VALUE_TYPE>(file_name, &mtx, true); m = mtx.rows; n = mtx.cols; mat_nnz = mtx.nnz; //coo2csr: attention memory alloc and free. 
csr_row = (int* )malloc((m + 1) * sizeof(int)); CHECK_MALLOC(csr_row); TestCoo2Csr(m, mat_nnz, mtx.row, csr_row); csr_col = mtx.col; csr_val = mtx.data; #else SvmData<VALUE_TYPE> svm_data; readSVMToCoo<VALUE_TYPE>(file_name, &svm_data); m = svm_data.numVects; n = svm_data.dimVects; mat_nnz = svm_data.numNonZeros; //coo2csr: attention memory alloc and free. csr_row = (int* )malloc((m + 1) * sizeof(int)); CHECK_MALLOC(csr_row); csr_col = (int* )malloc(mat_nnz * sizeof(int)); CHECK_MALLOC(csr_col); csr_val = (VALUE_TYPE* )malloc(mat_nnz * sizeof(VALUE_TYPE)); CHECK_MALLOC(csr_val); ConvertSVMDataToCSR(svm_data, csr_row, csr_col, csr_val, m, n, mat_nnz); freeSVMToCoo(&svm_data); #endif // easy for test correctness. for (int i = 0; i < mat_nnz; i++) { csr_val[i] = 1.0; } // SparseVec* x_sparse = (SparseVec* )malloc(n * sizeof(SparseVec)); // CHECK_MALLOC(x_sparse); int* x_sparse_key = (int* )malloc(n * sizeof(int)); CHECK_MALLOC(x_sparse_key); VALUE_TYPE* x_sparse_val = (VALUE_TYPE* )malloc(n * sizeof(VALUE_TYPE)); CHECK_MALLOC(x_sparse_val); VALUE_TYPE* x_dense = (VALUE_TYPE* )malloc(n * sizeof(VALUE_TYPE)); CHECK_MALLOC(x_dense); VALUE_TYPE* y_dense = (VALUE_TYPE* )malloc(m * sizeof(VALUE_TYPE)); //SparseVec* y_sparse = (SparseVec* )malloc(m * sizeof(SparseVec)); int* y_sparse_key = (int* )malloc(m * sizeof(int)); VALUE_TYPE* y_sparse_val = (VALUE_TYPE* )malloc(m * sizeof(VALUE_TYPE)); CHECK_MALLOC(y_dense); //CHECK_MALLOC(y_sparse); CHECK_MALLOC(y_sparse_key); CHECK_MALLOC(y_sparse_val); #ifdef READ_SVM_DATA //read sparse vector from file char* suffix = ".out"; char svm_file_name[35]; sprintf(svm_file_name, "%s%s", file_name, suffix); std::cout << "svm_file_name = " << svm_file_name << std::endl; int iter = 0; getNumofRows(svm_file_name, &iter); std::cout << "svm iter = " << iter << std::endl; int* ind = (int*)malloc(iter*sizeof(int)); CHECK_MALLOC(ind); readSparseXInxFromFile(svm_file_name, iter, ind); int new_iter = 0; removeRepetition(iter, ind, &new_iter, ind); std::cout << "iter = " << iter << " ,new_iter = " << new_iter << std::endl; std::cout << "-------------------------------" << std::endl; //run the iterations of spmspv //for (int i = 0; i < new_iter; i++) { for (int i = 0; i < new_iter; i++) { //the number of the nonzeroes of the ind[i]-th row. //i = 524; int x_nnz = csr_row[ind[i]+1] - csr_row[ind[i]]; printf("ind[%d] = %d, x_nnz = %d\n", i, ind[i], x_nnz); memset(x_sparse_key, 0, n * sizeof(int)); memset(x_sparse_val, 0, n * sizeof(VALUE_TYPE)); extractSparseXfromMat<int, VALUE_TYPE>(ind[i], csr_row, csr_col, csr_val, m, n, mat_nnz, x_sparse_key, x_sparse_val); #else int iter = 11; int xnnz_vec[11]={1,1,3851,814949,2166177,511191,13677,2306,272,34,4}; for (int i = 0; i < iter; i++) { int x_nnz = xnnz_vec[i]; if(x_nnz >= n) x_nnz = n; //if(x_nnz > n) break; printf("x_nnz = %d\n", x_nnz); memset(x_sparse_key, 0, n * sizeof(int)); memset(x_sparse_val, 0, n * sizeof(VALUE_TYPE)); memset(x_dense, 0, n * sizeof(VALUE_TYPE)); #ifdef SPARSE_X_RANDOM srand(time(NULL)); generate_random_sparse_vector(n, x_nnz, x_sparse_key, x_sparse_val); #else for (int i = 0; i < x_nnz; i++) { x_sparse_key[i] = i; x_sparse_val[i] = 1; x_dense[i] = 1; } #endif #endif //end of different methods to read sparse x. 
#ifdef TEST_S2D_D2S TestSparse2Dense(x_nnz, x_sparse_key, x_sparse_val, m, n, mat_nnz, x_dense); TestDense2Sparse(x_nnz, x_sparse_key, x_sparse_val, m, n, mat_nnz, x_dense); Testsparse2bitarray(x_nnz, x_sparse_key, x_sparse_val, m, n, mat_nnz, x_dense); Testdense2bitarray(x_nnz, x_sparse_key, x_sparse_val, m, n, mat_nnz, x_dense); #endif #ifdef SMSV_CUDA VALUE_TYPE alpha = 1.0; memset(y_dense, 0, m * sizeof(VALUE_TYPE));// SelectSpMV(m, n, mat_nnz, csr_row, csr_col, csr_val, x_dense, y_dense, alpha); std::cout << std::endl; int cu_y_nnz = 0; memset(y_dense, 0, m * sizeof(VALUE_TYPE));// TestCscNoSortOrBucketSpmspv(m, n, mat_nnz, csr_row, csr_col, csr_val, x_nnz, x_sparse_key, x_sparse_val, x_dense, &cu_y_nnz, y_sparse_key, y_sparse_val, y_dense, alpha); std::cout << std::endl; #endif }//end of iteration. if (x_sparse_key) free(x_sparse_key); if (x_sparse_val) free(x_sparse_val); if (x_dense) free(x_dense); //if (y_sparse) free(y_sparse); if (y_dense) free(y_dense); if (y_sparse_key) free(y_sparse_key); if (y_sparse_val) free(y_sparse_val); #ifndef READ_SVM_DATA if (mtx.row) free(mtx.row); if (mtx.col) free(mtx.col); if (mtx.data) free(mtx.data); #else if (csr_row) free(csr_row); if (csr_col) free(csr_col); if (csr_val) free(csr_val); if (ind) free(ind); #endif return 0; }
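TestSparse2Dense and TestDense2Sparse above only time SpmspvHandle's conversion calls; the conversion kernels themselves live elsewhere in the library and are not shown in this file. As a rough sketch of the data movement being timed, the sparse-to-dense step amounts to a scatter of (key, value) pairs into a zeroed dense vector; the kernel below is illustrative only and is not the library's sparse2dense() implementation.

// Illustrative scatter: one thread per stored nonzero writes its value at its index.
// Assumes d_dense has already been zeroed (e.g. with hipMemset) and keys are in [0, n).
__global__ void scatter_sparse_to_dense(const int* d_key, const VALUE_TYPE* d_val,
                                        int nnz, VALUE_TYPE* d_dense) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < nnz) {
    d_dense[d_key[i]] = d_val[i];
  }
}
// possible launch, mirroring the buffers allocated in TestSparse2Dense above:
//   int threads = 256, blocks = (x_nnz + threads - 1) / threads;
//   hipLaunchKernelGGL(scatter_sparse_to_dense, dim3(blocks), dim3(threads), 0, 0,
//                      d_x_key, d_x_val, x_nnz, d_x);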
8eef7bdab7fa5e7eda2710e2c9013f21bac365c8.cu
// This is used to Test my Spmspv performance in applications. #include <iostream> #include <string> #include <float.h> #include <typeinfo> #include <limits> #include <algorithm> #include <vector> #include <random> #include <omp.h> #include <cusparse.h> #include <cusparse_v2.h> #include <cuda_runtime.h> #include <thrust/execution_policy.h> #include <thrust/device_vector.h> #include <thrust/scan.h> #include <thrust/sort.h> #include "spmspv/csc-spmspv/spmspv.h" #include "spmspv/class.hpp" #include "spmspv/mtx.hpp" #include "spmspv/readsvmdata.hpp" #include "spmspv/config.h" #ifndef VALUE_TYPE #define VALUE_TYPE float #endif #ifndef NUM_RUN #define NUM_RUN 10 #endif #define IS_DOUBLE_ZERO(d) (abs(d) < DBL_EPSILON) #define IS_FLOAT_ZERO(d) (abs(d) < FLT_EPSILON) template<typename T> int readSVMToCoo(char* file_name, SvmData<T>* svmdata); template<typename T> int freeSVMToCoo(SvmData<T>* svmdata); template<typename T> int ConvertSVMDataToCSR(SvmData<T> svmdata, int* csr_row, int* csr_col, T* csr_val, int numVects, int dimVects, int numNonZeros); void TestCoo2Csr(int m, int mat_nnz, int* coo_row, int* csr_row) { int* d_csr_row = NULL; int* d_coo_row = NULL; cudaErrCheck(cudaMalloc((void** )&d_csr_row, (m + 1) * sizeof(int))); cudaErrCheck(cudaMalloc((void** )&d_coo_row, mat_nnz * sizeof(int))); cudaErrCheck(cudaMemcpy(d_coo_row, coo_row, mat_nnz * sizeof(int), cudaMemcpyHostToDevice)); cusparseHandle_t sparse_handle; CUSP_CALL(cusparseCreate(&sparse_handle)); cusparseMatDescr_t descr = 0; CUSP_CALL(cusparseCreateMatDescr(&descr)); cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO); cudaErrCheck(cudaDeviceSynchronize()); cudaEvent_t st, ed; float tm = 0; cudaEventCreate(&st); cudaEventCreate(&ed); cudaEventRecord(st, 0); CUSP_CALL(cusparseXcoo2csr(sparse_handle, d_coo_row, mat_nnz, m, d_csr_row, CUSPARSE_INDEX_BASE_ZERO)); cudaEventRecord(ed, 0); cudaEventSynchronize(ed); //unit: ms. cudaEventElapsedTime(&tm, st, ed); cudaEventDestroy(st); cudaEventDestroy(ed); std::cout << "csr2coo time = " << tm <<" ms." << std::endl; cudaErrCheck(cudaMemcpy(csr_row, d_csr_row, (m + 1) * sizeof(int), cudaMemcpyDeviceToHost)); cudaErrCheck(cudaDeviceSynchronize()); cudaErrCheck(cudaFree(d_csr_row)); cudaErrCheck(cudaFree(d_coo_row)); } int TestSparse2Dense(int x_nnz, int* x_key, VALUE_TYPE* x_val, int m, int n, int nnz, VALUE_TYPE* x_dense){ int err = 0; //cudaError_t err_cuda = cudaSuccess; int device_id = 0; cudaSetDevice(device_id); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, device_id); std::cout << "Device [" << device_id << "] " << deviceProp.name << ", " << " @ " << deviceProp.clockRate * 1e-3f << "MHz. " << std::endl; // double gb = SpmvGetBytes<int, VALUE_TYPE>(m, mat_nnz); // double gflop = SpmvGetFlops<int>(mat_nnz); std::cout << "DEBUG: n = " << n << std::endl; std::cout << "DEBUG: xnnz = " << x_nnz << std::endl; VALUE_TYPE* d_x = NULL; cudaErrCheck(cudaMalloc((void** )&d_x, n * sizeof(VALUE_TYPE))); int* d_x_key = NULL; VALUE_TYPE* d_x_val = NULL; cudaErrCheck(cudaMalloc((void** )&d_x_key, (x_nnz) * sizeof(int))); cudaErrCheck(cudaMalloc((void** )&d_x_val, (x_nnz) * sizeof(VALUE_TYPE))); cudaErrCheck(cudaMemcpy(d_x_key, x_key, x_nnz * sizeof(int), cudaMemcpyHostToDevice)); cudaErrCheck(cudaMemcpy(d_x_val, x_val, x_nnz * sizeof(VALUE_TYPE), cudaMemcpyHostToDevice)); SpmspvHandle<int, unsigned int, VALUE_TYPE> A(m, n, nnz); //current vector type is sparse type. 
err = A.set_vector_type(0); err = A.set_sparse_x(d_x_key, d_x_val, x_nnz); err = A.set_x(d_x); A.allocPreBuffer(); SpmspvTimer s2d_timer; s2d_timer.Start(); //sparse vect to dense vec. err = A.sparse2dense(); double time = s2d_timer.Stop(); std::cout << "sparse2dense time = " << time << " ms." << std::endl; cudaErrCheck(cudaFree(d_x)); cudaErrCheck(cudaFree(d_x_key)); cudaErrCheck(cudaFree(d_x_val)); A.deallocPreBuffer(); A.Destroy(); return err; } int TestDense2Sparse(int x_nnz, int* x_key, VALUE_TYPE* x_val, int m, int n, int nnz, VALUE_TYPE* x_dense){ int err = 0; //cudaError_t err_cuda = cudaSuccess; int device_id = 0; cudaSetDevice(device_id); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, device_id); std::cout << "Device [" << device_id << "] " << deviceProp.name << ", " << " @ " << deviceProp.clockRate * 1e-3f << "MHz. " << std::endl; // double gb = SpmvGetBytes<int, VALUE_TYPE>(m, mat_nnz); // double gflop = SpmvGetFlops<int>(mat_nnz); std::cout << "DEBUG: n = " << n << std::endl; std::cout << "DEBUG: xnnz = " << x_nnz << std::endl; VALUE_TYPE* d_x = NULL; cudaErrCheck(cudaMalloc((void** )&d_x, n * sizeof(VALUE_TYPE))); cudaErrCheck(cudaMemcpy(d_x, x_dense, n * sizeof(VALUE_TYPE), cudaMemcpyHostToDevice)); int* d_x_key = NULL; VALUE_TYPE* d_x_val = NULL; cudaErrCheck(cudaMalloc((void** )&d_x_key, (x_nnz) * sizeof(int))); cudaErrCheck(cudaMalloc((void** )&d_x_val, (x_nnz) * sizeof(VALUE_TYPE))); //TODO: check d_x_key and d_x_val 需要初始化为0吗? SpmspvHandle<int, unsigned int, VALUE_TYPE> A(m, n, nnz); //only assignment. Don not have memory alloc. //current vector type is dense type. err = A.set_vector_type(1); err = A.set_sparse_x(d_x_key, d_x_val, x_nnz); err = A.set_x(d_x); A.allocPreBuffer(); SpmspvTimer d2s_timer; d2s_timer.Start(); //dense vect to sparse vec. err = A.dense2sparse(); double time = d2s_timer.Stop(); std::cout << "dense2sparse time = " << time << " ms." << std::endl; cudaErrCheck(cudaFree(d_x)); cudaErrCheck(cudaFree(d_x_key)); cudaErrCheck(cudaFree(d_x_val)); A.deallocPreBuffer(); A.Destroy(); return err; } int Testsparse2bitarray(int x_nnz, int* x_key, VALUE_TYPE* x_val, int m, int n, int nnz, VALUE_TYPE* x_dense){ int err = 0; //cudaError_t err_cuda = cudaSuccess; int device_id = 0; cudaSetDevice(device_id); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, device_id); std::cout << "Device [" << device_id << "] " << deviceProp.name << ", " << " @ " << deviceProp.clockRate * 1e-3f << "MHz. " << std::endl; // double gb = SpmvGetBytes<int, VALUE_TYPE>(m, mat_nnz); // double gflop = SpmvGetFlops<int>(mat_nnz); std::cout << "DEBUG: n = " << n << std::endl; std::cout << "DEBUG: xnnz = " << x_nnz << std::endl; VALUE_TYPE* d_x = NULL; cudaErrCheck(cudaMalloc((void** )&d_x, n * sizeof(VALUE_TYPE))); int* d_x_key = NULL; VALUE_TYPE* d_x_val = NULL; cudaErrCheck(cudaMalloc((void** )&d_x_key, (x_nnz) * sizeof(int))); cudaErrCheck(cudaMalloc((void** )&d_x_val, (x_nnz) * sizeof(VALUE_TYPE))); cudaErrCheck(cudaMemcpy(d_x_key, x_key, x_nnz * sizeof(int), cudaMemcpyHostToDevice)); cudaErrCheck(cudaMemcpy(d_x_val, x_val, x_nnz * sizeof(VALUE_TYPE), cudaMemcpyHostToDevice)); SpmspvHandle<int, unsigned int, VALUE_TYPE> A(m, n, nnz); //current vector type is sparse type. err = A.set_vector_type(0); err = A.set_sparse_x(d_x_key, d_x_val, x_nnz); err = A.set_x(d_x); //alloc for bitarray in this kernel. A.allocPreBuffer(); SpmspvTimer s2b_timer; s2b_timer.Start(); //sparse vect to dense vec. 
err = A.sparse2bitarray(); double time = s2b_timer.Stop(); std::cout << "sparse2bitarray time = " << time << " ms." << std::endl; cudaErrCheck(cudaFree(d_x)); cudaErrCheck(cudaFree(d_x_key)); cudaErrCheck(cudaFree(d_x_val)); A.deallocPreBuffer(); A.Destroy(); return err; } int Testdense2bitarray(int x_nnz, int* x_key, VALUE_TYPE* x_val, int m, int n, int nnz, VALUE_TYPE* x_dense){ int err = 0; //cudaError_t err_cuda = cudaSuccess; int device_id = 0; cudaSetDevice(device_id); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, device_id); std::cout << "Device [" << device_id << "] " << deviceProp.name << ", " << " @ " << deviceProp.clockRate * 1e-3f << "MHz. " << std::endl; // double gb = SpmvGetBytes<int, VALUE_TYPE>(m, mat_nnz); // double gflop = SpmvGetFlops<int>(mat_nnz); std::cout << "DEBUG: n = " << n << std::endl; std::cout << "DEBUG: xnnz = " << x_nnz << std::endl; VALUE_TYPE* d_x = NULL; cudaErrCheck(cudaMalloc((void** )&d_x, n * sizeof(VALUE_TYPE))); cudaErrCheck(cudaMemcpy(d_x, x_dense, n * sizeof(VALUE_TYPE), cudaMemcpyHostToDevice)); int* d_x_key = NULL; VALUE_TYPE* d_x_val = NULL; cudaErrCheck(cudaMalloc((void** )&d_x_key, (x_nnz) * sizeof(int))); cudaErrCheck(cudaMalloc((void** )&d_x_val, (x_nnz) * sizeof(VALUE_TYPE))); //TODO: check d_x_key and d_x_val SpmspvHandle<int, unsigned int, VALUE_TYPE> A(m, n, nnz); //only assignment. Don not have memory alloc. //current vector type is dense type. err = A.set_vector_type(1); err = A.set_sparse_x(d_x_key, d_x_val, x_nnz); err = A.set_x(d_x); A.allocPreBuffer(); SpmspvTimer d2b_timer; d2b_timer.Start(); //dense vect to sparse vec. err = A.dense2bitarray(); double time = d2b_timer.Stop(); std::cout << "dense2bitarray time = " << time << " ms." << std::endl; cudaErrCheck(cudaFree(d_x)); cudaErrCheck(cudaFree(d_x_key)); cudaErrCheck(cudaFree(d_x_val)); A.deallocPreBuffer(); A.Destroy(); return err; } int SelectSpMV(int m, int n, int mat_nnz, int* csr_row, int* csr_col, VALUE_TYPE* csr_val, VALUE_TYPE* x_dense, VALUE_TYPE* y_dense, VALUE_TYPE alpha) { int err = 0; cudaError_t err_cuda = cudaSuccess; int device_id = 0; cudaSetDevice(device_id); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, device_id); std::cout << "Device [" << device_id << "] " << deviceProp.name << ", " << " @ " << deviceProp.clockRate * 1e-3f << "MHz. " << std::endl; double gb = SpmvGetBytes<int, VALUE_TYPE>(m, mat_nnz); double gflop = SpmvGetFlops<int>(mat_nnz); int* d_csr_row = NULL; int* d_csr_col = NULL; VALUE_TYPE* d_csr_val = NULL; cudaErrCheck(cudaMalloc((void **)&d_csr_row, (m+1) * sizeof(int))); cudaErrCheck(cudaMalloc((void **)&d_csr_col, mat_nnz * sizeof(int))); cudaErrCheck(cudaMalloc((void **)&d_csr_val, mat_nnz * sizeof(VALUE_TYPE))); cudaErrCheck(cudaMemcpy(d_csr_row, csr_row, (m+1) * sizeof(int), cudaMemcpyHostToDevice)); cudaErrCheck(cudaMemcpy(d_csr_col, csr_col, mat_nnz * sizeof(int), cudaMemcpyHostToDevice)); cudaErrCheck(cudaMemcpy(d_csr_val, csr_val, mat_nnz * sizeof(VALUE_TYPE), cudaMemcpyHostToDevice)); SpmspvHandle<int, unsigned int, VALUE_TYPE> A(m, n, mat_nnz); err = A.InputCSR(mat_nnz, d_csr_row, d_csr_col, d_csr_val); VALUE_TYPE* d_x = NULL; cudaErrCheck(cudaMalloc((void** )&d_x, n * sizeof(VALUE_TYPE))); cudaErrCheck(cudaMemcpy(d_x, x_dense, n * sizeof(VALUE_TYPE), cudaMemcpyHostToDevice)); VALUE_TYPE* d_y = NULL; cudaErrCheck(cudaMalloc((void** )&d_y, m * sizeof(VALUE_TYPE))); cudaErrCheck(cudaMemset(d_y, 0, m * sizeof(VALUE_TYPE))); //initialized to zero. 
err = A.set_vector_type(1);//current vector type is sparse type. err = A.setX(d_x); A.setSigma(ANONYMOUSLIB_AUTO_TUNED_SIGMA); A.warmup(); SpmspvTimer timer; timer.Start(); err = A.CSR5Preprocess(); std::cout << "CSR->CSR5 time = " << timer.Stop() << " ms." << std::endl; //check correctness by running 1 time err = A.csr5spmv(alpha, d_y); cudaErrCheck(cudaMemcpy(y_dense, d_y, (m) * sizeof(VALUE_TYPE), cudaMemcpyDeviceToHost)); // warm up by running 50 times if (NUM_RUN) { for (int i = 0; i < 50; i++) err = A.csr5spmv(alpha, d_y); } err_cuda = cudaDeviceSynchronize(); // test timer.Start(); for (int i = 0; i < NUM_RUN; i++) err = A.csr5spmv(alpha, d_y); err_cuda = cudaDeviceSynchronize(); double time = timer.Stop() / (double)NUM_RUN; timer.Start(); A.CSR5Postprocess(); std::cout << "CSR5->CSR time = " << timer.Stop() << " ms." << std::endl; if (NUM_RUN) std::cout << "CSR5-based SpMV time = " << time << " ms. Bandwidth = " << gb/(1.0e+6 * time) << " GB/s. GFlops = " << gflop/(1.0e+6 * time) << " GFlops." << std::endl; if (d_csr_row) cudaErrCheck(cudaFree(d_csr_row)); if (d_csr_col) cudaErrCheck(cudaFree(d_csr_col)); if (d_csr_val) cudaErrCheck(cudaFree(d_csr_val)); if (d_x) cudaFree(d_x); if (d_y) cudaFree(d_y); A.Destroy(); return err; } int TestCscNoSortOrBucketSpmspv(int m, int n, int mat_nnz, int* csr_row, int* csr_col, VALUE_TYPE* csr_val, int x_nnz, int* x_key, VALUE_TYPE* x_val, VALUE_TYPE* x_dense, int* y_nnz, int* y_inx, VALUE_TYPE* y_val, VALUE_TYPE* y_dense, VALUE_TYPE alpha) { /****step-1: format conversion****/ int err = 0; cudaError_t err_cuda = cudaSuccess; int device_id = 0; cudaSetDevice(device_id); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, device_id); std::cout << "Device [" << device_id << "] " << deviceProp.name << ", " << " @ " << deviceProp.clockRate * 1e-3f << "MHz. " << std::endl; // double gb = SpmvGetBytes<int, VALUE_TYPE>(m, mat_nnz); // double gflop = SpmvGetFlops<int>(mat_nnz); //std::cout << "DEBUG: xnnz = " << x_nnz << std::endl; int* d_csr_row = NULL; int* d_csr_col = NULL; VALUE_TYPE* d_csr_val = NULL; cudaErrCheck(cudaMalloc((void **)&d_csr_row, (m+1) * sizeof(int))); cudaErrCheck(cudaMalloc((void **)&d_csr_col, mat_nnz * sizeof(int))); cudaErrCheck(cudaMalloc((void **)&d_csr_val, mat_nnz * sizeof(VALUE_TYPE))); cudaErrCheck(cudaMemcpy(d_csr_row, csr_row, (m+1) * sizeof(int), cudaMemcpyHostToDevice)); cudaErrCheck(cudaMemcpy(d_csr_col, csr_col, mat_nnz * sizeof(int), cudaMemcpyHostToDevice)); cudaErrCheck(cudaMemcpy(d_csr_val, csr_val, mat_nnz * sizeof(VALUE_TYPE), cudaMemcpyHostToDevice)); VALUE_TYPE* d_x = NULL; cudaErrCheck(cudaMalloc((void** )&d_x, n * sizeof(VALUE_TYPE))); cudaErrCheck(cudaMemcpy(d_x, x_dense, n * sizeof(VALUE_TYPE), cudaMemcpyHostToDevice)); int* d_x_key = NULL; VALUE_TYPE* d_x_val = NULL; cudaErrCheck(cudaMalloc((void** )&d_x_key, (x_nnz) * sizeof(int))); cudaErrCheck(cudaMalloc((void** )&d_x_val, (x_nnz) * sizeof(VALUE_TYPE))); cudaErrCheck(cudaMemcpy(d_x_key, x_key, x_nnz * sizeof(int), cudaMemcpyHostToDevice)); cudaErrCheck(cudaMemcpy(d_x_val, x_val, x_nnz * sizeof(VALUE_TYPE), cudaMemcpyHostToDevice)); //result vec. VALUE_TYPE* d_y = NULL; cudaErrCheck(cudaMalloc((void** )&d_y, m * sizeof(VALUE_TYPE))); cudaErrCheck(cudaMemset(d_y, 0, m * sizeof(VALUE_TYPE))); //initialized to zero. SpmspvHandle<int, unsigned int, VALUE_TYPE> A(m, n, mat_nnz); err = A.InputCSR(mat_nnz, d_csr_row, d_csr_col, d_csr_val); err = A.ToCSC(); err = A.set_vector_type(0);//current vector type is sparse type. 
err = A.set_sparse_x(d_x_key, d_x_val, x_nnz); err = A.set_x(d_x); /*********select spmv.***********/ SpmspvTimer timer; err = A.holaPreprocess(); timer.Start(); for (int i = 0; i < NUM_RUN; i++) err = A.holaspmv(alpha, d_y); err_cuda = cudaDeviceSynchronize(); double holaspmv_time = timer.Stop()/ (double)NUM_RUN; err = A.CSR5Preprocess(); timer.Start(); for (int i = 0; i < NUM_RUN; i++) err = A.csr5spmv(alpha, d_y); err_cuda = cudaDeviceSynchronize(); double csr5spmv_time = timer.Stop() / (double)NUM_RUN; if (csr5spmv_time /holaspmv_time > 1.2) { A.set_spmv_type(0);// A.CSR5Postprocess(); }else{ A.set_spmv_type(1);// A.holaPostprocess(); } A.allocPreBuffer(); //generate x: iterate begin. timer.Start(); int bin_len = A.computeBinlen(); double time = timer.Stop(); std::cout << "DEBUG: compute bin_len time = " << time << "ms." << std::endl; std::cout << "DEBUG: TestCscNoSortOrBucketSpmspv: bin_len = " << bin_len << std::endl; int GM1 = A.computeGM1(); std::cout << "DEBUG: TestCscNoSortOrBucketSpmspv: GM1 = " << GM1 << std::endl; int GM2 = A.computeGM2(); std::cout << "DEBUG: TestCscNoSortOrBucketSpmspv: GM2 = " << GM2 << std::endl; int GM3 = A.computeGM3(); std::cout << "DEBUG: TestCscNoSortOrBucketSpmspv: GM3 = " << GM3 << std::endl; timer.Start(); err = A.CscBasedNoSortSpmspv(alpha, y_nnz, d_y); time = timer.Stop(); std::cout << "csc spmspv time = " << time << "ms." << std::endl; //cudaErrCheck(cudaMemcpy(y_dense, d_y, (m) * sizeof(VALUE_TYPE), cudaMemcpyDeviceToHost)); if (d_csr_row) cudaErrCheck(cudaFree(d_csr_row)); if (d_csr_col) cudaErrCheck(cudaFree(d_csr_col)); if (d_csr_val) cudaErrCheck(cudaFree(d_csr_val)); if (d_x) cudaFree(d_x); if (d_x_key) cudaFree(d_x_key); if (d_x_val) cudaFree(d_x_val); if (d_y) cudaFree(d_y); A.deallocPreBuffer(); A.Destroy(); return err; } bool key_present(int dst, const int* key, const int start, const int end) { bool retval = false; for (int i = start; i < end; i++) { if (key[i] == dst) { retval = true; break; } } return retval; } template <typename vT=int> void generate_random_sparse_vector(int n, int nnz, int* key, vT* value) { if (nnz > n) { nnz = n; } std::random_device rd; std::mt19937 gen(rd()); std::uniform_int_distribution<> dist(1, n); for (int j = 0; j < nnz; j++) { int dst; do { dst = dist(gen); } while(key_present(dst, key, 0, j)); key[j] = dst; value[j] = 1; } } int main(int argc, char** argv) { int m, n, mat_nnz; int* csr_row; int* csr_col; VALUE_TYPE* csr_val; // report precision of floating-point std::cout << "-------------------------" << std::endl; std::string precision; if (sizeof(VALUE_TYPE) == 4) { precision = "32-bit Single Precision (float)"; } else if (sizeof(VALUE_TYPE) == 8) { precision = "64-bit Double Precision (double)"; } else { std::cout << "Wrong precision. Program exit!" << std::endl; return 0; } std::cout << "PRECISION = " << precision << std::endl; std::cout << "-------------------------" << std::endl; char* file_name; if (argc == 2) { file_name = argv[1]; } else { std::cout << "Usage: matrix_file_name" << std::endl; exit(1); } std::cout << "-------" << file_name << "---------" << std::endl; #ifndef READ_SVM_DATA MTX<VALUE_TYPE> mtx; fileToMtxCoo<VALUE_TYPE>(file_name, &mtx, true); m = mtx.rows; n = mtx.cols; mat_nnz = mtx.nnz; //coo2csr: attention memory alloc and free. 
csr_row = (int* )malloc((m + 1) * sizeof(int)); CHECK_MALLOC(csr_row); TestCoo2Csr(m, mat_nnz, mtx.row, csr_row); csr_col = mtx.col; csr_val = mtx.data; #else SvmData<VALUE_TYPE> svm_data; readSVMToCoo<VALUE_TYPE>(file_name, &svm_data); m = svm_data.numVects; n = svm_data.dimVects; mat_nnz = svm_data.numNonZeros; //coo2csr: attention memory alloc and free. csr_row = (int* )malloc((m + 1) * sizeof(int)); CHECK_MALLOC(csr_row); csr_col = (int* )malloc(mat_nnz * sizeof(int)); CHECK_MALLOC(csr_col); csr_val = (VALUE_TYPE* )malloc(mat_nnz * sizeof(VALUE_TYPE)); CHECK_MALLOC(csr_val); ConvertSVMDataToCSR(svm_data, csr_row, csr_col, csr_val, m, n, mat_nnz); freeSVMToCoo(&svm_data); #endif // easy for test correctness. for (int i = 0; i < mat_nnz; i++) { csr_val[i] = 1.0; } // SparseVec* x_sparse = (SparseVec* )malloc(n * sizeof(SparseVec)); // CHECK_MALLOC(x_sparse); int* x_sparse_key = (int* )malloc(n * sizeof(int)); CHECK_MALLOC(x_sparse_key); VALUE_TYPE* x_sparse_val = (VALUE_TYPE* )malloc(n * sizeof(VALUE_TYPE)); CHECK_MALLOC(x_sparse_val); VALUE_TYPE* x_dense = (VALUE_TYPE* )malloc(n * sizeof(VALUE_TYPE)); CHECK_MALLOC(x_dense); VALUE_TYPE* y_dense = (VALUE_TYPE* )malloc(m * sizeof(VALUE_TYPE)); //SparseVec* y_sparse = (SparseVec* )malloc(m * sizeof(SparseVec)); int* y_sparse_key = (int* )malloc(m * sizeof(int)); VALUE_TYPE* y_sparse_val = (VALUE_TYPE* )malloc(m * sizeof(VALUE_TYPE)); CHECK_MALLOC(y_dense); //CHECK_MALLOC(y_sparse); CHECK_MALLOC(y_sparse_key); CHECK_MALLOC(y_sparse_val); #ifdef READ_SVM_DATA //read sparse vector from file char* suffix = ".out"; char svm_file_name[35]; sprintf(svm_file_name, "%s%s", file_name, suffix); std::cout << "svm_file_name = " << svm_file_name << std::endl; int iter = 0; getNumofRows(svm_file_name, &iter); std::cout << "svm iter = " << iter << std::endl; int* ind = (int*)malloc(iter*sizeof(int)); CHECK_MALLOC(ind); readSparseXInxFromFile(svm_file_name, iter, ind); int new_iter = 0; removeRepetition(iter, ind, &new_iter, ind); std::cout << "iter = " << iter << " ,new_iter = " << new_iter << std::endl; std::cout << "-------------------------------" << std::endl; //run the iterations of spmspv //for (int i = 0; i < new_iter; i++) { for (int i = 0; i < new_iter; i++) { //the number of the nonzeroes of the ind[i]-th row. //i = 524; int x_nnz = csr_row[ind[i]+1] - csr_row[ind[i]]; printf("ind[%d] = %d, x_nnz = %d\n", i, ind[i], x_nnz); memset(x_sparse_key, 0, n * sizeof(int)); memset(x_sparse_val, 0, n * sizeof(VALUE_TYPE)); extractSparseXfromMat<int, VALUE_TYPE>(ind[i], csr_row, csr_col, csr_val, m, n, mat_nnz, x_sparse_key, x_sparse_val); #else int iter = 11; int xnnz_vec[11]={1,1,3851,814949,2166177,511191,13677,2306,272,34,4}; for (int i = 0; i < iter; i++) { int x_nnz = xnnz_vec[i]; if(x_nnz >= n) x_nnz = n; //if(x_nnz > n) break; printf("x_nnz = %d\n", x_nnz); memset(x_sparse_key, 0, n * sizeof(int)); memset(x_sparse_val, 0, n * sizeof(VALUE_TYPE)); memset(x_dense, 0, n * sizeof(VALUE_TYPE)); #ifdef SPARSE_X_RANDOM srand(time(NULL)); generate_random_sparse_vector(n, x_nnz, x_sparse_key, x_sparse_val); #else for (int i = 0; i < x_nnz; i++) { x_sparse_key[i] = i; x_sparse_val[i] = 1; x_dense[i] = 1; } #endif #endif //end of different methods to read sparse x. 
#ifdef TEST_S2D_D2S TestSparse2Dense(x_nnz, x_sparse_key, x_sparse_val, m, n, mat_nnz, x_dense); TestDense2Sparse(x_nnz, x_sparse_key, x_sparse_val, m, n, mat_nnz, x_dense); Testsparse2bitarray(x_nnz, x_sparse_key, x_sparse_val, m, n, mat_nnz, x_dense); Testdense2bitarray(x_nnz, x_sparse_key, x_sparse_val, m, n, mat_nnz, x_dense); #endif #ifdef SMSV_CUDA VALUE_TYPE alpha = 1.0; memset(y_dense, 0, m * sizeof(VALUE_TYPE));// SelectSpMV(m, n, mat_nnz, csr_row, csr_col, csr_val, x_dense, y_dense, alpha); std::cout << std::endl; int cu_y_nnz = 0; memset(y_dense, 0, m * sizeof(VALUE_TYPE));// TestCscNoSortOrBucketSpmspv(m, n, mat_nnz, csr_row, csr_col, csr_val, x_nnz, x_sparse_key, x_sparse_val, x_dense, &cu_y_nnz, y_sparse_key, y_sparse_val, y_dense, alpha); std::cout << std::endl; #endif }//end of iteration. if (x_sparse_key) free(x_sparse_key); if (x_sparse_val) free(x_sparse_val); if (x_dense) free(x_dense); //if (y_sparse) free(y_sparse); if (y_dense) free(y_dense); if (y_sparse_key) free(y_sparse_key); if (y_sparse_val) free(y_sparse_val); #ifndef READ_SVM_DATA if (mtx.row) free(mtx.row); if (mtx.col) free(mtx.col); if (mtx.data) free(mtx.data); #else if (csr_row) free(csr_row); if (csr_col) free(csr_col); if (csr_val) free(csr_val); if (ind) free(ind); #endif return 0; }
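TestCscNoSortOrBucketSpmspv above chooses between the hola and CSR5 SpMV paths by timing both over NUM_RUN runs and comparing their ratio against 1.2. Pulled out as a standalone helper, the decision reads as below; this is only a paraphrase of the branch in that function, and the 0/1 codes simply mirror what is passed to A.set_spmv_type(), whose exact meaning is internal to SpmspvHandle.

// Paraphrase of the selection branch above: keep CSR5 only if its per-run time is
// within roughly 20% of hola's, otherwise tear it down and keep the other path.
inline int select_spmv_type(double csr5_time_ms, double hola_time_ms) {
  return (csr5_time_ms / hola_time_ms > 1.2) ? 0 : 1;
}
// e.g. A.set_spmv_type(select_spmv_type(csr5spmv_time, holaspmv_time));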
2f4b1828646ac923bb8f5a8d308249362e7dcb24.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ########################################################## // By Eugene Ch'ng | www.complexity.io // Email: [email protected] // ---------------------------------------------------------- // The ERC 'Lost Frontiers' Project // Development for the Parallelisation of ABM Simulation // ---------------------------------------------------------- // A Basic CUDA Application for ABM Development // // Filling arrays with block generated IDs // identify a specific block ID and make changes for that kernel // previously we used fillArray<<1,N>>, 1 block and N threads // we now use <<N,1>> for N blocks and 1 thread // // LIMITS OF THREADS AND BLOCKS (use 01.DeviceInfo to check your GPU) // The particular GPU used here has 1024 threads per block // This presents a limit, but we can also use blocks per grid // Each block (for this old AlienWare GPU) has 65535 blocks per grid // Blocks and Threads have 3 dimensions (type dim3) // We will explore how to combine both blocks and threads to create // arbitrarily long numbers // ---------------------------------------------------------- // How to compile: // nvcc <filename>.cu -o <outputfile> // ########################################################## #include <stdio.h> #include <iostream> using namespace std; // blockIdx is limited at 65535 #define N 10 // --------------------- CUDA KERNELS // Fill arrays with device thread IDs __global__ void fillArray(int *dev_arr) { // note that we no longer use the for loop here // blockIdx.x is a device variable - it's the ID of the block // fillArray kernel is called for each block and has its own ID int bid = blockIdx.x; // assign the dev_array element with threadIDx.x dev_arr[bid] = bid; // identifying a threads if(bid == 5) { printf("**blockIdx.x 5 is called!!\n"); dev_arr[bid] = bid + 100; } } // the main is a host code int main(int argc, const char * argv[]) { cout << "------------ initialising device and host arrays" << endl; int arr[N]; // host variable int *dev_arr; // device variable for(int i=0; i<N; i++) { arr[i] = 0; printf("host arr[%d] = %d\n", i, arr[i]); } cout << "------------ allocate device memory dev_arr" << endl; // allocating a device array to copy to // note the N * sizeof(int) hipMalloc( (void**)&dev_arr, N * sizeof(int) ); cout << "------------ copy arr to dev_arr" << endl; // copying host array to device // note the N * sizeof(int) hipMemcpy(dev_arr, arr, N * sizeof(int), hipMemcpyHostToDevice); cout << "------------ calling kernel fillArray" << endl; // N block, and 1 thread // previously we used fillArray<<1,N>>, 1 block and N threads // we now use <<N,1>> for N blocks and 1 thread hipLaunchKernelGGL(( fillArray), dim3(N),dim3(1), 0, 0, dev_arr); cout << "------------ copy dev_arr to arr" << endl; // note the N * sizeof(int) hipMemcpy(arr, dev_arr, N * sizeof(int), hipMemcpyDeviceToHost); cout << "------------ printing changed host array" << endl; for(int i=0; i<N; i++) { printf("** changed host arr[%d] = %d\n", i, arr[i]); } // ---- FREE ALLOCATED KERNEL MEMORY hipFree( dev_arr ); return 0; }
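The header comments above point out the per-block thread limit and the per-grid block limit, and promise combining blocks and threads. A minimal sketch of that next step in the same style (the kernel name and N_TOTAL below are ours, not part of the original lesson):

// Sketch: build the global index from both the block ID and the thread ID.
// The bounds check matters because the grid is rounded up and may overshoot N_TOTAL.
#define N_TOTAL 100000
#define THREADS_PER_BLOCK 256
__global__ void fillArrayCombined(int *dev_arr) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < N_TOTAL) {
    dev_arr[idx] = idx;
  }
}
// launch with enough blocks to cover all elements:
//   int blocks = (N_TOTAL + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
//   hipLaunchKernelGGL(fillArrayCombined, dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, dev_arr);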
2f4b1828646ac923bb8f5a8d308249362e7dcb24.cu
// ########################################################## // By Eugene Ch'ng | www.complexity.io // Email: [email protected] // ---------------------------------------------------------- // The ERC 'Lost Frontiers' Project // Development for the Parallelisation of ABM Simulation // ---------------------------------------------------------- // A Basic CUDA Application for ABM Development // // Filling arrays with block generated IDs // identify a specific block ID and make changes for that kernel // previously we used fillArray<<1,N>>, 1 block and N threads // we now use <<N,1>> for N blocks and 1 thread // // LIMITS OF THREADS AND BLOCKS (use 01.DeviceInfo to check your GPU) // The particular GPU used here has 1024 threads per block // This presents a limit, but we can also use blocks per grid // Each block (for this old AlienWare GPU) has 65535 blocks per grid // Blocks and Threads have 3 dimensions (type dim3) // We will explore how to combine both blocks and threads to create // arbitrarily long numbers // ---------------------------------------------------------- // How to compile: // nvcc <filename>.cu -o <outputfile> // ########################################################## #include <stdio.h> #include <iostream> using namespace std; // blockIdx is limited at 65535 #define N 10 // --------------------- CUDA KERNELS // Fill arrays with device thread IDs __global__ void fillArray(int *dev_arr) { // note that we no longer use the for loop here // blockIdx.x is a device variable - it's the ID of the block // fillArray kernel is called for each block and has its own ID int bid = blockIdx.x; // assign the dev_array element with threadIDx.x dev_arr[bid] = bid; // identifying a threads if(bid == 5) { printf("**blockIdx.x 5 is called!!\n"); dev_arr[bid] = bid + 100; } } // the main is a host code int main(int argc, const char * argv[]) { cout << "------------ initialising device and host arrays" << endl; int arr[N]; // host variable int *dev_arr; // device variable for(int i=0; i<N; i++) { arr[i] = 0; printf("host arr[%d] = %d\n", i, arr[i]); } cout << "------------ allocate device memory dev_arr" << endl; // allocating a device array to copy to // note the N * sizeof(int) cudaMalloc( (void**)&dev_arr, N * sizeof(int) ); cout << "------------ copy arr to dev_arr" << endl; // copying host array to device // note the N * sizeof(int) cudaMemcpy(dev_arr, arr, N * sizeof(int), cudaMemcpyHostToDevice); cout << "------------ calling kernel fillArray" << endl; // N block, and 1 thread // previously we used fillArray<<1,N>>, 1 block and N threads // we now use <<N,1>> for N blocks and 1 thread fillArray<<<N,1>>>(dev_arr); cout << "------------ copy dev_arr to arr" << endl; // note the N * sizeof(int) cudaMemcpy(arr, dev_arr, N * sizeof(int), cudaMemcpyDeviceToHost); cout << "------------ printing changed host array" << endl; for(int i=0; i<N; i++) { printf("** changed host arr[%d] = %d\n", i, arr[i]); } // ---- FREE ALLOCATED KERNEL MEMORY cudaFree( dev_arr ); return 0; }
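When the element count can exceed what even a full grid covers in one pass, the usual follow-on to the block/thread indexing above is a grid-stride loop; the sketch below is illustrative and not part of the original example.

// Sketch: each thread starts at its global index and strides by the total number of
// threads in the grid, so one fixed launch configuration handles any n.
__global__ void fillArrayGridStride(int *dev_arr, int n) {
  int stride = gridDim.x * blockDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride) {
    dev_arr[i] = i;
  }
}
// e.g. fillArrayGridStride<<<128, 256>>>(dev_arr, n); covers any n with 32768 threads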
ba9fe156ef516da7f3b9b628a3ce683546ba76ea.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "group_reductions.hpp" #include <bitmask/legacy/bit_mask.cuh> #include <cudf/utilities/legacy/type_dispatcher.hpp> #include <groupby/sort/legacy/sort_helper.hpp> #include <thrust/reduce.h> #include <thrust/iterator/discard_iterator.h> #include <cmath> namespace { struct var_functor { template <typename T> std::enable_if_t<std::is_arithmetic<T>::value, void > operator()(gdf_column const& values, rmm::device_vector<cudf::size_type> const& group_labels, rmm::device_vector<cudf::size_type> const& group_sizes, gdf_column * result, cudf::size_type ddof, bool is_std, hipStream_t stream) { auto values_data = static_cast<const T*>(values.data); auto result_data = static_cast<double *>(result->data); auto values_valid = reinterpret_cast<const bit_mask::bit_mask_t*>(values.valid); auto result_valid = reinterpret_cast<bit_mask::bit_mask_t*>(result->valid); const cudf::size_type* d_group_labels = group_labels.data().get(); const cudf::size_type* d_group_sizes = group_sizes.data().get(); // Calculate sum // TODO: replace with mean function call when that gets an internal API rmm::device_vector<T> sums(group_sizes.size()); thrust::reduce_by_key(rmm::exec_policy(stream)->on(stream), group_labels.begin(), group_labels.end(), thrust::make_transform_iterator( thrust::make_counting_iterator(0), [=] __device__ (cudf::size_type i) -> T { return (values_valid and not bit_mask::is_valid(values_valid, i)) ? 0 : values_data[i]; }), thrust::make_discard_iterator(), sums.begin()); // TODO: use target_type for sums and result_data T* d_sums = sums.data().get(); auto values_it = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [=] __device__ (cudf::size_type i) { if (values_valid and not bit_mask::is_valid(values_valid, i)) return 0.0; double x = values_data[i]; cudf::size_type group_idx = d_group_labels[i]; cudf::size_type group_size = d_group_sizes[group_idx]; // prevent divide by zero error if (group_size == 0 or group_size - ddof <= 0) return 0.0; double mean = static_cast<double>(d_sums[group_idx])/group_size; return (x - mean) * (x - mean) / (group_size - ddof); } ); thrust::reduce_by_key(rmm::exec_policy(stream)->on(stream), group_labels.begin(), group_labels.end(), values_it, thrust::make_discard_iterator(), result_data); // set nulls if (result_valid) { thrust::for_each_n(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator(0), group_sizes.size(), [=] __device__ (cudf::size_type i){ cudf::size_type group_size = d_group_sizes[i]; if (group_size == 0 or group_size - ddof <= 0) bit_mask::clear_bit_safe(result_valid, i); else bit_mask::set_bit_safe(result_valid, i); }); set_null_count(*result); } // if std, do a sqrt if (is_std) { thrust::transform(rmm::exec_policy(stream)->on(stream), result_data, result_data + group_sizes.size(), result_data, [] __device__ (double data) { return sqrt(data); }); } } template <typename T, typename... 
Args> std::enable_if_t<!std::is_arithmetic<T>::value, void > operator()(Args&&... args) { CUDF_FAIL("Only numeric types are supported in variance"); } }; } // namespace anonymous namespace cudf { namespace detail { void group_var(gdf_column const& values, rmm::device_vector<size_type> const& group_labels, rmm::device_vector<size_type> const& group_sizes, gdf_column * result, size_type ddof, hipStream_t stream) { type_dispatcher(values.dtype, var_functor{}, values, group_labels, group_sizes, result, ddof, false, stream); } void group_std(gdf_column const& values, rmm::device_vector<size_type> const& group_labels, rmm::device_vector<size_type> const& group_sizes, gdf_column * result, size_type ddof, hipStream_t stream) { type_dispatcher(values.dtype, var_functor{}, values, group_labels, group_sizes, result, ddof, true, stream); } std::pair<cudf::table, cudf::table> group_var_std(cudf::table const& keys, cudf::table const& values, cudf::size_type ddof, bool is_std) { groupby::sort::detail::helper gb_obj(keys); auto group_labels = gb_obj.group_labels(); cudf::table result_table(gb_obj.num_groups(), std::vector<gdf_dtype>(values.num_columns(), GDF_FLOAT64), std::vector<gdf_dtype_extra_info>(values.num_columns()), true); for (cudf::size_type i = 0; i < values.num_columns(); i++) { gdf_column sorted_values; rmm::device_vector<cudf::size_type> group_sizes; std::tie(sorted_values, group_sizes) = gb_obj.sort_values(*(values.get_column(i))); gdf_column* result_col = result_table.get_column(i); if (is_std) { detail::group_std(sorted_values, group_labels, group_sizes, result_col, ddof); } else { detail::group_var(sorted_values, group_labels, group_sizes, result_col, ddof); } gdf_column_free(&sorted_values); } return std::make_pair(gb_obj.unique_keys(), std::move(result_table)); } } // namespace detail std::pair<cudf::table, cudf::table> group_std(cudf::table const& keys, cudf::table const& values, cudf::size_type ddof) { return detail::group_var_std(keys, values, ddof, true); } std::pair<cudf::table, cudf::table> group_var(cudf::table const& keys, cudf::table const& values, cudf::size_type ddof) { return detail::group_var_std(keys, values, ddof, false); } } // namespace cudf
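var_functor above is a two-pass computation: reduce_by_key first produces each group's sum (hence mean), and a second reduce_by_key accumulates (x - mean)^2 / (group_size - ddof). A plain host-side reference of the same formula for a single group can be handy when checking results; the helper below is illustrative and not part of cuDF.

// Reference: two-pass variance for one group, matching the per-element term summed
// by the second reduce_by_key in var_functor (group_std would take std::sqrt of this).
#include <vector>
inline double group_variance_ref(const std::vector<double>& x, int ddof) {
  const int n = static_cast<int>(x.size());
  if (n == 0 || n - ddof <= 0) return 0.0;  // the kernel marks such rows null instead
  double sum = 0.0;
  for (double v : x) sum += v;
  const double mean = sum / n;
  double acc = 0.0;
  for (double v : x) acc += (v - mean) * (v - mean) / (n - ddof);
  return acc;
}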
ba9fe156ef516da7f3b9b628a3ce683546ba76ea.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "group_reductions.hpp" #include <bitmask/legacy/bit_mask.cuh> #include <cudf/utilities/legacy/type_dispatcher.hpp> #include <groupby/sort/legacy/sort_helper.hpp> #include <thrust/reduce.h> #include <thrust/iterator/discard_iterator.h> #include <cmath> namespace { struct var_functor { template <typename T> std::enable_if_t<std::is_arithmetic<T>::value, void > operator()(gdf_column const& values, rmm::device_vector<cudf::size_type> const& group_labels, rmm::device_vector<cudf::size_type> const& group_sizes, gdf_column * result, cudf::size_type ddof, bool is_std, cudaStream_t stream) { auto values_data = static_cast<const T*>(values.data); auto result_data = static_cast<double *>(result->data); auto values_valid = reinterpret_cast<const bit_mask::bit_mask_t*>(values.valid); auto result_valid = reinterpret_cast<bit_mask::bit_mask_t*>(result->valid); const cudf::size_type* d_group_labels = group_labels.data().get(); const cudf::size_type* d_group_sizes = group_sizes.data().get(); // Calculate sum // TODO: replace with mean function call when that gets an internal API rmm::device_vector<T> sums(group_sizes.size()); thrust::reduce_by_key(rmm::exec_policy(stream)->on(stream), group_labels.begin(), group_labels.end(), thrust::make_transform_iterator( thrust::make_counting_iterator(0), [=] __device__ (cudf::size_type i) -> T { return (values_valid and not bit_mask::is_valid(values_valid, i)) ? 0 : values_data[i]; }), thrust::make_discard_iterator(), sums.begin()); // TODO: use target_type for sums and result_data T* d_sums = sums.data().get(); auto values_it = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [=] __device__ (cudf::size_type i) { if (values_valid and not bit_mask::is_valid(values_valid, i)) return 0.0; double x = values_data[i]; cudf::size_type group_idx = d_group_labels[i]; cudf::size_type group_size = d_group_sizes[group_idx]; // prevent divide by zero error if (group_size == 0 or group_size - ddof <= 0) return 0.0; double mean = static_cast<double>(d_sums[group_idx])/group_size; return (x - mean) * (x - mean) / (group_size - ddof); } ); thrust::reduce_by_key(rmm::exec_policy(stream)->on(stream), group_labels.begin(), group_labels.end(), values_it, thrust::make_discard_iterator(), result_data); // set nulls if (result_valid) { thrust::for_each_n(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator(0), group_sizes.size(), [=] __device__ (cudf::size_type i){ cudf::size_type group_size = d_group_sizes[i]; if (group_size == 0 or group_size - ddof <= 0) bit_mask::clear_bit_safe(result_valid, i); else bit_mask::set_bit_safe(result_valid, i); }); set_null_count(*result); } // if std, do a sqrt if (is_std) { thrust::transform(rmm::exec_policy(stream)->on(stream), result_data, result_data + group_sizes.size(), result_data, [] __device__ (double data) { return sqrt(data); }); } } template <typename T, typename... 
Args> std::enable_if_t<!std::is_arithmetic<T>::value, void > operator()(Args&&... args) { CUDF_FAIL("Only numeric types are supported in variance"); } }; } // namespace anonymous namespace cudf { namespace detail { void group_var(gdf_column const& values, rmm::device_vector<size_type> const& group_labels, rmm::device_vector<size_type> const& group_sizes, gdf_column * result, size_type ddof, cudaStream_t stream) { type_dispatcher(values.dtype, var_functor{}, values, group_labels, group_sizes, result, ddof, false, stream); } void group_std(gdf_column const& values, rmm::device_vector<size_type> const& group_labels, rmm::device_vector<size_type> const& group_sizes, gdf_column * result, size_type ddof, cudaStream_t stream) { type_dispatcher(values.dtype, var_functor{}, values, group_labels, group_sizes, result, ddof, true, stream); } std::pair<cudf::table, cudf::table> group_var_std(cudf::table const& keys, cudf::table const& values, cudf::size_type ddof, bool is_std) { groupby::sort::detail::helper gb_obj(keys); auto group_labels = gb_obj.group_labels(); cudf::table result_table(gb_obj.num_groups(), std::vector<gdf_dtype>(values.num_columns(), GDF_FLOAT64), std::vector<gdf_dtype_extra_info>(values.num_columns()), true); for (cudf::size_type i = 0; i < values.num_columns(); i++) { gdf_column sorted_values; rmm::device_vector<cudf::size_type> group_sizes; std::tie(sorted_values, group_sizes) = gb_obj.sort_values(*(values.get_column(i))); gdf_column* result_col = result_table.get_column(i); if (is_std) { detail::group_std(sorted_values, group_labels, group_sizes, result_col, ddof); } else { detail::group_var(sorted_values, group_labels, group_sizes, result_col, ddof); } gdf_column_free(&sorted_values); } return std::make_pair(gb_obj.unique_keys(), std::move(result_table)); } } // namespace detail std::pair<cudf::table, cudf::table> group_std(cudf::table const& keys, cudf::table const& values, cudf::size_type ddof) { return detail::group_var_std(keys, values, ddof, true); } std::pair<cudf::table, cudf::table> group_var(cudf::table const& keys, cudf::table const& values, cudf::size_type ddof) { return detail::group_var_std(keys, values, ddof, false); } } // namespace cudf
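One detail worth calling out in the kernels above is the validity rule: a result row is marked null whenever the group is empty or group_size - ddof <= 0, rather than dividing by zero. Restated as a predicate (the helper name is ours, not cuDF's):

// A group's variance/std is defined only when it has more than `ddof` rows;
// with ddof = 1 (sample variance) a single-row group therefore comes back null.
inline bool group_result_is_valid(int group_size, int ddof) {
  return group_size > 0 && group_size - ddof > 0;
}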
41ea88ca13aa786ea7aa2216ca448b9bb35875e5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdlib.h> #include <stdio.h> hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int main() { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 13, 22, 34, 49, 51 }; int c[arraySize] = { 0 }; // Add vectors in parallel. hipError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {13,22,34,49,51} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } system("pause"); return 0; } // Helper function for using CUDA to add vectors in parallel. hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); return cudaStatus; }
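addWithCuda above launches addKernel with a single block of `size` threads, which is fine for arraySize = 5 but stops working once size exceeds the per-block thread limit. A size-general variant would build the index from both IDs and bounds-check; the sketch below is illustrative and not part of the original example.

// Sketch: size-general element-wise add (kernel name is illustrative).
__global__ void addKernelN(int *c, const int *a, const int *b, int size)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < size) {
        c[i] = a[i] + b[i];
    }
}
// launch with a grid sized to cover `size` elements:
//   const int threadsPerBlock = 256;
//   const int blocksPerGrid = (size + threadsPerBlock - 1) / threadsPerBlock;
//   hipLaunchKernelGGL(addKernelN, dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
//                      dev_c, dev_a, dev_b, size);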
41ea88ca13aa786ea7aa2216ca448b9bb35875e5.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdlib.h> #include <stdio.h> cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int main() { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 13, 22, 34, 49, 51 }; int c[arraySize] = { 0 }; // Add vectors in parallel. cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {13,22,34,49,51} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } system("pause"); return 0; } // Helper function for using CUDA to add vectors in parallel. cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. addKernel<<<1, size>>>(dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); return cudaStatus; }
c4e0e914d1b4baff7465fce36246527a1fec3e29.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifndef PADDLE_WITH_HIP // To-do(qili93): fix this after issue resolved // https://github.com/ROCmSoftwarePlatform/rocPRIM/issues/202 #include "paddle/phi/kernels/multinomial_kernel.h" #ifdef __NVCC__ #include "hipcub/hipcub.hpp" #endif #ifdef __HIPCC__ #include <hipcub/hipcub.hpp> namespace cub = hipcub; #endif #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/common/data_type.h" #include "paddle/phi/common/scalar.h" #include "paddle/phi/core/ddim.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/arg_min_max_kernel.h" #include "paddle/phi/kernels/empty_kernel.h" #include "paddle/phi/kernels/funcs/distribution_helper.h" #include "paddle/phi/kernels/funcs/eigen/common.h" #include "paddle/phi/kernels/funcs/for_range.h" #include "paddle/phi/kernels/funcs/inclusive_scan.h" #include "paddle/phi/kernels/funcs/multinomial_functor.h" #include "paddle/phi/kernels/top_k_kernel.h" namespace phi { template <typename T, typename MT> __global__ void NormalizeProbability(MT* norm_probs, const T* in_data, MT* sum_rows, int64_t num_distributions, int64_t num_categories) { int id = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if (id < num_distributions * num_categories) { PADDLE_ENFORCE( static_cast<MT>(in_data[id]) >= 0.0, "The input of multinomial distribution should be >= 0, but got %f.", static_cast<MT>(in_data[id])); int64_t row_id = id / num_categories; PADDLE_ENFORCE(sum_rows[row_id] > 0.0, "The sum of one multinomial distribution probability should " "be > 0, but got %f.", sum_rows[row_id]); norm_probs[id] = static_cast<MT>(in_data[id]) / sum_rows[row_id]; } } template <typename T> __device__ int binarySearchFunctor(T* cumulative_probs_data, T* norm_probs_data, int num_categories, T rng_number) { int left = 0; int right = num_categories; while (right - left > 0) { int mid = left + (right - left) / 2; T temp_prob = cumulative_probs_data[mid]; if (temp_prob < rng_number) { left = mid + 1; } else { right = mid; } } if (left == num_categories) { left = num_categories - 1; } while (left >= 1 && norm_probs_data[left] == 0) left--; return left; } template <typename T> __global__ void sampleMultinomialWithReplacement( const int64_t num_samples, int64_t* out_data, const int64_t num_distributions, const int64_t num_categories, T* cumulative_probs_data, T* norm_probs_data, uint64_t seed, uint64_t offset) { // use binary search to get the selected category sample id. // let cumulative_probs_data[id-1] < rng_number < cumulative_probs_data[id]. 
size_t idx = gridDim.x * blockDim.x * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x; hiprandStatePhilox4_32_10_t state; hiprand_init(seed, idx, offset, &state); int sample = blockIdx.x * blockDim.x + threadIdx.x; for (int dist = blockIdx.y; dist < num_distributions; dist += gridDim.y) { if (sample < num_samples) { T rng_number = static_cast<T>(hiprand_uniform4(&state).x); // Find the bucket that a uniform random number lies in int selected_category = binarySearchFunctor<T>(cumulative_probs_data + dist * num_categories, norm_probs_data + dist * num_categories, num_categories, rng_number); out_data[sample + dist * num_samples] = selected_category; } } } template <typename T, typename Context> void MultinomialKernel(const Context& dev_ctx, const DenseTensor& x, const Scalar& num_samples, bool replacement, DenseTensor* out) { using MT = typename kps::details::MPTypeTrait<T>::Type; auto int_num_samples = num_samples.to<int>(); auto* in_data = x.data<T>(); int64_t* out_data = dev_ctx.template Alloc<int64_t>(out); auto in_dims = x.dims(); int64_t dim_size = in_dims.size(); const int64_t num_categories = in_dims[dim_size - 1]; const int64_t num_distributions = dim_size > 1 ? in_dims[dim_size - 2] : 1; // If replacement is False, it's not a replaceable sample. Every category // can be used only once. if (!replacement) { int64_t in_data_numel = x.numel(); int64_t out_data_numel = out->numel(); phi::DenseTensor cpu_tensor; phi::Copy<Context>(dev_ctx, x, phi::CPUPlace(), false, &cpu_tensor); T* cpu_in_data = cpu_tensor.data<T>(); for (size_t i = 0; i < num_distributions; ++i) { int zero_num = 0; for (size_t j = 0; j < num_categories; ++j) { T weight = cpu_in_data[i * num_categories + j]; PADDLE_ENFORCE_GE( static_cast<MT>(weight), 0, errors::InvalidArgument( "Each element of multinomial'input must >= 0, but got %f.", static_cast<MT>(weight))); if (weight == static_cast<T>(0)) { zero_num++; } } int valid_samples = num_categories - zero_num; PADDLE_ENFORCE_LE( int_num_samples, valid_samples, errors::InvalidArgument("When replacement=False, 'num_samples' " "must less than or eaqual to the number of " "positive item of input")); } // Refer to [gumbel softmax algorithm] DenseTensor rand = EmptyLike<T, Context>(dev_ctx, x); T* rand_data = rand.data<T>(); funcs::uniform_distribution<MT> dist; funcs::exponential_transform<MT> trans(1.0); funcs::distribution_and_transform<T>(dev_ctx, &rand, dist, trans); funcs::ForRange<Context> for_range(dev_ctx, x.numel()); for_range([rand_data, in_data] __device__(size_t idx) { rand_data[idx] = in_data[idx] / rand_data[idx]; }); if (int_num_samples == 1) { ArgMaxKernel<T, Context>( dev_ctx, rand, -1, true, false, 3 /*proto::VarType::INT64*/, out); } else { std::vector<int64_t> out_dim_vec = vectorize<int64_t>(out->dims()); DenseTensor value = Empty<T, Context>(dev_ctx, IntArray(out_dim_vec)); TopkKernel<T, Context>( dev_ctx, rand, num_samples, -1, true, true, &value, out); } return; } // Sum of input may not be 1. To get probability in range [0, 1], calculate // sum of each row of input, and then use the sum to normalize the input. 
// sum_row_data: sum of each row DenseTensor sum_rows_tensor; sum_rows_tensor.Resize({num_distributions}); auto* sum_rows_data = dev_ctx.template Alloc<MT>(&sum_rows_tensor); auto& place = *dev_ctx.eigen_device(); if (num_distributions == 1) { auto eigen_input = EigenVector<T>::Flatten(x); auto eigen_sum_rows = EigenVector<MT>::Flatten(sum_rows_tensor); eigen_sum_rows.device(place) = eigen_input.sum(Eigen::DSizes<int, 1>(1)) .template cast<MT>() .eval() .template cast<MT>() .reshape(Eigen::DSizes<int, 1>(sum_rows_tensor.dims()[0])) .template cast<MT>(); } else { auto eigen_input = EigenMatrix<T>::From(x); auto eigen_sum_rows = EigenVector<MT>::Flatten(sum_rows_tensor); eigen_sum_rows.device(place) = eigen_input.sum(Eigen::DSizes<int, 1>(1)).template cast<MT>(); } // Normalize row of each distribution to get the probability in range [0, // 1]. // norm_probs_data: probability of the distribution DenseTensor norm_probs_tensor; norm_probs_tensor.Resize({num_distributions, num_categories}); auto* norm_probs_data = dev_ctx.template Alloc<MT>(&norm_probs_tensor); // number of threads in a block is min(num_categories, 512) int block_size = num_categories < 512 ? num_categories : 512; dim3 block_norm(block_size); dim3 grid_norm((num_distributions * num_categories - 1) / block_norm.x + 1); hipLaunchKernelGGL(( NormalizeProbability<T, MT>) , dim3(grid_norm), dim3(block_norm), 0, dev_ctx.stream(), norm_probs_data, in_data, sum_rows_data, num_distributions, num_categories); // Get cumulative probability of each distribution. It's the same function // of ``cumsum`` op. DenseTensor cumulative_probs_tensor; cumulative_probs_tensor.Resize({num_distributions, num_categories}); auto* cumulative_probs_data = dev_ctx.template Alloc<MT>(&cumulative_probs_tensor); // 'phi::funcs::InclusiveScan' has higher accuracy than // 'thrust::inclusive_scan' funcs::InclusiveScan<MT, std::plus<MT>>( /*in*/ norm_probs_data, /*out*/ cumulative_probs_data, /*outer_dim*/ static_cast<size_t>(num_distributions), /*mid_dim*/ static_cast<size_t>(num_categories), /*inner_dim*/ static_cast<size_t>(1), /*init*/ static_cast<T>(0), std::plus<MT>(), /*reverse=*/false, dev_ctx); // Sample the multinomial distributions. dim3 block(128); int64_t device_id = dev_ctx.GetPlace().GetDeviceId(); const auto& prop = phi::backends::gpu::GetDeviceProperties(device_id); int grid_y = std::min<int64_t>(num_distributions, prop.maxGridSize[1]); dim3 grid((int_num_samples - 1) / block.x + 1, grid_y); auto gen_cuda = dev_ctx.GetGenerator(); size_t curand4_loop_times = (num_distributions + 4 * grid_y - 1) / (4 * grid_y); // 'increment' shoulde be multiple of 4 uint64_t increment = curand4_loop_times * 4; auto seed_offset = gen_cuda->IncrementOffset(increment); hipLaunchKernelGGL(( sampleMultinomialWithReplacement<MT>) , dim3(grid), dim3(block), 0, dev_ctx.stream(), int_num_samples, out_data, num_distributions, num_categories, cumulative_probs_data, norm_probs_data, seed_offset.first, seed_offset.second); } } // namespace phi PD_REGISTER_KERNEL(multinomial, // cuda_only GPU, ALL_LAYOUT, phi::MultinomialKernel, phi::dtype::float16, phi::dtype::bfloat16, float, double) { kernel->OutputAt(0).SetDataType(phi::DataType::INT64); } #endif
c4e0e914d1b4baff7465fce36246527a1fec3e29.cu
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifndef PADDLE_WITH_HIP // To-do(qili93): fix this after issue resolved // https://github.com/ROCmSoftwarePlatform/rocPRIM/issues/202 #include "paddle/phi/kernels/multinomial_kernel.h" #ifdef __NVCC__ #include "cub/cub.cuh" #endif #ifdef __HIPCC__ #include <hipcub/hipcub.hpp> namespace cub = hipcub; #endif #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/common/data_type.h" #include "paddle/phi/common/scalar.h" #include "paddle/phi/core/ddim.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/arg_min_max_kernel.h" #include "paddle/phi/kernels/empty_kernel.h" #include "paddle/phi/kernels/funcs/distribution_helper.h" #include "paddle/phi/kernels/funcs/eigen/common.h" #include "paddle/phi/kernels/funcs/for_range.h" #include "paddle/phi/kernels/funcs/inclusive_scan.h" #include "paddle/phi/kernels/funcs/multinomial_functor.h" #include "paddle/phi/kernels/top_k_kernel.h" namespace phi { template <typename T, typename MT> __global__ void NormalizeProbability(MT* norm_probs, const T* in_data, MT* sum_rows, int64_t num_distributions, int64_t num_categories) { int id = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if (id < num_distributions * num_categories) { PADDLE_ENFORCE( static_cast<MT>(in_data[id]) >= 0.0, "The input of multinomial distribution should be >= 0, but got %f.", static_cast<MT>(in_data[id])); int64_t row_id = id / num_categories; PADDLE_ENFORCE(sum_rows[row_id] > 0.0, "The sum of one multinomial distribution probability should " "be > 0, but got %f.", sum_rows[row_id]); norm_probs[id] = static_cast<MT>(in_data[id]) / sum_rows[row_id]; } } template <typename T> __device__ int binarySearchFunctor(T* cumulative_probs_data, T* norm_probs_data, int num_categories, T rng_number) { int left = 0; int right = num_categories; while (right - left > 0) { int mid = left + (right - left) / 2; T temp_prob = cumulative_probs_data[mid]; if (temp_prob < rng_number) { left = mid + 1; } else { right = mid; } } if (left == num_categories) { left = num_categories - 1; } while (left >= 1 && norm_probs_data[left] == 0) left--; return left; } template <typename T> __global__ void sampleMultinomialWithReplacement( const int64_t num_samples, int64_t* out_data, const int64_t num_distributions, const int64_t num_categories, T* cumulative_probs_data, T* norm_probs_data, uint64_t seed, uint64_t offset) { // use binary search to get the selected category sample id. // let cumulative_probs_data[id-1] < rng_number < cumulative_probs_data[id]. 
size_t idx = gridDim.x * blockDim.x * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x; curandStatePhilox4_32_10_t state; curand_init(seed, idx, offset, &state); int sample = blockIdx.x * blockDim.x + threadIdx.x; for (int dist = blockIdx.y; dist < num_distributions; dist += gridDim.y) { if (sample < num_samples) { T rng_number = static_cast<T>(curand_uniform4(&state).x); // Find the bucket that a uniform random number lies in int selected_category = binarySearchFunctor<T>(cumulative_probs_data + dist * num_categories, norm_probs_data + dist * num_categories, num_categories, rng_number); out_data[sample + dist * num_samples] = selected_category; } } } template <typename T, typename Context> void MultinomialKernel(const Context& dev_ctx, const DenseTensor& x, const Scalar& num_samples, bool replacement, DenseTensor* out) { using MT = typename kps::details::MPTypeTrait<T>::Type; auto int_num_samples = num_samples.to<int>(); auto* in_data = x.data<T>(); int64_t* out_data = dev_ctx.template Alloc<int64_t>(out); auto in_dims = x.dims(); int64_t dim_size = in_dims.size(); const int64_t num_categories = in_dims[dim_size - 1]; const int64_t num_distributions = dim_size > 1 ? in_dims[dim_size - 2] : 1; // If replacement is False, it's not a replaceable sample. Every category // can be used only once. if (!replacement) { int64_t in_data_numel = x.numel(); int64_t out_data_numel = out->numel(); phi::DenseTensor cpu_tensor; phi::Copy<Context>(dev_ctx, x, phi::CPUPlace(), false, &cpu_tensor); T* cpu_in_data = cpu_tensor.data<T>(); for (size_t i = 0; i < num_distributions; ++i) { int zero_num = 0; for (size_t j = 0; j < num_categories; ++j) { T weight = cpu_in_data[i * num_categories + j]; PADDLE_ENFORCE_GE( static_cast<MT>(weight), 0, errors::InvalidArgument( "Each element of multinomial'input must >= 0, but got %f.", static_cast<MT>(weight))); if (weight == static_cast<T>(0)) { zero_num++; } } int valid_samples = num_categories - zero_num; PADDLE_ENFORCE_LE( int_num_samples, valid_samples, errors::InvalidArgument("When replacement=False, 'num_samples' " "must less than or eaqual to the number of " "positive item of input")); } // Refer to [gumbel softmax algorithm] DenseTensor rand = EmptyLike<T, Context>(dev_ctx, x); T* rand_data = rand.data<T>(); funcs::uniform_distribution<MT> dist; funcs::exponential_transform<MT> trans(1.0); funcs::distribution_and_transform<T>(dev_ctx, &rand, dist, trans); funcs::ForRange<Context> for_range(dev_ctx, x.numel()); for_range([rand_data, in_data] __device__(size_t idx) { rand_data[idx] = in_data[idx] / rand_data[idx]; }); if (int_num_samples == 1) { ArgMaxKernel<T, Context>( dev_ctx, rand, -1, true, false, 3 /*proto::VarType::INT64*/, out); } else { std::vector<int64_t> out_dim_vec = vectorize<int64_t>(out->dims()); DenseTensor value = Empty<T, Context>(dev_ctx, IntArray(out_dim_vec)); TopkKernel<T, Context>( dev_ctx, rand, num_samples, -1, true, true, &value, out); } return; } // Sum of input may not be 1. To get probability in range [0, 1], calculate // sum of each row of input, and then use the sum to normalize the input. 
// sum_row_data: sum of each row DenseTensor sum_rows_tensor; sum_rows_tensor.Resize({num_distributions}); auto* sum_rows_data = dev_ctx.template Alloc<MT>(&sum_rows_tensor); auto& place = *dev_ctx.eigen_device(); if (num_distributions == 1) { auto eigen_input = EigenVector<T>::Flatten(x); auto eigen_sum_rows = EigenVector<MT>::Flatten(sum_rows_tensor); eigen_sum_rows.device(place) = eigen_input.sum(Eigen::DSizes<int, 1>(1)) .template cast<MT>() .eval() .template cast<MT>() .reshape(Eigen::DSizes<int, 1>(sum_rows_tensor.dims()[0])) .template cast<MT>(); } else { auto eigen_input = EigenMatrix<T>::From(x); auto eigen_sum_rows = EigenVector<MT>::Flatten(sum_rows_tensor); eigen_sum_rows.device(place) = eigen_input.sum(Eigen::DSizes<int, 1>(1)).template cast<MT>(); } // Normalize row of each distribution to get the probability in range [0, // 1]. // norm_probs_data: probability of the distribution DenseTensor norm_probs_tensor; norm_probs_tensor.Resize({num_distributions, num_categories}); auto* norm_probs_data = dev_ctx.template Alloc<MT>(&norm_probs_tensor); // number of threads in a block is min(num_categories, 512) int block_size = num_categories < 512 ? num_categories : 512; dim3 block_norm(block_size); dim3 grid_norm((num_distributions * num_categories - 1) / block_norm.x + 1); NormalizeProbability<T, MT> <<<grid_norm, block_norm, 0, dev_ctx.stream()>>>(norm_probs_data, in_data, sum_rows_data, num_distributions, num_categories); // Get cumulative probability of each distribution. It's the same function // of ``cumsum`` op. DenseTensor cumulative_probs_tensor; cumulative_probs_tensor.Resize({num_distributions, num_categories}); auto* cumulative_probs_data = dev_ctx.template Alloc<MT>(&cumulative_probs_tensor); // 'phi::funcs::InclusiveScan' has higher accuracy than // 'thrust::inclusive_scan' funcs::InclusiveScan<MT, std::plus<MT>>( /*in*/ norm_probs_data, /*out*/ cumulative_probs_data, /*outer_dim*/ static_cast<size_t>(num_distributions), /*mid_dim*/ static_cast<size_t>(num_categories), /*inner_dim*/ static_cast<size_t>(1), /*init*/ static_cast<T>(0), std::plus<MT>(), /*reverse=*/false, dev_ctx); // Sample the multinomial distributions. dim3 block(128); int64_t device_id = dev_ctx.GetPlace().GetDeviceId(); const auto& prop = phi::backends::gpu::GetDeviceProperties(device_id); int grid_y = std::min<int64_t>(num_distributions, prop.maxGridSize[1]); dim3 grid((int_num_samples - 1) / block.x + 1, grid_y); auto gen_cuda = dev_ctx.GetGenerator(); size_t curand4_loop_times = (num_distributions + 4 * grid_y - 1) / (4 * grid_y); // 'increment' shoulde be multiple of 4 uint64_t increment = curand4_loop_times * 4; auto seed_offset = gen_cuda->IncrementOffset(increment); sampleMultinomialWithReplacement<MT> <<<grid, block, 0, dev_ctx.stream()>>>(int_num_samples, out_data, num_distributions, num_categories, cumulative_probs_data, norm_probs_data, seed_offset.first, seed_offset.second); } } // namespace phi PD_REGISTER_KERNEL(multinomial, // cuda_only GPU, ALL_LAYOUT, phi::MultinomialKernel, phi::dtype::float16, phi::dtype::bfloat16, float, double) { kernel->OutputAt(0).SetDataType(phi::DataType::INT64); } #endif
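The replacement path of both kernels above is inverse-CDF sampling: normalize each row, build its cumulative sum, draw u from a Philox counter-based generator, then binary-search for the first bucket whose cumulative probability reaches u. Below is a stripped-down sketch of that idea without the phi:: scaffolding; lowerBound and sampleFromCdf are hypothetical names, not Paddle APIs, and the cumulative distribution is assumed to be precomputed on the device.

#include <stdint.h>
#include <curand_kernel.h>

// Smallest index i with cdf[i] >= u (same search as binarySearchFunctor).
__device__ int lowerBound(const float* cdf, int n, float u)
{
    int left = 0, right = n;
    while (right - left > 0) {
        int mid = left + (right - left) / 2;
        if (cdf[mid] < u) {
            left = mid + 1;
        } else {
            right = mid;
        }
    }
    return left < n ? left : n - 1;
}

// One sample per thread from a single categorical distribution given its CDF.
__global__ void sampleFromCdf(const float* cdf, int num_categories,
                              int64_t* out, int num_samples,
                              uint64_t seed, uint64_t offset)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= num_samples) return;
    curandStatePhilox4_32_10_t state;
    curand_init(seed, /*subsequence=*/i, offset, &state);
    float u = curand_uniform(&state);  // uniform in (0, 1]
    out[i] = lowerBound(cdf, num_categories, u);
}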
gather_kernel_util.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/user/kernels/gather_kernel_util.h" #include "oneflow/core/kernel/kernel.h" #include "oneflow/core/ep/cuda/cuda_stream.h" #include "oneflow/core/common/nd_index_offset_helper.h" #include <assert.h> #if TORCH_HIP_VERSION >= 11000 #include <cuda_bf16.h> #endif // TORCH_HIP_VERSION >= 11000 namespace oneflow { namespace { template<typename T, typename K, typename IDX> __global__ void GatherForwardGpu(const IDX elem_cnt, NdIndexOffsetHelper<IDX, 3> in_helper, NdIndexOffsetHelper<IDX, 3> out_helper, const K* indices, const T* in, const IDX gather_dim_size, T* out, const IDX offset) { IDX index[3]; CUDA_1D_KERNEL_LOOP_T(IDX, i, elem_cnt) { out_helper.OffsetToNdIndex(i, index); index[1] = indices[index[1]] - offset; T v{}; if (index[1] >= 0 && index[1] < gather_dim_size) { v = in[in_helper.NdIndexToOffset(index)]; } out[i] = v; } } bool IsSafeUseIndex32(int64_t outer_dim_size, int64_t gather_dim_size, int64_t inner_dim_size, int64_t num_indices) { const int64_t in_elem_cnt = outer_dim_size * gather_dim_size * inner_dim_size; const int64_t out_elem_cnt = outer_dim_size * num_indices * inner_dim_size; return ::max(out_elem_cnt, in_elem_cnt) < GetMaxVal<int32_t>() / 2; } template<typename T, typename K> void DispatchIndexSize(ep::Stream* stream, int64_t outer_dim_size, int64_t gather_dim_size, int64_t inner_dim_size, int64_t num_indices, int64_t offset, const K* indices, const T* in, T* out) { const int64_t out_elem_cnt = outer_dim_size * num_indices * inner_dim_size; if (IsSafeUseIndex32(outer_dim_size, gather_dim_size, inner_dim_size, num_indices)) { NdIndexOffsetHelper<int32_t, 3> in_helper(outer_dim_size, gather_dim_size, inner_dim_size); NdIndexOffsetHelper<int32_t, 3> out_helper(outer_dim_size, num_indices, inner_dim_size); hipLaunchKernelGGL(( GatherForwardGpu<T, K, int32_t>), dim3(BlocksNum4ThreadsNum(out_elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0, stream->As<ep::CudaStream>()->cuda_stream(), out_elem_cnt, in_helper, out_helper, indices, in, gather_dim_size, out, offset); } else { NdIndexOffsetHelper<int64_t, 3> in_helper(outer_dim_size, gather_dim_size, inner_dim_size); NdIndexOffsetHelper<int64_t, 3> out_helper(outer_dim_size, num_indices, inner_dim_size); hipLaunchKernelGGL(( GatherForwardGpu<T, K, int64_t>), dim3(BlocksNum4ThreadsNum(out_elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0, stream->As<ep::CudaStream>()->cuda_stream(), out_elem_cnt, in_helper, out_helper, indices, in, gather_dim_size, out, offset); } } template<typename K, typename T> bool TryDispatchMovementType(ep::Stream* stream, int64_t outer_dim_size, int64_t gather_dim_size, int64_t inner_dim_size, int64_t num_indices, int64_t offset, const K* indices, const void* in, void* out) { if (reinterpret_cast<uintptr_t>(in) % sizeof(T) == 0 && reinterpret_cast<uintptr_t>(out) % sizeof(T) == 0 && inner_dim_size % sizeof(T) == 0) { DispatchIndexSize<T, 
K>(stream, outer_dim_size, gather_dim_size, inner_dim_size / sizeof(T), num_indices, offset, indices, static_cast<const T*>(in), static_cast<T*>(out)); return true; } else { return false; } } template<typename K> void DispatchMovementSize(ep::Stream* stream, int64_t outer_dim_size, int64_t gather_dim_size, int64_t inner_dim_size, int64_t num_indices, int64_t offset, const K* indices, const void* in, void* out) { using Func = bool (*)(ep::Stream * stream, int64_t outer_dim_size, int64_t gather_dim_size, int64_t inner_dim_size, int64_t num_indices, int64_t offset, const K* indices, const void* in, void* out); Func funcs[] = { TryDispatchMovementType<K, ulonglong2>, // 16B TryDispatchMovementType<K, uint64_t>, // 8B TryDispatchMovementType<K, uint32_t>, // 4B TryDispatchMovementType<K, uint16_t>, // 2B TryDispatchMovementType<K, uint8_t>, // 1B }; for (size_t i = 0; i < sizeof(funcs) / sizeof(funcs[0]); ++i) { if (funcs[i](stream, outer_dim_size, gather_dim_size, inner_dim_size, num_indices, offset, indices, in, out)) { break; } } } } // namespace template<typename T, typename K> struct GatherKernelUtilImpl<DeviceType::kCUDA, T, K> final { static void Forward(ep::Stream* stream, const K* indices, int64_t num_indices, const T* in, const Shape& flat_in_shape, T* out, const int64_t offset) { DispatchMovementSize(stream, flat_in_shape.At(0), flat_in_shape.At(1), flat_in_shape.At(2) * sizeof(T), num_indices, offset, indices, in, out); } }; #define INITIATE_GATHER_KERNEL_UTIL_CUDA_IMPL(in_type_pair, index_type_pair) \ template struct GatherKernelUtilImpl<DeviceType::kCUDA, OF_PP_PAIR_FIRST(in_type_pair), \ OF_PP_PAIR_FIRST(index_type_pair)>; OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INITIATE_GATHER_KERNEL_UTIL_CUDA_IMPL, GATHER_DATA_TYPE_SEQ HALF_DATA_TYPE_SEQ, GATHER_INDEX_TYPE_SEQ); #if TORCH_HIP_VERSION >= 11000 OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INITIATE_GATHER_KERNEL_UTIL_CUDA_IMPL, OF_PP_MAKE_TUPLE_SEQ(nv_bfloat16, DataType::kBFloat16), GATHER_INDEX_TYPE_SEQ); #endif #undef INITIATE_GATHER_KERNEL_UTIL_CUDA_IMPL } // namespace oneflow
gather_kernel_util.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/user/kernels/gather_kernel_util.h" #include "oneflow/core/kernel/kernel.h" #include "oneflow/core/ep/cuda/cuda_stream.h" #include "oneflow/core/common/nd_index_offset_helper.h" #include <assert.h> #if CUDA_VERSION >= 11000 #include <cuda_bf16.h> #endif // CUDA_VERSION >= 11000 namespace oneflow { namespace { template<typename T, typename K, typename IDX> __global__ void GatherForwardGpu(const IDX elem_cnt, NdIndexOffsetHelper<IDX, 3> in_helper, NdIndexOffsetHelper<IDX, 3> out_helper, const K* indices, const T* in, const IDX gather_dim_size, T* out, const IDX offset) { IDX index[3]; CUDA_1D_KERNEL_LOOP_T(IDX, i, elem_cnt) { out_helper.OffsetToNdIndex(i, index); index[1] = indices[index[1]] - offset; T v{}; if (index[1] >= 0 && index[1] < gather_dim_size) { v = in[in_helper.NdIndexToOffset(index)]; } out[i] = v; } } bool IsSafeUseIndex32(int64_t outer_dim_size, int64_t gather_dim_size, int64_t inner_dim_size, int64_t num_indices) { const int64_t in_elem_cnt = outer_dim_size * gather_dim_size * inner_dim_size; const int64_t out_elem_cnt = outer_dim_size * num_indices * inner_dim_size; return std::max(out_elem_cnt, in_elem_cnt) < GetMaxVal<int32_t>() / 2; } template<typename T, typename K> void DispatchIndexSize(ep::Stream* stream, int64_t outer_dim_size, int64_t gather_dim_size, int64_t inner_dim_size, int64_t num_indices, int64_t offset, const K* indices, const T* in, T* out) { const int64_t out_elem_cnt = outer_dim_size * num_indices * inner_dim_size; if (IsSafeUseIndex32(outer_dim_size, gather_dim_size, inner_dim_size, num_indices)) { NdIndexOffsetHelper<int32_t, 3> in_helper(outer_dim_size, gather_dim_size, inner_dim_size); NdIndexOffsetHelper<int32_t, 3> out_helper(outer_dim_size, num_indices, inner_dim_size); GatherForwardGpu<T, K, int32_t><<<BlocksNum4ThreadsNum(out_elem_cnt), kCudaThreadsNumPerBlock, 0, stream->As<ep::CudaStream>()->cuda_stream()>>>( out_elem_cnt, in_helper, out_helper, indices, in, gather_dim_size, out, offset); } else { NdIndexOffsetHelper<int64_t, 3> in_helper(outer_dim_size, gather_dim_size, inner_dim_size); NdIndexOffsetHelper<int64_t, 3> out_helper(outer_dim_size, num_indices, inner_dim_size); GatherForwardGpu<T, K, int64_t><<<BlocksNum4ThreadsNum(out_elem_cnt), kCudaThreadsNumPerBlock, 0, stream->As<ep::CudaStream>()->cuda_stream()>>>( out_elem_cnt, in_helper, out_helper, indices, in, gather_dim_size, out, offset); } } template<typename K, typename T> bool TryDispatchMovementType(ep::Stream* stream, int64_t outer_dim_size, int64_t gather_dim_size, int64_t inner_dim_size, int64_t num_indices, int64_t offset, const K* indices, const void* in, void* out) { if (reinterpret_cast<uintptr_t>(in) % sizeof(T) == 0 && reinterpret_cast<uintptr_t>(out) % sizeof(T) == 0 && inner_dim_size % sizeof(T) == 0) { DispatchIndexSize<T, K>(stream, outer_dim_size, gather_dim_size, inner_dim_size / sizeof(T), num_indices, offset, indices, static_cast<const T*>(in), static_cast<T*>(out)); return 
true; } else { return false; } } template<typename K> void DispatchMovementSize(ep::Stream* stream, int64_t outer_dim_size, int64_t gather_dim_size, int64_t inner_dim_size, int64_t num_indices, int64_t offset, const K* indices, const void* in, void* out) { using Func = bool (*)(ep::Stream * stream, int64_t outer_dim_size, int64_t gather_dim_size, int64_t inner_dim_size, int64_t num_indices, int64_t offset, const K* indices, const void* in, void* out); Func funcs[] = { TryDispatchMovementType<K, ulonglong2>, // 16B TryDispatchMovementType<K, uint64_t>, // 8B TryDispatchMovementType<K, uint32_t>, // 4B TryDispatchMovementType<K, uint16_t>, // 2B TryDispatchMovementType<K, uint8_t>, // 1B }; for (size_t i = 0; i < sizeof(funcs) / sizeof(funcs[0]); ++i) { if (funcs[i](stream, outer_dim_size, gather_dim_size, inner_dim_size, num_indices, offset, indices, in, out)) { break; } } } } // namespace template<typename T, typename K> struct GatherKernelUtilImpl<DeviceType::kCUDA, T, K> final { static void Forward(ep::Stream* stream, const K* indices, int64_t num_indices, const T* in, const Shape& flat_in_shape, T* out, const int64_t offset) { DispatchMovementSize(stream, flat_in_shape.At(0), flat_in_shape.At(1), flat_in_shape.At(2) * sizeof(T), num_indices, offset, indices, in, out); } }; #define INITIATE_GATHER_KERNEL_UTIL_CUDA_IMPL(in_type_pair, index_type_pair) \ template struct GatherKernelUtilImpl<DeviceType::kCUDA, OF_PP_PAIR_FIRST(in_type_pair), \ OF_PP_PAIR_FIRST(index_type_pair)>; OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INITIATE_GATHER_KERNEL_UTIL_CUDA_IMPL, GATHER_DATA_TYPE_SEQ HALF_DATA_TYPE_SEQ, GATHER_INDEX_TYPE_SEQ); #if CUDA_VERSION >= 11000 OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INITIATE_GATHER_KERNEL_UTIL_CUDA_IMPL, OF_PP_MAKE_TUPLE_SEQ(nv_bfloat16, DataType::kBFloat16), GATHER_INDEX_TYPE_SEQ); #endif #undef INITIATE_GATHER_KERNEL_UTIL_CUDA_IMPL } // namespace oneflow
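A hedged sketch of the index arithmetic GatherForwardGpu performs, written without NdIndexOffsetHelper or the movement-type dispatch: for an (outer, gather, inner) layout, out[o][s][i] = in[o][indices[s] - offset][i], zero-filled when the shifted index leaves the gather axis. GatherNaive is an illustrative name, not part of OneFlow.

#include <stdint.h>

template<typename T, typename K>
__global__ void GatherNaive(const T* in, const K* indices, T* out,
                            int64_t outer, int64_t gather, int64_t inner,
                            int64_t num_indices, int64_t offset)
{
    const int64_t n = outer * num_indices * inner;
    for (int64_t i = blockIdx.x * (int64_t)blockDim.x + threadIdx.x; i < n;
         i += (int64_t)gridDim.x * blockDim.x) {
        // Decompose the flat output offset into (outer, index, inner) coordinates.
        const int64_t inner_idx = i % inner;
        const int64_t idx_idx = (i / inner) % num_indices;
        const int64_t outer_idx = i / (inner * num_indices);
        const int64_t g = static_cast<int64_t>(indices[idx_idx]) - offset;
        out[i] = (g >= 0 && g < gather)
                         ? in[(outer_idx * gather + g) * inner + inner_idx]
                         : T{};
    }
}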
9696cedb52fd3908328cc426bbe8abd7d2d64e16.hip
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"

using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
        int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
        int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t,
        LayoutDst, int32_t, cutlass::convolution::ConvType::kConvolution,
        cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, ThreadBlockShape,
        WarpShape, InstructionShape, EpilogueOp,
        cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
                cutlass::convolution::ConvType::kConvolution>,
        2, 16, 16, false,
        cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const typename Convolution::ElementSrc* d_src,
        const typename Convolution::ElementFilter* d_filter,
        const typename Convolution::ElementBias* d_bias,
        const typename Convolution::ElementDst* d_z,
        typename Convolution::ElementDst* d_dst, int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        hipStream_t stream);
#pragma GCC diagnostic pop
#endif
9696cedb52fd3908328cc426bbe8abd7d2d64e16.cu
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"

using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
        int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
        int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t,
        LayoutDst, int32_t, cutlass::convolution::ConvType::kConvolution,
        cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, ThreadBlockShape,
        WarpShape, InstructionShape, EpilogueOp,
        cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
                cutlass::convolution::ConvType::kConvolution>,
        2, 16, 16, false,
        cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const typename Convolution::ElementSrc* d_src,
        const typename Convolution::ElementFilter* d_filter,
        const typename Convolution::ElementBias* d_bias,
        const typename Convolution::ElementDst* d_z,
        typename Convolution::ElementDst* d_dst, int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
1a6bdd8336f7edba5ee69817d8c40d57b2fd34b1.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2017-2020 ABBYY Production LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --------------------------------------------------------------------------------------------------------------*/ #include <NeoMathEngine/NeoMathEngineDefs.h> #ifdef NEOML_USE_CUDA #include <CudaMathEngine.h> #include <CudaCommon.h> #include <CudaAssert.h> #include <CudaDevice.h> #include <CublasFunctions.h> #include <MathEngineCommon.h> #include <MemoryHandleInternal.h> #include <hip/hip_runtime_api.h> namespace NeoML { void CCudaMathEngine::VectorDotProduct(const CConstFloatHandle& firstHandle, const CConstFloatHandle& secondHandle, int vectorSize, const CFloatHandle& resultHandle) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); ASSERT_CUBLAS( cublas->Sdot( cublasHandle, vectorSize, GetRaw( firstHandle ), 1, GetRaw( secondHandle ), 1, GetRaw( resultHandle ) ) ); } void CCudaMathEngine::VectorMultiplyAndAdd( const CConstFloatHandle& firstHandle, const CConstFloatHandle& secondHandle, const CFloatHandle& resultHandle, int vectorSize, const CConstFloatHandle& multHandle ) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); ASSERT_EXPR( multHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); const float* first = GetRaw( firstHandle ); const float* second = GetRaw( secondHandle ); float* result = GetRaw( resultHandle ); const float* mult = GetRaw( multHandle ); if( result != first ) { ASSERT_CUDA( hipMemcpy( result, first, vectorSize * sizeof( float ), hipMemcpyDeviceToDevice ) ); } ASSERT_CUBLAS( cublas->Saxpy( cublasHandle, vectorSize, mult, second, 1, result, 1 ) ); } void CCudaMathEngine::MultiplyMatrixByTransposedMatrix( const CConstFloatHandle& firstHandle, int firstHeight, int firstWidth, int firstRowSize, const CConstFloatHandle& secondHandle, int secondHeight, int secondRowSize, const CFloatHandle& resultHandle, int resultRowSize, int ) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); ASSERT_CUBLAS( cublas->Sgemm( cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N, secondHeight, firstHeight, firstWidth, cudaConstOne, GetRaw( secondHandle ), secondRowSize, GetRaw( firstHandle ), firstRowSize, cudaConstZero, GetRaw( resultHandle ), resultRowSize ) ); } void CCudaMathEngine::MultiplyMatrixByTransposedMatrix( int batchSize, const CConstFloatHandle& firstHandle, int firstHeight, int firstWidth, const CConstFloatHandle& secondHandle, int secondHeight, const CFloatHandle& resultHandle, int resultBufferSize ) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); 
SetCudaDevice( device->DeviceNumber ); ASSERT_CUBLAS( cublas->SgemmStridedBatched( cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N, secondHeight, firstHeight, firstWidth, cudaConstOne, GetRaw( secondHandle ), firstWidth, firstWidth * secondHeight, GetRaw( firstHandle ), firstWidth, firstHeight * firstWidth, cudaConstZero, GetRaw( resultHandle ), secondHeight, secondHeight * firstHeight, batchSize ) ); } void CCudaMathEngine::MultiplyTransposedMatrixByMatrixAndAdd( const CConstFloatHandle& firstHandle, int firstHeight, int firstWidth, int firstRowSize, const CConstFloatHandle& secondHandle, int secondWidth, int secondRowSize, const CFloatHandle& resultHandle, int resultRowSize, int ) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); ASSERT_CUBLAS( cublas->Sgemm( cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T, secondWidth, firstWidth, firstHeight, cudaConstOne, GetRaw( secondHandle ), secondRowSize, GetRaw( firstHandle ), firstRowSize, cudaConstOne, GetRaw( resultHandle ), resultRowSize ) ); } void CCudaMathEngine::MultiplyTransposedMatrixByMatrix( int batchSize, const CConstFloatHandle& firstHandle, int firstHeight, int firstWidth, const CConstFloatHandle& secondHandle, int secondWidth, const CFloatHandle& resultHandle, int ) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); ASSERT_CUBLAS( cublas->SgemmStridedBatched( cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T, secondWidth, firstWidth, firstHeight, cudaConstOne, GetRaw(secondHandle), secondWidth, firstHeight * secondWidth, GetRaw(firstHandle), firstWidth, firstHeight * firstWidth, cudaConstZero, GetRaw(resultHandle), secondWidth, firstWidth * secondWidth, batchSize ) ); } void CCudaMathEngine::MultiplyMatrixByMatrix( int batchSize, const CConstFloatHandle& firstHandle, int firstHeight, int firstWidth, const CConstFloatHandle& secondHandle, int secondWidth, const CFloatHandle& resultHandle, int ) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); if( batchSize == 1 ) { ASSERT_CUBLAS( cublas->Sgemm( cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, secondWidth, firstHeight, firstWidth, cudaConstOne, GetRaw( secondHandle ), secondWidth, GetRaw( firstHandle ), firstWidth, cudaConstZero, GetRaw( resultHandle ), secondWidth ) ); } else { ASSERT_CUBLAS( cublas->SgemmStridedBatched( cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, secondWidth, firstHeight, firstWidth, cudaConstOne, GetRaw( secondHandle ), secondWidth, firstWidth * secondWidth, GetRaw( firstHandle ), firstWidth, firstHeight * firstWidth, cudaConstZero, GetRaw( resultHandle ), secondWidth, secondWidth * firstHeight, batchSize ) ); } } void CCudaMathEngine::multiplyMatrixByTransposedMatrixAndAdd(const CConstFloatHandle& firstHandle, int firstHeight, int firstWidth, int firstRowSize, const CConstFloatHandle& secondHandle, int secondHeight, int secondRowSize, const CFloatHandle& resultHandle, int resultRowSize) { SetCudaDevice( device->DeviceNumber ); ASSERT_CUBLAS( cublas->Sgemm( cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N, secondHeight, firstHeight, firstWidth, cudaConstOne, GetRaw( secondHandle ), secondRowSize, GetRaw( firstHandle ), firstRowSize, cudaConstOne, GetRaw( 
resultHandle ), resultRowSize ) ); } void CCudaMathEngine::MultiplyMatrixByDiagMatrix(const CConstFloatHandle& firstHandle, int firstHeight, int firstWidth, const CConstFloatHandle& secondHandle, const CFloatHandle& resultHandle, int) { if( firstHeight == 1 ) { VectorEltwiseMultiply( firstHandle, secondHandle, resultHandle, firstWidth ); return; } else if( firstWidth == 1 ) { VectorMultiply( firstHandle, resultHandle, firstHeight, secondHandle ); return; } ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); ASSERT_CUBLAS( cublas->Sdgmm( cublasHandle, HIPBLAS_SIDE_LEFT, firstWidth, firstHeight, GetRaw( firstHandle ), firstWidth, GetRaw( secondHandle ), 1, GetRaw( resultHandle ), firstWidth ) ); } } // namespace NeoML #endif // NEOML_USE_CUDA
1a6bdd8336f7edba5ee69817d8c40d57b2fd34b1.cu
/* Copyright © 2017-2020 ABBYY Production LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --------------------------------------------------------------------------------------------------------------*/ #include <NeoMathEngine/NeoMathEngineDefs.h> #ifdef NEOML_USE_CUDA #include <CudaMathEngine.h> #include <CudaCommon.h> #include <CudaAssert.h> #include <CudaDevice.h> #include <CublasFunctions.h> #include <MathEngineCommon.h> #include <MemoryHandleInternal.h> #include <cuda_runtime_api.h> namespace NeoML { void CCudaMathEngine::VectorDotProduct(const CConstFloatHandle& firstHandle, const CConstFloatHandle& secondHandle, int vectorSize, const CFloatHandle& resultHandle) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); ASSERT_CUBLAS( cublas->Sdot( cublasHandle, vectorSize, GetRaw( firstHandle ), 1, GetRaw( secondHandle ), 1, GetRaw( resultHandle ) ) ); } void CCudaMathEngine::VectorMultiplyAndAdd( const CConstFloatHandle& firstHandle, const CConstFloatHandle& secondHandle, const CFloatHandle& resultHandle, int vectorSize, const CConstFloatHandle& multHandle ) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); ASSERT_EXPR( multHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); const float* first = GetRaw( firstHandle ); const float* second = GetRaw( secondHandle ); float* result = GetRaw( resultHandle ); const float* mult = GetRaw( multHandle ); if( result != first ) { ASSERT_CUDA( cudaMemcpy( result, first, vectorSize * sizeof( float ), cudaMemcpyDeviceToDevice ) ); } ASSERT_CUBLAS( cublas->Saxpy( cublasHandle, vectorSize, mult, second, 1, result, 1 ) ); } void CCudaMathEngine::MultiplyMatrixByTransposedMatrix( const CConstFloatHandle& firstHandle, int firstHeight, int firstWidth, int firstRowSize, const CConstFloatHandle& secondHandle, int secondHeight, int secondRowSize, const CFloatHandle& resultHandle, int resultRowSize, int ) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); ASSERT_CUBLAS( cublas->Sgemm( cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, secondHeight, firstHeight, firstWidth, cudaConstOne, GetRaw( secondHandle ), secondRowSize, GetRaw( firstHandle ), firstRowSize, cudaConstZero, GetRaw( resultHandle ), resultRowSize ) ); } void CCudaMathEngine::MultiplyMatrixByTransposedMatrix( int batchSize, const CConstFloatHandle& firstHandle, int firstHeight, int firstWidth, const CConstFloatHandle& secondHandle, int secondHeight, const CFloatHandle& resultHandle, int resultBufferSize ) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); ASSERT_CUBLAS( 
cublas->SgemmStridedBatched( cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, secondHeight, firstHeight, firstWidth, cudaConstOne, GetRaw( secondHandle ), firstWidth, firstWidth * secondHeight, GetRaw( firstHandle ), firstWidth, firstHeight * firstWidth, cudaConstZero, GetRaw( resultHandle ), secondHeight, secondHeight * firstHeight, batchSize ) ); } void CCudaMathEngine::MultiplyTransposedMatrixByMatrixAndAdd( const CConstFloatHandle& firstHandle, int firstHeight, int firstWidth, int firstRowSize, const CConstFloatHandle& secondHandle, int secondWidth, int secondRowSize, const CFloatHandle& resultHandle, int resultRowSize, int ) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); ASSERT_CUBLAS( cublas->Sgemm( cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, secondWidth, firstWidth, firstHeight, cudaConstOne, GetRaw( secondHandle ), secondRowSize, GetRaw( firstHandle ), firstRowSize, cudaConstOne, GetRaw( resultHandle ), resultRowSize ) ); } void CCudaMathEngine::MultiplyTransposedMatrixByMatrix( int batchSize, const CConstFloatHandle& firstHandle, int firstHeight, int firstWidth, const CConstFloatHandle& secondHandle, int secondWidth, const CFloatHandle& resultHandle, int ) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); ASSERT_CUBLAS( cublas->SgemmStridedBatched( cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, secondWidth, firstWidth, firstHeight, cudaConstOne, GetRaw(secondHandle), secondWidth, firstHeight * secondWidth, GetRaw(firstHandle), firstWidth, firstHeight * firstWidth, cudaConstZero, GetRaw(resultHandle), secondWidth, firstWidth * secondWidth, batchSize ) ); } void CCudaMathEngine::MultiplyMatrixByMatrix( int batchSize, const CConstFloatHandle& firstHandle, int firstHeight, int firstWidth, const CConstFloatHandle& secondHandle, int secondWidth, const CFloatHandle& resultHandle, int ) { ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); if( batchSize == 1 ) { ASSERT_CUBLAS( cublas->Sgemm( cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, secondWidth, firstHeight, firstWidth, cudaConstOne, GetRaw( secondHandle ), secondWidth, GetRaw( firstHandle ), firstWidth, cudaConstZero, GetRaw( resultHandle ), secondWidth ) ); } else { ASSERT_CUBLAS( cublas->SgemmStridedBatched( cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, secondWidth, firstHeight, firstWidth, cudaConstOne, GetRaw( secondHandle ), secondWidth, firstWidth * secondWidth, GetRaw( firstHandle ), firstWidth, firstHeight * firstWidth, cudaConstZero, GetRaw( resultHandle ), secondWidth, secondWidth * firstHeight, batchSize ) ); } } void CCudaMathEngine::multiplyMatrixByTransposedMatrixAndAdd(const CConstFloatHandle& firstHandle, int firstHeight, int firstWidth, int firstRowSize, const CConstFloatHandle& secondHandle, int secondHeight, int secondRowSize, const CFloatHandle& resultHandle, int resultRowSize) { SetCudaDevice( device->DeviceNumber ); ASSERT_CUBLAS( cublas->Sgemm( cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, secondHeight, firstHeight, firstWidth, cudaConstOne, GetRaw( secondHandle ), secondRowSize, GetRaw( firstHandle ), firstRowSize, cudaConstOne, GetRaw( resultHandle ), resultRowSize ) ); } void 
CCudaMathEngine::MultiplyMatrixByDiagMatrix(const CConstFloatHandle& firstHandle, int firstHeight, int firstWidth, const CConstFloatHandle& secondHandle, const CFloatHandle& resultHandle, int) { if( firstHeight == 1 ) { VectorEltwiseMultiply( firstHandle, secondHandle, resultHandle, firstWidth ); return; } else if( firstWidth == 1 ) { VectorMultiply( firstHandle, resultHandle, firstHeight, secondHandle ); return; } ASSERT_EXPR( firstHandle.GetMathEngine() == this ); ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); SetCudaDevice( device->DeviceNumber ); ASSERT_CUBLAS( cublas->Sdgmm( cublasHandle, CUBLAS_SIDE_LEFT, firstWidth, firstHeight, GetRaw( firstHandle ), firstWidth, GetRaw( secondHandle ), 1, GetRaw( resultHandle ), firstWidth ) ); } } // namespace NeoML #endif // NEOML_USE_CUDA
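Every Sgemm call above leans on the standard row-major-over-column-major trick: to compute C = A * B^T with row-major buffers, hand cuBLAS the operands in swapped order with swapped transpose flags. Below is a standalone sketch of that single call against the raw cuBLAS API rather than NeoML's CCublasFunctions wrapper; GemmABt is a hypothetical helper name.

#include <cublas_v2.h>

// C (M x N, row-major) = A (M x K, row-major) * B^T, where B is stored N x K row-major.
// Mirrors the argument pattern used by MultiplyMatrixByTransposedMatrix.
cublasStatus_t GemmABt(cublasHandle_t handle, const float* A, const float* B,
                       float* C, int M, int N, int K)
{
    const float one = 1.0f;
    const float zero = 0.0f;
    // cuBLAS is column-major, so compute C^T = B * A^T; the row-major buffers
    // are reinterpreted as the transposes of themselves.
    return cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N,
                       N, M, K,
                       &one,
                       B, K,   // B: row-major N x K, leading dimension K
                       A, K,   // A: row-major M x K, leading dimension K
                       &zero,
                       C, N);  // C: row-major M x N, leading dimension N
}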
3034225a2aad2313e5c5186e8e05ccb26565c3fa.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
   Mary Barker
   HW 10
   Ray Tracing with constant memory

   to compile and run:
   nvcc Barker10.cu -lm -lGL -lGLU -lglut
   ./a.out
*/
#include <GL/glut.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>

#define INF 2e10f
#define SPHERES 20
#define rnd( x ) (x * rand() / RAND_MAX)
#define MIN(x,y) (x< y) ? x : y
#define xmin -50
#define xmax 50
#define ymin -50
#define ymax 50

struct Sphere
{
    float r, g, b;
    float x, y, z;
    float radius;
};
Sphere s[SPHERES];
__constant__ Sphere GPUs[SPHERES];

// arrays to hold pixels
float * pixels = NULL;
float * GPUpixels = NULL;

//thread format for screen display
unsigned int window_width = 1024;
unsigned int window_height = 1024;

float stepSizeX = (xmax - xmin)/((float)window_width - 1.0);
float stepSizeY = (ymax - ymin)/((float)window_height - 1.0);

dim3 nthreads = MIN(window_width, 1024);
dim3 nblocks = (window_width*window_height - 1) / nthreads.x + 1;

__device__ float hit(float x, float y, float z, float radius, float ox, float oy, float *n )
{
    float dx = ox - x;
    float dy = oy - y;
    if (dx*dx + dy*dy < radius*radius)
    {
        float dz = sqrtf( radius * radius - dx*dx - dy*dy );
        *n = dz / sqrtf( radius * radius );
        return dz + z;
    }
    return -INF;
}

__global__ void trace_rays(float * pix, float dx, float dy, int nx, int ny)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    float xx, yy, maxz=-INF;

    if(i < nx * ny)
    {
        float rr = 0, gg = 0, bb = 0;
        xx = (xmin + threadIdx.x * dx);
        yy = (ymin + blockIdx.x * dy);

        for(int j = 0; j < SPHERES; j++){
            float n, t = hit(GPUs[j].x, GPUs[j].y, GPUs[j].z, GPUs[j].radius, xx, yy, &n);
            if(t > maxz){
                rr = n * GPUs[j].r;
                gg = n * GPUs[j].g;
                bb = n * GPUs[j].b;
                maxz = t;
            }
        }
        pix[3*i+0] = rr;
        pix[3*i+1] = gg;
        pix[3*i+2] = bb;
    }
}

void allocate_memory()
{
    pixels = (float*)malloc(3*window_width*window_height * sizeof(float));
    hipMalloc(&GPUpixels, 3*window_width*window_height * sizeof(float));

    for(int i = 0; i < SPHERES; i++){
        s[i].x = rnd(100.0f) - 50;
        s[i].y = rnd(100.0f) - 50;
        s[i].z = rnd(100.0f) - 50;
        s[i].r = rnd(1.0f);
        s[i].g = rnd(1.0f);
        s[i].b = rnd(1.0f);
        s[i].radius = rnd(10.0f) + 2;
    }
    hipMemcpyToSymbol(GPUs, s, SPHERES*sizeof(Sphere));
}

void display(void)
{
    allocate_memory();
    hipLaunchKernelGGL(( trace_rays), dim3(nblocks),dim3(nthreads), 0, 0, GPUpixels, stepSizeX, stepSizeY, window_width, window_height);
    hipMemcpy(pixels, GPUpixels, 3*window_width*window_height*sizeof(float), hipMemcpyDeviceToHost);
    glDrawPixels(window_width, window_height, GL_RGB, GL_FLOAT, pixels);
    glFlush();
}

int main(int argc, char** argv)
{
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGB | GLUT_SINGLE);
    glutInitWindowSize(window_width, window_height);
    glutCreateWindow("Fractals man, fractals.");
    glutDisplayFunc(display);
    glutMainLoop();
}
3034225a2aad2313e5c5186e8e05ccb26565c3fa.cu
/*
   Mary Barker
   HW 10
   Ray Tracing with constant memory

   to compile and run:
   nvcc Barker10.cu -lm -lGL -lGLU -lglut
   ./a.out
*/
#include <GL/glut.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>

#define INF 2e10f
#define SPHERES 20
#define rnd( x ) (x * rand() / RAND_MAX)
#define MIN(x,y) (x< y) ? x : y
#define xmin -50
#define xmax 50
#define ymin -50
#define ymax 50

struct Sphere
{
    float r, g, b;
    float x, y, z;
    float radius;
};
Sphere s[SPHERES];
__constant__ Sphere GPUs[SPHERES];

// arrays to hold pixels
float * pixels = NULL;
float * GPUpixels = NULL;

//thread format for screen display
unsigned int window_width = 1024;
unsigned int window_height = 1024;

float stepSizeX = (xmax - xmin)/((float)window_width - 1.0);
float stepSizeY = (ymax - ymin)/((float)window_height - 1.0);

dim3 nthreads = MIN(window_width, 1024);
dim3 nblocks = (window_width*window_height - 1) / nthreads.x + 1;

__device__ float hit(float x, float y, float z, float radius, float ox, float oy, float *n )
{
    float dx = ox - x;
    float dy = oy - y;
    if (dx*dx + dy*dy < radius*radius)
    {
        float dz = sqrtf( radius * radius - dx*dx - dy*dy );
        *n = dz / sqrtf( radius * radius );
        return dz + z;
    }
    return -INF;
}

__global__ void trace_rays(float * pix, float dx, float dy, int nx, int ny)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    float xx, yy, maxz=-INF;

    if(i < nx * ny)
    {
        float rr = 0, gg = 0, bb = 0;
        xx = (xmin + threadIdx.x * dx);
        yy = (ymin + blockIdx.x * dy);

        for(int j = 0; j < SPHERES; j++){
            float n, t = hit(GPUs[j].x, GPUs[j].y, GPUs[j].z, GPUs[j].radius, xx, yy, &n);
            if(t > maxz){
                rr = n * GPUs[j].r;
                gg = n * GPUs[j].g;
                bb = n * GPUs[j].b;
                maxz = t;
            }
        }
        pix[3*i+0] = rr;
        pix[3*i+1] = gg;
        pix[3*i+2] = bb;
    }
}

void allocate_memory()
{
    pixels = (float*)malloc(3*window_width*window_height * sizeof(float));
    cudaMalloc(&GPUpixels, 3*window_width*window_height * sizeof(float));

    for(int i = 0; i < SPHERES; i++){
        s[i].x = rnd(100.0f) - 50;
        s[i].y = rnd(100.0f) - 50;
        s[i].z = rnd(100.0f) - 50;
        s[i].r = rnd(1.0f);
        s[i].g = rnd(1.0f);
        s[i].b = rnd(1.0f);
        s[i].radius = rnd(10.0f) + 2;
    }
    cudaMemcpyToSymbol(GPUs, s, SPHERES*sizeof(Sphere));
}

void display(void)
{
    allocate_memory();
    trace_rays<<<nblocks,nthreads>>>(GPUpixels, stepSizeX, stepSizeY, window_width, window_height);
    cudaMemcpy(pixels, GPUpixels, 3*window_width*window_height*sizeof(float), cudaMemcpyDeviceToHost);
    glDrawPixels(window_width, window_height, GL_RGB, GL_FLOAT, pixels);
    glFlush();
}

int main(int argc, char** argv)
{
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGB | GLUT_SINGLE);
    glutInitWindowSize(window_width, window_height);
    glutCreateWindow("Fractals man, fractals.");
    glutDisplayFunc(display);
    glutMainLoop();
}
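A minimal sketch of the __constant__ plus cudaMemcpyToSymbol pattern the pair above is built around, reduced to the essentials; coeffs and scaleByCoeffs are made-up names used only for illustration.

#include <cuda_runtime.h>

#define N_COEFFS 16

// Small read-only table visible to every thread without passing a pointer,
// standing in for the GPUs[SPHERES] sphere array above.
__constant__ float coeffs[N_COEFFS];

__global__ void scaleByCoeffs(float* data, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= coeffs[i % N_COEFFS];  // constant-cache broadcast reads
}

int main()
{
    float host_coeffs[N_COEFFS];
    for (int i = 0; i < N_COEFFS; i++) host_coeffs[i] = 0.5f * i;
    cudaMemcpyToSymbol(coeffs, host_coeffs, sizeof(host_coeffs));

    const int n = 1 << 20;
    float* d_data;
    cudaMalloc((void**)&d_data, n * sizeof(float));
    cudaMemset(d_data, 0, n * sizeof(float));
    scaleByCoeffs<<<(n + 255) / 256, 256>>>(d_data, n);
    cudaDeviceSynchronize();
    cudaFree(d_data);
    return 0;
}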
32116a12f8cb6ef7a6e3f83de6a2297b4c78f4a7.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <chrono> #include <random> #include <hip/hip_runtime_api.h> #include <cudf/stream_compaction.hpp> #include <utilities/error_utils.hpp> #include <tests/utilities/column_wrapper.cuh> template <typename T> T random_int(T min, T max) { static unsigned seed = 13377331; static std::mt19937 engine{seed}; static std::uniform_int_distribution<T> uniform{min, max}; return uniform(engine); } // default implementation template <typename T> struct TypeName { static const char* Get() { return typeid(T).name(); } }; gdf_dtype type_from_name(const std::string &name) { if (name == "a") return GDF_INT8; else if (name == "s") return GDF_INT16; else if (name == "i") return GDF_INT32; else if (name == "l") return GDF_INT64; else if (name == "f") return GDF_FLOAT32; else if (name == "d") return GDF_FLOAT64; else return N_GDF_TYPES; } const char* name_from_type(gdf_dtype type) { switch (type) { case GDF_INT8: return "GDF_INT8"; case GDF_INT16: return "GDF_INT16"; case GDF_INT32: return "GDF_INT32"; case GDF_INT64: return "GDF_INT64"; case GDF_FLOAT32: return "GDF_FLOAT32"; case GDF_FLOAT64: return "GDF_FLOAT64"; default: return "GDF_INVALID"; } } struct warmup { template <typename T, typename Init, typename Bench> void operator()(Init init, Bench bench, int fraction=100) { auto columns = init(T{0}, fraction); bench(columns.first, columns.second); } }; struct benchmark { template <typename T, typename Init, typename Bench> void operator()(Init init, Bench bench, int iters = 100, int fraction=100, bool shmoo=false) { auto columns = init(T{0}, fraction); bench(columns.first, columns.second); // warm up hipProfilerStart(); auto start = std::chrono::high_resolution_clock::now(); for (int i = 0; i < iters; ++i) { bench(columns.first, columns.second); } hipDeviceSynchronize(); auto end = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> diff = end-start; hipProfilerStop(); std::cout << diff.count() / iters << (shmoo ? 
"," : " s") << std::flush; } }; template <typename Init, typename Bench> void benchmark_types(gdf_dtype type, Init init, Bench bench, int iters = 100, int pct_step=100, bool shmoo = false) { std::vector<gdf_dtype> types{}; if (type == N_GDF_TYPES) types = {GDF_INT8, GDF_INT16, GDF_INT32, GDF_INT64, GDF_FLOAT32, GDF_FLOAT64}; else types = {type}; for (gdf_dtype t : types) { cudf::type_dispatcher(t, warmup(), init, bench); std::cout << name_from_type(t) << ","; if (shmoo) { for (int fraction = 0; fraction <= 100; fraction += pct_step) cudf::type_dispatcher(t, benchmark(), init, bench, iters, fraction, shmoo); } else { cudf::type_dispatcher(t, benchmark(), init, bench, iters, 50); } std::cout << "\n"; } } // Shmoo the fraction of true elements that are masked from 0 to 100 template <typename Bench> void benchmark_fraction_shmoo(gdf_dtype type, Bench bench, gdf_size_type column_size, int pct_step = 5, int iters = 100) { gdf_size_type fraction = 0; auto init_fraction_true = [column_size](auto a, int fraction) { using TypeParam = decltype(a); cudf::test::column_wrapper<TypeParam> source( column_size, [](gdf_index_type row) { return TypeParam(row); }, [](gdf_index_type row) { return true; }); cudf::test::column_wrapper<cudf::bool8> mask( column_size, [&](gdf_index_type row) { return cudf::bool8{random_int(0, 100) < fraction}; }, [](gdf_index_type row) { return true; }); return std::make_pair(source, mask); }; std::cout << "type,"; for (fraction = 0; fraction <= 100; fraction += pct_step) std::cout << fraction << ","; std::cout << "\n"; benchmark_types(type, init_fraction_true, bench, iters, pct_step, true); } int main(int argc, char **argv) { gdf_size_type column_size{42000000}; int iters{100}; int index = -1; // all benches gdf_dtype type = N_GDF_TYPES; bool shmoo = false; if (argc > 1) column_size = std::stoi(argv[1]); if (argc > 2) iters = std::stoi(argv[2]); if (argc > 3) { shmoo = (std::string(argv[3]) == "shmoo"); if (!shmoo) index = std::stoi(argv[3]); } if (argc > 4) type = type_from_name(argv[4]); rmmOptions_t options{PoolAllocation, 0, false}; rmmInitialize(&options); auto bench = [](gdf_column const& source, gdf_column const& mask) { gdf_column result = cudf::apply_boolean_mask(source, mask); gdf_column_free(&result); }; if (!shmoo) { if (index == -1 || index == 0) { auto init = [column_size](auto a, int) { using TypeParam = decltype(a); cudf::test::column_wrapper<TypeParam> source( column_size, [](gdf_index_type row) { return TypeParam(row); }, [](gdf_index_type row) { return row % 2 == 0; }); cudf::test::column_wrapper<cudf::bool8> mask( column_size, [](gdf_index_type row) { return cudf::bool8{true}; }, [](gdf_index_type row) { return row % 2 == 1; }); return std::make_pair(source, mask); }; std::cout << "With null masks: Avg time to apply_boolean_mask for " << column_size << " elements:\n"; benchmark_types(type, init, bench, iters); } if (index == -1 || index == 1) { auto init_no_null = [column_size](auto a, int) { using TypeParam = decltype(a); cudf::test::column_wrapper<TypeParam> source(column_size, false); cudf::test::column_wrapper<cudf::bool8> mask( column_size, [](gdf_index_type row) { return cudf::bool8{true}; }, [](gdf_index_type row) { return row % 2 == 1; }); cudf::test::column_wrapper<TypeParam> output(column_size, false); return std::make_pair(source, mask); }; std::cout << "Without null masks: Avg time to apply_boolean_mask for " << column_size << " elements:\n"; benchmark_types(type, init_no_null, bench, iters); } if (index == -1 || index == 2) { auto 
init_all_false_mask = [column_size](auto a, int) { using TypeParam = decltype(a); cudf::test::column_wrapper<TypeParam> source( column_size, [](gdf_index_type row) { return TypeParam(row); }, [](gdf_index_type row) { return row % 2 == 0; }); cudf::test::column_wrapper<cudf::bool8> mask( column_size, [](gdf_index_type row) { return cudf::bool8{false}; }, [](gdf_index_type row) { return row % 2 == 1; }); return std::make_pair(source, mask); }; std::cout << "All false mask: Avg time to apply_boolean_mask for " << column_size << " elements:\n"; benchmark_types(type, init_all_false_mask, bench, iters); } } else { // shmoo benchmark_fraction_shmoo(type, bench, column_size, 5, 100); } rmmFinalize(); return 0; }
32116a12f8cb6ef7a6e3f83de6a2297b4c78f4a7.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <chrono> #include <random> #include <cuda_profiler_api.h> #include <cudf/stream_compaction.hpp> #include <utilities/error_utils.hpp> #include <tests/utilities/column_wrapper.cuh> template <typename T> T random_int(T min, T max) { static unsigned seed = 13377331; static std::mt19937 engine{seed}; static std::uniform_int_distribution<T> uniform{min, max}; return uniform(engine); } // default implementation template <typename T> struct TypeName { static const char* Get() { return typeid(T).name(); } }; gdf_dtype type_from_name(const std::string &name) { if (name == "a") return GDF_INT8; else if (name == "s") return GDF_INT16; else if (name == "i") return GDF_INT32; else if (name == "l") return GDF_INT64; else if (name == "f") return GDF_FLOAT32; else if (name == "d") return GDF_FLOAT64; else return N_GDF_TYPES; } const char* name_from_type(gdf_dtype type) { switch (type) { case GDF_INT8: return "GDF_INT8"; case GDF_INT16: return "GDF_INT16"; case GDF_INT32: return "GDF_INT32"; case GDF_INT64: return "GDF_INT64"; case GDF_FLOAT32: return "GDF_FLOAT32"; case GDF_FLOAT64: return "GDF_FLOAT64"; default: return "GDF_INVALID"; } } struct warmup { template <typename T, typename Init, typename Bench> void operator()(Init init, Bench bench, int fraction=100) { auto columns = init(T{0}, fraction); bench(columns.first, columns.second); } }; struct benchmark { template <typename T, typename Init, typename Bench> void operator()(Init init, Bench bench, int iters = 100, int fraction=100, bool shmoo=false) { auto columns = init(T{0}, fraction); bench(columns.first, columns.second); // warm up cudaProfilerStart(); auto start = std::chrono::high_resolution_clock::now(); for (int i = 0; i < iters; ++i) { bench(columns.first, columns.second); } cudaDeviceSynchronize(); auto end = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> diff = end-start; cudaProfilerStop(); std::cout << diff.count() / iters << (shmoo ? 
"," : " s") << std::flush; } }; template <typename Init, typename Bench> void benchmark_types(gdf_dtype type, Init init, Bench bench, int iters = 100, int pct_step=100, bool shmoo = false) { std::vector<gdf_dtype> types{}; if (type == N_GDF_TYPES) types = {GDF_INT8, GDF_INT16, GDF_INT32, GDF_INT64, GDF_FLOAT32, GDF_FLOAT64}; else types = {type}; for (gdf_dtype t : types) { cudf::type_dispatcher(t, warmup(), init, bench); std::cout << name_from_type(t) << ","; if (shmoo) { for (int fraction = 0; fraction <= 100; fraction += pct_step) cudf::type_dispatcher(t, benchmark(), init, bench, iters, fraction, shmoo); } else { cudf::type_dispatcher(t, benchmark(), init, bench, iters, 50); } std::cout << "\n"; } } // Shmoo the fraction of true elements that are masked from 0 to 100 template <typename Bench> void benchmark_fraction_shmoo(gdf_dtype type, Bench bench, gdf_size_type column_size, int pct_step = 5, int iters = 100) { gdf_size_type fraction = 0; auto init_fraction_true = [column_size](auto a, int fraction) { using TypeParam = decltype(a); cudf::test::column_wrapper<TypeParam> source( column_size, [](gdf_index_type row) { return TypeParam(row); }, [](gdf_index_type row) { return true; }); cudf::test::column_wrapper<cudf::bool8> mask( column_size, [&](gdf_index_type row) { return cudf::bool8{random_int(0, 100) < fraction}; }, [](gdf_index_type row) { return true; }); return std::make_pair(source, mask); }; std::cout << "type,"; for (fraction = 0; fraction <= 100; fraction += pct_step) std::cout << fraction << ","; std::cout << "\n"; benchmark_types(type, init_fraction_true, bench, iters, pct_step, true); } int main(int argc, char **argv) { gdf_size_type column_size{42000000}; int iters{100}; int index = -1; // all benches gdf_dtype type = N_GDF_TYPES; bool shmoo = false; if (argc > 1) column_size = std::stoi(argv[1]); if (argc > 2) iters = std::stoi(argv[2]); if (argc > 3) { shmoo = (std::string(argv[3]) == "shmoo"); if (!shmoo) index = std::stoi(argv[3]); } if (argc > 4) type = type_from_name(argv[4]); rmmOptions_t options{PoolAllocation, 0, false}; rmmInitialize(&options); auto bench = [](gdf_column const& source, gdf_column const& mask) { gdf_column result = cudf::apply_boolean_mask(source, mask); gdf_column_free(&result); }; if (!shmoo) { if (index == -1 || index == 0) { auto init = [column_size](auto a, int) { using TypeParam = decltype(a); cudf::test::column_wrapper<TypeParam> source( column_size, [](gdf_index_type row) { return TypeParam(row); }, [](gdf_index_type row) { return row % 2 == 0; }); cudf::test::column_wrapper<cudf::bool8> mask( column_size, [](gdf_index_type row) { return cudf::bool8{true}; }, [](gdf_index_type row) { return row % 2 == 1; }); return std::make_pair(source, mask); }; std::cout << "With null masks: Avg time to apply_boolean_mask for " << column_size << " elements:\n"; benchmark_types(type, init, bench, iters); } if (index == -1 || index == 1) { auto init_no_null = [column_size](auto a, int) { using TypeParam = decltype(a); cudf::test::column_wrapper<TypeParam> source(column_size, false); cudf::test::column_wrapper<cudf::bool8> mask( column_size, [](gdf_index_type row) { return cudf::bool8{true}; }, [](gdf_index_type row) { return row % 2 == 1; }); cudf::test::column_wrapper<TypeParam> output(column_size, false); return std::make_pair(source, mask); }; std::cout << "Without null masks: Avg time to apply_boolean_mask for " << column_size << " elements:\n"; benchmark_types(type, init_no_null, bench, iters); } if (index == -1 || index == 2) { auto 
init_all_false_mask = [column_size](auto a, int) { using TypeParam = decltype(a); cudf::test::column_wrapper<TypeParam> source( column_size, [](gdf_index_type row) { return TypeParam(row); }, [](gdf_index_type row) { return row % 2 == 0; }); cudf::test::column_wrapper<cudf::bool8> mask( column_size, [](gdf_index_type row) { return cudf::bool8{false}; }, [](gdf_index_type row) { return row % 2 == 1; }); return std::make_pair(source, mask); }; std::cout << "All false mask: Avg time to apply_boolean_mask for " << column_size << " elements:\n"; benchmark_types(type, init_all_false_mask, bench, iters); } } else { // shmoo benchmark_fraction_shmoo(type, bench, column_size, 5, 100); } rmmFinalize(); return 0; }
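The benchmark pair above times apply_boolean_mask with std::chrono, bracketed by cudaProfilerStart/Stop in the CUDA version and hipProfilerStart/Stop after conversion; the device synchronize before reading the end timestamp is what makes the host-side clock meaningful, because kernel launches return immediately. Below is a self-contained sketch of that timing idiom, using a hypothetical placeholder kernel rather than cudf.

// Sketch of the host-clock timing pattern used by the benchmark above.
// dummy_kernel is a placeholder, not part of cudf.
#include <chrono>
#include <cstdio>
#include <hip/hip_runtime.h>

__global__ void dummy_kernel(float* x, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = x[i] * 2.0f + 1.0f;
}

int main() {
    const int n = 1 << 20;
    const int iters = 100;
    float* d_x = nullptr;
    hipMalloc((void**)&d_x, n * sizeof(float));

    auto start = std::chrono::high_resolution_clock::now();
    for (int i = 0; i < iters; ++i)
        hipLaunchKernelGGL(dummy_kernel, dim3((n + 255) / 256), dim3(256), 0, 0, d_x, n);
    hipDeviceSynchronize();                      // wait for all launches before stopping the clock
    auto end = std::chrono::high_resolution_clock::now();

    std::chrono::duration<double> diff = end - start;
    printf("%f s per launch\n", diff.count() / iters);
    hipFree(d_x);
    return 0;
}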
74fb3d1bdf1ab836b8652e93475759b89bf70e20.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<math.h> #include<cuda.h> #define TILE_WIDTH 32 #define printError(func) \ { \ hipError_t E = func; \ if(E != hipSuccess) \ { \ printf( "\nError at line: %d ", __LINE__); \ printf( "\nError: %s ", hipGetErrorString(E)); \ } \ } \ __global__ void MatrixMult(int m, int n, int k, int *A, int *B, int *C) { int Row = blockIdx.y * blockDim.y + threadIdx.y; int Col = blockIdx.x * blockDim.x + threadIdx.x; if(Row<m && Col<k) { int sum = 0; for(int i=0;i<n;i++) { sum = sum + (A[Row*n + i] * B[k*i + Col]); } C[Row*k + Col] = sum; } } int checkProd(int m, int n, int k, int *A, int *B, int*C) { for(int row= 0;row<m;row++) { for(int col=0;col<k;col++) { int sum=0; for(int i=0;i<n;i++) { sum = sum + A[row*n + i] * B[col + i*k]; } if(C[row*k + col] != sum) return 0; } } return 1; } int main() { int *A; int *B; int *C; int *deviceA; int *deviceB; int *deviceC; // Matrix A of size (m,n) and Matrix B of size (n,k) int m = 1024; int n = 512; int k = 1024; A = (int *)malloc(m * n * sizeof(int)); B = (int *)malloc(n * k * sizeof(int)); C = (int *)malloc(m * k * sizeof(int)); for(int i=0;i<m*n;i++) { A[i] = rand()%10; //printf("%d ",A[i]); } printf("\n"); for(int i=0;i<n*k;i++) { B[i] = rand()%10; //printf("%d ",B[i]); } printf("\n"); printError(hipMalloc((void **)&deviceA, m * n * sizeof(int))); printError(hipMalloc((void **)&deviceB, n * k * sizeof(int))); printError(hipMalloc((void **)&deviceC, m * k * sizeof(int))); hipMemcpy(deviceA, A, m * n * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(deviceB, B, n * k * sizeof(int), hipMemcpyHostToDevice); dim3 dimGrid((k-1)/TILE_WIDTH+1, (m-1)/TILE_WIDTH+1,1); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); //dim3 dimGrid(32,32,1); //dim3 dimBlock(32,32,1); hipLaunchKernelGGL(( MatrixMult), dim3(dimGrid), dim3(dimBlock), 0, 0, m,n,k,deviceA,deviceB,deviceC); hipMemcpy(C, deviceC, m * k * sizeof(float), hipMemcpyDeviceToHost); for(int i=0;i<m*k;i++) printf("%d ",C[i]); if(checkProd(m, n, k, A, B, C)) printf("\nResult of matrix multiplication is correct\n"); else printf("\nResult of matrix multiplication is wrong\n"); hipFree(deviceA); hipFree(deviceB); hipFree(deviceC); free(A); free(B); free(C); }
74fb3d1bdf1ab836b8652e93475759b89bf70e20.cu
#include<stdio.h> #include<math.h> #include<cuda.h> #define TILE_WIDTH 32 #define printError(func) \ { \ cudaError_t E = func; \ if(E != cudaSuccess) \ { \ printf( "\nError at line: %d ", __LINE__); \ printf( "\nError: %s ", cudaGetErrorString(E)); \ } \ } \ __global__ void MatrixMult(int m, int n, int k, int *A, int *B, int *C) { int Row = blockIdx.y * blockDim.y + threadIdx.y; int Col = blockIdx.x * blockDim.x + threadIdx.x; if(Row<m && Col<k) { int sum = 0; for(int i=0;i<n;i++) { sum = sum + (A[Row*n + i] * B[k*i + Col]); } C[Row*k + Col] = sum; } } int checkProd(int m, int n, int k, int *A, int *B, int*C) { for(int row= 0;row<m;row++) { for(int col=0;col<k;col++) { int sum=0; for(int i=0;i<n;i++) { sum = sum + A[row*n + i] * B[col + i*k]; } if(C[row*k + col] != sum) return 0; } } return 1; } int main() { int *A; int *B; int *C; int *deviceA; int *deviceB; int *deviceC; // Matrix A of size (m,n) and Matrix B of size (n,k) int m = 1024; int n = 512; int k = 1024; A = (int *)malloc(m * n * sizeof(int)); B = (int *)malloc(n * k * sizeof(int)); C = (int *)malloc(m * k * sizeof(int)); for(int i=0;i<m*n;i++) { A[i] = rand()%10; //printf("%d ",A[i]); } printf("\n"); for(int i=0;i<n*k;i++) { B[i] = rand()%10; //printf("%d ",B[i]); } printf("\n"); printError(cudaMalloc((void **)&deviceA, m * n * sizeof(int))); printError(cudaMalloc((void **)&deviceB, n * k * sizeof(int))); printError(cudaMalloc((void **)&deviceC, m * k * sizeof(int))); cudaMemcpy(deviceA, A, m * n * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(deviceB, B, n * k * sizeof(int), cudaMemcpyHostToDevice); dim3 dimGrid((k-1)/TILE_WIDTH+1, (m-1)/TILE_WIDTH+1,1); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); //dim3 dimGrid(32,32,1); //dim3 dimBlock(32,32,1); MatrixMult<<<dimGrid, dimBlock>>>(m,n,k,deviceA,deviceB,deviceC); cudaMemcpy(C, deviceC, m * k * sizeof(float), cudaMemcpyDeviceToHost); for(int i=0;i<m*k;i++) printf("%d ",C[i]); if(checkProd(m, n, k, A, B, C)) printf("\nResult of matrix multiplication is correct\n"); else printf("\nResult of matrix multiplication is wrong\n"); cudaFree(deviceA); cudaFree(deviceB); cudaFree(deviceC); free(A); free(B); free(C); }
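The matrix-multiplication pair above shows the two main rewrites hipify performs at launch sites: cudaMalloc/cudaMemcpy become their hip* equivalents, and the triple-chevron launch becomes hipLaunchKernelGGL with the grid, block, shared-memory, and stream arguments made explicit. Note also that the copy-back sizes m*k ints with sizeof(float), which is harmless only because sizeof(int) equals sizeof(float) on these targets. A short sketch of the ceil-division launch geometry and the two launch spellings, with a stub kernel:

// Sketch of the launch-geometry idiom from the pair above; the kernel is a stub
// with the same indexing as MatrixMult.
#include <hip/hip_runtime.h>

#define TILE_WIDTH 32

__global__ void MatrixMultStub(int m, int n, int k, const int* A, const int* B, int* C) {
    int Row = blockIdx.y * blockDim.y + threadIdx.y;
    int Col = blockIdx.x * blockDim.x + threadIdx.x;
    if (Row < m && Col < k) {
        int sum = 0;
        for (int i = 0; i < n; ++i) sum += A[Row * n + i] * B[i * k + Col];
        C[Row * k + Col] = sum;
    }
}

void launch(int m, int n, int k, const int* dA, const int* dB, int* dC) {
    // One block per TILE_WIDTH x TILE_WIDTH output tile, rounded up.
    dim3 dimGrid((k - 1) / TILE_WIDTH + 1, (m - 1) / TILE_WIDTH + 1, 1);
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
    // CUDA spelling:  MatrixMultStub<<<dimGrid, dimBlock>>>(m, n, k, dA, dB, dC);
    // HIP spelling, as hipify rewrites it:
    hipLaunchKernelGGL(MatrixMultStub, dimGrid, dimBlock, 0, 0, m, n, k, dA, dB, dC);
}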
150349e5d538183adb0665778f19ce985d7fdafc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // DiffusionSolver.cu // // Copyright (C) 2013 by University of Stuttgart (VISUS). // All rights reserved. // // Created on : Sep 16, 2013 // Author : scharnkn // #include "stdafx.h" #include "DiffusionSolver.h" #include "cuda_error_check.h" #include "CUDAGrid.cuh" #include "helper_cuda.h" #include "helper_math.h" #include "mmcore/utility/log/Log.h" using namespace megamol; using namespace megamol::protein_cuda; typedef unsigned int uint; //#define USE_CUDA_TIMER __constant__ __device__ float isoval_D; // Isovalue defining the level sets /* * DiffusionSolver_InitGVF_D */ __global__ void DiffusionSolver_InitGVF_D( const float *volTarget_D, const unsigned int *cellStatesTarget_D, float *gvfConstData_D) { const uint idx = __umul24(__umul24(blockIdx.y, gridDim.x) + blockIdx.x, blockDim.x) + threadIdx.x; int volsize = gridSize_D.x*gridSize_D.y*gridSize_D.z; if (idx >= volsize) return; int3 cellC; uint cellIdx; int activeTarget=0; int3 gridC = make_int3( idx % (gridSize_D.x), (idx / gridSize_D.x) % gridSize_D.y, (idx / gridSize_D.x) / gridSize_D.y); /* Check neighbor cells in target volume */ // (-1, -1, -1) cellC = make_int3( clamp(gridC.x-1, 0, gridSize_D.x-2), clamp(gridC.y-1, 0, gridSize_D.y-2), clamp(gridC.z-1, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; // (-1, 1, -1) cellC = make_int3( clamp(gridC.x-1, 0, gridSize_D.x-2), clamp(gridC.y, 0, gridSize_D.y-2), clamp(gridC.z-1, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; // (-1, 1, 1) cellC = make_int3( clamp(gridC.x-1, 0, gridSize_D.x-2), clamp(gridC.y, 0, gridSize_D.y-2), clamp(gridC.z, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; // (-1, -1, 1) cellC = make_int3( clamp(gridC.x-1, 0, gridSize_D.x-2), clamp(gridC.y-1, 0, gridSize_D.y-2), clamp(gridC.z, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; // (1, -1, -1) cellC = make_int3( clamp(gridC.x, 0, gridSize_D.x-2), clamp(gridC.y-1, 0, gridSize_D.y-2), clamp(gridC.z-1, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; // (1, 1, -1) cellC = make_int3( clamp(gridC.x, 0, gridSize_D.x-2), clamp(gridC.y, 0, gridSize_D.y-2), clamp(gridC.z-1, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; // (1, 1, 1) cellC = make_int3( clamp(gridC.x, 0, gridSize_D.x-2), clamp(gridC.y, 0, gridSize_D.y-2), clamp(gridC.z, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; // (1, -1, 1) cellC = make_int3( clamp(gridC.x, 0, gridSize_D.x-2), clamp(gridC.y-1, 0, gridSize_D.y-2), clamp(gridC.z, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; /* Sample gradients */ float3 gradTarget; int3 x1, x2; x1 = make_int3(clamp(gridC.x+1, 0, gridSize_D.x-1), gridC.y, gridC.z); x2 = make_int3(clamp(gridC.x-1, 0, gridSize_D.x-1), gridC.y, gridC.z); gradTarget.x = volTarget_D[gridSize_D.x*(gridSize_D.y*x1.z + x1.y) + x1.x]- volTarget_D[gridSize_D.x*(gridSize_D.y*x2.z + x2.y) + x2.x]; x1 = make_int3(gridC.x, clamp(gridC.y+1, 0, gridSize_D.y-1), gridC.z); x2 = make_int3(gridC.x, clamp(gridC.y-1, 0, gridSize_D.y-1), gridC.z); gradTarget.y = 
volTarget_D[gridSize_D.x*(gridSize_D.y*x1.z + x1.y) + x1.x]- volTarget_D[gridSize_D.x*(gridSize_D.y*x2.z + x2.y) + x2.x]; x1 = make_int3(gridC.x, gridC.y, clamp(gridC.z+1, 0, gridSize_D.z-1)); x2 = make_int3(gridC.x, gridC.y, clamp(gridC.z-1, 0, gridSize_D.z-1)); gradTarget.z = volTarget_D[gridSize_D.x*(gridSize_D.y*x1.z + x1.y) + x1.x]- volTarget_D[gridSize_D.x*(gridSize_D.y*x2.z + x2.y) + x2.x]; float len = length(gradTarget); if (len > 0.0) gradTarget/= len; /* Extract cont data*/ // Compute len^2 len = gradTarget.x*gradTarget.x + gradTarget.y*gradTarget.y + gradTarget.z*gradTarget.z; // Write b to device memory gvfConstData_D[4*idx+0] = len; // Write c1, c2, and c3 to device memory gvfConstData_D[4*idx+1] = len*gradTarget.x; gvfConstData_D[4*idx+2] = len*gradTarget.y; gvfConstData_D[4*idx+3] = len*gradTarget.z; } /* * DiffusionSolver_InitTwoWayGVF_D */ __global__ void DiffusionSolver_InitTwoWayGVF_D( const float *volSource_D, const float *volTarget_D, const unsigned int *cellStatesSource_D, const unsigned int *cellStatesTarget_D, float *gvfConstData_D) { const uint idx = __umul24(__umul24(blockIdx.y, gridDim.x) + blockIdx.x, blockDim.x) + threadIdx.x; int volsize = gridSize_D.x*gridSize_D.y*gridSize_D.z; if (idx >= volsize) return; int3 cellC; uint cellIdx; int activeSource=0, activeTarget=0; int3 gridC = make_int3( idx % (gridSize_D.x), (idx / gridSize_D.x) % gridSize_D.y, (idx / gridSize_D.x) / gridSize_D.y); /* Check neighbor cells in source volume */ // (-1, -1, -1) cellC = make_int3( clamp(gridC.x-1, 0, gridSize_D.x-2), clamp(gridC.y-1, 0, gridSize_D.y-2), clamp(gridC.z-1, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeSource |= cellStatesSource_D[cellIdx]; // (-1, 1, -1) cellC = make_int3( clamp(gridC.x-1, 0, gridSize_D.x-2), clamp(gridC.y, 0, gridSize_D.y-2), clamp(gridC.z-1, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeSource |= cellStatesSource_D[cellIdx]; // (-1, 1, 1) cellC = make_int3( clamp(gridC.x-1, 0, gridSize_D.x-2), clamp(gridC.y, 0, gridSize_D.y-2), clamp(gridC.z, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeSource |= cellStatesSource_D[cellIdx]; // (-1, -1, 1) cellC = make_int3( clamp(gridC.x-1, 0, gridSize_D.x-2), clamp(gridC.y-1, 0, gridSize_D.y-2), clamp(gridC.z, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeSource |= cellStatesSource_D[cellIdx]; // (1, -1, -1) cellC = make_int3( clamp(gridC.x, 0, gridSize_D.x-2), clamp(gridC.y-1, 0, gridSize_D.y-2), clamp(gridC.z-1, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeSource |= cellStatesSource_D[cellIdx]; // (1, 1, -1) cellC = make_int3( clamp(gridC.x, 0, gridSize_D.x-2), clamp(gridC.y, 0, gridSize_D.y-2), clamp(gridC.z-1, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeSource |= cellStatesSource_D[cellIdx]; // (1, 1, 1) cellC = make_int3( clamp(gridC.x, 0, gridSize_D.x-2), clamp(gridC.y, 0, gridSize_D.y-2), clamp(gridC.z, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeSource |= cellStatesSource_D[cellIdx]; // (1, -1, 1) cellC = make_int3( clamp(gridC.x, 0, gridSize_D.x-2), clamp(gridC.y-1, 0, gridSize_D.y-2), clamp(gridC.z, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeSource |= cellStatesSource_D[cellIdx]; /* Check neighbor cells in target volume */ // (-1, -1, -1) cellC = make_int3( clamp(gridC.x-1, 0, gridSize_D.x-2), clamp(gridC.y-1, 0, gridSize_D.y-2), clamp(gridC.z-1, 0, gridSize_D.z-2)); cellIdx = 
::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; // (-1, 1, -1) cellC = make_int3( clamp(gridC.x-1, 0, gridSize_D.x-2), clamp(gridC.y, 0, gridSize_D.y-2), clamp(gridC.z-1, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; // (-1, 1, 1) cellC = make_int3( clamp(gridC.x-1, 0, gridSize_D.x-2), clamp(gridC.y, 0, gridSize_D.y-2), clamp(gridC.z, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; // (-1, -1, 1) cellC = make_int3( clamp(gridC.x-1, 0, gridSize_D.x-2), clamp(gridC.y-1, 0, gridSize_D.y-2), clamp(gridC.z, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; // (1, -1, -1) cellC = make_int3( clamp(gridC.x, 0, gridSize_D.x-2), clamp(gridC.y-1, 0, gridSize_D.y-2), clamp(gridC.z-1, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; // (1, 1, -1) cellC = make_int3( clamp(gridC.x, 0, gridSize_D.x-2), clamp(gridC.y, 0, gridSize_D.y-2), clamp(gridC.z-1, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; // (1, 1, 1) cellC = make_int3( clamp(gridC.x, 0, gridSize_D.x-2), clamp(gridC.y, 0, gridSize_D.y-2), clamp(gridC.z, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; // (1, -1, 1) cellC = make_int3( clamp(gridC.x, 0, gridSize_D.x-2), clamp(gridC.y-1, 0, gridSize_D.y-2), clamp(gridC.z, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; /* Sample gradients */ float3 gradSource, gradTarget, gradFinal; int3 x1, x2; x1 = make_int3(clamp(gridC.x+1, 0, gridSize_D.x-1), gridC.y, gridC.z); x2 = make_int3(clamp(gridC.x-1, 0, gridSize_D.x-1), gridC.y, gridC.z); gradSource.x = volSource_D[gridSize_D.x*(gridSize_D.y*x1.z + x1.y) + x1.x]- volSource_D[gridSize_D.x*(gridSize_D.y*x2.z + x2.y) + x2.x]; x1 = make_int3(gridC.x, clamp(gridC.y+1, 0, gridSize_D.y-1), gridC.z); x2 = make_int3(gridC.x, clamp(gridC.y-1, 0, gridSize_D.y-1), gridC.z); gradSource.y = volSource_D[gridSize_D.x*(gridSize_D.y*x1.z + x1.y) + x1.x]- volSource_D[gridSize_D.x*(gridSize_D.y*x2.z + x2.y) + x2.x]; x1 = make_int3(gridC.x, gridC.y, clamp(gridC.z+1, 0, gridSize_D.z-1)); x2 = make_int3(gridC.x, gridC.y, clamp(gridC.z-1, 0, gridSize_D.z-1)); gradSource.z = volSource_D[gridSize_D.x*(gridSize_D.y*x1.z + x1.y) + x1.x]- volSource_D[gridSize_D.x*(gridSize_D.y*x2.z + x2.y) + x2.x]; float len = length(gradSource); if (len > 0.0) gradSource/= len; x1 = make_int3(clamp(gridC.x+1, 0, gridSize_D.x-1), gridC.y, gridC.z); x2 = make_int3(clamp(gridC.x-1, 0, gridSize_D.x-1), gridC.y, gridC.z); gradTarget.x = volTarget_D[gridSize_D.x*(gridSize_D.y*x1.z + x1.y) + x1.x]- volTarget_D[gridSize_D.x*(gridSize_D.y*x2.z + x2.y) + x2.x]; x1 = make_int3(gridC.x, clamp(gridC.y+1, 0, gridSize_D.y-1), gridC.z); x2 = make_int3(gridC.x, clamp(gridC.y-1, 0, gridSize_D.y-1), gridC.z); gradTarget.y = volTarget_D[gridSize_D.x*(gridSize_D.y*x1.z + x1.y) + x1.x]- volTarget_D[gridSize_D.x*(gridSize_D.y*x2.z + x2.y) + x2.x]; x1 = make_int3(gridC.x, gridC.y, clamp(gridC.z+1, 0, gridSize_D.z-1)); x2 = make_int3(gridC.x, gridC.y, clamp(gridC.z-1, 0, gridSize_D.z-1)); gradTarget.z = volTarget_D[gridSize_D.x*(gridSize_D.y*x1.z + x1.y) + x1.x]- volTarget_D[gridSize_D.x*(gridSize_D.y*x2.z + x2.y) + x2.x]; len = length(gradTarget); if (len > 0.0) 
gradTarget/= len; /* Compute final gradient and extract const data*/ gradFinal = activeSource*(activeSource-0.5*activeTarget)*gradSource + activeTarget*(activeTarget-0.5*activeSource)*gradTarget; // Compute len^2 //len = gradFinal.x*gradFinal.x + gradFinal.y*gradFinal.y + gradFinal.z*gradFinal.z; len = float(activeSource||activeTarget); // Write b to device memory gvfConstData_D[4*idx+0] = len; // Write c1, c2, and c3 to device memory gvfConstData_D[4*idx+1] = len*gradFinal.x; gvfConstData_D[4*idx+2] = len*gradFinal.y; gvfConstData_D[4*idx+3] = len*gradFinal.z; } /* * DiffusionSolver_UpdateGVF_D */ __global__ void DiffusionSolver_UpdateGVF_D( float *gvfIn_D, float *gvfOut_D, float *gvfConstData_D, // b, c1, c2, c3 float scl) { const uint idx = __umul24(__umul24(blockIdx.y, gridDim.x) + blockIdx.x, blockDim.x) + threadIdx.x; uint volsize = gridSize_D.x*gridSize_D.y*gridSize_D.z; if (idx >= volsize) return; /// Get const data from global device memory /// float b = gvfConstData_D[4*idx+0]; float c1 = gvfConstData_D[4*idx+1]; float c2 = gvfConstData_D[4*idx+2]; float c3 = gvfConstData_D[4*idx+3]; float3 gvf, gvfOld, gvfAdj[6]; //uint idxAdj[6]; // Get grid coordinates int3 gridC = make_int3( idx % gridSize_D.x, (idx / gridSize_D.x) % gridSize_D.y, (idx / gridSize_D.x) / gridSize_D.y); /// Update isotropic diffusion for all vector components /// // Get adjacent gvf values gvfOld = make_float3(gvfIn_D[4*idx+0], gvfIn_D[4*idx+1], gvfIn_D[4*idx+2]); uint idxAdj = ::GetPosIdxByGridCoords(make_int3(clamp(int(gridC.x)-1, 0, int(gridSize_D.x-1)), gridC.y, gridC.z)); gvfAdj[0] = make_float3(gvfIn_D[4*idxAdj+0], gvfIn_D[4*idxAdj+1], gvfIn_D[4*idxAdj+2]); idxAdj = ::GetPosIdxByGridCoords(make_int3(clamp(int(gridC.x)+1, 0, int(gridSize_D.x-1)), gridC.y, gridC.z)); gvfAdj[1] = make_float3(gvfIn_D[4*idxAdj+0], gvfIn_D[4*idxAdj+1], gvfIn_D[4*idxAdj+2]); idxAdj = ::GetPosIdxByGridCoords(make_int3(gridC.x, uint(clamp(int(gridC.y)-1, 0, int(gridSize_D.y-1))), gridC.z)); gvfAdj[2] = make_float3(gvfIn_D[4*idxAdj+0], gvfIn_D[4*idxAdj+1], gvfIn_D[4*idxAdj+2]); idxAdj = ::GetPosIdxByGridCoords(make_int3(gridC.x, uint(clamp(int(gridC.y)+1, 0, int(gridSize_D.y-1))), gridC.z)); gvfAdj[3] = make_float3(gvfIn_D[4*idxAdj+0], gvfIn_D[4*idxAdj+1], gvfIn_D[4*idxAdj+2]); idxAdj = ::GetPosIdxByGridCoords(make_int3(gridC.x, gridC.y, uint(clamp(int(gridC.z)-1, 0, int(gridSize_D.z-1))))); gvfAdj[4] = make_float3(gvfIn_D[4*idxAdj+0], gvfIn_D[4*idxAdj+1], gvfIn_D[4*idxAdj+2]); idxAdj = ::GetPosIdxByGridCoords(make_int3(gridC.x, gridC.y, uint(clamp(int(gridC.z)+1, 0, int(gridSize_D.z-1))))); gvfAdj[5] = make_float3(gvfIn_D[4*idxAdj+0], gvfIn_D[4*idxAdj+1], gvfIn_D[4*idxAdj+2]); // // Calculate maximum time step to ensure conversion // float dt = gridDelta_D.x*gridDelta_D.y*gridDelta_D.z/(scl*6); // dt /= 2.0; // // // Compute diffusion // gvf.x = (1.0-b*dt)*gvfOld.x; // gvf.x += (gvfAdj[0].x + gvfAdj[1].x + gvfAdj[2].x + gvfAdj[3].x + // gvfAdj[4].x + gvfAdj[5].x -6*gvfOld.x)*scl; // gvf.x += c1*dt; // // gvf.y = (1.0-b*dt)*gvfOld.y; // gvf.y += (gvfAdj[0].y + gvfAdj[1].y + gvfAdj[2].y + gvfAdj[3].y + // gvfAdj[4].y + gvfAdj[5].y -6*gvfOld.y)*scl; // gvf.y += c2*dt; // // gvf.z = (1.0-b*dt)*gvfOld.z; // gvf.z += (gvfAdj[0].z + gvfAdj[1].z + gvfAdj[2].z + gvfAdj[3].z + // gvfAdj[4].z + gvfAdj[5].z -6*gvfOld.z)*scl; // gvf.z += c3*dt; // Calculate maximum time step to ensure conversion float minStep = min(gridDelta_D.z, min(gridDelta_D.x, gridDelta_D.y))/6.0; float dt = minStep*0.5; // Compute diffusion gvf.x = c1 + (1.0-b)*(gvfOld.x 
+ dt*(gvfAdj[0].x + gvfAdj[1].x + gvfAdj[2].x + gvfAdj[3].x + gvfAdj[4].x + gvfAdj[5].x -6*gvfOld.x)); gvf.y = c2 + (1.0-b)*(gvfOld.y + dt*(gvfAdj[0].y + gvfAdj[1].y + gvfAdj[2].y + gvfAdj[3].y + gvfAdj[4].y + gvfAdj[5].y -6*gvfOld.y)); gvf.z = c3 + (1.0-b)*(gvfOld.z + dt*(gvfAdj[0].z + gvfAdj[1].z + gvfAdj[2].z + gvfAdj[3].z + gvfAdj[4].z + gvfAdj[5].z -6*gvfOld.z)); gvfOut_D[4*idx+0] = gvf.x; gvfOut_D[4*idx+1] = gvf.y; gvfOut_D[4*idx+2] = gvf.z; } /* * DiffusionSolver::CalcGVF */ bool DiffusionSolver::CalcGVF( const float *volTarget_D, float *gvfConstData_D, const unsigned int *cellStatesTarget_D, int3 volDim, float3 volOrg, float3 volDelta, float *gvfIn_D, float *gvfOut_D, unsigned int maxIt, float scl) { using namespace megamol::core::utility::log; int volsize = volDim.x*volDim.y*volDim.z; uint3 voldim = make_uint3(volDim.x, volDim.y, volDim.z); // Init constant device parameters if (!initGridParams(volDim, volOrg, volDelta)) { Log::DefaultLog.WriteMsg(Log::LEVEL_ERROR, "%s: could not init constant device params", DiffusionSolver::ClassName()); return false; } #ifdef USE_CUDA_TIMER float dt_ms; hipEvent_t event1, event2; hipEventCreate(&event1); hipEventCreate(&event2); hipEventRecord(event1, 0); #endif // Init diffusion by calculating cont data hipLaunchKernelGGL(( DiffusionSolver_InitGVF_D) , dim3(Grid(volsize, 256)), dim3(256) , 0, 0, volTarget_D, cellStatesTarget_D, gvfConstData_D); #ifdef USE_CUDA_TIMER hipEventRecord(event2, 0); hipEventSynchronize(event1); hipEventSynchronize(event2); hipEventElapsedTime(&dt_ms, event1, event2); printf("CUDA time for 'initTwoWayGVF_D': %.10f sec\n", dt_ms/1000.0f); #endif for (unsigned int it=(maxIt%2); it < maxIt+(maxIt%2); ++it) { #ifdef USE_CUDA_TIMER hipEventRecord(event1, 0); #endif if (it%2 == 0) { // Update diffusion hipLaunchKernelGGL(( DiffusionSolver_UpdateGVF_D) , dim3(Grid(volsize, 256)), dim3(256) , 0, 0, gvfIn_D, gvfOut_D, gvfConstData_D, scl); if (hipGetLastError() != hipSuccess) { return false; } } else { // Update diffusion hipLaunchKernelGGL(( DiffusionSolver_UpdateGVF_D) , dim3(Grid(volsize, 256)), dim3(256) , 0, 0, gvfOut_D, gvfIn_D, gvfConstData_D, scl); if (hipGetLastError() != hipSuccess) { return false; } } #ifdef USE_CUDA_TIMER hipEventRecord(event2, 0); hipEventSynchronize(event1); hipEventSynchronize(event2); hipEventElapsedTime(&dt_ms, event1, event2); printf("CUDA time for 'updateGVF_D': %.10f sec\n", dt_ms/1000.0f); #endif } return true; } /* * DiffusionSolver::CalcTwoWayGVF */ bool DiffusionSolver::CalcTwoWayGVF( const float *volSource_D, const float *volTarget_D, const unsigned int *cellStatesSource_D, const unsigned int *cellStatesTarget_D, int3 volDim, float3 volOrg, float3 volDelta, float *gvfConstData_D, float *gvfIn_D, float *gvfOut_D, unsigned int maxIt, float scl) { using namespace megamol::core::utility::log; int volsize = volDim.x*volDim.y*volDim.z; uint3 voldim = make_uint3(volDim.x, volDim.y, volDim.z); // Init constant device parameters if (!initGridParams(volDim, volOrg, volDelta)) { Log::DefaultLog.WriteMsg(Log::LEVEL_ERROR, "%s: could not init constant device params", DiffusionSolver::ClassName()); return false; } #ifdef USE_CUDA_TIMER float dt_ms; hipEvent_t event1, event2; hipEventCreate(&event1); hipEventCreate(&event2); hipEventRecord(event1, 0); #endif // Init diffusion by calculating cont data hipLaunchKernelGGL(( DiffusionSolver_InitTwoWayGVF_D) , dim3(Grid(volsize, 256)), dim3(256) , 0, 0, volSource_D, volTarget_D, cellStatesSource_D, cellStatesTarget_D, gvfConstData_D); #ifdef 
USE_CUDA_TIMER hipEventRecord(event2, 0); hipEventSynchronize(event1); hipEventSynchronize(event2); hipEventElapsedTime(&dt_ms, event1, event2); printf("CUDA time for 'initTwoWayGVF_D': %.10f sec\n", dt_ms/1000.0f); hipEventRecord(event1, 0); #endif for (unsigned int it=(maxIt%2); it < maxIt+(maxIt%2); ++it) { #ifdef USE_CUDA_TIMER hipEventRecord(event1, 0); #endif if (it%2 == 0) { // Update diffusion hipLaunchKernelGGL(( DiffusionSolver_UpdateGVF_D) , dim3(Grid(volsize, 256)), dim3(256) , 0, 0, gvfIn_D, gvfOut_D, gvfConstData_D, scl); if (hipGetLastError() != hipSuccess) { return false; } } else { // Update diffusion hipLaunchKernelGGL(( DiffusionSolver_UpdateGVF_D) , dim3(Grid(volsize, 256)), dim3(256) , 0, 0, gvfOut_D, gvfIn_D, gvfConstData_D, scl); if (hipGetLastError() != hipSuccess) { return false; } } #ifdef USE_CUDA_TIMER hipEventRecord(event2, 0); hipEventSynchronize(event1); hipEventSynchronize(event2); hipEventElapsedTime(&dt_ms, event1, event2); printf("CUDA time for 'updateGVF_D': %.10f sec\n", dt_ms/1000.0f); #endif } return true; }
150349e5d538183adb0665778f19ce985d7fdafc.cu
// // DiffusionSolver.cu // // Copyright (C) 2013 by University of Stuttgart (VISUS). // All rights reserved. // // Created on : Sep 16, 2013 // Author : scharnkn // #include "stdafx.h" #include "DiffusionSolver.h" #include "cuda_error_check.h" #include "CUDAGrid.cuh" #include "helper_cuda.h" #include "helper_math.h" #include "mmcore/utility/log/Log.h" using namespace megamol; using namespace megamol::protein_cuda; typedef unsigned int uint; //#define USE_CUDA_TIMER __constant__ __device__ float isoval_D; // Isovalue defining the level sets /* * DiffusionSolver_InitGVF_D */ __global__ void DiffusionSolver_InitGVF_D( const float *volTarget_D, const unsigned int *cellStatesTarget_D, float *gvfConstData_D) { const uint idx = __umul24(__umul24(blockIdx.y, gridDim.x) + blockIdx.x, blockDim.x) + threadIdx.x; int volsize = gridSize_D.x*gridSize_D.y*gridSize_D.z; if (idx >= volsize) return; int3 cellC; uint cellIdx; int activeTarget=0; int3 gridC = make_int3( idx % (gridSize_D.x), (idx / gridSize_D.x) % gridSize_D.y, (idx / gridSize_D.x) / gridSize_D.y); /* Check neighbor cells in target volume */ // (-1, -1, -1) cellC = make_int3( clamp(gridC.x-1, 0, gridSize_D.x-2), clamp(gridC.y-1, 0, gridSize_D.y-2), clamp(gridC.z-1, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; // (-1, 1, -1) cellC = make_int3( clamp(gridC.x-1, 0, gridSize_D.x-2), clamp(gridC.y, 0, gridSize_D.y-2), clamp(gridC.z-1, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; // (-1, 1, 1) cellC = make_int3( clamp(gridC.x-1, 0, gridSize_D.x-2), clamp(gridC.y, 0, gridSize_D.y-2), clamp(gridC.z, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; // (-1, -1, 1) cellC = make_int3( clamp(gridC.x-1, 0, gridSize_D.x-2), clamp(gridC.y-1, 0, gridSize_D.y-2), clamp(gridC.z, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; // (1, -1, -1) cellC = make_int3( clamp(gridC.x, 0, gridSize_D.x-2), clamp(gridC.y-1, 0, gridSize_D.y-2), clamp(gridC.z-1, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; // (1, 1, -1) cellC = make_int3( clamp(gridC.x, 0, gridSize_D.x-2), clamp(gridC.y, 0, gridSize_D.y-2), clamp(gridC.z-1, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; // (1, 1, 1) cellC = make_int3( clamp(gridC.x, 0, gridSize_D.x-2), clamp(gridC.y, 0, gridSize_D.y-2), clamp(gridC.z, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; // (1, -1, 1) cellC = make_int3( clamp(gridC.x, 0, gridSize_D.x-2), clamp(gridC.y-1, 0, gridSize_D.y-2), clamp(gridC.z, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; /* Sample gradients */ float3 gradTarget; int3 x1, x2; x1 = make_int3(clamp(gridC.x+1, 0, gridSize_D.x-1), gridC.y, gridC.z); x2 = make_int3(clamp(gridC.x-1, 0, gridSize_D.x-1), gridC.y, gridC.z); gradTarget.x = volTarget_D[gridSize_D.x*(gridSize_D.y*x1.z + x1.y) + x1.x]- volTarget_D[gridSize_D.x*(gridSize_D.y*x2.z + x2.y) + x2.x]; x1 = make_int3(gridC.x, clamp(gridC.y+1, 0, gridSize_D.y-1), gridC.z); x2 = make_int3(gridC.x, clamp(gridC.y-1, 0, gridSize_D.y-1), gridC.z); gradTarget.y = volTarget_D[gridSize_D.x*(gridSize_D.y*x1.z + x1.y) + x1.x]- volTarget_D[gridSize_D.x*(gridSize_D.y*x2.z + x2.y) 
+ x2.x]; x1 = make_int3(gridC.x, gridC.y, clamp(gridC.z+1, 0, gridSize_D.z-1)); x2 = make_int3(gridC.x, gridC.y, clamp(gridC.z-1, 0, gridSize_D.z-1)); gradTarget.z = volTarget_D[gridSize_D.x*(gridSize_D.y*x1.z + x1.y) + x1.x]- volTarget_D[gridSize_D.x*(gridSize_D.y*x2.z + x2.y) + x2.x]; float len = length(gradTarget); if (len > 0.0) gradTarget/= len; /* Extract cont data*/ // Compute len^2 len = gradTarget.x*gradTarget.x + gradTarget.y*gradTarget.y + gradTarget.z*gradTarget.z; // Write b to device memory gvfConstData_D[4*idx+0] = len; // Write c1, c2, and c3 to device memory gvfConstData_D[4*idx+1] = len*gradTarget.x; gvfConstData_D[4*idx+2] = len*gradTarget.y; gvfConstData_D[4*idx+3] = len*gradTarget.z; } /* * DiffusionSolver_InitTwoWayGVF_D */ __global__ void DiffusionSolver_InitTwoWayGVF_D( const float *volSource_D, const float *volTarget_D, const unsigned int *cellStatesSource_D, const unsigned int *cellStatesTarget_D, float *gvfConstData_D) { const uint idx = __umul24(__umul24(blockIdx.y, gridDim.x) + blockIdx.x, blockDim.x) + threadIdx.x; int volsize = gridSize_D.x*gridSize_D.y*gridSize_D.z; if (idx >= volsize) return; int3 cellC; uint cellIdx; int activeSource=0, activeTarget=0; int3 gridC = make_int3( idx % (gridSize_D.x), (idx / gridSize_D.x) % gridSize_D.y, (idx / gridSize_D.x) / gridSize_D.y); /* Check neighbor cells in source volume */ // (-1, -1, -1) cellC = make_int3( clamp(gridC.x-1, 0, gridSize_D.x-2), clamp(gridC.y-1, 0, gridSize_D.y-2), clamp(gridC.z-1, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeSource |= cellStatesSource_D[cellIdx]; // (-1, 1, -1) cellC = make_int3( clamp(gridC.x-1, 0, gridSize_D.x-2), clamp(gridC.y, 0, gridSize_D.y-2), clamp(gridC.z-1, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeSource |= cellStatesSource_D[cellIdx]; // (-1, 1, 1) cellC = make_int3( clamp(gridC.x-1, 0, gridSize_D.x-2), clamp(gridC.y, 0, gridSize_D.y-2), clamp(gridC.z, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeSource |= cellStatesSource_D[cellIdx]; // (-1, -1, 1) cellC = make_int3( clamp(gridC.x-1, 0, gridSize_D.x-2), clamp(gridC.y-1, 0, gridSize_D.y-2), clamp(gridC.z, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeSource |= cellStatesSource_D[cellIdx]; // (1, -1, -1) cellC = make_int3( clamp(gridC.x, 0, gridSize_D.x-2), clamp(gridC.y-1, 0, gridSize_D.y-2), clamp(gridC.z-1, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeSource |= cellStatesSource_D[cellIdx]; // (1, 1, -1) cellC = make_int3( clamp(gridC.x, 0, gridSize_D.x-2), clamp(gridC.y, 0, gridSize_D.y-2), clamp(gridC.z-1, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeSource |= cellStatesSource_D[cellIdx]; // (1, 1, 1) cellC = make_int3( clamp(gridC.x, 0, gridSize_D.x-2), clamp(gridC.y, 0, gridSize_D.y-2), clamp(gridC.z, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeSource |= cellStatesSource_D[cellIdx]; // (1, -1, 1) cellC = make_int3( clamp(gridC.x, 0, gridSize_D.x-2), clamp(gridC.y-1, 0, gridSize_D.y-2), clamp(gridC.z, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeSource |= cellStatesSource_D[cellIdx]; /* Check neighbor cells in target volume */ // (-1, -1, -1) cellC = make_int3( clamp(gridC.x-1, 0, gridSize_D.x-2), clamp(gridC.y-1, 0, gridSize_D.y-2), clamp(gridC.z-1, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; // (-1, 1, -1) cellC = make_int3( clamp(gridC.x-1, 0, 
gridSize_D.x-2), clamp(gridC.y, 0, gridSize_D.y-2), clamp(gridC.z-1, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; // (-1, 1, 1) cellC = make_int3( clamp(gridC.x-1, 0, gridSize_D.x-2), clamp(gridC.y, 0, gridSize_D.y-2), clamp(gridC.z, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; // (-1, -1, 1) cellC = make_int3( clamp(gridC.x-1, 0, gridSize_D.x-2), clamp(gridC.y-1, 0, gridSize_D.y-2), clamp(gridC.z, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; // (1, -1, -1) cellC = make_int3( clamp(gridC.x, 0, gridSize_D.x-2), clamp(gridC.y-1, 0, gridSize_D.y-2), clamp(gridC.z-1, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; // (1, 1, -1) cellC = make_int3( clamp(gridC.x, 0, gridSize_D.x-2), clamp(gridC.y, 0, gridSize_D.y-2), clamp(gridC.z-1, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; // (1, 1, 1) cellC = make_int3( clamp(gridC.x, 0, gridSize_D.x-2), clamp(gridC.y, 0, gridSize_D.y-2), clamp(gridC.z, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; // (1, -1, 1) cellC = make_int3( clamp(gridC.x, 0, gridSize_D.x-2), clamp(gridC.y-1, 0, gridSize_D.y-2), clamp(gridC.z, 0, gridSize_D.z-2)); cellIdx = ::GetCellIdxByGridCoords(cellC); activeTarget |= cellStatesTarget_D[cellIdx]; /* Sample gradients */ float3 gradSource, gradTarget, gradFinal; int3 x1, x2; x1 = make_int3(clamp(gridC.x+1, 0, gridSize_D.x-1), gridC.y, gridC.z); x2 = make_int3(clamp(gridC.x-1, 0, gridSize_D.x-1), gridC.y, gridC.z); gradSource.x = volSource_D[gridSize_D.x*(gridSize_D.y*x1.z + x1.y) + x1.x]- volSource_D[gridSize_D.x*(gridSize_D.y*x2.z + x2.y) + x2.x]; x1 = make_int3(gridC.x, clamp(gridC.y+1, 0, gridSize_D.y-1), gridC.z); x2 = make_int3(gridC.x, clamp(gridC.y-1, 0, gridSize_D.y-1), gridC.z); gradSource.y = volSource_D[gridSize_D.x*(gridSize_D.y*x1.z + x1.y) + x1.x]- volSource_D[gridSize_D.x*(gridSize_D.y*x2.z + x2.y) + x2.x]; x1 = make_int3(gridC.x, gridC.y, clamp(gridC.z+1, 0, gridSize_D.z-1)); x2 = make_int3(gridC.x, gridC.y, clamp(gridC.z-1, 0, gridSize_D.z-1)); gradSource.z = volSource_D[gridSize_D.x*(gridSize_D.y*x1.z + x1.y) + x1.x]- volSource_D[gridSize_D.x*(gridSize_D.y*x2.z + x2.y) + x2.x]; float len = length(gradSource); if (len > 0.0) gradSource/= len; x1 = make_int3(clamp(gridC.x+1, 0, gridSize_D.x-1), gridC.y, gridC.z); x2 = make_int3(clamp(gridC.x-1, 0, gridSize_D.x-1), gridC.y, gridC.z); gradTarget.x = volTarget_D[gridSize_D.x*(gridSize_D.y*x1.z + x1.y) + x1.x]- volTarget_D[gridSize_D.x*(gridSize_D.y*x2.z + x2.y) + x2.x]; x1 = make_int3(gridC.x, clamp(gridC.y+1, 0, gridSize_D.y-1), gridC.z); x2 = make_int3(gridC.x, clamp(gridC.y-1, 0, gridSize_D.y-1), gridC.z); gradTarget.y = volTarget_D[gridSize_D.x*(gridSize_D.y*x1.z + x1.y) + x1.x]- volTarget_D[gridSize_D.x*(gridSize_D.y*x2.z + x2.y) + x2.x]; x1 = make_int3(gridC.x, gridC.y, clamp(gridC.z+1, 0, gridSize_D.z-1)); x2 = make_int3(gridC.x, gridC.y, clamp(gridC.z-1, 0, gridSize_D.z-1)); gradTarget.z = volTarget_D[gridSize_D.x*(gridSize_D.y*x1.z + x1.y) + x1.x]- volTarget_D[gridSize_D.x*(gridSize_D.y*x2.z + x2.y) + x2.x]; len = length(gradTarget); if (len > 0.0) gradTarget/= len; /* Compute final gradient and extract const data*/ gradFinal = activeSource*(activeSource-0.5*activeTarget)*gradSource + 
activeTarget*(activeTarget-0.5*activeSource)*gradTarget; // Compute len^2 //len = gradFinal.x*gradFinal.x + gradFinal.y*gradFinal.y + gradFinal.z*gradFinal.z; len = float(activeSource||activeTarget); // Write b to device memory gvfConstData_D[4*idx+0] = len; // Write c1, c2, and c3 to device memory gvfConstData_D[4*idx+1] = len*gradFinal.x; gvfConstData_D[4*idx+2] = len*gradFinal.y; gvfConstData_D[4*idx+3] = len*gradFinal.z; } /* * DiffusionSolver_UpdateGVF_D */ __global__ void DiffusionSolver_UpdateGVF_D( float *gvfIn_D, float *gvfOut_D, float *gvfConstData_D, // b, c1, c2, c3 float scl) { const uint idx = __umul24(__umul24(blockIdx.y, gridDim.x) + blockIdx.x, blockDim.x) + threadIdx.x; uint volsize = gridSize_D.x*gridSize_D.y*gridSize_D.z; if (idx >= volsize) return; /// Get const data from global device memory /// float b = gvfConstData_D[4*idx+0]; float c1 = gvfConstData_D[4*idx+1]; float c2 = gvfConstData_D[4*idx+2]; float c3 = gvfConstData_D[4*idx+3]; float3 gvf, gvfOld, gvfAdj[6]; //uint idxAdj[6]; // Get grid coordinates int3 gridC = make_int3( idx % gridSize_D.x, (idx / gridSize_D.x) % gridSize_D.y, (idx / gridSize_D.x) / gridSize_D.y); /// Update isotropic diffusion for all vector components /// // Get adjacent gvf values gvfOld = make_float3(gvfIn_D[4*idx+0], gvfIn_D[4*idx+1], gvfIn_D[4*idx+2]); uint idxAdj = ::GetPosIdxByGridCoords(make_int3(clamp(int(gridC.x)-1, 0, int(gridSize_D.x-1)), gridC.y, gridC.z)); gvfAdj[0] = make_float3(gvfIn_D[4*idxAdj+0], gvfIn_D[4*idxAdj+1], gvfIn_D[4*idxAdj+2]); idxAdj = ::GetPosIdxByGridCoords(make_int3(clamp(int(gridC.x)+1, 0, int(gridSize_D.x-1)), gridC.y, gridC.z)); gvfAdj[1] = make_float3(gvfIn_D[4*idxAdj+0], gvfIn_D[4*idxAdj+1], gvfIn_D[4*idxAdj+2]); idxAdj = ::GetPosIdxByGridCoords(make_int3(gridC.x, uint(clamp(int(gridC.y)-1, 0, int(gridSize_D.y-1))), gridC.z)); gvfAdj[2] = make_float3(gvfIn_D[4*idxAdj+0], gvfIn_D[4*idxAdj+1], gvfIn_D[4*idxAdj+2]); idxAdj = ::GetPosIdxByGridCoords(make_int3(gridC.x, uint(clamp(int(gridC.y)+1, 0, int(gridSize_D.y-1))), gridC.z)); gvfAdj[3] = make_float3(gvfIn_D[4*idxAdj+0], gvfIn_D[4*idxAdj+1], gvfIn_D[4*idxAdj+2]); idxAdj = ::GetPosIdxByGridCoords(make_int3(gridC.x, gridC.y, uint(clamp(int(gridC.z)-1, 0, int(gridSize_D.z-1))))); gvfAdj[4] = make_float3(gvfIn_D[4*idxAdj+0], gvfIn_D[4*idxAdj+1], gvfIn_D[4*idxAdj+2]); idxAdj = ::GetPosIdxByGridCoords(make_int3(gridC.x, gridC.y, uint(clamp(int(gridC.z)+1, 0, int(gridSize_D.z-1))))); gvfAdj[5] = make_float3(gvfIn_D[4*idxAdj+0], gvfIn_D[4*idxAdj+1], gvfIn_D[4*idxAdj+2]); // // Calculate maximum time step to ensure conversion // float dt = gridDelta_D.x*gridDelta_D.y*gridDelta_D.z/(scl*6); // dt /= 2.0; // // // Compute diffusion // gvf.x = (1.0-b*dt)*gvfOld.x; // gvf.x += (gvfAdj[0].x + gvfAdj[1].x + gvfAdj[2].x + gvfAdj[3].x + // gvfAdj[4].x + gvfAdj[5].x -6*gvfOld.x)*scl; // gvf.x += c1*dt; // // gvf.y = (1.0-b*dt)*gvfOld.y; // gvf.y += (gvfAdj[0].y + gvfAdj[1].y + gvfAdj[2].y + gvfAdj[3].y + // gvfAdj[4].y + gvfAdj[5].y -6*gvfOld.y)*scl; // gvf.y += c2*dt; // // gvf.z = (1.0-b*dt)*gvfOld.z; // gvf.z += (gvfAdj[0].z + gvfAdj[1].z + gvfAdj[2].z + gvfAdj[3].z + // gvfAdj[4].z + gvfAdj[5].z -6*gvfOld.z)*scl; // gvf.z += c3*dt; // Calculate maximum time step to ensure conversion float minStep = min(gridDelta_D.z, min(gridDelta_D.x, gridDelta_D.y))/6.0; float dt = minStep*0.5; // Compute diffusion gvf.x = c1 + (1.0-b)*(gvfOld.x + dt*(gvfAdj[0].x + gvfAdj[1].x + gvfAdj[2].x + gvfAdj[3].x + gvfAdj[4].x + gvfAdj[5].x -6*gvfOld.x)); gvf.y = c2 + (1.0-b)*(gvfOld.y + 
dt*(gvfAdj[0].y + gvfAdj[1].y + gvfAdj[2].y + gvfAdj[3].y + gvfAdj[4].y + gvfAdj[5].y -6*gvfOld.y)); gvf.z = c3 + (1.0-b)*(gvfOld.z + dt*(gvfAdj[0].z + gvfAdj[1].z + gvfAdj[2].z + gvfAdj[3].z + gvfAdj[4].z + gvfAdj[5].z -6*gvfOld.z)); gvfOut_D[4*idx+0] = gvf.x; gvfOut_D[4*idx+1] = gvf.y; gvfOut_D[4*idx+2] = gvf.z; } /* * DiffusionSolver::CalcGVF */ bool DiffusionSolver::CalcGVF( const float *volTarget_D, float *gvfConstData_D, const unsigned int *cellStatesTarget_D, int3 volDim, float3 volOrg, float3 volDelta, float *gvfIn_D, float *gvfOut_D, unsigned int maxIt, float scl) { using namespace megamol::core::utility::log; int volsize = volDim.x*volDim.y*volDim.z; uint3 voldim = make_uint3(volDim.x, volDim.y, volDim.z); // Init constant device parameters if (!initGridParams(volDim, volOrg, volDelta)) { Log::DefaultLog.WriteMsg(Log::LEVEL_ERROR, "%s: could not init constant device params", DiffusionSolver::ClassName()); return false; } #ifdef USE_CUDA_TIMER float dt_ms; cudaEvent_t event1, event2; cudaEventCreate(&event1); cudaEventCreate(&event2); cudaEventRecord(event1, 0); #endif // Init diffusion by calculating cont data DiffusionSolver_InitGVF_D <<< Grid(volsize, 256), 256 >>> ( volTarget_D, cellStatesTarget_D, gvfConstData_D); #ifdef USE_CUDA_TIMER cudaEventRecord(event2, 0); cudaEventSynchronize(event1); cudaEventSynchronize(event2); cudaEventElapsedTime(&dt_ms, event1, event2); printf("CUDA time for 'initTwoWayGVF_D': %.10f sec\n", dt_ms/1000.0f); #endif for (unsigned int it=(maxIt%2); it < maxIt+(maxIt%2); ++it) { #ifdef USE_CUDA_TIMER cudaEventRecord(event1, 0); #endif if (it%2 == 0) { // Update diffusion DiffusionSolver_UpdateGVF_D <<< Grid(volsize, 256), 256 >>> ( gvfIn_D, gvfOut_D, gvfConstData_D, scl); if (cudaGetLastError() != cudaSuccess) { return false; } } else { // Update diffusion DiffusionSolver_UpdateGVF_D <<< Grid(volsize, 256), 256 >>> ( gvfOut_D, gvfIn_D, gvfConstData_D, scl); if (cudaGetLastError() != cudaSuccess) { return false; } } #ifdef USE_CUDA_TIMER cudaEventRecord(event2, 0); cudaEventSynchronize(event1); cudaEventSynchronize(event2); cudaEventElapsedTime(&dt_ms, event1, event2); printf("CUDA time for 'updateGVF_D': %.10f sec\n", dt_ms/1000.0f); #endif } return true; } /* * DiffusionSolver::CalcTwoWayGVF */ bool DiffusionSolver::CalcTwoWayGVF( const float *volSource_D, const float *volTarget_D, const unsigned int *cellStatesSource_D, const unsigned int *cellStatesTarget_D, int3 volDim, float3 volOrg, float3 volDelta, float *gvfConstData_D, float *gvfIn_D, float *gvfOut_D, unsigned int maxIt, float scl) { using namespace megamol::core::utility::log; int volsize = volDim.x*volDim.y*volDim.z; uint3 voldim = make_uint3(volDim.x, volDim.y, volDim.z); // Init constant device parameters if (!initGridParams(volDim, volOrg, volDelta)) { Log::DefaultLog.WriteMsg(Log::LEVEL_ERROR, "%s: could not init constant device params", DiffusionSolver::ClassName()); return false; } #ifdef USE_CUDA_TIMER float dt_ms; cudaEvent_t event1, event2; cudaEventCreate(&event1); cudaEventCreate(&event2); cudaEventRecord(event1, 0); #endif // Init diffusion by calculating cont data DiffusionSolver_InitTwoWayGVF_D <<< Grid(volsize, 256), 256 >>> ( volSource_D, volTarget_D, cellStatesSource_D, cellStatesTarget_D, gvfConstData_D); #ifdef USE_CUDA_TIMER cudaEventRecord(event2, 0); cudaEventSynchronize(event1); cudaEventSynchronize(event2); cudaEventElapsedTime(&dt_ms, event1, event2); printf("CUDA time for 'initTwoWayGVF_D': %.10f sec\n", dt_ms/1000.0f); cudaEventRecord(event1, 0); #endif for 
(unsigned int it=(maxIt%2); it < maxIt+(maxIt%2); ++it) { #ifdef USE_CUDA_TIMER cudaEventRecord(event1, 0); #endif if (it%2 == 0) { // Update diffusion DiffusionSolver_UpdateGVF_D <<< Grid(volsize, 256), 256 >>> ( gvfIn_D, gvfOut_D, gvfConstData_D, scl); if (cudaGetLastError() != cudaSuccess) { return false; } } else { // Update diffusion DiffusionSolver_UpdateGVF_D <<< Grid(volsize, 256), 256 >>> ( gvfOut_D, gvfIn_D, gvfConstData_D, scl); if (cudaGetLastError() != cudaSuccess) { return false; } } #ifdef USE_CUDA_TIMER cudaEventRecord(event2, 0); cudaEventSynchronize(event1); cudaEventSynchronize(event2); cudaEventElapsedTime(&dt_ms, event1, event2); printf("CUDA time for 'updateGVF_D': %.10f sec\n", dt_ms/1000.0f); #endif } return true; }
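Both CalcGVF and CalcTwoWayGVF above advance the diffusion by ping-ponging between gvfIn_D and gvfOut_D, offsetting the loop start by (maxIt % 2) so the final iteration always writes into the same buffer. Below is a reduced sketch of that ping-pong pattern with a placeholder smoothing kernel standing in for DiffusionSolver_UpdateGVF_D.

// Ping-pong iteration sketch: two buffers alternate as input and output on
// successive steps, so no copy is needed between iterations. jacobi_step is
// a toy stand-in for the GVF update kernel.
#include <hip/hip_runtime.h>

__global__ void jacobi_step(const float* in, float* out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i > 0 && i < n - 1)
        out[i] = 0.5f * (in[i - 1] + in[i + 1]);   // toy isotropic smoothing
}

void iterate(float* bufA, float* bufB, int n, unsigned int maxIt) {
    dim3 block(256), grid((n + 255) / 256);
    for (unsigned int it = 0; it < maxIt; ++it) {
        if (it % 2 == 0)
            hipLaunchKernelGGL(jacobi_step, grid, block, 0, 0, bufA, bufB, n);
        else
            hipLaunchKernelGGL(jacobi_step, grid, block, 0, 0, bufB, bufA, n);
        if (hipGetLastError() != hipSuccess) return;  // same bail-out style as the solver above
    }
    // Here the latest result sits in bufB if maxIt is odd, bufA otherwise; the
    // solver above avoids that ambiguity by starting its loop at (maxIt % 2).
}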
5b1619ce3fa58f88a39a6c08afd66c9c281d6287.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cuda_lidar_mapping.cuh" struct PointInt2D { int x; int y; }; __device__ PointInt2D mapRealToGPU(float point_x, float point_y, float map_orient, float map_scale, float map_offset_pix) { PointInt2D map_pose; float point_orient = atan2f(point_y, point_x); float point_dist = sqrtf(point_x*point_x + point_y*point_y); map_pose.x = (int) (sinf(map_orient - point_orient) * point_dist * map_scale + map_offset_pix); map_pose.y = (int) (cosf(map_orient - point_orient) * point_dist * map_scale + map_offset_pix); return map_pose; } __global__ void lidarMappingKernel(float* laser_scan, int laser_rays, double* dk_matrix, uint8_t* heightmap, int map_x, int map_y, float map_orient, float map_scale, float map_offset_pix, float* debug) { int tid = blockIdx.x * blockDim.x + threadIdx.x; float laser_angle = ((float)tid)/laser_rays*SCAN_ANGLE - 0.5*SCAN_ANGLE; float rotZ[16]; float dk_laser_sensor[16]; rotZ[0] = cosf(laser_angle); rotZ[1] = -sinf(laser_angle); rotZ[2] = 0; rotZ[3] = 0; rotZ[4] = sinf(laser_angle); rotZ[5] = cosf(laser_angle); rotZ[6] = 0; rotZ[7] = 0; rotZ[8] = 0; rotZ[9] = 0; rotZ[10] = 1; rotZ[11] = 0; rotZ[12] = 0; rotZ[13] = 0; rotZ[14] = 0; rotZ[15] = 1; for(int i = 0; i < 4; i++){ for(int j = 0; j < 4; j++){ for(int k = 0; k < 4; k++){ dk_laser_sensor[i*4 + j] += dk_matrix[i*4 + k] * rotZ[k*4 + j]; } } } float point_x = dk_laser_sensor[0] * laser_scan[tid] + dk_laser_sensor[3]; float point_y = dk_laser_sensor[4] * laser_scan[tid] + dk_laser_sensor[7]; float point_z = dk_laser_sensor[8] * laser_scan[tid] + dk_laser_sensor[11]; PointInt2D point_map = mapRealToGPU(point_x, point_y, map_orient, map_scale, map_offset_pix); // atomicExch(&heightmap[point_map.y * map_x + point_map.x], (uint8_t)255); heightmap[point_map.x * map_y + point_map.y] = (uint8_t) 255; debug[tid] = point_map.x * map_y + point_map.y; } CudaLidarMapping::CudaLidarMapping(_RobotPlannerMaps *_rpm, _ROSBuffor *_ros) { this->_rpm = _rpm; this->_ros = _ros; } //////////////////////////////////////////////////////////////////////////////// void CudaLidarMapping::copyInputToDevice() { gpuErrchk( hipMemcpy(_rpm->dev_laser_scan, &_ros->laser_scan.ranges.front(), _rpm->laser_rays * sizeof(float), hipMemcpyHostToDevice) ); gpuErrchk( hipMemcpy(_rpm->dev_dk_matrix, _rpm->dk_matrix.m, 16*sizeof(double), hipMemcpyHostToDevice) ); } //////////////////////////////////////////////////////////////////////////////// void CudaLidarMapping::executeKernel() { hipLaunchKernelGGL(( lidarMappingKernel) , dim3(_rpm->laser_rays), dim3(1) , 0, 0, _rpm->dev_laser_scan, _rpm->laser_rays, _rpm->dev_dk_matrix, _rpm->dev_heightmap.data, _rpm->dev_heightmap.size_x, _rpm->dev_heightmap.size_y, _rpm->map_orient, _rpm->map_scale, _rpm->map_offset_pix, _rpm->dev_debug ); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////////////// void CudaLidarMapping::copyOutputToHost() { gpuErrchk( hipMemcpy(_rpm->host_heightmap.data, _rpm->dev_heightmap.data, _rpm->dev_heightmap.size() * sizeof(uint8_t), hipMemcpyDeviceToHost) ); } //////////////////////////////////////////////////////////////////////////////// void CudaLidarMapping::display() { cv::namedWindow("win1", 0); cv::imshow("win1", _rpm->host_heightmap); cv::waitKey(10); }
5b1619ce3fa58f88a39a6c08afd66c9c281d6287.cu
#include "cuda_lidar_mapping.cuh" struct PointInt2D { int x; int y; }; __device__ PointInt2D mapRealToGPU(float point_x, float point_y, float map_orient, float map_scale, float map_offset_pix) { PointInt2D map_pose; float point_orient = atan2f(point_y, point_x); float point_dist = sqrtf(point_x*point_x + point_y*point_y); map_pose.x = (int) (sinf(map_orient - point_orient) * point_dist * map_scale + map_offset_pix); map_pose.y = (int) (cosf(map_orient - point_orient) * point_dist * map_scale + map_offset_pix); return map_pose; } __global__ void lidarMappingKernel(float* laser_scan, int laser_rays, double* dk_matrix, uint8_t* heightmap, int map_x, int map_y, float map_orient, float map_scale, float map_offset_pix, float* debug) { int tid = blockIdx.x * blockDim.x + threadIdx.x; float laser_angle = ((float)tid)/laser_rays*SCAN_ANGLE - 0.5*SCAN_ANGLE; float rotZ[16]; float dk_laser_sensor[16]; rotZ[0] = cosf(laser_angle); rotZ[1] = -sinf(laser_angle); rotZ[2] = 0; rotZ[3] = 0; rotZ[4] = sinf(laser_angle); rotZ[5] = cosf(laser_angle); rotZ[6] = 0; rotZ[7] = 0; rotZ[8] = 0; rotZ[9] = 0; rotZ[10] = 1; rotZ[11] = 0; rotZ[12] = 0; rotZ[13] = 0; rotZ[14] = 0; rotZ[15] = 1; for(int i = 0; i < 4; i++){ for(int j = 0; j < 4; j++){ for(int k = 0; k < 4; k++){ dk_laser_sensor[i*4 + j] += dk_matrix[i*4 + k] * rotZ[k*4 + j]; } } } float point_x = dk_laser_sensor[0] * laser_scan[tid] + dk_laser_sensor[3]; float point_y = dk_laser_sensor[4] * laser_scan[tid] + dk_laser_sensor[7]; float point_z = dk_laser_sensor[8] * laser_scan[tid] + dk_laser_sensor[11]; PointInt2D point_map = mapRealToGPU(point_x, point_y, map_orient, map_scale, map_offset_pix); // atomicExch(&heightmap[point_map.y * map_x + point_map.x], (uint8_t)255); heightmap[point_map.x * map_y + point_map.y] = (uint8_t) 255; debug[tid] = point_map.x * map_y + point_map.y; } CudaLidarMapping::CudaLidarMapping(_RobotPlannerMaps *_rpm, _ROSBuffor *_ros) { this->_rpm = _rpm; this->_ros = _ros; } //////////////////////////////////////////////////////////////////////////////// void CudaLidarMapping::copyInputToDevice() { gpuErrchk( cudaMemcpy(_rpm->dev_laser_scan, &_ros->laser_scan.ranges.front(), _rpm->laser_rays * sizeof(float), cudaMemcpyHostToDevice) ); gpuErrchk( cudaMemcpy(_rpm->dev_dk_matrix, _rpm->dk_matrix.m, 16*sizeof(double), cudaMemcpyHostToDevice) ); } //////////////////////////////////////////////////////////////////////////////// void CudaLidarMapping::executeKernel() { lidarMappingKernel <<< _rpm->laser_rays, 1 >>> ( _rpm->dev_laser_scan, _rpm->laser_rays, _rpm->dev_dk_matrix, _rpm->dev_heightmap.data, _rpm->dev_heightmap.size_x, _rpm->dev_heightmap.size_y, _rpm->map_orient, _rpm->map_scale, _rpm->map_offset_pix, _rpm->dev_debug ); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////////////// void CudaLidarMapping::copyOutputToHost() { gpuErrchk( cudaMemcpy(_rpm->host_heightmap.data, _rpm->dev_heightmap.data, _rpm->dev_heightmap.size() * sizeof(uint8_t), cudaMemcpyDeviceToHost) ); } //////////////////////////////////////////////////////////////////////////////// void CudaLidarMapping::display() { cv::namedWindow("win1", 0); cv::imshow("win1", _rpm->host_heightmap); cv::waitKey(10); }
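The lidar-mapping pair above wraps every runtime call in gpuErrchk(...), whose definition comes from an included header not shown in this collection. The sketch below is an assumed, typical implementation of that error-checking pattern, not the project's own code.

// Assumed gpuErrchk-style wrapper: check the returned error code and report
// file/line on failure. Usage mirrors the calls in the file above.
#include <cstdio>
#include <cstdlib>
#include <hip/hip_runtime.h>

#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }

inline void gpuAssert(hipError_t code, const char* file, int line, bool abort = true) {
    if (code != hipSuccess) {
        fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

// Example usage, matching the pattern above:
//   gpuErrchk(hipMemcpy(dst, src, bytes, hipMemcpyHostToDevice));
//   gpuErrchk(hipPeekAtLastError());
//   gpuErrchk(hipDeviceSynchronize());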
825da061626b9b8531424edf09f40c68bdfd75c3.hip
// !!! This is a file automatically generated by hipify!!!
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#include "cudakernel/memory/reshape.h"
#include "ppl/nn/common/tensor_shape.h"
#include "ppl/common/retcode.h"
#include <hip/hip_runtime.h>

ppl::common::RetCode PPLCUDAReshapeForwardImp(
    hipStream_t stream,
    const ppl::nn::TensorShape* input_shape,
    const void* input,
    const ppl::nn::TensorShape* output_shape,
    void* output)
{
    int64_t num_elems_output = output_shape->GetElementsIncludingPadding();
    hipMemcpyAsync(output, input,
                   ppl::common::GetSizeOfDataType(input_shape->GetDataType()) * num_elems_output,
                   hipMemcpyDeviceToDevice, stream);
    return ppl::common::RC_SUCCESS;
}
825da061626b9b8531424edf09f40c68bdfd75c3.cu
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#include "cudakernel/memory/reshape.h"
#include "ppl/nn/common/tensor_shape.h"
#include "ppl/common/retcode.h"
#include <cuda_runtime.h>

ppl::common::RetCode PPLCUDAReshapeForwardImp(
    cudaStream_t stream,
    const ppl::nn::TensorShape* input_shape,
    const void* input,
    const ppl::nn::TensorShape* output_shape,
    void* output)
{
    int64_t num_elems_output = output_shape->GetElementsIncludingPadding();
    cudaMemcpyAsync(output, input,
                    ppl::common::GetSizeOfDataType(input_shape->GetDataType()) * num_elems_output,
                    cudaMemcpyDeviceToDevice, stream);
    return ppl::common::RC_SUCCESS;
}
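Reshape on a contiguous tensor moves no data, so the operator above reduces to a single device-to-device copy sized from the output shape. A small sketch of the same copy that skips aliasing buffers and surfaces the CUDA error instead of returning success unconditionally; the wrapper name and return convention are assumptions, not ppl.nn API:

// Hypothetical helper, not part of ppl.nn: same copy as PPLCUDAReshapeForwardImp,
// but it skips the no-op case and reports failures to the caller.
#include <cuda_runtime.h>
#include <cstddef>

static bool reshapeCopyAsync(void* output, const void* input, size_t bytes,
                             cudaStream_t stream)
{
    if (output == input || bytes == 0)        // in-place reshape: nothing to move
        return true;
    cudaError_t err = cudaMemcpyAsync(output, input, bytes,
                                      cudaMemcpyDeviceToDevice, stream);
    return err == cudaSuccess;                // caller maps this onto its RetCode
}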
493dcd77623be646ccba66c0bc2a2f17849e2119.hip
// !!! This is a file automatically generated by hipify!!! /** * gesummv.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <unistd.h> #include <stdio.h> #include <time.h> #include <sys/time.h> #include <stdlib.h> #include <stdarg.h> #include <string.h> #include <hip/hip_runtime.h> #include "polybenchUtilFuncts.h" //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 #define GPU_DEVICE 0 /* Problem size */ #define N 1024 #ifndef THREADS #define THREADS 1024 #endif /* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0) */ #define ALPHA 43532.0f #define BETA 12313.0f /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void gesummv(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *x, DATA_TYPE *y, DATA_TYPE *tmp) { int i, j; for (i = 0; i < N; i++) { tmp[i] = 0; y[i] = 0; for (j = 0; j < N; j++) { tmp[i] = A[i*N + j] * x[j] + tmp[i]; y[i] = B[i*N + j] * x[j] + y[i]; } y[i] = ALPHA * tmp[i] + BETA * y[i]; } } void init(DATA_TYPE* A, DATA_TYPE* x) { int i, j; for (i = 0; i < N; i++) { x[i] = ((DATA_TYPE) i) / N; for (j = 0; j < N; j++) { A[i*N + j] = ((DATA_TYPE) i*j) / N; } } } void compareResults(DATA_TYPE* y, DATA_TYPE* y_outputFromGpu) { int i, fail; fail = 0; for (i=0; i<(N); i++) { if (percentDiff(y[i], y_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, GPU_DEVICE); printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name); hipSetDevice( GPU_DEVICE ); } __global__ void gesummv_kernel(DATA_TYPE *a, DATA_TYPE *b, DATA_TYPE *x, DATA_TYPE *y, DATA_TYPE *tmp) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { int j; for(j = 0; j < N; j++) { tmp[i] += a[i * N + j] * x[j]; y[i] += b[i * N + j] * x[j]; } y[i] = ALPHA * tmp[i] + BETA * y[i]; } } void gesummvCuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp, DATA_TYPE* y_outputFromGpu) { double t_start, t_end; DATA_TYPE *A_gpu; DATA_TYPE *B_gpu; DATA_TYPE *x_gpu; DATA_TYPE *y_gpu; DATA_TYPE *tmp_gpu; hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * N * N); hipMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * N * N); hipMalloc((void **)&x_gpu, sizeof(DATA_TYPE) * N); hipMalloc((void **)&y_gpu, sizeof(DATA_TYPE) * N); hipMalloc((void **)&tmp_gpu, sizeof(DATA_TYPE) * N); hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * N * N, hipMemcpyHostToDevice); hipMemcpy(B_gpu, B, sizeof(DATA_TYPE) * N * N, hipMemcpyHostToDevice); hipMemcpy(x_gpu, x, sizeof(DATA_TYPE) * N, hipMemcpyHostToDevice); hipMemcpy(y_gpu, y, sizeof(DATA_TYPE) * N, hipMemcpyHostToDevice); hipMemcpy(tmp_gpu, tmp, sizeof(DATA_TYPE) * N, hipMemcpyHostToDevice); dim3 block(THREADS); dim3 grid((unsigned int)ceil( ((float)N) / ((float)block.x) )); t_start = rtclock(); hipLaunchKernelGGL(( gesummv_kernel), dim3(grid), dim3(block), 0, 0, A_gpu,B_gpu,x_gpu, y_gpu, tmp_gpu); hipDeviceSynchronize(); t_end = rtclock(); hipMemcpy(y_outputFromGpu, y_gpu, sizeof(DATA_TYPE) * N, hipMemcpyDeviceToHost); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); } int main(int argc, char *argv[]) { //double t_start, t_end; DATA_TYPE* A; DATA_TYPE* B; DATA_TYPE* 
x; DATA_TYPE* y; DATA_TYPE* y_outputFromGpu; DATA_TYPE* tmp; A = (DATA_TYPE*)malloc(N*N*sizeof(DATA_TYPE)); B = (DATA_TYPE*)malloc(N*N*sizeof(DATA_TYPE)); x = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE)); y = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE)); y_outputFromGpu = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE)); tmp = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE)); init(A, x); //GPU_argv_init(); gesummvCuda(A, B, x, y, tmp, y_outputFromGpu); //t_start = rtclock(); //gesummv(A, B, x, y, tmp); //t_end = rtclock(); //fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); //compareResults(y, y_outputFromGpu); free(A); free(B); free(x); free(y); free(y_outputFromGpu); free(tmp); return 0; }
493dcd77623be646ccba66c0bc2a2f17849e2119.cu
/** * gesummv.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <unistd.h> #include <stdio.h> #include <time.h> #include <sys/time.h> #include <stdlib.h> #include <stdarg.h> #include <string.h> #include <cuda.h> #include "polybenchUtilFuncts.h" //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 #define GPU_DEVICE 0 /* Problem size */ #define N 1024 #ifndef THREADS #define THREADS 1024 #endif /* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0) */ #define ALPHA 43532.0f #define BETA 12313.0f /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void gesummv(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *x, DATA_TYPE *y, DATA_TYPE *tmp) { int i, j; for (i = 0; i < N; i++) { tmp[i] = 0; y[i] = 0; for (j = 0; j < N; j++) { tmp[i] = A[i*N + j] * x[j] + tmp[i]; y[i] = B[i*N + j] * x[j] + y[i]; } y[i] = ALPHA * tmp[i] + BETA * y[i]; } } void init(DATA_TYPE* A, DATA_TYPE* x) { int i, j; for (i = 0; i < N; i++) { x[i] = ((DATA_TYPE) i) / N; for (j = 0; j < N; j++) { A[i*N + j] = ((DATA_TYPE) i*j) / N; } } } void compareResults(DATA_TYPE* y, DATA_TYPE* y_outputFromGpu) { int i, fail; fail = 0; for (i=0; i<(N); i++) { if (percentDiff(y[i], y_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, GPU_DEVICE); printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name); cudaSetDevice( GPU_DEVICE ); } __global__ void gesummv_kernel(DATA_TYPE *a, DATA_TYPE *b, DATA_TYPE *x, DATA_TYPE *y, DATA_TYPE *tmp) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { int j; for(j = 0; j < N; j++) { tmp[i] += a[i * N + j] * x[j]; y[i] += b[i * N + j] * x[j]; } y[i] = ALPHA * tmp[i] + BETA * y[i]; } } void gesummvCuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp, DATA_TYPE* y_outputFromGpu) { double t_start, t_end; DATA_TYPE *A_gpu; DATA_TYPE *B_gpu; DATA_TYPE *x_gpu; DATA_TYPE *y_gpu; DATA_TYPE *tmp_gpu; cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * N * N); cudaMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * N * N); cudaMalloc((void **)&x_gpu, sizeof(DATA_TYPE) * N); cudaMalloc((void **)&y_gpu, sizeof(DATA_TYPE) * N); cudaMalloc((void **)&tmp_gpu, sizeof(DATA_TYPE) * N); cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * N * N, cudaMemcpyHostToDevice); cudaMemcpy(B_gpu, B, sizeof(DATA_TYPE) * N * N, cudaMemcpyHostToDevice); cudaMemcpy(x_gpu, x, sizeof(DATA_TYPE) * N, cudaMemcpyHostToDevice); cudaMemcpy(y_gpu, y, sizeof(DATA_TYPE) * N, cudaMemcpyHostToDevice); cudaMemcpy(tmp_gpu, tmp, sizeof(DATA_TYPE) * N, cudaMemcpyHostToDevice); dim3 block(THREADS); dim3 grid((unsigned int)ceil( ((float)N) / ((float)block.x) )); t_start = rtclock(); gesummv_kernel<<< grid, block>>>(A_gpu,B_gpu,x_gpu, y_gpu, tmp_gpu); cudaThreadSynchronize(); t_end = rtclock(); cudaMemcpy(y_outputFromGpu, y_gpu, sizeof(DATA_TYPE) * N, cudaMemcpyDeviceToHost); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); } int main(int argc, char *argv[]) { //double t_start, t_end; DATA_TYPE* A; DATA_TYPE* B; DATA_TYPE* x; DATA_TYPE* y; DATA_TYPE* y_outputFromGpu; DATA_TYPE* tmp; A = 
(DATA_TYPE*)malloc(N*N*sizeof(DATA_TYPE)); B = (DATA_TYPE*)malloc(N*N*sizeof(DATA_TYPE)); x = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE)); y = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE)); y_outputFromGpu = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE)); tmp = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE)); init(A, x); //GPU_argv_init(); gesummvCuda(A, B, x, y, tmp, y_outputFromGpu); //t_start = rtclock(); //gesummv(A, B, x, y, tmp); //t_end = rtclock(); //fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); //compareResults(y, y_outputFromGpu); free(A); free(B); free(x); free(y); free(y_outputFromGpu); free(tmp); return 0; }
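A note on the gesummv pair above: init() fills only A and x, while B, y and tmp go to the device straight from malloc(), and the kernel then accumulates into y and tmp with +=, so the GPU result starts from indeterminate values (the CPU reference zeroes them inside gesummv). A sketch of the same per-row kernel with in-kernel zeroing; the kernel name is mine, the macros are the ones defined in the file:

// Sketch: identical row-per-thread structure to gesummv_kernel above, but the
// accumulators live in registers and start at zero, so uninitialized y/tmp host
// buffers no longer leak into the result. B still has to be filled by the caller
// for the output to be meaningful.
__global__ void gesummv_kernel_zeroed(DATA_TYPE *a, DATA_TYPE *b, DATA_TYPE *x,
                                      DATA_TYPE *y, DATA_TYPE *tmp)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N)
    {
        DATA_TYPE t = 0;
        DATA_TYPE s = 0;
        for (int j = 0; j < N; j++)
        {
            t += a[i * N + j] * x[j];
            s += b[i * N + j] * x[j];
        }
        tmp[i] = t;
        y[i]   = ALPHA * t + BETA * s;
    }
}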
148c779759a567388beb20460ef83377873d66f2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<cuda.h> #include<stdlib.h> #define DIVIDER 10000 __global__ void even(int *darr,int n) { int k=blockIdx.x*512+threadIdx.x; int t; k=k*2; //for even positions if(k< n-1) { if(darr[k]>darr[k+1]) { //swap the numbers t=darr[k]; darr[k]=darr[k+1]; darr[k+1] =t; } } } __global__ void odd(int *darr,int n) { int k=blockIdx.x*512+threadIdx.x; int t; k=k*2 +1; //for odd positions if(k< n-1) { if(darr[k]>darr[k+1]) { //swap the numbers t=darr[k]; darr[k]=darr[k+1]; darr[k+1] =t; } } } int main() { int *arr,*darr; int n,i; time_t t; srand((unsigned)time(&t)); printf("\n Enter how many numbers :"); scanf("%d",&n); arr=(int *)malloc(n*sizeof(int)); //for dynamic inputs for(i=0; i<n; i++) { arr[i] = (rand() % DIVIDER) + 1; } // printf("\n UNSORTED ARRAY \n"); // for(i=0; i<n; i++) // printf("\t%d",arr[i]); hipMalloc(&darr,n*sizeof(int)); //memory allocation in GPU for darr hipMemcpy(darr,arr ,n*sizeof(int) ,hipMemcpyHostToDevice); // data transfer from host to GPU for(i=0;i<=n/2;i++) { hipLaunchKernelGGL(( even), dim3(n/1024+1),dim3(512), 0, 0, darr,n); hipLaunchKernelGGL(( odd), dim3(n/1024+1),dim3(512), 0, 0, darr,n); } hipMemcpy(arr,darr,n*sizeof(int),hipMemcpyDeviceToHost); printf("\n SORTED ARRAY \n"); for(i=0; i<n; i++) printf("\t%d",arr[i]); } Enter how many numbers :1050 SORTED ARRAY 6 8 12 60 60 61 63 93 93 118 126 128 177 181 209 214 216 218 231 232 242 245 264 294 297 298 298 322 333 336 364 389 406 417 425 431 433 443 469 476 481 486 500 505 517 525 533 543 547 557 564 583 587 593 598 606 616 623 626 632 633 644 655 656 663 673 675 705 708 709 720 723 743 751 757 805 806 806 810 827 837 840 846 846 847 853 858 860 862 875 887 888 898 898 937 938 954 977 988 995 1007 1007 1019 1019 1022 1023 1025 1033 1036 1051 1054 1057 1064 1067 1075 1076 1100 1105 1108 1112 1118 1122 1144 1171 1173 1181 1196 1196 1198 1205 1240 1242 1243 1261 1270 1280 1280 1280 1283 1295 1304 1337 1338 1339 1344 1363 1372 1380 1383 1385 1395 1398 1402 1432 1439 1441 1449 1455 1466 1475 1491 1497 1504 1512 1531 1534 1553 1555 1559 1570 1573 1576 1582 1584 1584 1605 1607 1609 1622 1625 1628 1646 1653 1667 1677 1683 1687 1692 1699 1715 1735 1739 1759 1771 1782 1789 1789 1791 1824 1842 1856 1883 1886 1889 1897 1909 1913 1931 1932 1932 1933 1952 1960 1962 1965 1970 1980 1981 1997 2010 2036 2037 2042 2049 2066 2073 2073 2087 2096 2099 2117 2119 2123 2133 2153 2156 2168 2173 2180 2187 2200 2223 2235 2240 2246 2256 2276 2301 2311 2314 2319 2346 2351 2352 2378 2386 2391 2399 2404 2408 2426 2463 2476 2481 2482 2485 2495 2496 2496 2498 2503 2510 2520 2520 2532 2535 2544 2545 2555 2564 2566 2578 2580 2597 2603 2604 2626 2651 2657 2658 2667 2670 2675 2683 2685 2689 2693 2700 2712 2715 2721 2724 2726 2734 2744 2745 2782 2789 2803 2804 2823 2824 2841 2855 2856 2871 2874 2878 2906 2929 2932 2935 2947 2949 2958 2967 2981 2989 2997 3006 3010 3016 3020 3038 3040 3043 3052 3056 3067 3068 3069 3075 3092 3094 3094 3104 3110 3117 3122 3137 3137 3146 3147 3187 3201 3203 3225 3229 3244 3292 3320 3323 3325 3350 3355 3361 3390 3407 3417 3432 3436 3438 3440 3459 3461 3462 3471 3481 3482 3488 3498 3509 3529 3531 3538 3545 3550 3552 3565 3565 3571 3573 3580 3607 3609 3621 3625 3635 3641 3657 3661 3662 3662 3674 3682 3699 3707 3751 3762 3762 3768 3771 3814 3822 3837 3842 3848 3857 3880 3888 3901 3911 3934 3939 3943 3954 3958 3961 3971 3976 3983 3998 3998 4020 4021 4038 4039 4046 4048 4056 4064 4069 4071 4076 4090 4093 4118 4125 4139 4141 
4159 4170 4170 4172 4180 4188 4195 4203 4207 4218 4226 4230 4231 4243 4252 4254 4267 4276 4279 4287 4291 4293 4303 4316 4333 4334 4335 4337 4349 4352 4361 4363 4369 4383 4393 4405 4408 4412 4422 4425 4435 4438 4443 4445 4447 4455 4460 4462 4462 4464 4467 4468 4473 4476 4478 4484 4489 4489 4499 4522 4523 4534 4540 4541 4577 4589 4591 4607 4607 4614 4630 4642 4643 4646 4658 4658 4662 4683 4689 4690 4699 4718 4719 4740 4761 4775 4779 4782 4789 4825 4831 4836 4842 4844 4861 4868 4877 4889 4892 4895 4901 4926 4939 4950 4958 4965 4967 4968 4971 4981 4984 4985 4991 4998 5010 5012 5013 5016 5023 5052 5080 5088 5089 5146 5155 5168 5173 5177 5187 5192 5201 5201 5218 5227 5249 5267 5267 5275 5277 5282 5284 5290 5291 5294 5303 5306 5310 5311 5314 5318 5326 5344 5344 5344 5346 5366 5374 5378 5381 5384 5399 5408 5416 5417 5436 5461 5461 5463 5464 5472 5486 5494 5494 5516 5519 5524 5527 5535 5542 5544 5558 5568 5573 5594 5610 5610 5648 5658 5668 5682 5685 5692 5705 5707 5707 5718 5724 5729 5733 5741 5753 5762 5765 5765 5768 5779 5785 5814 5817 5838 5841 5846 5859 5867 5894 5898 5906 5911 5915 5921 5922 5930 5940 5942 5943 5944 5969 5979 5999 6010 6013 6022 6038 6068 6093 6095 6119 6132 6147 6158 6163 6167 6169 6194 6195 6211 6251 6254 6254 6271 6274 6281 6298 6331 6337 6342 6346 6365 6365 6374 6394 6397 6402 6410 6417 6422 6423 6432 6434 6435 6442 6452 6452 6466 6474 6486 6500 6503 6528 6535 6538 6586 6590 6610 6615 6640 6688 6688 6689 6694 6694 6732 6739 6752 6753 6756 6757 6768 6772 6773 6797 6801 6803 6813 6828 6840 6842 6843 6866 6869 6873 6892 6898 6910 6928 6939 6942 6947 6949 6973 6984 7012 7040 7046 7053 7070 7079 7084 7107 7129 7131 7136 7137 7143 7154 7167 7176 7181 7195 7197 7202 7213 7213 7222 7226 7226 7286 7307 7309 7309 7315 7328 7347 7356 7371 7388 7389 7392 7400 7404 7405 7412 7414 7425 7446 7450 7452 7459 7462 7467 7471 7483 7483 7487 7488 7488 7489 7492 7511 7511 7523 7525 7527 7531 7552 7560 7576 7580 7582 7627 7628 7667 7673 7701 7708 7738 7756 7756 7759 7772 7785 7786 7795 7808 7808 7812 7842 7846 7859 7895 7900 7923 7928 7928 7929 7931 7943 7944 7951 7963 7968 7969 7971 7975 7993 8013 8020 8035 8050 8061 8062 8077 8091 8095 8103 8105 8107 8138 8145 8193 8215 8233 8254 8264 8268 8298 8301 8303 8304 8311 8318 8337 8337 8341 8350 8382 8388 8391 8398 8398 8402 8407 8411 8432 8439 8448 8455 8476 8491 8514 8515 8539 8540 8555 8557 8578 8590 8601 8602 8617 8625 8651 8665 8665 8705 8714 8718 8731 8737 8753 8753 8754 8757 8758 8765 8765 8789 8798 8818 8824 8828 8840 8842 8843 8845 8847 8849 8872 8882 8886 8892 8915 8933 8937 8941 8942 8942 8944 8945 8965 8967 8972 8981 8984 8995 8996 8998 9019 9027 9050 9075 9103 9103 9105 9118 9119 9127 9132 9134 9175 9178 9202 9209 9219 9222 9242 9246 9251 9254 9279 9341 9371 9413 9436 9437 9445 9446 9453 9512 9532 9539 9543 9553 9564 9569 9578 9580 9600 9612 9632 9642 9652 9662 9671 9684 9688 9697 9712 9717 9720 9723 9726 9733 9750 9770 9774 9783 9797 9831 9883 9901 9907 9916 9939 9957 9960 9962 9964 9988 9998
148c779759a567388beb20460ef83377873d66f2.cu
#include<stdio.h> #include<cuda.h> #include<stdlib.h> #define DIVIDER 10000 __global__ void even(int *darr,int n) { int k=blockIdx.x*512+threadIdx.x; int t; k=k*2; //for even positions if(k< n-1) { if(darr[k]>darr[k+1]) { //swap the numbers t=darr[k]; darr[k]=darr[k+1]; darr[k+1] =t; } } } __global__ void odd(int *darr,int n) { int k=blockIdx.x*512+threadIdx.x; int t; k=k*2 +1; //for odd positions if(k< n-1) { if(darr[k]>darr[k+1]) { //swap the numbers t=darr[k]; darr[k]=darr[k+1]; darr[k+1] =t; } } } int main() { int *arr,*darr; int n,i; time_t t; srand((unsigned)time(&t)); printf("\n Enter how many numbers :"); scanf("%d",&n); arr=(int *)malloc(n*sizeof(int)); //for dynamic inputs for(i=0; i<n; i++) { arr[i] = (rand() % DIVIDER) + 1; } // printf("\n UNSORTED ARRAY \n"); // for(i=0; i<n; i++) // printf("\t%d",arr[i]); cudaMalloc(&darr,n*sizeof(int)); //memory allocation in GPU for darr cudaMemcpy(darr,arr ,n*sizeof(int) ,cudaMemcpyHostToDevice); // data transfer from host to GPU for(i=0;i<=n/2;i++) { even<<<n/1024+1,512>>>(darr,n); odd<<<n/1024+1,512>>>(darr,n); } cudaMemcpy(arr,darr,n*sizeof(int),cudaMemcpyDeviceToHost); printf("\n SORTED ARRAY \n"); for(i=0; i<n; i++) printf("\t%d",arr[i]); } Enter how many numbers :1050 SORTED ARRAY 6 8 12 60 60 61 63 93 93 118 126 128 177 181 209 214 216 218 231 232 242 245 264 294 297 298 298 322 333 336 364 389 406 417 425 431 433 443 469 476 481 486 500 505 517 525 533 543 547 557 564 583 587 593 598 606 616 623 626 632 633 644 655 656 663 673 675 705 708 709 720 723 743 751 757 805 806 806 810 827 837 840 846 846 847 853 858 860 862 875 887 888 898 898 937 938 954 977 988 995 1007 1007 1019 1019 1022 1023 1025 1033 1036 1051 1054 1057 1064 1067 1075 1076 1100 1105 1108 1112 1118 1122 1144 1171 1173 1181 1196 1196 1198 1205 1240 1242 1243 1261 1270 1280 1280 1280 1283 1295 1304 1337 1338 1339 1344 1363 1372 1380 1383 1385 1395 1398 1402 1432 1439 1441 1449 1455 1466 1475 1491 1497 1504 1512 1531 1534 1553 1555 1559 1570 1573 1576 1582 1584 1584 1605 1607 1609 1622 1625 1628 1646 1653 1667 1677 1683 1687 1692 1699 1715 1735 1739 1759 1771 1782 1789 1789 1791 1824 1842 1856 1883 1886 1889 1897 1909 1913 1931 1932 1932 1933 1952 1960 1962 1965 1970 1980 1981 1997 2010 2036 2037 2042 2049 2066 2073 2073 2087 2096 2099 2117 2119 2123 2133 2153 2156 2168 2173 2180 2187 2200 2223 2235 2240 2246 2256 2276 2301 2311 2314 2319 2346 2351 2352 2378 2386 2391 2399 2404 2408 2426 2463 2476 2481 2482 2485 2495 2496 2496 2498 2503 2510 2520 2520 2532 2535 2544 2545 2555 2564 2566 2578 2580 2597 2603 2604 2626 2651 2657 2658 2667 2670 2675 2683 2685 2689 2693 2700 2712 2715 2721 2724 2726 2734 2744 2745 2782 2789 2803 2804 2823 2824 2841 2855 2856 2871 2874 2878 2906 2929 2932 2935 2947 2949 2958 2967 2981 2989 2997 3006 3010 3016 3020 3038 3040 3043 3052 3056 3067 3068 3069 3075 3092 3094 3094 3104 3110 3117 3122 3137 3137 3146 3147 3187 3201 3203 3225 3229 3244 3292 3320 3323 3325 3350 3355 3361 3390 3407 3417 3432 3436 3438 3440 3459 3461 3462 3471 3481 3482 3488 3498 3509 3529 3531 3538 3545 3550 3552 3565 3565 3571 3573 3580 3607 3609 3621 3625 3635 3641 3657 3661 3662 3662 3674 3682 3699 3707 3751 3762 3762 3768 3771 3814 3822 3837 3842 3848 3857 3880 3888 3901 3911 3934 3939 3943 3954 3958 3961 3971 3976 3983 3998 3998 4020 4021 4038 4039 4046 4048 4056 4064 4069 4071 4076 4090 4093 4118 4125 4139 4141 4159 4170 4170 4172 4180 4188 4195 4203 4207 4218 4226 4230 4231 4243 4252 4254 4267 4276 4279 4287 4291 4293 4303 4316 4333 4334 4335 4337 4349 4352 4361 
4363 4369 4383 4393 4405 4408 4412 4422 4425 4435 4438 4443 4445 4447 4455 4460 4462 4462 4464 4467 4468 4473 4476 4478 4484 4489 4489 4499 4522 4523 4534 4540 4541 4577 4589 4591 4607 4607 4614 4630 4642 4643 4646 4658 4658 4662 4683 4689 4690 4699 4718 4719 4740 4761 4775 4779 4782 4789 4825 4831 4836 4842 4844 4861 4868 4877 4889 4892 4895 4901 4926 4939 4950 4958 4965 4967 4968 4971 4981 4984 4985 4991 4998 5010 5012 5013 5016 5023 5052 5080 5088 5089 5146 5155 5168 5173 5177 5187 5192 5201 5201 5218 5227 5249 5267 5267 5275 5277 5282 5284 5290 5291 5294 5303 5306 5310 5311 5314 5318 5326 5344 5344 5344 5346 5366 5374 5378 5381 5384 5399 5408 5416 5417 5436 5461 5461 5463 5464 5472 5486 5494 5494 5516 5519 5524 5527 5535 5542 5544 5558 5568 5573 5594 5610 5610 5648 5658 5668 5682 5685 5692 5705 5707 5707 5718 5724 5729 5733 5741 5753 5762 5765 5765 5768 5779 5785 5814 5817 5838 5841 5846 5859 5867 5894 5898 5906 5911 5915 5921 5922 5930 5940 5942 5943 5944 5969 5979 5999 6010 6013 6022 6038 6068 6093 6095 6119 6132 6147 6158 6163 6167 6169 6194 6195 6211 6251 6254 6254 6271 6274 6281 6298 6331 6337 6342 6346 6365 6365 6374 6394 6397 6402 6410 6417 6422 6423 6432 6434 6435 6442 6452 6452 6466 6474 6486 6500 6503 6528 6535 6538 6586 6590 6610 6615 6640 6688 6688 6689 6694 6694 6732 6739 6752 6753 6756 6757 6768 6772 6773 6797 6801 6803 6813 6828 6840 6842 6843 6866 6869 6873 6892 6898 6910 6928 6939 6942 6947 6949 6973 6984 7012 7040 7046 7053 7070 7079 7084 7107 7129 7131 7136 7137 7143 7154 7167 7176 7181 7195 7197 7202 7213 7213 7222 7226 7226 7286 7307 7309 7309 7315 7328 7347 7356 7371 7388 7389 7392 7400 7404 7405 7412 7414 7425 7446 7450 7452 7459 7462 7467 7471 7483 7483 7487 7488 7488 7489 7492 7511 7511 7523 7525 7527 7531 7552 7560 7576 7580 7582 7627 7628 7667 7673 7701 7708 7738 7756 7756 7759 7772 7785 7786 7795 7808 7808 7812 7842 7846 7859 7895 7900 7923 7928 7928 7929 7931 7943 7944 7951 7963 7968 7969 7971 7975 7993 8013 8020 8035 8050 8061 8062 8077 8091 8095 8103 8105 8107 8138 8145 8193 8215 8233 8254 8264 8268 8298 8301 8303 8304 8311 8318 8337 8337 8341 8350 8382 8388 8391 8398 8398 8402 8407 8411 8432 8439 8448 8455 8476 8491 8514 8515 8539 8540 8555 8557 8578 8590 8601 8602 8617 8625 8651 8665 8665 8705 8714 8718 8731 8737 8753 8753 8754 8757 8758 8765 8765 8789 8798 8818 8824 8828 8840 8842 8843 8845 8847 8849 8872 8882 8886 8892 8915 8933 8937 8941 8942 8942 8944 8945 8965 8967 8972 8981 8984 8995 8996 8998 9019 9027 9050 9075 9103 9103 9105 9118 9119 9127 9132 9134 9175 9178 9202 9209 9219 9222 9242 9246 9251 9254 9279 9341 9371 9413 9436 9437 9445 9446 9453 9512 9532 9539 9543 9553 9564 9569 9578 9580 9600 9612 9632 9642 9652 9662 9671 9684 9688 9697 9712 9717 9720 9723 9726 9733 9750 9770 9774 9783 9797 9831 9883 9901 9907 9916 9939 9957 9960 9962 9964 9988 9998
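The pair above is an odd-even (brick) transposition sort: each round launches an even-phase and an odd-phase kernel, every thread compares one adjacent pair, and n/2+1 rounds give at least the n phases the algorithm needs; the console dump appended to both files is sample output for n = 1050. A small host-side check, with a name of my choosing, that can be called on arr after the copy back to confirm the result is non-decreasing:

// Sketch: plain host code, not part of the original file.
static int isSorted(const int *arr, int n)
{
    for (int i = 0; i + 1 < n; i++) {
        if (arr[i] > arr[i + 1])
            return 0;              // found an inversion, the sort failed
    }
    return 1;
}

/* usage, after cudaMemcpy(arr, darr, n*sizeof(int), cudaMemcpyDeviceToHost):
   printf("\n %s\n", isSorted(arr, n) ? "SORTED" : "NOT SORTED");          */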
2597e71b73ad4ab8f4c55a5d286a65789413aca8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector arithmetic ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar arithmetic =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector comparison ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar comparison =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector math (one argument) ============================================= // Calculate the arc cosine of the input argument. extern "C" // Calculate the nonnegative arc hyperbolic cosine of the input argument. extern "C" // Calculate the arc sine of the input argument. extern "C" // Calculate the arc hyperbolic sine of the input argument. extern "C" // Calculate the arc tangent of the input argument. extern "C" // Calculate the arc hyperbolic tangent of the input argument. extern "C" // Calculate the cube root of the input argument. extern "C" // Calculate ceiling of the input argument. extern "C" // Calculate the cosine of the input argument. extern "C" // Calculate the hyperbolic cosine of the input argument. extern "C" // Calculate the cosine of the input argument p . extern "C" // Calculate the complementary error function of the input argument. extern "C" // Calculate the inverse complementary error function of the input argument. extern "C" // Calculate the scaled complementary error function of the input argument. extern "C" // Calculate the error function of the input argument. extern "C" // Calculate the inverse error function of the input argument. extern "C" // Calculate the base 10 exponential of the input argument. extern "C" // Calculate the base 2 exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument, minus 1. extern "C" // Calculate the absolute value of its argument. extern "C" // Calculate the largest integer less than or equal to x. extern "C" // Calculate the value of the Bessel function of the first kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the first kind of order 1 for the input argument. extern "C" // Calculate the natural logarithm of the absolute value of the gamma function of the input argument. extern "C" // Calculate the base 10 logarithm of the input argument. extern "C" // Calculate the value of l o g e ( 1 + x ) . extern "C" // Calculate the base 2 logarithm of the input argument. extern "C" // Calculate the doubleing point representation of the exponent of the input argument. extern "C" // Calculate the natural logarithm of the input argument. extern "C" // Calculate the standard normal cumulative distribution function. extern "C" // Calculate the inverse of the standard normal cumulative distribution function. extern "C" // Calculate reciprocal cube root function. extern "C" // Round input to nearest integer value in doubleing-point. extern "C" // Round to nearest integer value in doubleing-point. extern "C" // Calculate the reciprocal of the square root of the input argument. 
extern "C" // Calculate the sine of the input argument. extern "C" // Calculate the hyperbolic sine of the input argument. extern "C" // Calculate the sine of the input argument p . extern "C" // Calculate the square root of the input argument. extern "C" // Calculate the tangent of the input argument. extern "C" // Calculate the hyperbolic tangent of the input argument. extern "C" // Calculate the gamma function of the input argument. extern "C" // Truncate input argument to the integral part. extern "C" // Calculate the value of the Bessel function of the second kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the second kind of order 1 for the input argument. extern "C" //=== Vector math (two arguments) ============================================ // Create value with given magnitude, copying sign of second value. extern "C" // Compute the positive difference between x and y. extern "C" // Divide two doubleing point values. extern "C" // Determine the maximum numeric value of the arguments. extern "C" // Determine the minimum numeric value of the arguments. extern "C" // Calculate the doubleing-point remainder of x / y. extern "C" // Calculate the square root of the sum of squares of two arguments. extern "C" // Return next representable single-precision doubleing-point value afer argument. extern "C" // Calculate the value of first argument to the power of second argument. extern "C" // Compute single-precision doubleing-point remainder. extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //WARNING : device_sum size should be gridDim.x __global__ void vec_computeModelMany1 (int n, int sizeImage, double *result, double *x, double *amplitude,double background) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = threadIdx.y + blockIdx.y * blockDim.y; int id = idy * gridDim.x * blockDim.x + idx; int id2=id/sizeImage; if (id < n) { result[id] = x[id] * amplitude[id2] + background; } }
2597e71b73ad4ab8f4c55a5d286a65789413aca8.cu
#include "includes.h" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector arithmetic ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar arithmetic =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector comparison ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar comparison =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector math (one argument) ============================================= // Calculate the arc cosine of the input argument. extern "C" // Calculate the nonnegative arc hyperbolic cosine of the input argument. extern "C" // Calculate the arc sine of the input argument. extern "C" // Calculate the arc hyperbolic sine of the input argument. extern "C" // Calculate the arc tangent of the input argument. extern "C" // Calculate the arc hyperbolic tangent of the input argument. extern "C" // Calculate the cube root of the input argument. extern "C" // Calculate ceiling of the input argument. extern "C" // Calculate the cosine of the input argument. extern "C" // Calculate the hyperbolic cosine of the input argument. extern "C" // Calculate the cosine of the input argument × p . extern "C" // Calculate the complementary error function of the input argument. extern "C" // Calculate the inverse complementary error function of the input argument. extern "C" // Calculate the scaled complementary error function of the input argument. extern "C" // Calculate the error function of the input argument. extern "C" // Calculate the inverse error function of the input argument. extern "C" // Calculate the base 10 exponential of the input argument. extern "C" // Calculate the base 2 exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument, minus 1. extern "C" // Calculate the absolute value of its argument. extern "C" // Calculate the largest integer less than or equal to x. extern "C" // Calculate the value of the Bessel function of the first kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the first kind of order 1 for the input argument. extern "C" // Calculate the natural logarithm of the absolute value of the gamma function of the input argument. extern "C" // Calculate the base 10 logarithm of the input argument. extern "C" // Calculate the value of l o g e ( 1 + x ) . extern "C" // Calculate the base 2 logarithm of the input argument. extern "C" // Calculate the doubleing point representation of the exponent of the input argument. extern "C" // Calculate the natural logarithm of the input argument. extern "C" // Calculate the standard normal cumulative distribution function. extern "C" // Calculate the inverse of the standard normal cumulative distribution function. extern "C" // Calculate reciprocal cube root function. extern "C" // Round input to nearest integer value in doubleing-point. extern "C" // Round to nearest integer value in doubleing-point. extern "C" // Calculate the reciprocal of the square root of the input argument. extern "C" // Calculate the sine of the input argument. extern "C" // Calculate the hyperbolic sine of the input argument. 
extern "C" // Calculate the sine of the input argument × p . extern "C" // Calculate the square root of the input argument. extern "C" // Calculate the tangent of the input argument. extern "C" // Calculate the hyperbolic tangent of the input argument. extern "C" // Calculate the gamma function of the input argument. extern "C" // Truncate input argument to the integral part. extern "C" // Calculate the value of the Bessel function of the second kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the second kind of order 1 for the input argument. extern "C" //=== Vector math (two arguments) ============================================ // Create value with given magnitude, copying sign of second value. extern "C" // Compute the positive difference between x and y. extern "C" // Divide two doubleing point values. extern "C" // Determine the maximum numeric value of the arguments. extern "C" // Determine the minimum numeric value of the arguments. extern "C" // Calculate the doubleing-point remainder of x / y. extern "C" // Calculate the square root of the sum of squares of two arguments. extern "C" // Return next representable single-precision doubleing-point value afer argument. extern "C" // Calculate the value of first argument to the power of second argument. extern "C" // Compute single-precision doubleing-point remainder. extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //WARNING : device_sum size should be gridDim.x __global__ void vec_computeModelMany1 (int n, int sizeImage, double *result, double *x, double *amplitude,double background) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = threadIdx.y + blockIdx.y * blockDim.y; int id = idy * gridDim.x * blockDim.x + idx; int id2=id/sizeImage; if (id < n) { result[id] = x[id] * amplitude[id2] + background; } }