Dataset columns (name: type, string length range):
- hip_filename: string, length 5–84
- hip_content: string, length 79–9.69M
- cuda_filename: string, length 4–83
- cuda_content: string, length 19–9.69M
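Each row below pairs a hipified source file (hip_filename/hip_content) with its original CUDA counterpart (cuda_filename/cuda_content). As a minimal sketch of how a row can be inspected — assuming the pairs are published as a Hugging Face dataset; the repository id "user/hip-cuda-pairs" is a placeholder, not the actual dataset name:

from datasets import load_dataset

# Minimal sketch (assumption: the pairs are hosted as a Hugging Face dataset;
# "user/hip-cuda-pairs" is a placeholder id, not the real repository name).
ds = load_dataset("user/hip-cuda-pairs", split="train", streaming=True)
row = next(iter(ds))

# Each row pairs a hipified translation unit with its CUDA original.
print(row["hip_filename"])        # e.g. 653f8e9d75bdb1ee0c6177341ece3c5ca645fdab.hip
print(row["cuda_filename"])       # e.g. 653f8e9d75bdb1ee0c6177341ece3c5ca645fdab.cu
print(row["hip_content"][:200])   # first 200 characters of the HIP source
print(row["cuda_content"][:200])  # matching slice of the CUDA source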
653f8e9d75bdb1ee0c6177341ece3c5ca645fdab.hip
// !!! This is a file automatically generated by hipify!!! /* * File smc_impl_nested.cuh contains the implementation of the nested SMC. * This file is included by smc_impl.cuh and relies on the includes in smc_impl.cuh. */ // Nested inference is never used, and this code has become obsolete. /* #include "macros/macros.cuh" #include "smc.cuh" #include "dists/dists.cuh" #include "particles_memory_handler.cuh" #include "resample/systematic/systematic_cpu.cuh" #ifdef __NVCC__ #include <hiprand/hiprand_kernel.h> #include "utils/cuda_error_utils.cuh" #include "resample/systematic/systematic_gpu.cuh" #include "smc_kernels.cuh" #endif DEV double runSMCNested( #ifdef __NVCC__ hiprandState_t* randState, #endif pplFunc_t* bblocks, int numBblocks, int numParticles, size_t progStateSize, bool parallelExec, bool parallelResampling, int parentIdx, callbackFunc_t callback, void* ret, void* arg) { if(parallelExec || parallelResampling) { #ifndef GPU printf("Cannot run in parallel when not compiled for GPU"); return 0.0; #endif } bool requireRandStates = parallelExec; floating_t logNormConstant = 0; particles_t particles = allocateParticlesNested(numParticles, progStateSize); #ifdef __NVCC__ const int NUM_BLOCKS = (numParticles + NUM_THREADS_PER_BLOCK_NESTED - 1) / NUM_THREADS_PER_BLOCK_NESTED; hiprandState_t* randStates; if(requireRandStates) { randStates = new hiprandState_t[numParticles]; initCurandStates<<<NUM_BLOCKS, NUM_THREADS_PER_BLOCK_NESTED>>>(randStates, numParticles, parentIdx); hipDeviceSynchronize(); cudaCheckErrorDev(); } #endif resampler_t resampler = initResamplerNested(numParticles, progStateSize); // Run program/inference while(true) { if(parallelExec) { #ifdef __NVCC__ // Use nested randStates execFuncs<<<NUM_BLOCKS, NUM_THREADS_PER_BLOCK_NESTED>>>(randStates, particles, bblocks, numParticles, numParticles, numBblocks, arg); hipDeviceSynchronize(); cudaCheckErrorDev(); #endif } else { for(int i = 0; i < numParticles; i++) { int pc = particles.pcs[i]; if(pc < numBblocks) { bblocks[pc]( #ifdef __NVCC__ randState, #endif particles, i, arg); } } } floating_t logWeightSum; if(parallelResampling) { #ifdef __NVCC__ logWeightSum = calcLogWeightSumGpu(particles.weights, resampler, numParticles, NUM_BLOCKS, NUM_THREADS_PER_BLOCK_NESTED); #endif } else { logWeightSum = calcLogWeightSumCpu(particles.weights, resampler, numParticles); } logNormConstant += logWeightSum - log(static_cast<floating_t>(numParticles)); if(particles.pcs[0] >= numBblocks) // Assumption: All terminate at the same time break; if(parallelResampling) { #ifdef __NVCC__ resampleSystematicGpuNested(randState, particles, resampler, numParticles, NUM_BLOCKS); #endif } else { resampleSystematicCpu( #ifdef __NVCC__ randState, #endif particles, resampler, numParticles); } } callback(particles, numParticles, ret); // Clean up destResamplerNested(resampler); freeParticlesNested(particles); #ifdef __NVCC__ if(requireRandStates) delete[] randStates; #endif return logNormConstant; } */
653f8e9d75bdb1ee0c6177341ece3c5ca645fdab.cu
/* * File smc_impl_nested.cuh contains the implementation of the nested SMC. * This file is included by smc_impl.cuh and relies on the includes in smc_impl.cuh. */ // Nested inference is never used, and this code has become obsolete. /* #include "macros/macros.cuh" #include "smc.cuh" #include "dists/dists.cuh" #include "particles_memory_handler.cuh" #include "resample/systematic/systematic_cpu.cuh" #ifdef __NVCC__ #include <curand_kernel.h> #include "utils/cuda_error_utils.cuh" #include "resample/systematic/systematic_gpu.cuh" #include "smc_kernels.cuh" #endif DEV double runSMCNested( #ifdef __NVCC__ curandState* randState, #endif pplFunc_t* bblocks, int numBblocks, int numParticles, size_t progStateSize, bool parallelExec, bool parallelResampling, int parentIdx, callbackFunc_t callback, void* ret, void* arg) { if(parallelExec || parallelResampling) { #ifndef GPU printf("Cannot run in parallel when not compiled for GPU"); return 0.0; #endif } bool requireRandStates = parallelExec; floating_t logNormConstant = 0; particles_t particles = allocateParticlesNested(numParticles, progStateSize); #ifdef __NVCC__ const int NUM_BLOCKS = (numParticles + NUM_THREADS_PER_BLOCK_NESTED - 1) / NUM_THREADS_PER_BLOCK_NESTED; curandState* randStates; if(requireRandStates) { randStates = new curandState[numParticles]; initCurandStates<<<NUM_BLOCKS, NUM_THREADS_PER_BLOCK_NESTED>>>(randStates, numParticles, parentIdx); cudaDeviceSynchronize(); cudaCheckErrorDev(); } #endif resampler_t resampler = initResamplerNested(numParticles, progStateSize); // Run program/inference while(true) { if(parallelExec) { #ifdef __NVCC__ // Use nested randStates execFuncs<<<NUM_BLOCKS, NUM_THREADS_PER_BLOCK_NESTED>>>(randStates, particles, bblocks, numParticles, numParticles, numBblocks, arg); cudaDeviceSynchronize(); cudaCheckErrorDev(); #endif } else { for(int i = 0; i < numParticles; i++) { int pc = particles.pcs[i]; if(pc < numBblocks) { bblocks[pc]( #ifdef __NVCC__ randState, #endif particles, i, arg); } } } floating_t logWeightSum; if(parallelResampling) { #ifdef __NVCC__ logWeightSum = calcLogWeightSumGpu(particles.weights, resampler, numParticles, NUM_BLOCKS, NUM_THREADS_PER_BLOCK_NESTED); #endif } else { logWeightSum = calcLogWeightSumCpu(particles.weights, resampler, numParticles); } logNormConstant += logWeightSum - log(static_cast<floating_t>(numParticles)); if(particles.pcs[0] >= numBblocks) // Assumption: All terminate at the same time break; if(parallelResampling) { #ifdef __NVCC__ resampleSystematicGpuNested(randState, particles, resampler, numParticles, NUM_BLOCKS); #endif } else { resampleSystematicCpu( #ifdef __NVCC__ randState, #endif particles, resampler, numParticles); } } callback(particles, numParticles, ret); // Clean up destResamplerNested(resampler); freeParticlesNested(particles); #ifdef __NVCC__ if(requireRandStates) delete[] randStates; #endif return logNormConstant; } */
74d88a1be5e8b59b7b1306fea562eda8d1f5a776.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> int main (int argc, char **argv){ int ndev, maxtpb; hipGetDeviceCount(&ndev); printf("Number of GPUs = %4d\n",ndev); for(int i=0;i<ndev;i++){ hipDeviceProp_t deviceProps; hipGetDeviceProperties(&deviceProps, i); maxtpb = deviceProps.maxThreadsPerBlock; printf("GPU device %4d:\n\tName: %s:\n",i,deviceProps.name); printf("\tCompute capabilities: SM %d.%d\n", deviceProps.major, deviceProps.minor); printf("\tMaximum number of threads per block: %4d\n",maxtpb); printf("\tMaximum number of threads per SM: %4d\n", deviceProps.maxThreadsPerMultiProcessor); printf("\tNumber of streaming multiprocessors: %4d\n", deviceProps.multiProcessorCount); printf("\tClock rate: %d KHz\n",deviceProps.clockRate); printf("\tGlobal memory: %lu bytes\n",deviceProps.totalGlobalMem); } hipSetDevice(0); }
74d88a1be5e8b59b7b1306fea562eda8d1f5a776.cu
#include <cuda.h> #include <stdio.h> int main (int argc, char **argv){ int ndev, maxtpb; cudaGetDeviceCount(&ndev); printf("Number of GPUs = %4d\n",ndev); for(int i=0;i<ndev;i++){ cudaDeviceProp deviceProps; cudaGetDeviceProperties(&deviceProps, i); maxtpb = deviceProps.maxThreadsPerBlock; printf("GPU device %4d:\n\tName: %s:\n",i,deviceProps.name); printf("\tCompute capabilities: SM %d.%d\n", deviceProps.major, deviceProps.minor); printf("\tMaximum number of threads per block: %4d\n",maxtpb); printf("\tMaximum number of threads per SM: %4d\n", deviceProps.maxThreadsPerMultiProcessor); printf("\tNumber of streaming multiprocessors: %4d\n", deviceProps.multiProcessorCount); printf("\tClock rate: %d KHz\n",deviceProps.clockRate); printf("\tGlobal memory: %lu bytes\n",deviceProps.totalGlobalMem); } cudaSetDevice(0); }
b66292b16b7bc6ea2e806b9e33304494f8377b78.hip
// !!! This is a file automatically generated by hipify!!! #include "distribution_evolution_tests.cuh" double tol = 1.e-6; SCENARIO("[DEVICE] Acceleration Update", "[d-acc]") { GIVEN("A thermal distribution of 5000 positions, help in a quadrupole trap with a Bz = 2.0") { int num_test = 5000; // Initialise trapping parameters trap_geo trap_parameters; trap_parameters.Bz = 2.0; trap_parameters.B0 = 0.; // Initialise rng hiprandState_t *state; checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&state), num_test*sizeof(hiprandState_t))); initialise_rng_states(num_test, state, false); // Initialise positions double3 *d_pos; checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_pos), num_test*sizeof(double3))); // Generate velocity distribution generate_thermal_positions(num_test, 20.e-6, trap_parameters, state, d_pos); WHEN("The update_atom_accelerations function is called") { // Initialise accelerations double3 *d_test_acc; checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_test_acc), num_test*sizeof(double3))); // Generate accelerations update_accelerations(num_test, trap_parameters, d_pos, d_test_acc);; double3 *test_acc; test_acc = reinterpret_cast<double3*>(calloc(num_test, sizeof(double3))); checkCudaErrors(hipMemcpy(test_acc, d_test_acc, num_test*sizeof(double3), hipMemcpyDeviceToHost)); double mean_acc_x = mean_x(test_acc, num_test); double mean_acc_y = mean_y(test_acc, num_test); double mean_acc_z = mean_z(test_acc, num_test); double std_acc_x = std_dev_x(test_acc, num_test); double std_acc_y = std_dev_y(test_acc, num_test); double std_acc_z = std_dev_z(test_acc, num_test); THEN("The mean in each direction should be 0.") { REQUIRE(mean_acc_x <= 0. + std_acc_x / sqrt(num_test)); REQUIRE(mean_acc_x >= 0. - std_acc_x / sqrt(num_test)); REQUIRE(mean_acc_y <= 0. + std_acc_y / sqrt(num_test)); REQUIRE(mean_acc_y >= 0. - std_acc_y / sqrt(num_test)); REQUIRE(mean_acc_z <= 0. + std_acc_z / sqrt(num_test)); REQUIRE(mean_acc_z >= 0. - std_acc_z / sqrt(num_test)); } double expected_std_x_y = sqrt(trap_parameters.Bz*trap_parameters.Bz * gs*gs * muB*muB / (48. * mass*mass)); double expected_std_z = sqrt(trap_parameters.Bz*trap_parameters.Bz * gs*gs * muB*muB / (12. 
* mass*mass)); THEN("The standard deviation in each direction should be given by blah") { REQUIRE(std_acc_x <= expected_std_x_y + std_acc_x / sqrt(num_test)); REQUIRE(std_acc_x >= expected_std_x_y - std_acc_x / sqrt(num_test)); REQUIRE(std_acc_y <= expected_std_x_y + std_acc_y / sqrt(num_test)); REQUIRE(std_acc_y >= expected_std_x_y - std_acc_y / sqrt(num_test)); REQUIRE(std_acc_z <= expected_std_z + std_acc_z / sqrt(num_test)); REQUIRE(std_acc_z >= expected_std_z - std_acc_z / sqrt(num_test)); } hipFree(d_test_acc); free(test_acc); } hipFree(d_pos); } } SCENARIO("[DEVICE] Velocity Update", "[d-vel]") { GIVEN("A thermal distribution of 5000 positions, help in a quadrupole trap with a Bz = 2.0") { double init_T = 20.e-6; int num_test = 5000; // Initialise trapping parameters trap_geo trap_parameters; trap_parameters.Bz = 2.0; trap_parameters.B0 = 0.; // Initialise rng hiprandState_t *state; checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&state), num_test*sizeof(hiprandState_t))); initialise_rng_states(num_test, state, false); // Initialise positions double3 *d_pos; checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_pos), num_test*sizeof(double3))); // Generate velocity distribution generate_thermal_positions(num_test, 20.e-6, trap_parameters, state, d_pos); // Initialise accelerations double3 *d_acc; checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_acc), num_test*sizeof(double3))); // Generate accelerations update_accelerations(num_test, trap_parameters, d_pos, d_acc); WHEN("The update_velocities function is called with dt=1.e-6") { double dt = 1.e-6; // Initialise velocities double3 *d_test_vel; checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_test_vel), num_test*sizeof(double3))); // Generate velocity distribution generate_thermal_velocities(num_test, init_T, state, d_test_vel); double3 *test_vel; test_vel = reinterpret_cast<double3*>(calloc(num_test, sizeof(double3))); checkCudaErrors(hipMemcpy(test_vel, d_test_vel, num_test*sizeof(double3), hipMemcpyDeviceToHost)); double initial_kinetic_energy = mean_kinetic_energy(num_test, test_vel); hipblasHandle_t cublas_handle; checkCudaErrors(hipblasCreate(&cublas_handle)); update_velocities(num_test, dt, cublas_handle, d_acc, d_test_vel); hipblasDestroy(cublas_handle); checkCudaErrors(hipMemcpy(test_vel, d_test_vel, num_test*sizeof(double3), hipMemcpyDeviceToHost)); double final_kinetic_energy = mean_kinetic_energy(num_test, test_vel); THEN("The change in kinetic energy should be 0") { REQUIRE(final_kinetic_energy - initial_kinetic_energy > -tol); REQUIRE(final_kinetic_energy - initial_kinetic_energy < tol); } hipFree(d_test_vel); } hipFree(d_pos); hipFree(d_acc); } }
b66292b16b7bc6ea2e806b9e33304494f8377b78.cu
#include "distribution_evolution_tests.cuh" double tol = 1.e-6; SCENARIO("[DEVICE] Acceleration Update", "[d-acc]") { GIVEN("A thermal distribution of 5000 positions, help in a quadrupole trap with a Bz = 2.0") { int num_test = 5000; // Initialise trapping parameters trap_geo trap_parameters; trap_parameters.Bz = 2.0; trap_parameters.B0 = 0.; // Initialise rng curandState *state; checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&state), num_test*sizeof(curandState))); initialise_rng_states(num_test, state, false); // Initialise positions double3 *d_pos; checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_pos), num_test*sizeof(double3))); // Generate velocity distribution generate_thermal_positions(num_test, 20.e-6, trap_parameters, state, d_pos); WHEN("The update_atom_accelerations function is called") { // Initialise accelerations double3 *d_test_acc; checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_test_acc), num_test*sizeof(double3))); // Generate accelerations update_accelerations(num_test, trap_parameters, d_pos, d_test_acc);; double3 *test_acc; test_acc = reinterpret_cast<double3*>(calloc(num_test, sizeof(double3))); checkCudaErrors(cudaMemcpy(test_acc, d_test_acc, num_test*sizeof(double3), cudaMemcpyDeviceToHost)); double mean_acc_x = mean_x(test_acc, num_test); double mean_acc_y = mean_y(test_acc, num_test); double mean_acc_z = mean_z(test_acc, num_test); double std_acc_x = std_dev_x(test_acc, num_test); double std_acc_y = std_dev_y(test_acc, num_test); double std_acc_z = std_dev_z(test_acc, num_test); THEN("The mean in each direction should be 0.") { REQUIRE(mean_acc_x <= 0. + std_acc_x / sqrt(num_test)); REQUIRE(mean_acc_x >= 0. - std_acc_x / sqrt(num_test)); REQUIRE(mean_acc_y <= 0. + std_acc_y / sqrt(num_test)); REQUIRE(mean_acc_y >= 0. - std_acc_y / sqrt(num_test)); REQUIRE(mean_acc_z <= 0. + std_acc_z / sqrt(num_test)); REQUIRE(mean_acc_z >= 0. - std_acc_z / sqrt(num_test)); } double expected_std_x_y = sqrt(trap_parameters.Bz*trap_parameters.Bz * gs*gs * muB*muB / (48. * mass*mass)); double expected_std_z = sqrt(trap_parameters.Bz*trap_parameters.Bz * gs*gs * muB*muB / (12. 
* mass*mass)); THEN("The standard deviation in each direction should be given by blah") { REQUIRE(std_acc_x <= expected_std_x_y + std_acc_x / sqrt(num_test)); REQUIRE(std_acc_x >= expected_std_x_y - std_acc_x / sqrt(num_test)); REQUIRE(std_acc_y <= expected_std_x_y + std_acc_y / sqrt(num_test)); REQUIRE(std_acc_y >= expected_std_x_y - std_acc_y / sqrt(num_test)); REQUIRE(std_acc_z <= expected_std_z + std_acc_z / sqrt(num_test)); REQUIRE(std_acc_z >= expected_std_z - std_acc_z / sqrt(num_test)); } cudaFree(d_test_acc); free(test_acc); } cudaFree(d_pos); } } SCENARIO("[DEVICE] Velocity Update", "[d-vel]") { GIVEN("A thermal distribution of 5000 positions, help in a quadrupole trap with a Bz = 2.0") { double init_T = 20.e-6; int num_test = 5000; // Initialise trapping parameters trap_geo trap_parameters; trap_parameters.Bz = 2.0; trap_parameters.B0 = 0.; // Initialise rng curandState *state; checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&state), num_test*sizeof(curandState))); initialise_rng_states(num_test, state, false); // Initialise positions double3 *d_pos; checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_pos), num_test*sizeof(double3))); // Generate velocity distribution generate_thermal_positions(num_test, 20.e-6, trap_parameters, state, d_pos); // Initialise accelerations double3 *d_acc; checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_acc), num_test*sizeof(double3))); // Generate accelerations update_accelerations(num_test, trap_parameters, d_pos, d_acc); WHEN("The update_velocities function is called with dt=1.e-6") { double dt = 1.e-6; // Initialise velocities double3 *d_test_vel; checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_test_vel), num_test*sizeof(double3))); // Generate velocity distribution generate_thermal_velocities(num_test, init_T, state, d_test_vel); double3 *test_vel; test_vel = reinterpret_cast<double3*>(calloc(num_test, sizeof(double3))); checkCudaErrors(cudaMemcpy(test_vel, d_test_vel, num_test*sizeof(double3), cudaMemcpyDeviceToHost)); double initial_kinetic_energy = mean_kinetic_energy(num_test, test_vel); cublasHandle_t cublas_handle; checkCudaErrors(cublasCreate(&cublas_handle)); update_velocities(num_test, dt, cublas_handle, d_acc, d_test_vel); cublasDestroy(cublas_handle); checkCudaErrors(cudaMemcpy(test_vel, d_test_vel, num_test*sizeof(double3), cudaMemcpyDeviceToHost)); double final_kinetic_energy = mean_kinetic_energy(num_test, test_vel); THEN("The change in kinetic energy should be 0") { REQUIRE(final_kinetic_energy - initial_kinetic_energy > -tol); REQUIRE(final_kinetic_energy - initial_kinetic_energy < tol); } cudaFree(d_test_vel); } cudaFree(d_pos); cudaFree(d_acc); } }
7e77a07139e6792e61acc1a2c432f95f7872ffbb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cu_inc/common.cuh" #include "orttraining/training_ops/cuda/math/scale.h" namespace onnxruntime { namespace cuda { template <typename T, int NumThreadsPerBlock, int NumElementsPerThread> __global__ void _Scale( const T* input_data, const T scale_value, T* output_data, CUDA_LONG N) { CUDA_LONG start = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x; T input_value[NumElementsPerThread]; CUDA_LONG id = start; #pragma unroll for (int i = 0; i < NumElementsPerThread; i++) { if (id < N) { input_value[i] = input_data[id]; id += NumThreadsPerBlock; } } id = start; #pragma unroll for (int i = 0; i < NumElementsPerThread; i++) { if (id < N) { output_data[id] = input_value[i] * scale_value; id += NumThreadsPerBlock; } } } template <typename T> void Impl_Scale( hipStream_t stream, const T* input_data, const float scale_value, T* output_data, size_t count) { int blocksPerGrid = static_cast<int>(CeilDiv(count, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread)); CUDA_LONG N = static_cast<CUDA_LONG>(count); hipLaunchKernelGGL(( _Scale<T, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, input_data, static_cast<T>(scale_value), output_data, N); } #define SPECIALIZE_SCALE_IMPL(T) \ template void Impl_Scale<T>( \ hipStream_t stream, \ const T* input_data, \ const float scale_value, \ T* output_data, \ size_t count); SPECIALIZE_SCALE_IMPL(half) SPECIALIZE_SCALE_IMPL(float) SPECIALIZE_SCALE_IMPL(double) } // namespace cuda } // namespace onnxruntime
7e77a07139e6792e61acc1a2c432f95f7872ffbb.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cu_inc/common.cuh" #include "orttraining/training_ops/cuda/math/scale.h" namespace onnxruntime { namespace cuda { template <typename T, int NumThreadsPerBlock, int NumElementsPerThread> __global__ void _Scale( const T* input_data, const T scale_value, T* output_data, CUDA_LONG N) { CUDA_LONG start = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x; T input_value[NumElementsPerThread]; CUDA_LONG id = start; #pragma unroll for (int i = 0; i < NumElementsPerThread; i++) { if (id < N) { input_value[i] = input_data[id]; id += NumThreadsPerBlock; } } id = start; #pragma unroll for (int i = 0; i < NumElementsPerThread; i++) { if (id < N) { output_data[id] = input_value[i] * scale_value; id += NumThreadsPerBlock; } } } template <typename T> void Impl_Scale( cudaStream_t stream, const T* input_data, const float scale_value, T* output_data, size_t count) { int blocksPerGrid = static_cast<int>(CeilDiv(count, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread)); CUDA_LONG N = static_cast<CUDA_LONG>(count); _Scale<T, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( input_data, static_cast<T>(scale_value), output_data, N); } #define SPECIALIZE_SCALE_IMPL(T) \ template void Impl_Scale<T>( \ cudaStream_t stream, \ const T* input_data, \ const float scale_value, \ T* output_data, \ size_t count); SPECIALIZE_SCALE_IMPL(half) SPECIALIZE_SCALE_IMPL(float) SPECIALIZE_SCALE_IMPL(double) } // namespace cuda } // namespace onnxruntime
197e0579529b5d0e900ec9913785cb6747d26908.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef EVOLUTION_AUX_H #define EVOLUTION_AUX_H #include "evolutionUtils.h" #include "cudaMath.h" #include "coriolisUtils.h" #ifdef printf #undef printf #endif static __global__ void _print_constant_memory_() { printf(" %f %f %f %d\n", r1_dev.left, r1_dev.dr, r1_dev.mass, r1_dev.n); printf(" %f %f %f %d\n", r2_dev.left, r2_dev.dr, r2_dev.mass, r2_dev.n); for(int i = 0; i < 500; i+=10) printf("%d %18.15f %18.15f\n", i+1, r1_dev.dump[i], r2_dev.dump[i]); } static __global__ void _print_gradient_coeffients_(const int n) { for(int i = 0; i < n; i++) printf(" %d %18.15e\n", i, gradient_coeffients_dev[i]); } static __global__ void _print_energies_(const int n) { for(int i = 0; i < n; i++) printf(" %d %18.15f\n", i, energies_dev[i]); } static __global__ void _psi_times_kinetic_energy_(Complex *psi_out, const Complex *psi_in, const int n1, const int n2, const int n_theta) { extern __shared__ double kinetic_data[]; double *kin1 = (double *) kinetic_data; double *kin2 = &kin1[n1/2+1]; cudaMath::setup_kinetic_energy_for_fft_nonnegative(kin1, r1_dev.n, r1_dev.n*r1_dev.dr, r1_dev.mass); cudaMath::setup_kinetic_energy_for_fft(kin2, r2_dev.n, r2_dev.n*r2_dev.dr, r2_dev.mass); __syncthreads(); const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < (n1/2+1)*n2*n_theta) { int i = -1; int j = -1; int k = -1; cudaUtils::index_2_ijk(index, n1/2+1, n2, n_theta, i, j, k); psi_out[index] = (kin1[i] + kin2[j])*psi_in[index]; } } static __global__ void _add_T_radial_weighted_psi_to_H_weighted_psi_(double *HPsi, const double *TRadPsi, const int n1, const int n2, const int n_theta) { const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < (n1/2+1)*2*n2*n_theta) { int i = -1; int j = -1; int k = -1; cudaUtils::index_2_ijk(index, (n1/2+1)*2, n2, n_theta, i, j, k); if(i < n1) { const int index2 = cudaUtils::ijk_2_index(n1, n2, n_theta, i, j, k); HPsi[index2] += TRadPsi[index]/(n1*n2); } } } static __global__ void _add_potential_weighted_psi_to_H_weighted_psi_(double *HPsi, const double *psi, const double *pot, const int n) { const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < n) HPsi[index] += pot[index]*psi[index]; } static __global__ void _add_T_bend_T_sym_to_T_angle_legendre_psi_dev_(double *TangPsi, const double *psi, const int n1, const int n2, const int nLegs, const int J, const int omega, const int a, const int b) { extern __shared__ double rotational_moments[]; double *I1 = rotational_moments; double *I2 = &I1[n1]; double &Tsym = I2[n2]; cudaMath::setup_moments_of_inertia(I1, r1_dev.n, r1_dev.left, r1_dev.dr, r1_dev.mass); cudaMath::setup_moments_of_inertia(I2, r2_dev.n, r2_dev.left, r2_dev.dr, r2_dev.mass); if(threadIdx.x == 0) Tsym = double(J*(J+1) - 2*omega*omega); __syncthreads(); const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < n1*n2*nLegs) { int i = -1; int j = -1; int l = -1; cudaUtils::index_2_ijk(index, n1, n2, nLegs, i, j, l); // l += omega; l = a*l + b; TangPsi[index] += ((I1[i]+I2[j])*l*(l+1) + I1[i]*Tsym)*psi[index]; } } static __global__ void _add_T_asym_to_T_angle_legendre_psi_dev_(double *TangPsi, const double *psi, const int n1, const int n2, const int nLegs, const int J, const int Omega, const int Omega1, const int OmegaMax, const int a, const int b) { extern __shared__ double I1[]; cudaMath::setup_moments_of_inertia(I1, r1_dev.n, r1_dev.left, r1_dev.dr, r1_dev.mass); __syncthreads(); const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < 
n1*n2*nLegs) { int i = -1; int j = -1; int l = -1; cudaUtils::index_2_ijk(index, n1, n2, nLegs, i, j, l); //l += OmegaMax; l = a*l + b; const double c = coriolisUtils::coriolis(J, l, Omega, Omega1); TangPsi[index] += I1[i]*c*psi[index]; } } static __global__ void _dump_wavepacket_(double *psi, const int n1, const int n2, const int n_theta) { const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < n1*n2*n_theta) { int i = -1; int j = -1; int k = -1; cudaUtils::index_2_ijk(index, n1, n2, n_theta, i, j, k); psi[index] *= r1_dev.dump[i]*r2_dev.dump[j]; } } static __global__ void _daxpy_(double *y, const double *x, const double alpha, const double beta, const int n) { const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < n) y[index] = alpha*x[index] + beta*y[index]; } static __global__ void _setup_potential_scale_(int *scale, const double *pot_dev, const double cutoff, const int n) { const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < n) scale[index] = pot_dev[index] < cutoff ? 1 : 0; } static __global__ void _scale_wavepacket_with_potential_cutoff_(double *psi, const double *potential, const double cutoff, const int n) { const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < n) if(potential[index] > cutoff) psi[index] = 0.0; } static __global__ void _psi_time_to_fai_energy_on_dividing_surface_ (const int n, const int n_energies, const double t, const double dt, const double *psi_real_dev, const double *psi_imag_dev, const double *d_psi_real_dev, const double *d_psi_imag_dev, Complex *fai_dev, Complex *d_fai_dev) { extern __shared__ Complex expIEtDt[]; for(int i = threadIdx.x; i < n_energies; i += blockDim.x) expIEtDt[i] = exp(Complex(0.0, t*energies_dev[i]))*dt; __syncthreads(); const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < n*n_energies) { int i = -1; int iE = -1; cudaUtils::index_2_ij(index, n, n_energies, i, iE); fai_dev[index] += expIEtDt[iE]*Complex(psi_real_dev[i], psi_imag_dev[i]); d_fai_dev[index] += expIEtDt[iE]*Complex(d_psi_real_dev[i], d_psi_imag_dev[i]); } } #endif /* EVOLUTION_AUX_H */
197e0579529b5d0e900ec9913785cb6747d26908.cu
#ifndef EVOLUTION_AUX_H #define EVOLUTION_AUX_H #include "evolutionUtils.h" #include "cudaMath.h" #include "coriolisUtils.h" #ifdef printf #undef printf #endif static __global__ void _print_constant_memory_() { printf(" %f %f %f %d\n", r1_dev.left, r1_dev.dr, r1_dev.mass, r1_dev.n); printf(" %f %f %f %d\n", r2_dev.left, r2_dev.dr, r2_dev.mass, r2_dev.n); for(int i = 0; i < 500; i+=10) printf("%d %18.15f %18.15f\n", i+1, r1_dev.dump[i], r2_dev.dump[i]); } static __global__ void _print_gradient_coeffients_(const int n) { for(int i = 0; i < n; i++) printf(" %d %18.15e\n", i, gradient_coeffients_dev[i]); } static __global__ void _print_energies_(const int n) { for(int i = 0; i < n; i++) printf(" %d %18.15f\n", i, energies_dev[i]); } static __global__ void _psi_times_kinetic_energy_(Complex *psi_out, const Complex *psi_in, const int n1, const int n2, const int n_theta) { extern __shared__ double kinetic_data[]; double *kin1 = (double *) kinetic_data; double *kin2 = &kin1[n1/2+1]; cudaMath::setup_kinetic_energy_for_fft_nonnegative(kin1, r1_dev.n, r1_dev.n*r1_dev.dr, r1_dev.mass); cudaMath::setup_kinetic_energy_for_fft(kin2, r2_dev.n, r2_dev.n*r2_dev.dr, r2_dev.mass); __syncthreads(); const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < (n1/2+1)*n2*n_theta) { int i = -1; int j = -1; int k = -1; cudaUtils::index_2_ijk(index, n1/2+1, n2, n_theta, i, j, k); psi_out[index] = (kin1[i] + kin2[j])*psi_in[index]; } } static __global__ void _add_T_radial_weighted_psi_to_H_weighted_psi_(double *HPsi, const double *TRadPsi, const int n1, const int n2, const int n_theta) { const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < (n1/2+1)*2*n2*n_theta) { int i = -1; int j = -1; int k = -1; cudaUtils::index_2_ijk(index, (n1/2+1)*2, n2, n_theta, i, j, k); if(i < n1) { const int index2 = cudaUtils::ijk_2_index(n1, n2, n_theta, i, j, k); HPsi[index2] += TRadPsi[index]/(n1*n2); } } } static __global__ void _add_potential_weighted_psi_to_H_weighted_psi_(double *HPsi, const double *psi, const double *pot, const int n) { const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < n) HPsi[index] += pot[index]*psi[index]; } static __global__ void _add_T_bend_T_sym_to_T_angle_legendre_psi_dev_(double *TangPsi, const double *psi, const int n1, const int n2, const int nLegs, const int J, const int omega, const int a, const int b) { extern __shared__ double rotational_moments[]; double *I1 = rotational_moments; double *I2 = &I1[n1]; double &Tsym = I2[n2]; cudaMath::setup_moments_of_inertia(I1, r1_dev.n, r1_dev.left, r1_dev.dr, r1_dev.mass); cudaMath::setup_moments_of_inertia(I2, r2_dev.n, r2_dev.left, r2_dev.dr, r2_dev.mass); if(threadIdx.x == 0) Tsym = double(J*(J+1) - 2*omega*omega); __syncthreads(); const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < n1*n2*nLegs) { int i = -1; int j = -1; int l = -1; cudaUtils::index_2_ijk(index, n1, n2, nLegs, i, j, l); // l += omega; l = a*l + b; TangPsi[index] += ((I1[i]+I2[j])*l*(l+1) + I1[i]*Tsym)*psi[index]; } } static __global__ void _add_T_asym_to_T_angle_legendre_psi_dev_(double *TangPsi, const double *psi, const int n1, const int n2, const int nLegs, const int J, const int Omega, const int Omega1, const int OmegaMax, const int a, const int b) { extern __shared__ double I1[]; cudaMath::setup_moments_of_inertia(I1, r1_dev.n, r1_dev.left, r1_dev.dr, r1_dev.mass); __syncthreads(); const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < n1*n2*nLegs) { int i = -1; int j = -1; int l = -1; cudaUtils::index_2_ijk(index, n1, n2, nLegs, 
i, j, l); //l += OmegaMax; l = a*l + b; const double c = coriolisUtils::coriolis(J, l, Omega, Omega1); TangPsi[index] += I1[i]*c*psi[index]; } } static __global__ void _dump_wavepacket_(double *psi, const int n1, const int n2, const int n_theta) { const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < n1*n2*n_theta) { int i = -1; int j = -1; int k = -1; cudaUtils::index_2_ijk(index, n1, n2, n_theta, i, j, k); psi[index] *= r1_dev.dump[i]*r2_dev.dump[j]; } } static __global__ void _daxpy_(double *y, const double *x, const double alpha, const double beta, const int n) { const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < n) y[index] = alpha*x[index] + beta*y[index]; } static __global__ void _setup_potential_scale_(int *scale, const double *pot_dev, const double cutoff, const int n) { const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < n) scale[index] = pot_dev[index] < cutoff ? 1 : 0; } static __global__ void _scale_wavepacket_with_potential_cutoff_(double *psi, const double *potential, const double cutoff, const int n) { const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < n) if(potential[index] > cutoff) psi[index] = 0.0; } static __global__ void _psi_time_to_fai_energy_on_dividing_surface_ (const int n, const int n_energies, const double t, const double dt, const double *psi_real_dev, const double *psi_imag_dev, const double *d_psi_real_dev, const double *d_psi_imag_dev, Complex *fai_dev, Complex *d_fai_dev) { extern __shared__ Complex expIEtDt[]; for(int i = threadIdx.x; i < n_energies; i += blockDim.x) expIEtDt[i] = exp(Complex(0.0, t*energies_dev[i]))*dt; __syncthreads(); const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < n*n_energies) { int i = -1; int iE = -1; cudaUtils::index_2_ij(index, n, n_energies, i, iE); fai_dev[index] += expIEtDt[iE]*Complex(psi_real_dev[i], psi_imag_dev[i]); d_fai_dev[index] += expIEtDt[iE]*Complex(d_psi_real_dev[i], d_psi_imag_dev[i]); } } #endif /* EVOLUTION_AUX_H */
23febce1768bfbb1a38c66786e32063e7f10ffa1.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <helper_timer.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" __global__ void addKernel(const int *a, const int *b, int *c, int size) { int i = threadIdx.x + blockDim.x * blockIdx.x; float _a = a[i]; float _b = b[i]; float sum = _a + _b * 2.0f; float mul = _a * (_a + _b); float val = (sum + mul) * 5.35f; float val2= (sum * 8.06f) + sqrtf(val) * (mul + 6.36f); c[i] = sqrt(val2) * 44.87f; } void task4() { const int VALUE = 100; int someConstant = 1024; int size = 0; int block = 0; int *a = 0; int *b = 0; int *c = 0; int i = 0; int j = 0; int sum = 0; int sum2 = 0; int *aOnDevice = 0; int *bOnDevice = 0; int *cOnDevice = 0; do { printf("Type vector size: "); scanf("%d", &size); if (size <= 0) break; printf("\nType size of block: "); scanf("%d", &someConstant); if (size % someConstant == 0) { block = size / someConstant; } else { block = size / someConstant + 1; } a = (int*)malloc(sizeof(int) * size); b = (int*)malloc(sizeof(int) * size); c = (int*)malloc(sizeof(int) * size); for(i = 0; i < size; ++i) { a[i] = 1; b[i] = 1; } hipSetDevice(0); hipMalloc((void**)&aOnDevice, size * sizeof(int)); hipMalloc((void**)&bOnDevice, size * sizeof(int)); hipMalloc((void**)&cOnDevice, size * sizeof(int)); hipMemcpy(aOnDevice, a, size * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(bOnDevice, b, size * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(cOnDevice, c, size * sizeof(int), hipMemcpyHostToDevice); dim3 gridDims = dim3(block, 1, 1); dim3 blockDims = dim3(someConstant, 1, 1); StopWatchInterface* hTimer; sdkCreateTimer(&hTimer); sdkResetTimer(&hTimer); hipDeviceSynchronize(); hipLaunchKernelGGL(( addKernel), dim3(gridDims),dim3(blockDims), 0, 0, aOnDevice, bOnDevice, cOnDevice, size); sdkStartTimer(&hTimer); for(i = 0; i < VALUE; ++i) { hipLaunchKernelGGL(( addKernel), dim3(gridDims),dim3(blockDims), 0, 0, aOnDevice, bOnDevice, cOnDevice, size); } hipDeviceSynchronize(); sdkStopTimer(&hTimer); hipMemcpy(c, cOnDevice, size * sizeof(int), hipMemcpyDeviceToHost); float time1 = sdkGetTimerValue(&hTimer) / VALUE; sdkResetTimer(&hTimer); hipFree(aOnDevice); hipFree(bOnDevice); sum = c[0]; for(i = 1; i < size; ++i) { sum += c[i]; } sdkStartTimer(&hTimer); for(i = 0; i < size; ++i) { float _a = a[i]; float _b = b[i]; float sum = _a + _b * 2.0f; float mul = _a * (_a + _b); float val = (sum + mul) * 5.35f; float val2 = (sum * 8.06f) + sqrtf(val) * (mul + 6.36f); c[i] = sqrt(val2) * 44.87f; } sdkStopTimer(&hTimer); float time2 = sdkGetTimerValue(&hTimer); sum2 = 0; for(i = 0; i < size; ++i) { sum2 += c[i]; } if (sum == sum2) { printf("OK!\n"); } printf("CUDA: %f, CPU: %f \n", time1, time2); sdkResetTimer(&hTimer); hipDeviceReset(); free(a); free(b); free(c); } while(size > 0); }
23febce1768bfbb1a38c66786e32063e7f10ffa1.cu
#include <stdio.h> #include <helper_timer.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" __global__ void addKernel(const int *a, const int *b, int *c, int size) { int i = threadIdx.x + blockDim.x * blockIdx.x; float _a = a[i]; float _b = b[i]; float sum = _a + _b * 2.0f; float mul = _a * (_a + _b); float val = (sum + mul) * 5.35f; float val2= (sum * 8.06f) + sqrtf(val) * (mul + 6.36f); c[i] = sqrt(val2) * 44.87f; } void task4() { const int VALUE = 100; int someConstant = 1024; int size = 0; int block = 0; int *a = 0; int *b = 0; int *c = 0; int i = 0; int j = 0; int sum = 0; int sum2 = 0; int *aOnDevice = 0; int *bOnDevice = 0; int *cOnDevice = 0; do { printf("Type vector size: "); scanf("%d", &size); if (size <= 0) break; printf("\nType size of block: "); scanf("%d", &someConstant); if (size % someConstant == 0) { block = size / someConstant; } else { block = size / someConstant + 1; } a = (int*)malloc(sizeof(int) * size); b = (int*)malloc(sizeof(int) * size); c = (int*)malloc(sizeof(int) * size); for(i = 0; i < size; ++i) { a[i] = 1; b[i] = 1; } cudaSetDevice(0); cudaMalloc((void**)&aOnDevice, size * sizeof(int)); cudaMalloc((void**)&bOnDevice, size * sizeof(int)); cudaMalloc((void**)&cOnDevice, size * sizeof(int)); cudaMemcpy(aOnDevice, a, size * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(bOnDevice, b, size * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(cOnDevice, c, size * sizeof(int), cudaMemcpyHostToDevice); dim3 gridDims = dim3(block, 1, 1); dim3 blockDims = dim3(someConstant, 1, 1); StopWatchInterface* hTimer; sdkCreateTimer(&hTimer); sdkResetTimer(&hTimer); cudaDeviceSynchronize(); addKernel<<<gridDims,blockDims>>>(aOnDevice, bOnDevice, cOnDevice, size); sdkStartTimer(&hTimer); for(i = 0; i < VALUE; ++i) { addKernel<<<gridDims,blockDims>>>(aOnDevice, bOnDevice, cOnDevice, size); } cudaDeviceSynchronize(); sdkStopTimer(&hTimer); cudaMemcpy(c, cOnDevice, size * sizeof(int), cudaMemcpyDeviceToHost); float time1 = sdkGetTimerValue(&hTimer) / VALUE; sdkResetTimer(&hTimer); cudaFree(aOnDevice); cudaFree(bOnDevice); sum = c[0]; for(i = 1; i < size; ++i) { sum += c[i]; } sdkStartTimer(&hTimer); for(i = 0; i < size; ++i) { float _a = a[i]; float _b = b[i]; float sum = _a + _b * 2.0f; float mul = _a * (_a + _b); float val = (sum + mul) * 5.35f; float val2 = (sum * 8.06f) + sqrtf(val) * (mul + 6.36f); c[i] = sqrt(val2) * 44.87f; } sdkStopTimer(&hTimer); float time2 = sdkGetTimerValue(&hTimer); sum2 = 0; for(i = 0; i < size; ++i) { sum2 += c[i]; } if (sum == sum2) { printf("OK!\n"); } printf("CUDA: %f, CPU: %f \n", time1, time2); sdkResetTimer(&hTimer); cudaDeviceReset(); free(a); free(b); free(c); } while(size > 0); }
03a3e73e13063861a514e05601b985fd3347e1e2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * cuLsoda_kernel.cu * cuLsoda * */ #ifndef _CULSODA_CU_H_ #define _CULSODA_CU_H_ #include "cuLsoda.cu.h" #define REAL double template<typename Fex, typename Jex> __global__ void Sanders(Fex fex, int *neq, REAL *y, REAL *t, REAL *tout, int *itol, REAL *rtol, REAL *atol, int *itask, int *istate, int *iopt, REAL *rwork, int *lrw, int *iwork, int *liw, Jex jac, int *jt, struct cuLsodaCommonBlock *common, int *err, int probSize) { int me = threadIdx.x + blockIdx.x * blockDim.x; err[me] = dlsoda_(fex, &neq[me], &y[4*me], &t[me], &tout[me], &itol[me], &rtol[me], &atol[me], &itask[me], &istate[me], &iopt[me], &rwork[86*me], &lrw[me], &iwork[24*me], &liw[me], jac, &jt[me], &common[me]); __syncthreads(); } #endif
03a3e73e13063861a514e05601b985fd3347e1e2.cu
/* * cuLsoda_kernel.cu * cuLsoda * */ #ifndef _CULSODA_CU_H_ #define _CULSODA_CU_H_ #include "cuLsoda.cu.h" #define REAL double template<typename Fex, typename Jex> __global__ void Sanders(Fex fex, int *neq, REAL *y, REAL *t, REAL *tout, int *itol, REAL *rtol, REAL *atol, int *itask, int *istate, int *iopt, REAL *rwork, int *lrw, int *iwork, int *liw, Jex jac, int *jt, struct cuLsodaCommonBlock *common, int *err, int probSize) { int me = threadIdx.x + blockIdx.x * blockDim.x; err[me] = dlsoda_(fex, &neq[me], &y[4*me], &t[me], &tout[me], &itol[me], &rtol[me], &atol[me], &itask[me], &istate[me], &iopt[me], &rwork[86*me], &lrw[me], &iwork[24*me], &liw[me], jac, &jt[me], &common[me]); __syncthreads(); } #endif
e4521b3ac64c537792e0238c99ba5367b1836f1d.hip
// !!! This is a file automatically generated by hipify!!! /*- * Copyright (c) 2017 Tomas Karnagel and Matthias Werner * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include <stdio.h> #include <assert.h> #include <iostream> #include <fstream> #include <vector> #include <sys/time.h> #include <sys/stat.h> #include <sys/types.h> #include <string> #include <hip/hip_runtime.h> using namespace std; // --------------------------------- GPU Kernel ------------------------------ template<bool DISRUPT, bool GET_DURATION> static __global__ void tlb_latency_with_disruptor(unsigned int * hashtable, unsigned hashtable_count, unsigned iterations, unsigned stride_count, unsigned offset, int smid0, int smxxx) { extern __shared__ unsigned duration[]; // shared memory should be large enough to fill one SM unsigned smid; asm("mov.u32 %0, %smid;" : "=r"(smid) ); if(!(DISRUPT || smid==smid0)) // only take 1st SM in non-disrupting mode return; if(DISRUPT && smid!=smxxx) // only SMxxx does run in disrupting mode return; if(threadIdx.x!=0) return; unsigned long start; unsigned int sum = 0; unsigned int pos = DISRUPT ? 
(stride_count*iterations + offset) % hashtable_count : offset; sum += pos; // ensure pos is set before entering loop for (unsigned int i = 0; i < iterations; i++) { start = clock64(); pos = hashtable[pos]; sum += pos; // ensure pos is set before taking clock duration[i] = static_cast<unsigned>(clock64()-start); } if(sum == 0) hashtable[hashtable_count+1] = sum; if(GET_DURATION && smid==smid0) { // only store durations one time for (unsigned int i = 0; i < iterations; i++) { hashtable[hashtable_count+2+i] = duration[i]; } } } // --------------------------------- support functions ------------------------------ #ifndef CUDA_DISABLE_ERROR_CHECKING #define CHECK_CUDA(ans) check_cuda((ans), "", #ans, __FILE__, __LINE__) #define CHECK_LAST(msg) check_cuda(hipGetLastError(), msg, "CHECK_LAST", __FILE__, __LINE__) #else #define CHECK_CUDA(ans) {} #define CHECK_LAST(msg) {} #endif inline void check_cuda(hipError_t code, const char* msg, const char *func, const char *file, int line) { if (code != hipSuccess) { cerr << "CUDA ERROR: " << hipGetErrorString(code) << " in Line " << line << endl; exit(code); } } // --------------------------------- main part ------------------------------ int main(int argc, char **argv) { unsigned int devNo = 0; // ------------- handle inputs ------------ if (argc < 3) { cout << "usage: " << argv[0] << " stride_KB iterations device_No=0" << endl; return 0; } int stride_KB = atoi(argv[1]); int iterations = atoi(argv[2]); if (argc > 3) devNo = atoi(argv[3]); // --------------- init CUDA --------- int devCount; int SMcount = 0; CHECK_CUDA(hipGetDeviceCount(&devCount)); // check Dev Count if (devNo >= devCount){ cout << "Can not choose Dev " << devNo << ", only " << devCount << " GPUs " << endl; exit(0); } CHECK_CUDA(hipSetDevice(devNo)); hipDeviceProp_t props; CHECK_CUDA(hipGetDeviceProperties(&props, devNo)); cout << "#" << props.name << ": cuda " << props.major << "." 
<< props.minor << endl; SMcount = props.multiProcessorCount; // --------------- setup input data --------- size_t hashtable_size_MB = ((iterations+1) * stride_KB * 2) / 1024; CHECK_CUDA(hipDeviceReset()); unsigned int * hashtable; unsigned* hduration = new unsigned [iterations]; size_t N = hashtable_size_MB*1048576llu/sizeof(unsigned int); CHECK_CUDA(hipMalloc(&hashtable, hashtable_size_MB*1048576llu+(iterations+2llu)*sizeof(unsigned int))); // init data unsigned int* hdata = new unsigned int[N+1]; size_t stride_count = stride_KB*1024llu/sizeof(unsigned); for(size_t t=0; t<N; ++t) { hdata[t] = ( t+stride_count ) % N; } hdata[N] = 0; CHECK_CUDA(hipMemcpy(hashtable, hdata, (N+1)*sizeof(unsigned), hipMemcpyHostToDevice)); delete[] hdata; // alloc output space double ** results = new double * [SMcount]; for(int i = 0; i < SMcount; ++i){ results[i] = new double[SMcount]; memset(results[i], 0, sizeof(double) * SMcount); } // --------------- test all SMx to SMy combinations --------- for (int smid0 = 0; smid0 < SMcount; smid0++){ for (int smxxx = 0; smxxx < SMcount; smxxx++) { CHECK_CUDA(hipDeviceSynchronize()); // fill TLB hipLaunchKernelGGL(( tlb_latency_with_disruptor<false, false>), dim3(2*SMcount), dim3(1), iterations*sizeof(unsigned), 0, hashtable, N, iterations, stride_count, 0, smid0,smxxx); // disrupt TLB if TLB is shared hipLaunchKernelGGL(( tlb_latency_with_disruptor<true, false>), dim3(2*SMcount), dim3(1), iterations*sizeof(unsigned), 0, hashtable, N, iterations, stride_count, 0, smid0,smxxx); // check if values in TLB hipLaunchKernelGGL(( tlb_latency_with_disruptor<false, true>), dim3(2*SMcount), dim3(1), iterations*sizeof(unsigned), 0, hashtable, N, iterations, stride_count, 0, smid0,smxxx); CHECK_LAST( "Kernel failed." ); CHECK_CUDA(hipDeviceSynchronize()); // get needed cycles CHECK_CUDA(hipMemcpy(hduration, hashtable+N+2, iterations*sizeof(unsigned), hipMemcpyDeviceToHost)); double avgc=0; for(int b=0; b<iterations;++b) { avgc+=hduration[b]; } results[smid0][smxxx] = avgc; } } CHECK_CUDA(hipFree(hashtable)); delete[] hduration; // ---------------output handling --------- cout << "#----------- absolute values ---------------" << endl; cout << "# "; for (unsigned int steps = 0; steps < SMcount; steps++) cout << steps << " "; cout << endl; for(unsigned int y = 0; y < SMcount; y++){ cout << y << " "; for(unsigned int x = 0; x < SMcount; x++){ cout << (unsigned int) (results[y][x]/iterations); cout << " "; } cout << endl; } cout << "#----------- which SMs share TLB ---------------" << endl; cout << "# "; for (unsigned int steps = 0; steps < SMcount; steps++) cout << steps << " "; cout << endl; for(unsigned int y = 0; y < SMcount; y++){ cout << y << " "; double avg = 0; for(unsigned int x = 0; x < SMcount; x++) avg += (results[y][x]/iterations); // build average and add some buffer avg = (avg / SMcount)+3; // cout << avg << endl; for(unsigned int x = 0; x < SMcount; x++){ if (results[y][x]/iterations > avg) cout << ".X "; else cout << ".. "; } cout << endl; } CHECK_CUDA(hipDeviceReset()); return 0; }
e4521b3ac64c537792e0238c99ba5367b1836f1d.cu
/*- * Copyright (c) 2017 Tomas Karnagel and Matthias Werner * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include <stdio.h> #include <assert.h> #include <iostream> #include <fstream> #include <vector> #include <sys/time.h> #include <sys/stat.h> #include <sys/types.h> #include <string> #include <cuda_runtime.h> using namespace std; // --------------------------------- GPU Kernel ------------------------------ template<bool DISRUPT, bool GET_DURATION> static __global__ void tlb_latency_with_disruptor(unsigned int * hashtable, unsigned hashtable_count, unsigned iterations, unsigned stride_count, unsigned offset, int smid0, int smxxx) { extern __shared__ unsigned duration[]; // shared memory should be large enough to fill one SM unsigned smid; asm("mov.u32 %0, %smid;" : "=r"(smid) ); if(!(DISRUPT || smid==smid0)) // only take 1st SM in non-disrupting mode return; if(DISRUPT && smid!=smxxx) // only SMxxx does run in disrupting mode return; if(threadIdx.x!=0) return; unsigned long start; unsigned int sum = 0; unsigned int pos = DISRUPT ? 
(stride_count*iterations + offset) % hashtable_count : offset; sum += pos; // ensure pos is set before entering loop for (unsigned int i = 0; i < iterations; i++) { start = clock64(); pos = hashtable[pos]; sum += pos; // ensure pos is set before taking clock duration[i] = static_cast<unsigned>(clock64()-start); } if(sum == 0) hashtable[hashtable_count+1] = sum; if(GET_DURATION && smid==smid0) { // only store durations one time for (unsigned int i = 0; i < iterations; i++) { hashtable[hashtable_count+2+i] = duration[i]; } } } // --------------------------------- support functions ------------------------------ #ifndef CUDA_DISABLE_ERROR_CHECKING #define CHECK_CUDA(ans) check_cuda((ans), "", #ans, __FILE__, __LINE__) #define CHECK_LAST(msg) check_cuda(cudaGetLastError(), msg, "CHECK_LAST", __FILE__, __LINE__) #else #define CHECK_CUDA(ans) {} #define CHECK_LAST(msg) {} #endif inline void check_cuda(cudaError_t code, const char* msg, const char *func, const char *file, int line) { if (code != cudaSuccess) { cerr << "CUDA ERROR: " << cudaGetErrorString(code) << " in Line " << line << endl; exit(code); } } // --------------------------------- main part ------------------------------ int main(int argc, char **argv) { unsigned int devNo = 0; // ------------- handle inputs ------------ if (argc < 3) { cout << "usage: " << argv[0] << " stride_KB iterations device_No=0" << endl; return 0; } int stride_KB = atoi(argv[1]); int iterations = atoi(argv[2]); if (argc > 3) devNo = atoi(argv[3]); // --------------- init CUDA --------- int devCount; int SMcount = 0; CHECK_CUDA(cudaGetDeviceCount(&devCount)); // check Dev Count if (devNo >= devCount){ cout << "Can not choose Dev " << devNo << ", only " << devCount << " GPUs " << endl; exit(0); } CHECK_CUDA(cudaSetDevice(devNo)); cudaDeviceProp props; CHECK_CUDA(cudaGetDeviceProperties(&props, devNo)); cout << "#" << props.name << ": cuda " << props.major << "." 
<< props.minor << endl; SMcount = props.multiProcessorCount; // --------------- setup input data --------- size_t hashtable_size_MB = ((iterations+1) * stride_KB * 2) / 1024; CHECK_CUDA(cudaDeviceReset()); unsigned int * hashtable; unsigned* hduration = new unsigned [iterations]; size_t N = hashtable_size_MB*1048576llu/sizeof(unsigned int); CHECK_CUDA(cudaMalloc(&hashtable, hashtable_size_MB*1048576llu+(iterations+2llu)*sizeof(unsigned int))); // init data unsigned int* hdata = new unsigned int[N+1]; size_t stride_count = stride_KB*1024llu/sizeof(unsigned); for(size_t t=0; t<N; ++t) { hdata[t] = ( t+stride_count ) % N; } hdata[N] = 0; CHECK_CUDA(cudaMemcpy(hashtable, hdata, (N+1)*sizeof(unsigned), cudaMemcpyHostToDevice)); delete[] hdata; // alloc output space double ** results = new double * [SMcount]; for(int i = 0; i < SMcount; ++i){ results[i] = new double[SMcount]; memset(results[i], 0, sizeof(double) * SMcount); } // --------------- test all SMx to SMy combinations --------- for (int smid0 = 0; smid0 < SMcount; smid0++){ for (int smxxx = 0; smxxx < SMcount; smxxx++) { CHECK_CUDA(cudaDeviceSynchronize()); // fill TLB tlb_latency_with_disruptor<false, false><<<2*SMcount, 1, iterations*sizeof(unsigned)>>>(hashtable, N, iterations, stride_count, 0, smid0,smxxx); // disrupt TLB if TLB is shared tlb_latency_with_disruptor<true, false><<<2*SMcount, 1, iterations*sizeof(unsigned)>>>(hashtable, N, iterations, stride_count, 0, smid0,smxxx); // check if values in TLB tlb_latency_with_disruptor<false, true><<<2*SMcount, 1, iterations*sizeof(unsigned)>>>(hashtable, N, iterations, stride_count, 0, smid0,smxxx); CHECK_LAST( "Kernel failed." ); CHECK_CUDA(cudaDeviceSynchronize()); // get needed cycles CHECK_CUDA(cudaMemcpy(hduration, hashtable+N+2, iterations*sizeof(unsigned), cudaMemcpyDeviceToHost)); double avgc=0; for(int b=0; b<iterations;++b) { avgc+=hduration[b]; } results[smid0][smxxx] = avgc; } } CHECK_CUDA(cudaFree(hashtable)); delete[] hduration; // ---------------output handling --------- cout << "#----------- absolute values ---------------" << endl; cout << "# "; for (unsigned int steps = 0; steps < SMcount; steps++) cout << steps << " "; cout << endl; for(unsigned int y = 0; y < SMcount; y++){ cout << y << " "; for(unsigned int x = 0; x < SMcount; x++){ cout << (unsigned int) (results[y][x]/iterations); cout << " "; } cout << endl; } cout << "#----------- which SMs share TLB ---------------" << endl; cout << "# "; for (unsigned int steps = 0; steps < SMcount; steps++) cout << steps << " "; cout << endl; for(unsigned int y = 0; y < SMcount; y++){ cout << y << " "; double avg = 0; for(unsigned int x = 0; x < SMcount; x++) avg += (results[y][x]/iterations); // build average and add some buffer avg = (avg / SMcount)+3; // cout << avg << endl; for(unsigned int x = 0; x < SMcount; x++){ if (results[y][x]/iterations > avg) cout << ".X "; else cout << ".. "; } cout << endl; } CHECK_CUDA(cudaDeviceReset()); return 0; }
81e5a52638940e4df2d9319c5c9b8e3cc7e003e5.hip
// !!! This is a file automatically generated by hipify!!! #include "hello_world.hpp" int countGPUs() { int gpu_count; hipGetDeviceCount(&gpu_count); return gpu_count; }
81e5a52638940e4df2d9319c5c9b8e3cc7e003e5.cu
#include "hello_world.hpp" int countGPUs() { int gpu_count; cudaGetDeviceCount(&gpu_count); return gpu_count; }
d629105732c164dadb327463a8a5789a0c04c64a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "SolveSmoothGaussianGlobalKernel3.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *u = NULL; hipMalloc(&u, XSIZE*YSIZE); float *v = NULL; hipMalloc(&v, XSIZE*YSIZE); float *bku = NULL; hipMalloc(&bku, XSIZE*YSIZE); float *bkv = NULL; hipMalloc(&bkv, XSIZE*YSIZE); int width = XSIZE; int height = YSIZE; int stride = 2; float *outputu = NULL; hipMalloc(&outputu, XSIZE*YSIZE); float *outputv = NULL; hipMalloc(&outputv, XSIZE*YSIZE); float *outputbku = NULL; hipMalloc(&outputbku, XSIZE*YSIZE); float *outputbkv = NULL; hipMalloc(&outputbkv, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( SolveSmoothGaussianGlobalKernel3), dim3(gridBlock),dim3(threadBlock), 0, 0, u,v,bku,bkv,width,height,stride,outputu,outputv,outputbku,outputbkv); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( SolveSmoothGaussianGlobalKernel3), dim3(gridBlock),dim3(threadBlock), 0, 0, u,v,bku,bkv,width,height,stride,outputu,outputv,outputbku,outputbkv); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( SolveSmoothGaussianGlobalKernel3), dim3(gridBlock),dim3(threadBlock), 0, 0, u,v,bku,bkv,width,height,stride,outputu,outputv,outputbku,outputbkv); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
d629105732c164dadb327463a8a5789a0c04c64a.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "SolveSmoothGaussianGlobalKernel3.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *u = NULL; cudaMalloc(&u, XSIZE*YSIZE); float *v = NULL; cudaMalloc(&v, XSIZE*YSIZE); float *bku = NULL; cudaMalloc(&bku, XSIZE*YSIZE); float *bkv = NULL; cudaMalloc(&bkv, XSIZE*YSIZE); int width = XSIZE; int height = YSIZE; int stride = 2; float *outputu = NULL; cudaMalloc(&outputu, XSIZE*YSIZE); float *outputv = NULL; cudaMalloc(&outputv, XSIZE*YSIZE); float *outputbku = NULL; cudaMalloc(&outputbku, XSIZE*YSIZE); float *outputbkv = NULL; cudaMalloc(&outputbkv, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); SolveSmoothGaussianGlobalKernel3<<<gridBlock,threadBlock>>>(u,v,bku,bkv,width,height,stride,outputu,outputv,outputbku,outputbkv); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { SolveSmoothGaussianGlobalKernel3<<<gridBlock,threadBlock>>>(u,v,bku,bkv,width,height,stride,outputu,outputv,outputbku,outputbkv); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { SolveSmoothGaussianGlobalKernel3<<<gridBlock,threadBlock>>>(u,v,bku,bkv,width,height,stride,outputu,outputv,outputbku,outputbkv); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
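The harness above (and the near-identical ones that follow) rounds XSIZE and YSIZE up to block multiples with while loops and times 1000 launches with steady_clock; since kernel launches are asynchronous, that interval is dominated by launch overhead unless a synchronization precedes the end timestamp. Below is a sketch of the equivalent ceil-division grid computation together with event-based timing, using a placeholder kernelUnderTest instead of the included SolveSmoothGaussianGlobalKernel3.

#include <cstdio>
#include <cuda_runtime.h>

// Placeholder for whichever kernel the harness includes (here a trivial 2D scale).
__global__ void kernelUnderTest(float* out, int width, int height) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < width && y < height) out[y * width + x] *= 2.0f;
}

int main() {
    const int XSIZE = 1232, YSIZE = 1232, BLOCKX = 16, BLOCKY = 16;

    // Equivalent to the while loops above: round each dimension up to a block multiple.
    dim3 block(BLOCKX, BLOCKY);
    dim3 grid((XSIZE + BLOCKX - 1) / BLOCKX, (YSIZE + BLOCKY - 1) / BLOCKY);

    float* buf = nullptr;
    cudaMalloc(&buf, XSIZE * YSIZE * sizeof(float));

    // Event-based timing brackets the work on the GPU timeline itself.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    for (int i = 0; i < 1000; ++i)
        kernelUnderTest<<<grid, block>>>(buf, XSIZE, YSIZE);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);   // wait until the last kernel has finished

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    std::printf("[%f ms for 1000 launches, (%d,%d) blocks, (%d,%d) matrix]\n",
                ms, BLOCKX, BLOCKY, XSIZE, YSIZE);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(buf);
    return 0;
}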
58d43b6bfb8ab842314239f630cc3a5749c1e017.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "maxChannels.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const double *Params = NULL; hipMalloc(&Params, XSIZE*YSIZE); const float *dataraw = NULL; hipMalloc(&dataraw, XSIZE*YSIZE); const float *data = NULL; hipMalloc(&data, XSIZE*YSIZE); const int *iC = NULL; hipMalloc(&iC, XSIZE*YSIZE); int *st = NULL; hipMalloc(&st, XSIZE*YSIZE); int *id = NULL; hipMalloc(&id, XSIZE*YSIZE); int *counter = NULL; hipMalloc(&counter, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( maxChannels), dim3(gridBlock),dim3(threadBlock), 0, 0, Params,dataraw,data,iC,st,id,counter); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( maxChannels), dim3(gridBlock),dim3(threadBlock), 0, 0, Params,dataraw,data,iC,st,id,counter); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( maxChannels), dim3(gridBlock),dim3(threadBlock), 0, 0, Params,dataraw,data,iC,st,id,counter); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
58d43b6bfb8ab842314239f630cc3a5749c1e017.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "maxChannels.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const double *Params = NULL; cudaMalloc(&Params, XSIZE*YSIZE); const float *dataraw = NULL; cudaMalloc(&dataraw, XSIZE*YSIZE); const float *data = NULL; cudaMalloc(&data, XSIZE*YSIZE); const int *iC = NULL; cudaMalloc(&iC, XSIZE*YSIZE); int *st = NULL; cudaMalloc(&st, XSIZE*YSIZE); int *id = NULL; cudaMalloc(&id, XSIZE*YSIZE); int *counter = NULL; cudaMalloc(&counter, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); maxChannels<<<gridBlock,threadBlock>>>(Params,dataraw,data,iC,st,id,counter); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { maxChannels<<<gridBlock,threadBlock>>>(Params,dataraw,data,iC,st,id,counter); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { maxChannels<<<gridBlock,threadBlock>>>(Params,dataraw,data,iC,st,id,counter); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
e891a1e8c90c037a1ee82e3df529c35f01901479.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "csr.h" #include "cudaerror.h" #include "cuda_utils.h" #include <vector> #include <numeric> #include <algorithm> #include <iostream> static __global__ void kernelCalcRowLen(unsigned int * rowLen, const unsigned int * rowOffsets, int numRows) { int k = blockDim.x * blockIdx.x + threadIdx.x; if (k < numRows) { rowLen[k] = rowOffsets[k + 1] - rowOffsets[k]; } } //make a GPU deep copy of a CPU csr matrix void ohdSVM::makeCudaCsr(csr_gpu & x_gpu, const csr & x_cpu) { x_gpu.nnz = x_cpu.nnz; x_gpu.numCols = x_cpu.numCols; x_gpu.numRows = x_cpu.numRows; std::cout << "Allocating CSR structure of size " << ((x_gpu.nnz * (sizeof(x_gpu.values) + sizeof(x_gpu.colInd)) + (x_gpu.numRows + 1) * sizeof(x_gpu.rowOffsets) + x_gpu.numRows * sizeof(x_gpu.rowLen)) >> 20) << " MB" << std::endl; assert_cuda(hipMalloc((void **)&(x_gpu.values), x_gpu.nnz * sizeof(float))); assert_cuda(hipMalloc((void **)&(x_gpu.colInd), x_gpu.nnz * sizeof(int))); assert_cuda(hipMalloc((void **)&(x_gpu.rowOffsets), (x_gpu.numRows+1) * sizeof(int))); assert_cuda(hipMalloc((void **)&(x_gpu.rowLen), x_gpu.numRows * sizeof(int))); assert_cuda(hipMemcpy(x_gpu.values, x_cpu.values, x_gpu.nnz * sizeof(float), hipMemcpyHostToDevice)); assert_cuda(hipMemcpy(x_gpu.colInd, x_cpu.colInd, x_gpu.nnz * sizeof(int), hipMemcpyHostToDevice)); assert_cuda(hipMemcpy(x_gpu.rowOffsets, x_cpu.rowOffsets, (x_gpu.numRows+1) * sizeof(int), hipMemcpyHostToDevice)); dim3 dimBlock(256); dim3 dimGrid(getgriddim(x_gpu.numRows, dimBlock.x)); hipLaunchKernelGGL(( kernelCalcRowLen), dim3(dimGrid), dim3(dimBlock), 0, 0, x_gpu.rowLen, x_gpu.rowOffsets, x_gpu.numRows); } void ohdSVM::freeCudaCsr(csr_gpu & x_gpu) { hipFree(x_gpu.values); hipFree(x_gpu.colInd); hipFree(x_gpu.rowOffsets); hipFree(x_gpu.rowLen); x_gpu.values = NULL; x_gpu.colInd = NULL; x_gpu.rowOffsets = NULL; x_gpu.nnz = 0; x_gpu.numRows = 0; x_gpu.numCols = 0; } template<typename T> class IdxComparator { T * v; public: IdxComparator(T * v) : v(v) {} bool operator()(int i1, int i2) { return (*v)[i1] > (*v)[i2]; } }; void ohdSVM::makeCudaJds(jds_gpu & x_gpu, const csr & x_cpu) { x_gpu.nnz = x_cpu.nnz; x_gpu.numCols = x_cpu.numCols; x_gpu.numRows = x_cpu.numRows; std::vector<int> rowLen(x_cpu.numRows); std::adjacent_difference(x_cpu.rowOffsets + 1, x_cpu.rowOffsets + x_cpu.numRows + 1, rowLen.begin()); x_gpu.maxRowLen = *std::max_element(rowLen.begin(), rowLen.end()); std::vector<int> rowPerm(rowLen.size()); std::iota(rowPerm.begin(), rowPerm.end(), 0); std::sort(rowPerm.begin(), rowPerm.end(), IdxComparator<std::vector<int>>(&rowLen)); std::vector<int> rowLenSorted(rowLen.size()); for (int i = 0; i < rowPerm.size(); i++) rowLenSorted[i] = rowLen[rowPerm[i]]; std::cout << "Allocating JDS structure of size " << ((x_gpu.nnz * (sizeof(x_gpu.values) + sizeof(x_gpu.colInd)) + x_gpu.numRows * (sizeof(x_gpu.rowLen) + sizeof(x_gpu.rowPerm)) + x_gpu.numCols * sizeof(x_gpu.colStart)) >> 20) << " MB" << std::endl; assert_cuda(hipMalloc((void **)&(x_gpu.values), x_gpu.nnz * sizeof(float))); assert_cuda(hipMalloc((void **)&(x_gpu.colInd), x_gpu.nnz * sizeof(unsigned int))); assert_cuda(hipMalloc((void **)&(x_gpu.rowLen), x_gpu.numRows * sizeof(unsigned int))); assert_cuda(hipMalloc((void **)&(x_gpu.rowPerm), x_gpu.numRows * sizeof(unsigned int))); assert_cuda(hipMalloc((void **)&(x_gpu.colStart), x_gpu.numCols * sizeof(unsigned int))); assert_cuda(hipMemcpy(x_gpu.rowLen, &rowLenSorted[0], x_gpu.numRows * sizeof(unsigned 
int), hipMemcpyHostToDevice)); assert_cuda(hipMemcpy(x_gpu.rowPerm, &rowPerm[0], x_gpu.numRows * sizeof(unsigned int), hipMemcpyHostToDevice)); std::vector<float> values_jds(x_gpu.nnz); std::vector<unsigned int> colInd_jds(x_gpu.nnz); std::vector<unsigned int> colStart(x_gpu.maxRowLen); int out_idx = 0; for (int col = 0; col < x_gpu.maxRowLen; col++) { colStart[col] = out_idx; for (int row = 0; row < x_gpu.numRows; row++) { if (rowLenSorted[row] <= col) continue; int i = x_cpu.rowOffsets[rowPerm[row]] + col; values_jds[out_idx] = x_cpu.values[i]; colInd_jds[out_idx] = x_cpu.colInd[i]; out_idx++; } } assert_cuda(hipMemcpy(x_gpu.colStart, &colStart[0], x_gpu.maxRowLen * sizeof(unsigned int), hipMemcpyHostToDevice)); assert_cuda(hipMemcpy(x_gpu.values, &values_jds[0], x_gpu.nnz * sizeof(float), hipMemcpyHostToDevice)); assert_cuda(hipMemcpy(x_gpu.colInd, &colInd_jds[0], x_gpu.nnz * sizeof(unsigned int), hipMemcpyHostToDevice)); } void ohdSVM::freeCudaJds(jds_gpu & x_gpu) { hipFree(x_gpu.values); hipFree(x_gpu.colInd); hipFree(x_gpu.rowLen); hipFree(x_gpu.rowPerm); hipFree(x_gpu.colStart); x_gpu.values = NULL; x_gpu.colInd = NULL; x_gpu.rowLen = NULL; x_gpu.rowPerm = NULL; x_gpu.colStart = NULL; x_gpu.nnz = 0; x_gpu.numRows = 0; x_gpu.numCols = 0; x_gpu.maxRowLen = 0; } void ohdSVM::makeCudaEllrt(ellrt_gpu & x_gpu, const csr & x_cpu, unsigned int sliceSize, unsigned int threadPerRow) { x_gpu.nnz = x_cpu.nnz; x_gpu.numCols = x_cpu.numCols; x_gpu.numRows = x_cpu.numRows; x_gpu.sliceSize = sliceSize; x_gpu.threadPerRow = threadPerRow; unsigned int numSlices = (x_gpu.numRows - 1) / sliceSize + 1; x_gpu.numSlices = numSlices; std::vector<int> rowLen(x_cpu.numRows); std::adjacent_difference(x_cpu.rowOffsets + 1, x_cpu.rowOffsets + x_cpu.numRows + 1, rowLen.begin()); //x_gpu.maxRowLen = *std::max_element(rowLen.begin(), rowLen.end()); size_t bufLen = 0; std::vector<int> sliceWidth(numSlices); for (int i = 0; i < numSlices; i++) { int maxLen = 0; for (int j = 0; j < sliceSize; j++) { int row = sliceSize * i + j; if (row >= x_gpu.numRows) break; if (maxLen < rowLen[row]) maxLen = rowLen[row]; } maxLen = ((maxLen - 1) / threadPerRow + 1) * threadPerRow; sliceWidth[i] = maxLen; bufLen += maxLen * sliceSize; } std::vector<float> values(bufLen, 0.f); std::vector<unsigned int> colInd(bufLen, 0); std::vector<size_t> sliceStart(numSlices); size_t curSliceStart = 0; for (int i = 0; i < numSlices; i++) { sliceStart[i] = curSliceStart; for (int b = 0; b < sliceWidth[i] / threadPerRow; b++) { int out_idx = curSliceStart + threadPerRow * sliceSize * b; for (int j = 0; j < sliceSize; j++) { int row = sliceSize * i + j; if (row >= x_gpu.numRows) break; for (int t = 0; t < threadPerRow; t++) { int col = threadPerRow * b + t; if (col < rowLen[row]) { int idx = x_cpu.rowOffsets[row] + col; values[out_idx] = x_cpu.values[idx]; colInd[out_idx] = x_cpu.colInd[idx]; } out_idx++; } } } curSliceStart += sliceWidth[i] * (size_t)sliceSize; } std::cout << "Allocating EllR-T structure of size " << ((bufLen * (sizeof(x_gpu.values) + sizeof(x_gpu.colInd)) + numSlices * sizeof(x_gpu.sliceStart) + x_gpu.numRows * sizeof(x_gpu.rowLen)) >> 20) << " MB" << std::endl; assert_cuda(hipMalloc((void **)&(x_gpu.values), bufLen * sizeof(float))); assert_cuda(hipMalloc((void **)&(x_gpu.colInd), bufLen * sizeof(unsigned int))); assert_cuda(hipMalloc((void **)&(x_gpu.sliceStart), numSlices * sizeof(size_t))); assert_cuda(hipMalloc((void **)&(x_gpu.rowLen), x_gpu.numRows * sizeof(unsigned int))); assert_cuda(hipMemcpy(x_gpu.values, &values[0], 
bufLen * sizeof(float), hipMemcpyHostToDevice)); assert_cuda(hipMemcpy(x_gpu.colInd, &colInd[0], bufLen * sizeof(unsigned int), hipMemcpyHostToDevice)); assert_cuda(hipMemcpy(x_gpu.sliceStart, &sliceStart[0], numSlices * sizeof(size_t), hipMemcpyHostToDevice)); assert_cuda(hipMemcpy(x_gpu.rowLen, &rowLen[0], x_gpu.numRows * sizeof(unsigned int), hipMemcpyHostToDevice)); } void ohdSVM::freeCudaEllrt(ellrt_gpu & x_gpu) { hipFree(x_gpu.values); hipFree(x_gpu.colInd); hipFree(x_gpu.sliceStart); hipFree(x_gpu.rowLen); x_gpu.values = NULL; x_gpu.colInd = NULL; x_gpu.rowLen = NULL; x_gpu.nnz = 0; x_gpu.numRows = 0; x_gpu.numCols = 0; }
e891a1e8c90c037a1ee82e3df529c35f01901479.cu
#include "csr.h" #include "cudaerror.h" #include "cuda_utils.h" #include <vector> #include <numeric> #include <algorithm> #include <iostream> static __global__ void kernelCalcRowLen(unsigned int * rowLen, const unsigned int * rowOffsets, int numRows) { int k = blockDim.x * blockIdx.x + threadIdx.x; if (k < numRows) { rowLen[k] = rowOffsets[k + 1] - rowOffsets[k]; } } //make a GPU deep copy of a CPU csr matrix void ohdSVM::makeCudaCsr(csr_gpu & x_gpu, const csr & x_cpu) { x_gpu.nnz = x_cpu.nnz; x_gpu.numCols = x_cpu.numCols; x_gpu.numRows = x_cpu.numRows; std::cout << "Allocating CSR structure of size " << ((x_gpu.nnz * (sizeof(x_gpu.values) + sizeof(x_gpu.colInd)) + (x_gpu.numRows + 1) * sizeof(x_gpu.rowOffsets) + x_gpu.numRows * sizeof(x_gpu.rowLen)) >> 20) << " MB" << std::endl; assert_cuda(cudaMalloc((void **)&(x_gpu.values), x_gpu.nnz * sizeof(float))); assert_cuda(cudaMalloc((void **)&(x_gpu.colInd), x_gpu.nnz * sizeof(int))); assert_cuda(cudaMalloc((void **)&(x_gpu.rowOffsets), (x_gpu.numRows+1) * sizeof(int))); assert_cuda(cudaMalloc((void **)&(x_gpu.rowLen), x_gpu.numRows * sizeof(int))); assert_cuda(cudaMemcpy(x_gpu.values, x_cpu.values, x_gpu.nnz * sizeof(float), cudaMemcpyHostToDevice)); assert_cuda(cudaMemcpy(x_gpu.colInd, x_cpu.colInd, x_gpu.nnz * sizeof(int), cudaMemcpyHostToDevice)); assert_cuda(cudaMemcpy(x_gpu.rowOffsets, x_cpu.rowOffsets, (x_gpu.numRows+1) * sizeof(int), cudaMemcpyHostToDevice)); dim3 dimBlock(256); dim3 dimGrid(getgriddim(x_gpu.numRows, dimBlock.x)); kernelCalcRowLen<<<dimGrid, dimBlock>>>(x_gpu.rowLen, x_gpu.rowOffsets, x_gpu.numRows); } void ohdSVM::freeCudaCsr(csr_gpu & x_gpu) { cudaFree(x_gpu.values); cudaFree(x_gpu.colInd); cudaFree(x_gpu.rowOffsets); cudaFree(x_gpu.rowLen); x_gpu.values = NULL; x_gpu.colInd = NULL; x_gpu.rowOffsets = NULL; x_gpu.nnz = 0; x_gpu.numRows = 0; x_gpu.numCols = 0; } template<typename T> class IdxComparator { T * v; public: IdxComparator(T * v) : v(v) {} bool operator()(int i1, int i2) { return (*v)[i1] > (*v)[i2]; } }; void ohdSVM::makeCudaJds(jds_gpu & x_gpu, const csr & x_cpu) { x_gpu.nnz = x_cpu.nnz; x_gpu.numCols = x_cpu.numCols; x_gpu.numRows = x_cpu.numRows; std::vector<int> rowLen(x_cpu.numRows); std::adjacent_difference(x_cpu.rowOffsets + 1, x_cpu.rowOffsets + x_cpu.numRows + 1, rowLen.begin()); x_gpu.maxRowLen = *std::max_element(rowLen.begin(), rowLen.end()); std::vector<int> rowPerm(rowLen.size()); std::iota(rowPerm.begin(), rowPerm.end(), 0); std::sort(rowPerm.begin(), rowPerm.end(), IdxComparator<std::vector<int>>(&rowLen)); std::vector<int> rowLenSorted(rowLen.size()); for (int i = 0; i < rowPerm.size(); i++) rowLenSorted[i] = rowLen[rowPerm[i]]; std::cout << "Allocating JDS structure of size " << ((x_gpu.nnz * (sizeof(x_gpu.values) + sizeof(x_gpu.colInd)) + x_gpu.numRows * (sizeof(x_gpu.rowLen) + sizeof(x_gpu.rowPerm)) + x_gpu.numCols * sizeof(x_gpu.colStart)) >> 20) << " MB" << std::endl; assert_cuda(cudaMalloc((void **)&(x_gpu.values), x_gpu.nnz * sizeof(float))); assert_cuda(cudaMalloc((void **)&(x_gpu.colInd), x_gpu.nnz * sizeof(unsigned int))); assert_cuda(cudaMalloc((void **)&(x_gpu.rowLen), x_gpu.numRows * sizeof(unsigned int))); assert_cuda(cudaMalloc((void **)&(x_gpu.rowPerm), x_gpu.numRows * sizeof(unsigned int))); assert_cuda(cudaMalloc((void **)&(x_gpu.colStart), x_gpu.numCols * sizeof(unsigned int))); assert_cuda(cudaMemcpy(x_gpu.rowLen, &rowLenSorted[0], x_gpu.numRows * sizeof(unsigned int), cudaMemcpyHostToDevice)); assert_cuda(cudaMemcpy(x_gpu.rowPerm, &rowPerm[0], x_gpu.numRows * 
sizeof(unsigned int), cudaMemcpyHostToDevice)); std::vector<float> values_jds(x_gpu.nnz); std::vector<unsigned int> colInd_jds(x_gpu.nnz); std::vector<unsigned int> colStart(x_gpu.maxRowLen); int out_idx = 0; for (int col = 0; col < x_gpu.maxRowLen; col++) { colStart[col] = out_idx; for (int row = 0; row < x_gpu.numRows; row++) { if (rowLenSorted[row] <= col) continue; int i = x_cpu.rowOffsets[rowPerm[row]] + col; values_jds[out_idx] = x_cpu.values[i]; colInd_jds[out_idx] = x_cpu.colInd[i]; out_idx++; } } assert_cuda(cudaMemcpy(x_gpu.colStart, &colStart[0], x_gpu.maxRowLen * sizeof(unsigned int), cudaMemcpyHostToDevice)); assert_cuda(cudaMemcpy(x_gpu.values, &values_jds[0], x_gpu.nnz * sizeof(float), cudaMemcpyHostToDevice)); assert_cuda(cudaMemcpy(x_gpu.colInd, &colInd_jds[0], x_gpu.nnz * sizeof(unsigned int), cudaMemcpyHostToDevice)); } void ohdSVM::freeCudaJds(jds_gpu & x_gpu) { cudaFree(x_gpu.values); cudaFree(x_gpu.colInd); cudaFree(x_gpu.rowLen); cudaFree(x_gpu.rowPerm); cudaFree(x_gpu.colStart); x_gpu.values = NULL; x_gpu.colInd = NULL; x_gpu.rowLen = NULL; x_gpu.rowPerm = NULL; x_gpu.colStart = NULL; x_gpu.nnz = 0; x_gpu.numRows = 0; x_gpu.numCols = 0; x_gpu.maxRowLen = 0; } void ohdSVM::makeCudaEllrt(ellrt_gpu & x_gpu, const csr & x_cpu, unsigned int sliceSize, unsigned int threadPerRow) { x_gpu.nnz = x_cpu.nnz; x_gpu.numCols = x_cpu.numCols; x_gpu.numRows = x_cpu.numRows; x_gpu.sliceSize = sliceSize; x_gpu.threadPerRow = threadPerRow; unsigned int numSlices = (x_gpu.numRows - 1) / sliceSize + 1; x_gpu.numSlices = numSlices; std::vector<int> rowLen(x_cpu.numRows); std::adjacent_difference(x_cpu.rowOffsets + 1, x_cpu.rowOffsets + x_cpu.numRows + 1, rowLen.begin()); //x_gpu.maxRowLen = *std::max_element(rowLen.begin(), rowLen.end()); size_t bufLen = 0; std::vector<int> sliceWidth(numSlices); for (int i = 0; i < numSlices; i++) { int maxLen = 0; for (int j = 0; j < sliceSize; j++) { int row = sliceSize * i + j; if (row >= x_gpu.numRows) break; if (maxLen < rowLen[row]) maxLen = rowLen[row]; } maxLen = ((maxLen - 1) / threadPerRow + 1) * threadPerRow; sliceWidth[i] = maxLen; bufLen += maxLen * sliceSize; } std::vector<float> values(bufLen, 0.f); std::vector<unsigned int> colInd(bufLen, 0); std::vector<size_t> sliceStart(numSlices); size_t curSliceStart = 0; for (int i = 0; i < numSlices; i++) { sliceStart[i] = curSliceStart; for (int b = 0; b < sliceWidth[i] / threadPerRow; b++) { int out_idx = curSliceStart + threadPerRow * sliceSize * b; for (int j = 0; j < sliceSize; j++) { int row = sliceSize * i + j; if (row >= x_gpu.numRows) break; for (int t = 0; t < threadPerRow; t++) { int col = threadPerRow * b + t; if (col < rowLen[row]) { int idx = x_cpu.rowOffsets[row] + col; values[out_idx] = x_cpu.values[idx]; colInd[out_idx] = x_cpu.colInd[idx]; } out_idx++; } } } curSliceStart += sliceWidth[i] * (size_t)sliceSize; } std::cout << "Allocating EllR-T structure of size " << ((bufLen * (sizeof(x_gpu.values) + sizeof(x_gpu.colInd)) + numSlices * sizeof(x_gpu.sliceStart) + x_gpu.numRows * sizeof(x_gpu.rowLen)) >> 20) << " MB" << std::endl; assert_cuda(cudaMalloc((void **)&(x_gpu.values), bufLen * sizeof(float))); assert_cuda(cudaMalloc((void **)&(x_gpu.colInd), bufLen * sizeof(unsigned int))); assert_cuda(cudaMalloc((void **)&(x_gpu.sliceStart), numSlices * sizeof(size_t))); assert_cuda(cudaMalloc((void **)&(x_gpu.rowLen), x_gpu.numRows * sizeof(unsigned int))); assert_cuda(cudaMemcpy(x_gpu.values, &values[0], bufLen * sizeof(float), cudaMemcpyHostToDevice)); 
assert_cuda(cudaMemcpy(x_gpu.colInd, &colInd[0], bufLen * sizeof(unsigned int), cudaMemcpyHostToDevice)); assert_cuda(cudaMemcpy(x_gpu.sliceStart, &sliceStart[0], numSlices * sizeof(size_t), cudaMemcpyHostToDevice)); assert_cuda(cudaMemcpy(x_gpu.rowLen, &rowLen[0], x_gpu.numRows * sizeof(unsigned int), cudaMemcpyHostToDevice)); } void ohdSVM::freeCudaEllrt(ellrt_gpu & x_gpu) { cudaFree(x_gpu.values); cudaFree(x_gpu.colInd); cudaFree(x_gpu.sliceStart); cudaFree(x_gpu.rowLen); x_gpu.values = NULL; x_gpu.colInd = NULL; x_gpu.rowLen = NULL; x_gpu.nnz = 0; x_gpu.numRows = 0; x_gpu.numCols = 0; }
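The pair above derives per-row lengths from the CSR rowOffsets twice, once on the GPU (kernelCalcRowLen) and once with std::adjacent_difference when building the JDS and EllR-T layouts. Below is a host-only sketch of that second path on a hypothetical 4-row matrix, to make the off-by-one behaviour of adjacent_difference explicit.

#include <cstdio>
#include <numeric>
#include <vector>

// rowOffsets is CSR: rowOffsets[r+1] - rowOffsets[r] is the number of stored
// values in row r, which is exactly what kernelCalcRowLen computes per thread.
int main() {
    // Hypothetical 4-row CSR offset array (5 entries, nnz = 9).
    std::vector<unsigned> rowOffsets = {0, 3, 3, 7, 9};
    std::vector<unsigned> rowLen(rowOffsets.size() - 1);

    // Same call pattern as makeCudaJds / makeCudaEllrt above.
    std::adjacent_difference(rowOffsets.begin() + 1, rowOffsets.end(), rowLen.begin());
    // adjacent_difference copies rowOffsets[1] itself into rowLen[0]; because
    // rowOffsets[0] == 0 that is already the length of row 0, matching the kernel.

    for (size_t r = 0; r < rowLen.size(); ++r)
        std::printf("row %zu: %u stored values\n", r, rowLen[r]);
    return 0;
}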
94f7beff3fd99e67685ff3e630c7786d3a5a2636.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ============================================================================ Name : practica2Cuda.cu Author : Sergio Rosello & Csar Gil Version : 0.1 Copyright : If you copy you will fail Description : Optimizaciones usando GPU ============================================================================ */ #include <stdlib.h> #include <stdio.h> #include <time.h> #include "debug_time.h" //#define LADO 100 //lado de la matriz using namespace std; // PARA LA PRACTICA DE CONCURRENCIA /* * 1 - GUARDAR MATRIZ RESULTADO EN FICHERO * 2 - * */ // Matriz identidad void inicializarMatrizIdentidad(float** matriz, int tamFilas, int tamColumnas){ // filas y colunmnas deben ser iguales para una matriz identidad int i, j; for (i = 0; i < tamFilas; i++) for (j = 0; j < tamColumnas; j++) if(i==j){ matriz[i][j] = 1; }else{ matriz[i][j] = 0; } } // Matriz Aleatoria void inicializarMatrizAleatoria(float **matriz, int tamFilas, int tamColumnas) { int i, j; for (i = 0; i < tamFilas; i++){ for (j = 0; j < tamColumnas; j++){ matriz[i][j] = rand() % 10; //matriz[i][j] = 1; } } } void crearMatriz(const char* nombre, bool identidad, int LADO) { int i = 0; //int j = 0; //Inicializamos memoria para la matriz. float **mat = (float **)malloc(sizeof(float *) * LADO); for (i = 0; i < LADO; i++) { mat[i] = (float *)malloc(sizeof(float) * LADO); } //Dependiendo de si es identidad hace una llamada u otra if(identidad){ inicializarMatrizIdentidad(mat, LADO, LADO); }else{ inicializarMatrizAleatoria(mat, LADO, LADO); } //Abrimos el fichero binario mat en modo de escritura. FILE* fich_bin = fopen(nombre, "w"); //Escribimos en el archivo binario apuntado por fich_bin //Volcamos los datos de la matriz en el archivo binario apuntado por fich_bin for (i = 0; i < LADO; i++) fwrite(mat[i], sizeof(int), LADO, fich_bin); fclose(fich_bin); //Liberamos memoria de cada uno de los elementos de la matriz. for (i = 0; i < LADO; i++) { free(mat[i]); } //Liberamos memoria de la matriz. free(mat); } //Guardar matriz resultado en fichero void guardarMatrizResultado(int LADO, float** matriz){ int i = 0; //Abrimos el fichero binario mat en modo de escritura. FILE* fich_bin = fopen("resultado.bin", "w"); //Escribimos en el archivo binario apuntado por fich_bin //Volcamos los datos de la matriz en el archivo binario apuntado por fich_bin for (i = 0; i < LADO; i++){ for (int j = 0; j < LADO; j++){ fwrite(&(matriz[j][i]), sizeof(int), 1, fich_bin); } } fclose(fich_bin); } //Imprime la matriz recibida void imprimirMatriz(float **matriz, int LADO) { for (int i = 0; i < LADO; i++) { for (int j = 0; j < LADO; j++) printf("%.0f ", matriz[i][j]); printf("\n"); } } //lee la matriz de un fichero binario void leerDatosBin(const char *nombreFichero, float ***datos, bool leerTraspuesta, int LADO) { FILE* fichero = fopen(nombreFichero, "r"); //Funciones accesibles: fclose, fread, fwrite float **datosLeidos; //int numFilas, numColumnas; //sint i = 0, j = 0; //Inicializamos un array para guardar todos los datos que leemos del fichero. datosLeidos = (float **)malloc(LADO * sizeof(float*)); //multiplicamos por 4 (bytes que ocupa un float) for (int i = 0; i < LADO; i++) datosLeidos[i] = (float*)malloc(sizeof(float)*LADO); if(!leerTraspuesta) for (int i = 0; i < LADO; i++) fread(datosLeidos[i], sizeof(float), LADO, fichero); else //Leer la matriz de una forma traspuesta. 
for (int i = 0; i < LADO; i++) for (int j = 0; j < LADO; j++) fread(&(datosLeidos[j][i]), sizeof(float), 1, fichero); //Derreferenciacin. (*datos) = datosLeidos; fclose(fichero); } //lamada de la gpu para multiplicar los vectores __device__ float multiplicarVectores(int lado, float* fila, float* columna){ if(threadIdx.x == 0 && threadIdx.y == 0) printf("Multiplicando vectores\n"); float resultadoAux = 0; for(int i = 0; i<lado; i++){ resultadoAux += fila[i] * columna[i]; } return resultadoAux; } //llamada a la gpu para multiplicar matrices __global__ void kernel_multiplicarMatrices(int lado, float** matriz1, float** matriz2, float** resultado){ //printf("estoy multiplicando\n"); int fila = blockIdx.x * blockDim.y + threadIdx.y; int columna = blockIdx.y * blockDim.x + threadIdx.x; //control de errores del thread if((fila >= lado) || (columna >= lado)){ //printf("ha ocurrido un error en multiplicacion\n"); return; } resultado[fila][columna] = multiplicarVectores(lado, matriz1[fila], matriz2[columna]); printf("cuda thread %d %d %.0f \n",fila,columna,resultado[fila][columna]); } int main(int argc, char** argv){ // inicializacion del debug_time DEBUG_TIME_INIT; DEBUG_TIME_START; //Se asigna el lado de la matriz segun el parametro introducido en la ejecucion int LADO = atoi(argv[1]); //Inicializacin de la semilla para los nmeros aleatorios. srand(time(NULL)); bool leerTraspuesta = true; //CREACION DE LAS MATRICES ALEATORIAS EN UN FICHERO BINARIO crearMatriz("mat.bin", false, LADO); crearMatriz("matIdentidad.bin", true, LADO); //CARGA E INICIALIZACION DE LAS MATRICES //CPU float** matriz1_host; float** matriz2_host; float** matrizResultado_host; //NEXO (memoria intermedia) float** matriz1_nexo; float** matriz2_nexo; float** matrizResultado_nexo; //GPU float** matriz1_device; float** matriz2_device; float** matrizResultado_device; //leemos de fichero binario leerDatosBin("mat.bin", &matriz1_host, leerTraspuesta, LADO); leerDatosBin("matIdentidad.bin", &matriz2_host, !leerTraspuesta, LADO); //IMPRIME LAS MATRICES GENERADAS printf("Se van a generar matrices de %d X %d : \n", LADO, LADO); printf("MATRIZ A: \n\n"); imprimirMatriz(matriz1_host, LADO); printf("MATRIZ B Identidad: \n\n"); imprimirMatriz(matriz2_host, LADO); //Reserva para el resultado del host matrizResultado_host = (float**)malloc(LADO * sizeof(float*)); for(int i=0; i < LADO; i++){ matrizResultado_host[i] = (float*)malloc(LADO * sizeof(float)); } //Reserva de la memoria intermedia matriz1_nexo = (float**)malloc(LADO * sizeof(float*)); matriz2_nexo = (float**)malloc(LADO * sizeof(float*)); matrizResultado_nexo = (float**)malloc(LADO * sizeof(float*)); //Reserva de memoria en GPU hipError_t err1 = hipMalloc((void**)&matriz1_device, sizeof(float*)* LADO); printf("Run Kernel: %s \n", hipGetErrorString(err1)); err1 = hipMalloc((void**)&matriz2_device, sizeof(float*)* LADO); printf("Run Kernel: %s \n", hipGetErrorString(err1)); err1 = hipMalloc((void**)&matrizResultado_device, sizeof(float*)* LADO); printf("Run Kernel: %s \n", hipGetErrorString(err1)); //Reserva de memoria para cada uno de los arrays intermedios for(int i = 0; i < LADO; i++){ err1 = hipMalloc((void**)&matriz1_nexo[i], sizeof(float)* LADO); printf("matriz1_nexo Run Kernel: %s \n", hipGetErrorString(err1)); err1 = hipMalloc((void**)&matriz2_nexo[i], sizeof(float)* LADO); printf("matriz2_nexo Run Kernel: %s \n", hipGetErrorString(err1)); hipMalloc((void**)&(matrizResultado_nexo[i]), sizeof(float)* LADO); } //Copia el contenido de los arrays de CPU a los arrays de la matriz 
intermedia for(int i = 0; i < LADO; i++){ err1 = hipMemcpy(matriz1_nexo[i], matriz1_host[i], LADO * sizeof(float),hipMemcpyHostToDevice); printf("cudaMemcoy matriz2_host1 a nexo1 Run Kernel: %s \n", hipGetErrorString(err1)); err1 = hipMemcpy(matriz2_nexo[i], matriz2_host[i], LADO * sizeof(float),hipMemcpyHostToDevice); printf("cudaMemcoy matriz2_host2 a nexo2 Run Kernel: %s \n", hipGetErrorString(err1)); } //copia el contenido del array de punteros de CPU a GPU err1 = hipMemcpy(matriz1_device, matriz1_nexo, LADO * sizeof(float*),hipMemcpyHostToDevice); printf("copia de cpu a gpu array de punteros matriz1 Run Kernel: %s \n", hipGetErrorString(err1)); err1 = hipMemcpy(matriz2_device, matriz2_nexo, LADO * sizeof(float*),hipMemcpyHostToDevice); printf("copia de cpu a gpu array de punteros matriz2 Run Kernel: %s \n", hipGetErrorString(err1)); hipMemcpy(matrizResultado_device, matrizResultado_nexo, LADO * sizeof(float*),hipMemcpyHostToDevice); //Operaciones en GPU: // tamBloque = 32 porque los kernels proporcionan las instrucciones en warps (32 threads) //entonces tiene que ser multiplo de 32 para no despediciar threads. // dimensionGrid -> (LADO / tamBloque) + 1 para calcular el numero de bloques para la x y para y, 1 para la z // dimensionBlock -> numero de threads por cada bloque (32*32 = 1024 threads) // https://codeyarns.com/2011/02/16/cuda-dim3/ // http://www.icl.utk.edu/~mgates3/docs/cuda.html int tamBloque = 32; dim3 dimensionGrid = dim3((int)(LADO / tamBloque) + 1, (int)(LADO / tamBloque) + 1, 1); dim3 dimensionBlock = dim3(tamBloque, tamBloque, 1); printf("Antes de multiplicar\n"); //hace la multiplicacion en GPU { // PARA EL CALCULO DEL TIEMPO DE EJECUCION EN GPU DEBUG_TIME_INIT; DEBUG_TIME_START; hipLaunchKernelGGL(( kernel_multiplicarMatrices) , dim3(dimensionGrid),dim3(dimensionBlock), 0, 0, LADO, matriz1_device, matriz2_device, matrizResultado_device); //Para que espere hasta que todos los threads terminen (CUDA THREADS SYNCRONIZE) hipError_t error = hipDeviceSynchronize(); printf("Thread synchronization: %s \n", hipGetErrorString(error)); //Finaliza el tiempo de ejecucion en GPU DEBUG_TIME_END; // Imprimir el tiempo DEBUG_PRINT_FINALTIME("Tiempo transcurrido en GPU de multiplicar matrices: \n\t"); } //pasamos el resultado de device al host for(int i = 0; i < LADO; i++){ err1 = hipMemcpy(matrizResultado_host[i], matrizResultado_nexo[i], LADO * sizeof(float),hipMemcpyDeviceToHost); printf("copia de gpu a cpu final Run Kernel: %s \n", hipGetErrorString(err1)); } //imprime la matriz resultado una vez copiada al host printf("El resultado es: \n"); imprimirMatriz(matrizResultado_host, LADO); //Guarda el resultado en un fichero binario guardarMatrizResultado(LADO, matrizResultado_host); /* // ************** TEST PARA PROBAR QUE SE HA GUADADO BIEN ****** float** test; test = (float**)malloc(LADO * sizeof(float*)); for(int i=0; i < LADO; i++){ test[i] = (float*)malloc(LADO * sizeof(float)); } leerDatosBin("resultado.bin", &test, leerTraspuesta, LADO); printf("\n RESULTADO DE RESUTLADO\n"); imprimirMatriz(test, LADO);*/ //LIBERACION DE MEMORIA DE CPU E INTERMEDIA for(int i = 0; i < LADO; i++){ //CPU free(matriz1_host[i]); free(matriz2_host[i]); free(matrizResultado_host[i]); //Intermedia hipFree(matriz1_nexo[i]); hipFree(matriz2_nexo[i]); hipFree(matrizResultado_nexo[i]); } //liberacion del array de punteros free(matriz1_host); free(matriz2_host); free(matrizResultado_host); free(matriz1_nexo); free(matriz2_nexo); free(matrizResultado_nexo); //free GPU hipFree(matriz1_device); 
hipFree(matriz2_device); hipFree(matrizResultado_device); DEBUG_TIME_END; DEBUG_PRINT_FINALTIME("Tiempo total del programa: \n\t"); }
94f7beff3fd99e67685ff3e630c7786d3a5a2636.cu
/* ============================================================================ Name : practica2Cuda.cu Author : Sergio Rosello & César Gil Version : 0.1 Copyright : If you copy you will fail Description : Optimizaciones usando GPU ============================================================================ */ #include <stdlib.h> #include <stdio.h> #include <time.h> #include "debug_time.h" //#define LADO 100 //lado de la matriz using namespace std; // PARA LA PRACTICA DE CONCURRENCIA /* * 1 - GUARDAR MATRIZ RESULTADO EN FICHERO * 2 - * */ // Matriz identidad void inicializarMatrizIdentidad(float** matriz, int tamFilas, int tamColumnas){ // filas y colunmnas deben ser iguales para una matriz identidad int i, j; for (i = 0; i < tamFilas; i++) for (j = 0; j < tamColumnas; j++) if(i==j){ matriz[i][j] = 1; }else{ matriz[i][j] = 0; } } // Matriz Aleatoria void inicializarMatrizAleatoria(float **matriz, int tamFilas, int tamColumnas) { int i, j; for (i = 0; i < tamFilas; i++){ for (j = 0; j < tamColumnas; j++){ matriz[i][j] = rand() % 10; //matriz[i][j] = 1; } } } void crearMatriz(const char* nombre, bool identidad, int LADO) { int i = 0; //int j = 0; //Inicializamos memoria para la matriz. float **mat = (float **)malloc(sizeof(float *) * LADO); for (i = 0; i < LADO; i++) { mat[i] = (float *)malloc(sizeof(float) * LADO); } //Dependiendo de si es identidad hace una llamada u otra if(identidad){ inicializarMatrizIdentidad(mat, LADO, LADO); }else{ inicializarMatrizAleatoria(mat, LADO, LADO); } //Abrimos el fichero binario mat en modo de escritura. FILE* fich_bin = fopen(nombre, "w"); //Escribimos en el archivo binario apuntado por fich_bin //Volcamos los datos de la matriz en el archivo binario apuntado por fich_bin for (i = 0; i < LADO; i++) fwrite(mat[i], sizeof(int), LADO, fich_bin); fclose(fich_bin); //Liberamos memoria de cada uno de los elementos de la matriz. for (i = 0; i < LADO; i++) { free(mat[i]); } //Liberamos memoria de la matriz. free(mat); } //Guardar matriz resultado en fichero void guardarMatrizResultado(int LADO, float** matriz){ int i = 0; //Abrimos el fichero binario mat en modo de escritura. FILE* fich_bin = fopen("resultado.bin", "w"); //Escribimos en el archivo binario apuntado por fich_bin //Volcamos los datos de la matriz en el archivo binario apuntado por fich_bin for (i = 0; i < LADO; i++){ for (int j = 0; j < LADO; j++){ fwrite(&(matriz[j][i]), sizeof(int), 1, fich_bin); } } fclose(fich_bin); } //Imprime la matriz recibida void imprimirMatriz(float **matriz, int LADO) { for (int i = 0; i < LADO; i++) { for (int j = 0; j < LADO; j++) printf("%.0f ", matriz[i][j]); printf("\n"); } } //lee la matriz de un fichero binario void leerDatosBin(const char *nombreFichero, float ***datos, bool leerTraspuesta, int LADO) { FILE* fichero = fopen(nombreFichero, "r"); //Funciones accesibles: fclose, fread, fwrite float **datosLeidos; //int numFilas, numColumnas; //sint i = 0, j = 0; //Inicializamos un array para guardar todos los datos que leemos del fichero. datosLeidos = (float **)malloc(LADO * sizeof(float*)); //multiplicamos por 4 (bytes que ocupa un float) for (int i = 0; i < LADO; i++) datosLeidos[i] = (float*)malloc(sizeof(float)*LADO); if(!leerTraspuesta) for (int i = 0; i < LADO; i++) fread(datosLeidos[i], sizeof(float), LADO, fichero); else //Leer la matriz de una forma traspuesta. for (int i = 0; i < LADO; i++) for (int j = 0; j < LADO; j++) fread(&(datosLeidos[j][i]), sizeof(float), 1, fichero); //Derreferenciación. 
(*datos) = datosLeidos; fclose(fichero); } //lamada de la gpu para multiplicar los vectores __device__ float multiplicarVectores(int lado, float* fila, float* columna){ if(threadIdx.x == 0 && threadIdx.y == 0) printf("Multiplicando vectores\n"); float resultadoAux = 0; for(int i = 0; i<lado; i++){ resultadoAux += fila[i] * columna[i]; } return resultadoAux; } //llamada a la gpu para multiplicar matrices __global__ void kernel_multiplicarMatrices(int lado, float** matriz1, float** matriz2, float** resultado){ //printf("estoy multiplicando\n"); int fila = blockIdx.x * blockDim.y + threadIdx.y; int columna = blockIdx.y * blockDim.x + threadIdx.x; //control de errores del thread if((fila >= lado) || (columna >= lado)){ //printf("ha ocurrido un error en multiplicacion\n"); return; } resultado[fila][columna] = multiplicarVectores(lado, matriz1[fila], matriz2[columna]); printf("cuda thread %d %d %.0f \n",fila,columna,resultado[fila][columna]); } int main(int argc, char** argv){ // inicializacion del debug_time DEBUG_TIME_INIT; DEBUG_TIME_START; //Se asigna el lado de la matriz segun el parametro introducido en la ejecucion int LADO = atoi(argv[1]); //Inicialización de la semilla para los números aleatorios. srand(time(NULL)); bool leerTraspuesta = true; //CREACION DE LAS MATRICES ALEATORIAS EN UN FICHERO BINARIO crearMatriz("mat.bin", false, LADO); crearMatriz("matIdentidad.bin", true, LADO); //CARGA E INICIALIZACION DE LAS MATRICES //CPU float** matriz1_host; float** matriz2_host; float** matrizResultado_host; //NEXO (memoria intermedia) float** matriz1_nexo; float** matriz2_nexo; float** matrizResultado_nexo; //GPU float** matriz1_device; float** matriz2_device; float** matrizResultado_device; //leemos de fichero binario leerDatosBin("mat.bin", &matriz1_host, leerTraspuesta, LADO); leerDatosBin("matIdentidad.bin", &matriz2_host, !leerTraspuesta, LADO); //IMPRIME LAS MATRICES GENERADAS printf("Se van a generar matrices de %d X %d : \n", LADO, LADO); printf("MATRIZ A: \n\n"); imprimirMatriz(matriz1_host, LADO); printf("MATRIZ B Identidad: \n\n"); imprimirMatriz(matriz2_host, LADO); //Reserva para el resultado del host matrizResultado_host = (float**)malloc(LADO * sizeof(float*)); for(int i=0; i < LADO; i++){ matrizResultado_host[i] = (float*)malloc(LADO * sizeof(float)); } //Reserva de la memoria intermedia matriz1_nexo = (float**)malloc(LADO * sizeof(float*)); matriz2_nexo = (float**)malloc(LADO * sizeof(float*)); matrizResultado_nexo = (float**)malloc(LADO * sizeof(float*)); //Reserva de memoria en GPU cudaError_t err1 = cudaMalloc((void**)&matriz1_device, sizeof(float*)* LADO); printf("Run Kernel: %s \n", cudaGetErrorString(err1)); err1 = cudaMalloc((void**)&matriz2_device, sizeof(float*)* LADO); printf("Run Kernel: %s \n", cudaGetErrorString(err1)); err1 = cudaMalloc((void**)&matrizResultado_device, sizeof(float*)* LADO); printf("Run Kernel: %s \n", cudaGetErrorString(err1)); //Reserva de memoria para cada uno de los arrays intermedios for(int i = 0; i < LADO; i++){ err1 = cudaMalloc((void**)&matriz1_nexo[i], sizeof(float)* LADO); printf("matriz1_nexo Run Kernel: %s \n", cudaGetErrorString(err1)); err1 = cudaMalloc((void**)&matriz2_nexo[i], sizeof(float)* LADO); printf("matriz2_nexo Run Kernel: %s \n", cudaGetErrorString(err1)); cudaMalloc((void**)&(matrizResultado_nexo[i]), sizeof(float)* LADO); } //Copia el contenido de los arrays de CPU a los arrays de la matriz intermedia for(int i = 0; i < LADO; i++){ err1 = cudaMemcpy(matriz1_nexo[i], matriz1_host[i], LADO * 
sizeof(float),cudaMemcpyHostToDevice); printf("cudaMemcoy matriz2_host1 a nexo1 Run Kernel: %s \n", cudaGetErrorString(err1)); err1 = cudaMemcpy(matriz2_nexo[i], matriz2_host[i], LADO * sizeof(float),cudaMemcpyHostToDevice); printf("cudaMemcoy matriz2_host2 a nexo2 Run Kernel: %s \n", cudaGetErrorString(err1)); } //copia el contenido del array de punteros de CPU a GPU err1 = cudaMemcpy(matriz1_device, matriz1_nexo, LADO * sizeof(float*),cudaMemcpyHostToDevice); printf("copia de cpu a gpu array de punteros matriz1 Run Kernel: %s \n", cudaGetErrorString(err1)); err1 = cudaMemcpy(matriz2_device, matriz2_nexo, LADO * sizeof(float*),cudaMemcpyHostToDevice); printf("copia de cpu a gpu array de punteros matriz2 Run Kernel: %s \n", cudaGetErrorString(err1)); cudaMemcpy(matrizResultado_device, matrizResultado_nexo, LADO * sizeof(float*),cudaMemcpyHostToDevice); //Operaciones en GPU: // tamBloque = 32 porque los kernels proporcionan las instrucciones en warps (32 threads) //entonces tiene que ser multiplo de 32 para no despediciar threads. // dimensionGrid -> (LADO / tamBloque) + 1 para calcular el numero de bloques para la x y para y, 1 para la z // dimensionBlock -> numero de threads por cada bloque (32*32 = 1024 threads) // https://codeyarns.com/2011/02/16/cuda-dim3/ // http://www.icl.utk.edu/~mgates3/docs/cuda.html int tamBloque = 32; dim3 dimensionGrid = dim3((int)(LADO / tamBloque) + 1, (int)(LADO / tamBloque) + 1, 1); dim3 dimensionBlock = dim3(tamBloque, tamBloque, 1); printf("Antes de multiplicar\n"); //hace la multiplicacion en GPU { // PARA EL CALCULO DEL TIEMPO DE EJECUCION EN GPU DEBUG_TIME_INIT; DEBUG_TIME_START; kernel_multiplicarMatrices <<<dimensionGrid,dimensionBlock>>>(LADO, matriz1_device, matriz2_device, matrizResultado_device); //Para que espere hasta que todos los threads terminen (CUDA THREADS SYNCRONIZE) cudaError_t error = cudaDeviceSynchronize(); printf("Thread synchronization: %s \n", cudaGetErrorString(error)); //Finaliza el tiempo de ejecucion en GPU DEBUG_TIME_END; // Imprimir el tiempo DEBUG_PRINT_FINALTIME("Tiempo transcurrido en GPU de multiplicar matrices: \n\t"); } //pasamos el resultado de device al host for(int i = 0; i < LADO; i++){ err1 = cudaMemcpy(matrizResultado_host[i], matrizResultado_nexo[i], LADO * sizeof(float),cudaMemcpyDeviceToHost); printf("copia de gpu a cpu final Run Kernel: %s \n", cudaGetErrorString(err1)); } //imprime la matriz resultado una vez copiada al host printf("El resultado es: \n"); imprimirMatriz(matrizResultado_host, LADO); //Guarda el resultado en un fichero binario guardarMatrizResultado(LADO, matrizResultado_host); /* // ************** TEST PARA PROBAR QUE SE HA GUADADO BIEN ****** float** test; test = (float**)malloc(LADO * sizeof(float*)); for(int i=0; i < LADO; i++){ test[i] = (float*)malloc(LADO * sizeof(float)); } leerDatosBin("resultado.bin", &test, leerTraspuesta, LADO); printf("\n RESULTADO DE RESUTLADO\n"); imprimirMatriz(test, LADO);*/ //LIBERACION DE MEMORIA DE CPU E INTERMEDIA for(int i = 0; i < LADO; i++){ //CPU free(matriz1_host[i]); free(matriz2_host[i]); free(matrizResultado_host[i]); //Intermedia cudaFree(matriz1_nexo[i]); cudaFree(matriz2_nexo[i]); cudaFree(matrizResultado_nexo[i]); } //liberacion del array de punteros free(matriz1_host); free(matriz2_host); free(matrizResultado_host); free(matriz1_nexo); free(matriz2_nexo); free(matrizResultado_nexo); //free GPU cudaFree(matriz1_device); cudaFree(matriz2_device); cudaFree(matrizResultado_device); DEBUG_TIME_END; DEBUG_PRINT_FINALTIME("Tiempo total del programa: 
\n\t"); }
6e3b15c3a1e6a4b0c53e1748704b2c3fd9b28b3d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "gAddRow.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *out = NULL; hipMalloc(&out, XSIZE*YSIZE); const float *in = NULL; hipMalloc(&in, XSIZE*YSIZE); int length = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( gAddRow), dim3(gridBlock),dim3(threadBlock), 0, 0, out,in,length); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( gAddRow), dim3(gridBlock),dim3(threadBlock), 0, 0, out,in,length); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( gAddRow), dim3(gridBlock),dim3(threadBlock), 0, 0, out,in,length); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
6e3b15c3a1e6a4b0c53e1748704b2c3fd9b28b3d.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "gAddRow.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *out = NULL; cudaMalloc(&out, XSIZE*YSIZE); const float *in = NULL; cudaMalloc(&in, XSIZE*YSIZE); int length = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); gAddRow<<<gridBlock,threadBlock>>>(out,in,length); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { gAddRow<<<gridBlock,threadBlock>>>(out,in,length); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { gAddRow<<<gridBlock,threadBlock>>>(out,in,length); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
5ac20248d1a45e589645af5a6b801180ea098e4f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Implementes the math functions for CPU. #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include <thrust/system/hip/detail/par.h> #include <thrust/version.h> #include "caffe2/utils/math.h" #include "caffe2/core/context_gpu.h" #if THRUST_VERSION >= 100800 #define THRUST_SUPPORTS_PER_THREAD #endif // THRUST_VERSION >= 100800 namespace caffe2 { namespace math { #define DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(T, Funcname, function) \ __global__ \ void _Kernel_##T##_##Funcname(const int N, const T* x, T* y) { \ CUDA_1D_KERNEL_LOOP(i, N) { \ y[i] = function(x[i]); \ } \ } \ template <> \ void Funcname<T, CUDAContext>( \ const int N, const T* x, T* y, \ CUDAContext* context) { \ hipLaunchKernelGGL(( _Kernel_##T##_##Funcname), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), \ 0, context->cuda_stream(), \ N, x, y); \ } DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Exp, expf); DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Exp, exp); DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log, logf); DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Log, log); __device__ float cuda_sqrf(const float x) { return x * x; } __device__ double cuda_sqr(const double x) { return x * x; } DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqr, cuda_sqrf); DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Sqr, cuda_sqr); #undef DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION #define DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(T, Funcname, expr) \ __global__ void _Kernel_##T##_##Funcname( \ const int N, const T* a, const T* b, T* y) { \ CUDA_1D_KERNEL_LOOP(i, N) { \ y[i] = a[i] expr b[i]; \ } \ } \ template <> \ void Funcname<T, CUDAContext>( \ const int N, const T* a, const T* b, T* y, CUDAContext* context) { \ hipLaunchKernelGGL(( _Kernel_##T##_##Funcname), \ CAFFE_GET_BLOCKS(N), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream(), N, a, b, y); \ } DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Add, +); DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Sub, -); DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Mul, *); DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Div, /); // Caffe2 gemm provides a simpler interface to the gemm functions, with the // limitation that the data has to be contiguous in memory. template <> void Gemm<float, CUDAContext>( const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C, CUDAContext* context) { // Note that cublas follows fortran order, so the order is different from // the cblas convention. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasSgemm(context->cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void GemmEx<float, CUDAContext>( const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const int lda, const float* B, const int ldb, const float beta, float* C, const int ldc, CUDAContext* context) { // Note that cublas follows fortran order, so the order is different from // the cblas convention. hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? 
HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasSgemm(context->cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, ldc)); } template <> void Gemv<float, CUDAContext>( const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y, CUDAContext* context) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK(hipblasSgemv(context->cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } namespace { template <typename T> __global__ void SetKernel(const int N, const T alpha, T* Y) { CUDA_1D_KERNEL_LOOP(i, N) { Y[i] = alpha; } } } // namespace #define CAFFE2_SPECIALIZED_CUDA_SET(T) \ template <> \ void Set<T, CUDAContext>(const TIndex N, const T alpha, T *Y, \ CUDAContext* context) { \ hipLaunchKernelGGL(( SetKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), \ 0, context->cuda_stream(), N, alpha, Y); \ } CAFFE2_SPECIALIZED_CUDA_SET(float); CAFFE2_SPECIALIZED_CUDA_SET(double); CAFFE2_SPECIALIZED_CUDA_SET(int); CAFFE2_SPECIALIZED_CUDA_SET(int64_t); CAFFE2_SPECIALIZED_CUDA_SET(bool); CAFFE2_SPECIALIZED_CUDA_SET(char); CAFFE2_SPECIALIZED_CUDA_SET(uint8_t); #undef CAFFE2_SPECIALIZED_CUDA_SET namespace { template <typename T> __global__ void UniformShift(const int N, const T min, const T max, T* x) { T scale = max - min; CUDA_1D_KERNEL_LOOP(i, N) { x[i] = x[i] * scale + min; } } __global__ void UniformIntFit(const int N, const int min, const int max, unsigned int* x) { int* x_int = reinterpret_cast<int*>(x); int range = (max - min + 1); CUDA_1D_KERNEL_LOOP(i, N) { x_int[i] = min + static_cast<int>(x[i] % range); } } } // namespace template <> void RandUniform<float, CUDAContext>( const int n, const float min, const float max, float* r, CUDAContext* context) { CURAND_CHECK(hiprandGenerateUniform(context->curand_generator(), r, n)); hipLaunchKernelGGL(( UniformShift<float>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), n, min, max, r); } template <> void RandUniform<double, CUDAContext>( const int n, const double min, const double max, double* r, CUDAContext* context) { CURAND_CHECK(hiprandGenerateUniformDouble(context->curand_generator(), r, n)); hipLaunchKernelGGL(( UniformShift<double>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), n, min, max, r); } template <> void RandUniform<int, CUDAContext>( const int n, const int min, const int max, int* r, CUDAContext* context) { CURAND_CHECK(hiprandGenerate(context->curand_generator(), reinterpret_cast<unsigned int*>(r), n)); hipLaunchKernelGGL(( UniformIntFit), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), n, min, max, reinterpret_cast<unsigned int*>(r)); } template <typename T> int HandleOddLengthRandGaussian( const int n, const T mean, const T std, T* r, CUDAContext* context) { if (n % 2 == 1) { std::default_random_engine generator; std::normal_distribution<T> distribution(mean, std); const T random_value = distribution(generator); math::Set<T, CUDAContext>( 1, random_value, r + sizeof(T) * (n - 1), context); return n - 1; } return n; } template <> void RandGaussian<float, CUDAContext>( const int n, const float mean, const float std, float* r, CUDAContext* context) { // If n is odd, we add a random Gaussian value at the end manually // and generate n-1 random values using hiprandGenerateNormal. // hiprandGenerateNormal requires n to be even. 
const int even_n = HandleOddLengthRandGaussian<float>(n, mean, std, r, context); CURAND_CHECK( hiprandGenerateNormal(context->curand_generator(), r, even_n, mean, std)); } template <> void RandGaussian<double, CUDAContext>( const int n, const double mean, const double std, double* r, CUDAContext* context) { const int even_n = HandleOddLengthRandGaussian<double>(n, mean, std, r, context); CURAND_CHECK(hiprandGenerateNormalDouble( context->curand_generator(), r, even_n, mean, std)); } template<> void Dot<float, CUDAContext>( const int n, const float* a, const float* b, float* y, CUDAContext* context) { float result; CUBLAS_CHECK(hipblasSdot(context->cublas_handle(), n, a, 1, b, 1, &result)); context->Copy<float, CPUContext, CUDAContext>(1, &result, y); } template<> void Dot<double, CUDAContext>( const int n, const double* a, const double* b, double* y, CUDAContext* context) { double result; CUBLAS_CHECK(hipblasDdot(context->cublas_handle(), n, a, 1, b, 1, y)); context->Copy<double, CPUContext, CUDAContext>(1, &result, y); } // A previous version of caffe2 used Thrust but it turns out that thrust // reduction has an implicit scratch space allocation and deallocation, which // may interfere with NCCL and create a deadlock. Hence we are using a custom // reduction here. #define SUM_KERNEL_NTHREADS 128 template <typename T> __global__ void SumKernel(const int N, const T* X, T* Y) { const int idx = threadIdx.x; __shared__ T reduction_buffer[SUM_KERNEL_NTHREADS]; reduction_buffer[idx] = 0; // A multilevel reduction. // N -> 128 for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) { reduction_buffer[idx] += X[i]; } __syncthreads(); // 128 -> 32 if (idx < 32) { reduction_buffer[idx] += reduction_buffer[idx + 32] + reduction_buffer[idx + 64] + reduction_buffer[idx + 96]; } __syncthreads(); // 32 -> 1 if (idx == 0) { float tmp = 0; for (int i = 0; i < 32; ++i) { tmp += reduction_buffer[i]; } *Y = tmp; } } #define CAFFE2_MATH_SUM_FUNC(T) \ template<> \ void Sum<T, CUDAContext>(const int N, const T* x, T* y, CUDAContext* context) {\ hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), N, x, y); \ } CAFFE2_MATH_SUM_FUNC(float) CAFFE2_MATH_SUM_FUNC(double) #undef CAFFE2_MATH_SUM_FUNC namespace { template <typename T> __global__ void SelectKernel( const int N, const int D, const T* x, const int* idx, T* y) { CUDA_1D_KERNEL_LOOP(i, N) { y[i] = x[i * D + idx[i]]; } } } // namespace template <> void Select<float, CUDAContext>( const int N, const int D, const float* x, const int* idx, float* y, CUDAContext* context) { hipLaunchKernelGGL(( SelectKernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), N, D, x, idx, y); } namespace { template <typename T> __global__ void ScaleKernel( const int n, const T alpha, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(i, n) { y[i] = x[i] * alpha; } } template <typename T> __global__ void ScaleKernelDeviceAlpha( const int n, const T* alpha, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(i, n) { y[i] = x[i] * (*alpha); } } } // namespace template <> void Scale<float, CUDAContext>( const int n, const float alpha, const float *x, float* y, CUDAContext* context) { hipLaunchKernelGGL(( ScaleKernel<float>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), n, alpha, x, y); } template <> void Scale<double, CUDAContext>( const int n, const double alpha, const double *x, double* y, CUDAContext* context) { hipLaunchKernelGGL(( ScaleKernel<double>), dim3(CAFFE_GET_BLOCKS(n)), 
dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), n, alpha, x, y); } template <> void Scale<float, CUDAContext>( const int n, const float* alpha, const float *x, float* y, CUDAContext* context) { hipLaunchKernelGGL(( ScaleKernelDeviceAlpha<float>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), n, alpha, x, y); } template <> void Scale<double, CUDAContext>( const int n, const double* alpha, const double *x, double* y, CUDAContext* context) { hipLaunchKernelGGL(( ScaleKernelDeviceAlpha<double>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), n, alpha, x, y); } namespace detail { template <> void ScaleDynamic<float, CUDAContext>( const int n, const float alpha, const float* x, float* y, CUDAContext* context) { return math::Scale<float, CUDAContext>(n, alpha, x, y, context); } template <> void ScaleDynamic<double, CUDAContext>( const int n, const double alpha, const double* x, double* y, CUDAContext* context) { return math::Scale<double, CUDAContext>(n, alpha, x, y, context); } } template <> void Axpy<float, CUDAContext>(const int N, const float alpha, const float* X, float* Y, CUDAContext* context) { CUBLAS_CHECK(hipblasSaxpy(context->cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void Axpy<double, CUDAContext>( const int N, const double alpha, const double* X, double* Y, CUDAContext* context) { CUBLAS_CHECK(hipblasDaxpy(context->cublas_handle(), N, &alpha, X, 1, Y, 1)); } namespace { template <typename T> __global__ void AxpyKernel(const int n, const T* a, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(index, n) { y[index] += x[index] * (*a); } } } // namespace template <> void Axpy<float, CUDAContext>( const int n, const float* alpha, const float* X, float* Y, CUDAContext* context) { hipLaunchKernelGGL(( AxpyKernel<float>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), n, alpha, X, Y); } template <> void Axpy<double, CUDAContext>( const int n, const double* alpha, const double* X, double* Y, CUDAContext* context) { hipLaunchKernelGGL(( AxpyKernel<double>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), n, alpha, X, Y); } namespace { template <typename T> __global__ void AxpbyKernel(const int n, const T a, const T* x, const T b, T* y) { CUDA_1D_KERNEL_LOOP(index, n) { y[index] = x[index] * a + y[index] * b; } } } // namespace template <> void Axpby<float, CUDAContext>( const int n, const float a, const float* x, const float b, float* y, CUDAContext* context) { hipLaunchKernelGGL(( AxpbyKernel<float>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), n, a, x, b, y); } template <> void Axpby<double, CUDAContext>( const int n, const double a, const double* x, const double b, double* y, CUDAContext* context) { hipLaunchKernelGGL(( AxpbyKernel<double>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), n, a, x, b, y); } namespace { template <typename T> __global__ void im2col_gpu_kernel_nchw(const int n, const T* data_im, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int stride_h, const int stride_w, const int height_col, const int width_col, T* data_col) { CUDA_1D_KERNEL_LOOP(index, n) { int w_out = index % width_col; int h_index = index / width_col; int h_out = h_index % height_col; int channel_in = h_index / height_col; int channel_out = channel_in * kernel_h 
* kernel_w; int h_in = h_out * stride_h - pad_t; int w_in = w_out * stride_w - pad_l; T* data_col_ptr = data_col; data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; const T* data_im_ptr = data_im; data_im_ptr += (channel_in * height + h_in) * width + w_in; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { int h = h_in + i * dilation_h; int w = w_in + j * dilation_w; *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? data_im_ptr[i * dilation_h * width + j * dilation_w] : 0; data_col_ptr += height_col * width_col; } } } } template <typename T> __global__ void im2col_gpu_kernel_nhwc(const int n, const T* data_im, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int stride_h, const int stride_w, const int width_col, const int channels, T* data_col) { const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; CUDA_1D_KERNEL_LOOP(index, n) { int channel_in = index % channels; int w_out = index / channels % width_col; int h_out = index / channels / width_col; int h_in = h_out * stride_h - pad_t; int w_in = w_out * stride_w - pad_l; T* local_data_col = data_col + ((h_out * width_col) + w_out) * channels * kernel_h * kernel_w + channel_in; for (int i = 0; i < dkernel_h; i += dilation_h) { int h = h_in + i; for (int j = 0; j < dkernel_w; j += dilation_w) { int w = w_in + j; *local_data_col = (h >= 0 && w >= 0 && h < height && w < width) ? data_im[(h * width + w) * channels + channel_in] : 0; local_data_col += channels; } } } } template <typename T> __global__ void col2im_gpu_kernel_nchw(const int n, const T* data_col, const int height, const int width, const int patch_h, const int patch_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int stride_h, const int stride_w, const int height_col, const int width_col, T* data_im) { const int dpatch_h = dilation_h * (patch_h - 1) + 1; const int dpatch_w = dilation_w * (patch_w - 1) + 1; CUDA_1D_KERNEL_LOOP(index, n) { T val = 0; int w = index % width + pad_l; int h = (index / width) % height + pad_t; int c = index / (width * height); // compute the start and end of the output int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1; int w_col_end = min(w / stride_w + 1, width_col); int h_col_start = (h < dpatch_h) ? 
0 : (h - dpatch_h) / stride_h + 1; int h_col_end = min(h / stride_h + 1, height_col); for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { int h_k = (h - h_col * stride_h); int w_k = (w - w_col * stride_w); if (h_k % dilation_h == 0 && w_k % dilation_w == 0) { h_k /= dilation_h; w_k /= dilation_w; int data_col_index = (((c * patch_h + h_k) * patch_w + w_k) * height_col + h_col) * width_col + w_col; val += data_col[data_col_index]; } } } data_im[index] = val; } } template <typename T> __global__ void col2im_gpu_kernel_nhwc(const int n, const T* data_col, const int width, const int channels, const int patch_h, const int patch_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int stride_h, const int stride_w, const int height_col, const int width_col, T* data_im) { const int dpatch_h = dilation_h * (patch_h - 1) + 1; const int dpatch_w = dilation_w * (patch_w - 1) + 1; CUDA_1D_KERNEL_LOOP(index, n) { T val = 0; int c = index % channels; int w = index / channels % width + pad_l; int h = index / channels / width + pad_t; // compute the start and end of the output int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1; int w_col_end = min(w / stride_w + 1, width_col); int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1; int h_col_end = min(h / stride_h + 1, height_col); int channels_col = patch_h * patch_w * channels; for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { int h_k = h - h_col * stride_h; int w_k = w - w_col * stride_w; if (h_k % dilation_h == 0 && w_k % dilation_w == 0) { h_k /= dilation_h; w_k /= dilation_w; int c_col = (h_k * patch_w + w_k) * channels + c; val += data_col[(h_col * width_col + w_col) * channels_col + c_col]; } } } data_im[index] = val; } } } // namespace template <> void Im2col<float, CUDAContext, StorageOrder::NCHW>( const float* data_im, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int stride_h, const int stride_w, float* data_col, CUDAContext* context) { const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. 
int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1; int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1; int num_kernels = channels * height_col * width_col; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( im2col_gpu_kernel_nchw<float>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), num_kernels, data_im, height, width, kernel_h, kernel_w, dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w, height_col, width_col, data_col); } template <> void Im2col<float, CUDAContext, StorageOrder::NHWC>( const float* data_im, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int stride_h, const int stride_w, float* data_col, CUDAContext* context) { const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; // We are going to launch height_col * width_col * channels kernels, each // kernel responsible for copying a single-channel grid. int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1; int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1; int num_kernels = height_col * width_col * channels; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( im2col_gpu_kernel_nhwc<float>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), num_kernels, data_im, height, width, kernel_h, kernel_w, dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w, width_col, channels, data_col); } template <> void Col2im<float, CUDAContext, StorageOrder::NCHW>( const float* data_col, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int stride_h, const int stride_w, float* data_im, CUDAContext* context) { const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1; int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1; int num_kernels = channels * height * width; // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. 
hipLaunchKernelGGL(( col2im_gpu_kernel_nchw<float>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), num_kernels, data_col, height, width, kernel_h, kernel_w, dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w, height_col, width_col, data_im); } template <> void Col2im<float, CUDAContext, StorageOrder::NHWC>( const float* data_col, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int stride_h, const int stride_w, float* data_im, CUDAContext* context) { const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1; int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1; int num_kernels = height * width * channels; // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. hipLaunchKernelGGL(( col2im_gpu_kernel_nhwc<float>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), num_kernels, data_col, width, channels, kernel_h, kernel_w, dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w, height_col, width_col, data_im); } template <> void CopyMatrix<CUDAContext>( const size_t itemsize, const int M, const int N, const void* A, const int lda, void* B, const int ldb, CUDAContext* context) { hipMemcpy2DAsync(B, ldb * itemsize, A, lda * itemsize, N * itemsize, M, hipMemcpyDeviceToDevice, context->cuda_stream()); } } // namespace math } // namespace caffe2
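// --- Illustrative sketch (not part of the dataset files above or below) ---
// A minimal, self-contained example of the kernel-launch translation this hip/cuda pair
// demonstrates: hipify rewrites the CUDA triple-chevron launch into
// hipLaunchKernelGGL(kernel, dim3(grid), dim3(block), sharedMemBytes, stream, args...).
// The kernel, sizes, and values below are hypothetical stand-ins, not taken from the files.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void FillKernel(const int n, const float value, float* out) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = value;
}

int main() {
  const int n = 1024;
  float* d_out = nullptr;
  cudaMalloc((void**)&d_out, n * sizeof(float));

  const int threads = 256;
  const int blocks = (n + threads - 1) / threads;

  // CUDA form (as used in the .cu file of this pair):
  FillKernel<<<blocks, threads, 0, 0>>>(n, 1.0f, d_out);
  // Equivalent HIP form produced by hipify (as used in the .hip file of this pair):
  //   hipLaunchKernelGGL(FillKernel, dim3(blocks), dim3(threads), 0, 0, n, 1.0f, d_out);

  cudaDeviceSynchronize();
  float h0 = 0.0f;
  cudaMemcpy(&h0, d_out, sizeof(float), cudaMemcpyDeviceToHost);
  printf("out[0] = %f\n", h0);  // expected: 1.000000
  cudaFree(d_out);
  return 0;
}
// --- end sketch ---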
5ac20248d1a45e589645af5a6b801180ea098e4f.cu
// Implementes the math functions for CPU. #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include <thrust/system/cuda/detail/par.h> #include <thrust/version.h> #include "caffe2/utils/math.h" #include "caffe2/core/context_gpu.h" #if THRUST_VERSION >= 100800 #define THRUST_SUPPORTS_PER_THREAD #endif // THRUST_VERSION >= 100800 namespace caffe2 { namespace math { #define DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(T, Funcname, function) \ __global__ \ void _Kernel_##T##_##Funcname(const int N, const T* x, T* y) { \ CUDA_1D_KERNEL_LOOP(i, N) { \ y[i] = function(x[i]); \ } \ } \ template <> \ void Funcname<T, CUDAContext>( \ const int N, const T* x, T* y, \ CUDAContext* context) { \ _Kernel_##T##_##Funcname<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, \ 0, context->cuda_stream()>>>( \ N, x, y); \ } DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Exp, expf); DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Exp, exp); DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log, logf); DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Log, log); __device__ float cuda_sqrf(const float x) { return x * x; } __device__ double cuda_sqr(const double x) { return x * x; } DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqr, cuda_sqrf); DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Sqr, cuda_sqr); #undef DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION #define DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(T, Funcname, expr) \ __global__ void _Kernel_##T##_##Funcname( \ const int N, const T* a, const T* b, T* y) { \ CUDA_1D_KERNEL_LOOP(i, N) { \ y[i] = a[i] expr b[i]; \ } \ } \ template <> \ void Funcname<T, CUDAContext>( \ const int N, const T* a, const T* b, T* y, CUDAContext* context) { \ _Kernel_##T##_##Funcname<<< \ CAFFE_GET_BLOCKS(N), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>(N, a, b, y); \ } DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Add, +); DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Sub, -); DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Mul, *); DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Div, /); // Caffe2 gemm provides a simpler interface to the gemm functions, with the // limitation that the data has to be contiguous in memory. template <> void Gemm<float, CUDAContext>( const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C, CUDAContext* context) { // Note that cublas follows fortran order, so the order is different from // the cblas convention. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasSgemm(context->cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void GemmEx<float, CUDAContext>( const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const int lda, const float* B, const int ldb, const float beta, float* C, const int ldc, CUDAContext* context) { // Note that cublas follows fortran order, so the order is different from // the cblas convention. cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? 
CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasSgemm(context->cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, ldc)); } template <> void Gemv<float, CUDAContext>( const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y, CUDAContext* context) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasSgemv(context->cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } namespace { template <typename T> __global__ void SetKernel(const int N, const T alpha, T* Y) { CUDA_1D_KERNEL_LOOP(i, N) { Y[i] = alpha; } } } // namespace #define CAFFE2_SPECIALIZED_CUDA_SET(T) \ template <> \ void Set<T, CUDAContext>(const TIndex N, const T alpha, T *Y, \ CUDAContext* context) { \ SetKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, \ 0, context->cuda_stream()>>>(N, alpha, Y); \ } CAFFE2_SPECIALIZED_CUDA_SET(float); CAFFE2_SPECIALIZED_CUDA_SET(double); CAFFE2_SPECIALIZED_CUDA_SET(int); CAFFE2_SPECIALIZED_CUDA_SET(int64_t); CAFFE2_SPECIALIZED_CUDA_SET(bool); CAFFE2_SPECIALIZED_CUDA_SET(char); CAFFE2_SPECIALIZED_CUDA_SET(uint8_t); #undef CAFFE2_SPECIALIZED_CUDA_SET namespace { template <typename T> __global__ void UniformShift(const int N, const T min, const T max, T* x) { T scale = max - min; CUDA_1D_KERNEL_LOOP(i, N) { x[i] = x[i] * scale + min; } } __global__ void UniformIntFit(const int N, const int min, const int max, unsigned int* x) { int* x_int = reinterpret_cast<int*>(x); int range = (max - min + 1); CUDA_1D_KERNEL_LOOP(i, N) { x_int[i] = min + static_cast<int>(x[i] % range); } } } // namespace template <> void RandUniform<float, CUDAContext>( const int n, const float min, const float max, float* r, CUDAContext* context) { CURAND_CHECK(curandGenerateUniform(context->curand_generator(), r, n)); UniformShift<float><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(n, min, max, r); } template <> void RandUniform<double, CUDAContext>( const int n, const double min, const double max, double* r, CUDAContext* context) { CURAND_CHECK(curandGenerateUniformDouble(context->curand_generator(), r, n)); UniformShift<double><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(n, min, max, r); } template <> void RandUniform<int, CUDAContext>( const int n, const int min, const int max, int* r, CUDAContext* context) { CURAND_CHECK(curandGenerate(context->curand_generator(), reinterpret_cast<unsigned int*>(r), n)); UniformIntFit<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( n, min, max, reinterpret_cast<unsigned int*>(r)); } template <typename T> int HandleOddLengthRandGaussian( const int n, const T mean, const T std, T* r, CUDAContext* context) { if (n % 2 == 1) { std::default_random_engine generator; std::normal_distribution<T> distribution(mean, std); const T random_value = distribution(generator); math::Set<T, CUDAContext>( 1, random_value, r + sizeof(T) * (n - 1), context); return n - 1; } return n; } template <> void RandGaussian<float, CUDAContext>( const int n, const float mean, const float std, float* r, CUDAContext* context) { // If n is odd, we add a random Gaussian value at the end manually // and generate n-1 random values using curandGenerateNormal. // curandGenerateNormal requires n to be even. 
const int even_n = HandleOddLengthRandGaussian<float>(n, mean, std, r, context); CURAND_CHECK( curandGenerateNormal(context->curand_generator(), r, even_n, mean, std)); } template <> void RandGaussian<double, CUDAContext>( const int n, const double mean, const double std, double* r, CUDAContext* context) { const int even_n = HandleOddLengthRandGaussian<double>(n, mean, std, r, context); CURAND_CHECK(curandGenerateNormalDouble( context->curand_generator(), r, even_n, mean, std)); } template<> void Dot<float, CUDAContext>( const int n, const float* a, const float* b, float* y, CUDAContext* context) { float result; CUBLAS_CHECK(cublasSdot(context->cublas_handle(), n, a, 1, b, 1, &result)); context->Copy<float, CPUContext, CUDAContext>(1, &result, y); } template<> void Dot<double, CUDAContext>( const int n, const double* a, const double* b, double* y, CUDAContext* context) { double result; CUBLAS_CHECK(cublasDdot(context->cublas_handle(), n, a, 1, b, 1, y)); context->Copy<double, CPUContext, CUDAContext>(1, &result, y); } // A previous version of caffe2 used Thrust but it turns out that thrust // reduction has an implicit scratch space allocation and deallocation, which // may interfere with NCCL and create a deadlock. Hence we are using a custom // reduction here. #define SUM_KERNEL_NTHREADS 128 template <typename T> __global__ void SumKernel(const int N, const T* X, T* Y) { const int idx = threadIdx.x; __shared__ T reduction_buffer[SUM_KERNEL_NTHREADS]; reduction_buffer[idx] = 0; // A multilevel reduction. // N -> 128 for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) { reduction_buffer[idx] += X[i]; } __syncthreads(); // 128 -> 32 if (idx < 32) { reduction_buffer[idx] += reduction_buffer[idx + 32] + reduction_buffer[idx + 64] + reduction_buffer[idx + 96]; } __syncthreads(); // 32 -> 1 if (idx == 0) { float tmp = 0; for (int i = 0; i < 32; ++i) { tmp += reduction_buffer[i]; } *Y = tmp; } } #define CAFFE2_MATH_SUM_FUNC(T) \ template<> \ void Sum<T, CUDAContext>(const int N, const T* x, T* y, CUDAContext* context) {\ SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>(N, x, y); \ } CAFFE2_MATH_SUM_FUNC(float) CAFFE2_MATH_SUM_FUNC(double) #undef CAFFE2_MATH_SUM_FUNC namespace { template <typename T> __global__ void SelectKernel( const int N, const int D, const T* x, const int* idx, T* y) { CUDA_1D_KERNEL_LOOP(i, N) { y[i] = x[i * D + idx[i]]; } } } // namespace template <> void Select<float, CUDAContext>( const int N, const int D, const float* x, const int* idx, float* y, CUDAContext* context) { SelectKernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(N, D, x, idx, y); } namespace { template <typename T> __global__ void ScaleKernel( const int n, const T alpha, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(i, n) { y[i] = x[i] * alpha; } } template <typename T> __global__ void ScaleKernelDeviceAlpha( const int n, const T* alpha, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(i, n) { y[i] = x[i] * (*alpha); } } } // namespace template <> void Scale<float, CUDAContext>( const int n, const float alpha, const float *x, float* y, CUDAContext* context) { ScaleKernel<float><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(n, alpha, x, y); } template <> void Scale<double, CUDAContext>( const int n, const double alpha, const double *x, double* y, CUDAContext* context) { ScaleKernel<double><<< CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( n, alpha, x, y); } template <> void Scale<float, CUDAContext>( const int n, 
const float* alpha, const float *x, float* y, CUDAContext* context) { ScaleKernelDeviceAlpha<float><<< CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( n, alpha, x, y); } template <> void Scale<double, CUDAContext>( const int n, const double* alpha, const double *x, double* y, CUDAContext* context) { ScaleKernelDeviceAlpha<double><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(n, alpha, x, y); } namespace detail { template <> void ScaleDynamic<float, CUDAContext>( const int n, const float alpha, const float* x, float* y, CUDAContext* context) { return math::Scale<float, CUDAContext>(n, alpha, x, y, context); } template <> void ScaleDynamic<double, CUDAContext>( const int n, const double alpha, const double* x, double* y, CUDAContext* context) { return math::Scale<double, CUDAContext>(n, alpha, x, y, context); } } template <> void Axpy<float, CUDAContext>(const int N, const float alpha, const float* X, float* Y, CUDAContext* context) { CUBLAS_CHECK(cublasSaxpy(context->cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void Axpy<double, CUDAContext>( const int N, const double alpha, const double* X, double* Y, CUDAContext* context) { CUBLAS_CHECK(cublasDaxpy(context->cublas_handle(), N, &alpha, X, 1, Y, 1)); } namespace { template <typename T> __global__ void AxpyKernel(const int n, const T* a, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(index, n) { y[index] += x[index] * (*a); } } } // namespace template <> void Axpy<float, CUDAContext>( const int n, const float* alpha, const float* X, float* Y, CUDAContext* context) { AxpyKernel<float><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(n, alpha, X, Y); } template <> void Axpy<double, CUDAContext>( const int n, const double* alpha, const double* X, double* Y, CUDAContext* context) { AxpyKernel<double><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(n, alpha, X, Y); } namespace { template <typename T> __global__ void AxpbyKernel(const int n, const T a, const T* x, const T b, T* y) { CUDA_1D_KERNEL_LOOP(index, n) { y[index] = x[index] * a + y[index] * b; } } } // namespace template <> void Axpby<float, CUDAContext>( const int n, const float a, const float* x, const float b, float* y, CUDAContext* context) { AxpbyKernel<float><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(n, a, x, b, y); } template <> void Axpby<double, CUDAContext>( const int n, const double a, const double* x, const double b, double* y, CUDAContext* context) { AxpbyKernel<double><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(n, a, x, b, y); } namespace { template <typename T> __global__ void im2col_gpu_kernel_nchw(const int n, const T* data_im, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int stride_h, const int stride_w, const int height_col, const int width_col, T* data_col) { CUDA_1D_KERNEL_LOOP(index, n) { int w_out = index % width_col; int h_index = index / width_col; int h_out = h_index % height_col; int channel_in = h_index / height_col; int channel_out = channel_in * kernel_h * kernel_w; int h_in = h_out * stride_h - pad_t; int w_in = w_out * stride_w - pad_l; T* data_col_ptr = data_col; data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; const T* data_im_ptr = data_im; data_im_ptr += (channel_in * height + h_in) * width + w_in; for (int i = 0; i < kernel_h; ++i) { for 
(int j = 0; j < kernel_w; ++j) { int h = h_in + i * dilation_h; int w = w_in + j * dilation_w; *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? data_im_ptr[i * dilation_h * width + j * dilation_w] : 0; data_col_ptr += height_col * width_col; } } } } template <typename T> __global__ void im2col_gpu_kernel_nhwc(const int n, const T* data_im, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int stride_h, const int stride_w, const int width_col, const int channels, T* data_col) { const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; CUDA_1D_KERNEL_LOOP(index, n) { int channel_in = index % channels; int w_out = index / channels % width_col; int h_out = index / channels / width_col; int h_in = h_out * stride_h - pad_t; int w_in = w_out * stride_w - pad_l; T* local_data_col = data_col + ((h_out * width_col) + w_out) * channels * kernel_h * kernel_w + channel_in; for (int i = 0; i < dkernel_h; i += dilation_h) { int h = h_in + i; for (int j = 0; j < dkernel_w; j += dilation_w) { int w = w_in + j; *local_data_col = (h >= 0 && w >= 0 && h < height && w < width) ? data_im[(h * width + w) * channels + channel_in] : 0; local_data_col += channels; } } } } template <typename T> __global__ void col2im_gpu_kernel_nchw(const int n, const T* data_col, const int height, const int width, const int patch_h, const int patch_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int stride_h, const int stride_w, const int height_col, const int width_col, T* data_im) { const int dpatch_h = dilation_h * (patch_h - 1) + 1; const int dpatch_w = dilation_w * (patch_w - 1) + 1; CUDA_1D_KERNEL_LOOP(index, n) { T val = 0; int w = index % width + pad_l; int h = (index / width) % height + pad_t; int c = index / (width * height); // compute the start and end of the output int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1; int w_col_end = min(w / stride_w + 1, width_col); int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1; int h_col_end = min(h / stride_h + 1, height_col); for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { int h_k = (h - h_col * stride_h); int w_k = (w - w_col * stride_w); if (h_k % dilation_h == 0 && w_k % dilation_w == 0) { h_k /= dilation_h; w_k /= dilation_w; int data_col_index = (((c * patch_h + h_k) * patch_w + w_k) * height_col + h_col) * width_col + w_col; val += data_col[data_col_index]; } } } data_im[index] = val; } } template <typename T> __global__ void col2im_gpu_kernel_nhwc(const int n, const T* data_col, const int width, const int channels, const int patch_h, const int patch_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int stride_h, const int stride_w, const int height_col, const int width_col, T* data_im) { const int dpatch_h = dilation_h * (patch_h - 1) + 1; const int dpatch_w = dilation_w * (patch_w - 1) + 1; CUDA_1D_KERNEL_LOOP(index, n) { T val = 0; int c = index % channels; int w = index / channels % width + pad_l; int h = index / channels / width + pad_t; // compute the start and end of the output int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1; int w_col_end = min(w / stride_w + 1, width_col); int h_col_start = (h < dpatch_h) ? 
0 : (h - dpatch_h) / stride_h + 1; int h_col_end = min(h / stride_h + 1, height_col); int channels_col = patch_h * patch_w * channels; for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { int h_k = h - h_col * stride_h; int w_k = w - w_col * stride_w; if (h_k % dilation_h == 0 && w_k % dilation_w == 0) { h_k /= dilation_h; w_k /= dilation_w; int c_col = (h_k * patch_w + w_k) * channels + c; val += data_col[(h_col * width_col + w_col) * channels_col + c_col]; } } } data_im[index] = val; } } } // namespace template <> void Im2col<float, CUDAContext, StorageOrder::NCHW>( const float* data_im, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int stride_h, const int stride_w, float* data_col, CUDAContext* context) { const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1; int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1; int num_kernels = channels * height_col * width_col; // NOLINT_NEXT_LINE(whitespace/operators) im2col_gpu_kernel_nchw<float><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( num_kernels, data_im, height, width, kernel_h, kernel_w, dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w, height_col, width_col, data_col); } template <> void Im2col<float, CUDAContext, StorageOrder::NHWC>( const float* data_im, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int stride_h, const int stride_w, float* data_col, CUDAContext* context) { const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; // We are going to launch height_col * width_col * channels kernels, each // kernel responsible for copying a single-channel grid. 
int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1; int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1; int num_kernels = height_col * width_col * channels; // NOLINT_NEXT_LINE(whitespace/operators) im2col_gpu_kernel_nhwc<float><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( num_kernels, data_im, height, width, kernel_h, kernel_w, dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w, width_col, channels, data_col); } template <> void Col2im<float, CUDAContext, StorageOrder::NCHW>( const float* data_col, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int stride_h, const int stride_w, float* data_im, CUDAContext* context) { const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1; int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1; int num_kernels = channels * height * width; // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. col2im_gpu_kernel_nchw<float><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( num_kernels, data_col, height, width, kernel_h, kernel_w, dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w, height_col, width_col, data_im); } template <> void Col2im<float, CUDAContext, StorageOrder::NHWC>( const float* data_col, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int stride_h, const int stride_w, float* data_im, CUDAContext* context) { const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1; int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1; int num_kernels = height * width * channels; // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. col2im_gpu_kernel_nhwc<float><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( num_kernels, data_col, width, channels, kernel_h, kernel_w, dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w, height_col, width_col, data_im); } template <> void CopyMatrix<CUDAContext>( const size_t itemsize, const int M, const int N, const void* A, const int lda, void* B, const int ldb, CUDAContext* context) { cudaMemcpy2DAsync(B, ldb * itemsize, A, lda * itemsize, N * itemsize, M, cudaMemcpyDeviceToDevice, context->cuda_stream()); } } // namespace math } // namespace caffe2
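// --- Illustrative sketch (not part of the dataset files above or below) ---
// A minimal, self-contained example of the library-API renames visible in this cuda/hip pair:
// cublas* -> hipblas*, curand* -> hiprand*, CUBLAS_OP_N/T -> HIPBLAS_OP_N/T,
// cudaMemcpy2DAsync -> hipMemcpy2DAsync. Sizes and the RNG seed below are hypothetical,
// and error checking is omitted for brevity.
#include <cublas_v2.h>
#include <curand.h>
#include <cuda_runtime.h>
#include <cstdio>

int main() {
  const int n = 8;
  float *d_a = nullptr, *d_b = nullptr;
  cudaMalloc((void**)&d_a, n * sizeof(float));
  cudaMalloc((void**)&d_b, n * sizeof(float));

  // curandGenerateUniform here corresponds to hiprandGenerateUniform in the .hip version.
  curandGenerator_t gen;
  curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
  curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
  curandGenerateUniform(gen, d_a, n);
  curandGenerateUniform(gen, d_b, n);

  // cublasSdot here corresponds to hipblasSdot in the .hip version; with the default
  // host pointer mode, the result is written to the host variable below.
  cublasHandle_t handle;
  cublasCreate(&handle);
  float result = 0.0f;
  cublasSdot(handle, n, d_a, 1, d_b, 1, &result);
  printf("dot = %f\n", result);

  cublasDestroy(handle);
  curandDestroyGenerator(gen);
  cudaFree(d_a);
  cudaFree(d_b);
  return 0;
}
// --- end sketch ---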
a8d3eed3883a852a6e7844ecbf28e505133d9c44.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

using namespace std;

//Check for edges valid to be part of augmented path
//Update frontier
__global__ void k2(const int N, bool* visited, int* frontier, bool* new_frontier)
{
    int count = 0;
    for(int i=0;i<N;i++)
    {
        if(new_frontier[i])
        {
            new_frontier[i] = false;
            frontier[++count] = i;
            visited[i] = true;
        }
    }
    frontier[0] = count;
}
a8d3eed3883a852a6e7844ecbf28e505133d9c44.cu
#include "includes.h"

using namespace std;

//Check for edges valid to be part of augmented path
//Update frontier
__global__ void k2(const int N, bool* visited, int* frontier, bool* new_frontier)
{
    int count = 0;
    for(int i=0;i<N;i++)
    {
        if(new_frontier[i])
        {
            new_frontier[i] = false;
            frontier[++count] = i;
            visited[i] = true;
        }
    }
    frontier[0] = count;
}
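// --- Illustrative sketch (not part of the dataset files above or below) ---
// A minimal host-side usage example for the k2 frontier-compaction kernel above.
// k2 does its work serially (no thread indexing), so it is launched with a single thread;
// the BFS-style setup (N, the contents of new_frontier, the frontier sizing) is hypothetical.
#include <cuda_runtime.h>
#include <cstdio>

// Kernel restated from the file above so this sketch compiles standalone.
__global__ void k2(const int N, bool* visited, int* frontier, bool* new_frontier) {
  int count = 0;
  for (int i = 0; i < N; i++) {
    if (new_frontier[i]) {
      new_frontier[i] = false;
      frontier[++count] = i;   // frontier[1..count] holds the frontier vertices
      visited[i] = true;
    }
  }
  frontier[0] = count;         // frontier[0] holds the frontier size
}

int main() {
  const int N = 8;
  bool h_new_frontier[N] = {false, true, false, true, true, false, false, true};

  bool *d_visited = nullptr, *d_new_frontier = nullptr;
  int* d_frontier = nullptr;
  cudaMalloc((void**)&d_visited, N * sizeof(bool));
  cudaMalloc((void**)&d_new_frontier, N * sizeof(bool));
  cudaMalloc((void**)&d_frontier, (N + 1) * sizeof(int));  // +1 slot for the count at index 0
  cudaMemset(d_visited, 0, N * sizeof(bool));
  cudaMemcpy(d_new_frontier, h_new_frontier, N * sizeof(bool), cudaMemcpyHostToDevice);

  k2<<<1, 1>>>(N, d_visited, d_frontier, d_new_frontier);
  cudaDeviceSynchronize();

  int h_frontier[N + 1];
  cudaMemcpy(h_frontier, d_frontier, (N + 1) * sizeof(int), cudaMemcpyDeviceToHost);
  printf("frontier size = %d\n", h_frontier[0]);
  for (int i = 1; i <= h_frontier[0]; i++) printf("  vertex %d\n", h_frontier[i]);

  cudaFree(d_visited);
  cudaFree(d_new_frontier);
  cudaFree(d_frontier);
  return 0;
}
// --- end sketch ---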
c989e7c9b43bfd60d99eb41655a5629a23ad1afb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <fstream> #include <string> #include <cfloat> #include <ctime> #include <limits> #include <algorithm> #include <stack> #include <queue> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <device_launch_parameters.h> #include "Camera.cuh" #include "Scene.cuh" #include "Node.cuh" #include "filters.hh" #define STB_IMAGE_STATIC #include "stb_image.h" #define STB_IMAGE_WRITE_IMPLEMENTATION #include "stb_image_write.h" #define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ ) #define cuRandom (hiprand_uniform(&local_random)) void error(const char *message) { std::cout << message << std::endl; exit(0); } void format() { std::cout << "File format for scene." << std::endl; std::cout << "\t # Comment, skip line." << std::endl; std::cout << "Spheres -> type center material" << std::endl; std::cout << "\t 1 Indicates that the 3D model is a Sphere object." << std::endl; std::cout << "\t Center The center of the Sphere." << std::endl; std::cout << "\t Radius The radius of the Sphere." << std::endl; std::cout << "\t Material -> type albedo [fuzz] [ref_idx]" << std::endl; std::cout << "\t\t 0 LAMBERTIAN" << std::endl; std::cout << "\t\t 1 METAL" << std::endl; std::cout << "\t\t 2 DIELECTRIC" << std::endl; std::cout << "\t\t 3 DIFFUSE LIGHT" << std::endl; std::cout << "\t\t albedo Defines the color." << std::endl; std::cout << "\t\t fuzz Only for METAL." << std::endl; std::cout << "\t\t ref_idx Only for DIELECTRIC." << std::endl; std::cout << "Examples of declaration:\n" << std::endl; std::cout << "# my scene" << std::endl; std::cout << "Object Center Rad Material Albedo Fuzz/ref_idx" << std::endl; std::cout << "1 0 1 0 2 1 0.5 0.78 0.9 " << std::endl; std::cout << "1 0 4 0 2 2 1 0 0.9 2 " << std::endl; std::cout << "1 1 4 1 2 3 0.9 0.9 0.9 1.5 " << std::endl; } void help(){ std::cout << "\n" << std::endl; std::cout << "\t[-d] [--defult] Set the parameters to default values" << std::endl; std::cout << "\t size: (1280x720) | AAit: 50 | depth: 50 | spheres: 11 | nthreads: 32" << std::endl; std::cout << "\t[-sizeX] Size in pixels of coordinate X. Number greater than 0." << std::endl; std::cout << "\t[-sizeY] Size in pixels of coordinate Y. Number greater than 0." << std::endl; std::cout << "\t[-AAit] Number of iterations to calculate color in one pixel. Number greater than 0." << std::endl; std::cout << "\t[-depth] The attenuation of scattered ray. Number greater than 0." << std::endl; std::cout << "\t[-spheres] Factor number to calculate the number of spheres in the scene. Number greater than 0." << std::endl; std::cout << "\t[-light] Turn on/off the ambient light. Values can be ON/OFF" << std::endl; std::cout << "\t[-nthreads] Number of threads to use" << std::endl; std::cout << "\t[-nGPUs] Number of GPUs to distribute the work" << std::endl; std::cout << "\t[-i][--image] File name of pic generated." << std::endl; std::cout << "\t[-f][--file] File name of the scene." << std::endl; std::cout << "\t[-h][--help] Show help." 
<< std::endl; std::cout << "\t #spheres = (2*spheres)*(2*spheres) + 4" << std::endl; std::cout << "\n" << std::endl; std::cout << "Examples of usage:" << std::endl; std::cout << "./path_tracing_NGPUs -d" << std::endl; std::cout << "./path_tracing_NGPUs -nthreads 16 -sizeX 2000"<< std::endl; format(); exit(1); } void parse_argv(int argc, char **argv, int &nx, int &ny, int &ns, int &depth, int &dist, std::string &image, std::string &filename, bool &light, bool &random, bool &filter, int &diameterBi, float &gs, float &gr, int &diameterMean, int &diameterMedian, bool &skybox, bool &oneTex, int &nthreads, int &numGPUs, const int count){ if(argc <= 1) error("Error usage. Use [-h] [--help] to see the usage."); nx = 1280; ny = 720; ns = 50; depth = 50; dist = 11; image = "image"; filter = false; gs = 0; gr = 0; diameterBi = 11; diameterMean = 3; diameterMedian = 3; skybox = false; oneTex = false; light = true; random = true; bool imageName = false; nthreads = 32; numGPUs = 1; bool v_default = false; for(int i = 1; i < argc; i += 2){ if(v_default) error("Error usage. Use [-h] [--help] to see the usage."); if (std::string(argv[i]) == "-d" || std::string(argv[i]) == "--default"){ if((i+1) < argc) error("The default parameter cannot have more arguments."); std::cerr << "Default\n"; v_default = true; } else if (std::string(argv[i]) == "-sizeX"){ if((i+1) >= argc) error("-sizeX value expected"); nx = atoi(argv[i+1]); if(nx == 0) error("-sizeX value expected or cannot be 0"); } else if(std::string(argv[i]) == "-sizeY"){ if((i+1) >= argc) error("-sizeY value expected"); ny = atoi(argv[i+1]); if(ny == 0) error("-sizeY value expected or cannot be 0"); } else if(std::string(argv[i]) == "-AAit"){ if((i+1) >= argc) error("-AAit value expected"); ns = atoi(argv[i+1]); if(ns == 0) error("-AAit value expected or cannot be 0"); } else if(std::string(argv[i]) == "-depth"){ if((i+1) >= argc) error("-depth value expected"); depth = atoi(argv[i+1]); if(depth == 0) error("-depth value expected or cannot be 0"); } else if(std::string(argv[i]) == "-i" || std::string(argv[i]) == "--image"){ if((i+1) >= argc) error("--image / -i file expected"); filename = std::string(argv[i+1]); imageName = true; } else if(std::string(argv[i]) == "-f" || std::string(argv[i]) == "--file"){ if((i+1) >= argc) error("-name file expected"); filename = std::string(argv[i+1]); if(!imageName) image = filename; filename = filename+".txt"; random = false; } else if(std::string(argv[i]) == "-light") { if((i+1) >= argc) error("-light value expected"); if(std::string(argv[i+1]) == "ON") light = true; else if(std::string(argv[i+1]) == "OFF") light = false; } else if (std::string(argv[i]) == "-filter") { filter = true; diameterBi = atoi(argv[i+1]); i += 2; gs = atof(argv[i]); gr = atof(argv[i+1]); i+=2; diameterMean = atoi(argv[i]); diameterMedian = atoi(argv[i+1]); } else if(std::string(argv[i]) == "-skybox") { if((i+1) >= argc) error("-skybox value expected"); if(std::string(argv[i+1]) == "ON") skybox = true; else if(std::string(argv[i+1]) == "OFF") skybox = false; } else if(std::string(argv[i]) == "-oneTex") { if((i+1) >= argc) error("-oneTex value expected"); if(std::string(argv[i+1]) == "ON") oneTex = true; else if(std::string(argv[i+1]) == "OFF") oneTex = false; } else if(std::string(argv[i]) == "-nGPUs"){ if((i+1) >= argc) error("-nGPUs value expected"); numGPUs = atoi(argv[i+1]); if(numGPUs == 0) error("-nGPUs value expected or cannot be 0"); numGPUs = ::min(numGPUs, count); } else if(std::string(argv[i]) == "-nthreads"){ if((i+1) >= argc) 
error("-nthreads value expected"); nthreads = atoi(argv[i+1]); if(nthreads == 0) error("-nthreads value expected or cannot be 0"); } else if(std::string(argv[i]) == "-h" || std::string(argv[i]) == "--help" ){ help(); } else{ error("Error usage. Use [-h] [--help] to see the usage."); } } if(!light) image = image+"_noktem"; image = image+".png"; } void check_cuda(hipError_t result, char const *const func, const char *const file, int const line){ if(result){ std::cout << "CUDA error = " << static_cast<unsigned int>(result) << " at " << file << ":" << line << " '" << func << std::endl; std::cout << hipGetErrorString(result) << std::endl; hipDeviceReset(); exit(99); } } void properties(int numGPUs){ std::cout << "GPU Info " << std::endl; int device; for(int i = 0; i < numGPUs; i++){ hipSetDevice(i); checkCudaErrors( hipDeviceSetLimit( hipLimitMallocHeapSize, 67108864 ) ); checkCudaErrors( hipDeviceSetLimit( hipLimitStackSize, 131072 ) ); } hipSetDevice(0); hipGetDevice(&device); hipDeviceProp_t properties; checkCudaErrors( hipGetDeviceProperties( &properties, device ) ); hipGetDevice(&device); size_t limit1; checkCudaErrors( hipDeviceGetLimit( &limit1, hipLimitMallocHeapSize ) ); size_t limit2; checkCudaErrors( hipDeviceGetLimit( &limit2, hipLimitStackSize ) ); if( properties.major > 3 || ( properties.major == 3 && properties.minor >= 5 ) ) { std::cout << "Running on GPU " << device << " (" << properties.name << ")" << std::endl; std::cout << "Compute mode: " << properties.computeMode << std::endl; std::cout << "Concurrent Kernels: " << properties.concurrentKernels << std::endl; std::cout << "Warp size: " << properties.warpSize << std::endl; std::cout << "Major: " << properties.major << " Minor: " << properties.minor << std::endl; std::cout << "Cuda limit heap size: " << limit1 << std::endl; std::cout << "Cuda limit stack size: " << limit2 << "\n\n" << std::endl; } else std::cout << "GPU " << device << " (" << properties.name << ") does not support CUDA Dynamic Parallelism" << std::endl; } __device__ Vector3 color(const Ray& ray, Node *world, int depth, bool light, bool skybox, hiprandState_t *random, Skybox *sky, bool oneTex, unsigned char **d_textures){ Ray cur_ray = ray; Vector3 cur_attenuation = Vector3::One(); for(int i = 0; i < depth; i++){ hit_record rec; if( world->intersect(cur_ray, 0.00001, FLT_MAX, rec) ) { Ray scattered; Vector3 attenuation; Vector3 emitted = rec.mat_ptr.emitted(rec.u, rec.v, oneTex, d_textures); if(rec.mat_ptr.scatter(cur_ray, rec, attenuation, scattered, random, oneTex, d_textures)){ cur_attenuation *= attenuation; cur_attenuation += emitted; cur_ray = scattered; } else return cur_attenuation * emitted; } else { if(skybox && sky->hit(cur_ray, 0.00001, FLT_MAX, rec)){ return cur_attenuation * rec.mat_ptr.emitted(rec.u, rec.v, oneTex, d_textures); } else { if(light) { Vector3 unit_direction = unit_vector(cur_ray.direction()); float t = 0.5*(unit_direction.y() + 1.0); Vector3 c = (1.0 - t)*Vector3::One() + t*Vector3(0.5, 0.7, 1.0); return cur_attenuation * c; } else return Vector3::Zero(); } } } return Vector3::Zero(); } __device__ int LongestCommonPrefix(int i, int j, int numObjects, Triangle *d_list) { if(i < 0 or i > numObjects - 1 or j < 0 or j > numObjects - 1) return -1; int codeI = d_list[i].getMorton(); int codeJ = d_list[j].getMorton(); if(i == j) { printf("Equals Longest\n"); return __clz(codeI ^ codeJ); } else return __clz(codeI ^ codeJ); } __device__ int findSplit(Triangle *d_list, int first, int last) { if(first == last){ return -1; } int firstCode = 
d_list[first].getMorton(); int lastCode = d_list[last].getMorton(); int commonPrefix = __clz(firstCode ^ lastCode); int split = first; int step = last - first; do { step = (step + 1 ) >> 1; int newSplit = split + step; if(newSplit < last){ int splitCode = d_list[newSplit].getMorton(); int splitPrefix = __clz(firstCode ^ splitCode); if(splitPrefix > commonPrefix){ split = newSplit; } } } while (step > 1); return split; } __device__ int2 determineRange(Triangle *d_list, int idx, int objs) { int d = LongestCommonPrefix(idx, idx + 1, objs, d_list) - LongestCommonPrefix(idx, idx - 1, objs, d_list) >= 0 ? 1 : -1; int dmin = LongestCommonPrefix(idx, idx - d, objs, d_list); int lmax = 2; while(LongestCommonPrefix(idx, idx + lmax*d, objs, d_list) > dmin){ lmax <<=1; } int l = 0; int div = 2; for(int t = lmax/div; t >= 1; t >>= 1) { if(LongestCommonPrefix(idx, idx + (l + t) * d, objs, d_list) > dmin) l += t; } int jdx = idx + l * d; if(jdx < idx) return make_int2(jdx,idx); else return make_int2(idx,jdx); } __global__ void setupCamera(Camera **d_cam, int nx, int ny, Camera cam) { if (threadIdx.x == 0 && blockIdx.x == 0) { *d_cam = new Camera(cam.getLookfrom(), cam.getLookat(), cam.getVUP(), cam.getFOV(), float(nx)/float(ny), cam.getAperture(), cam.getFocus(),0.0,0.1); } } __global__ void render_init(int max_x, int max_y, hiprandState_t *rand_state,unsigned long long seed, int minY, int maxY) { int num = blockIdx.x*blockDim.x + threadIdx.x; int i = num%max_x; int j = num/max_x + minY; if( (i >= max_x) || (j >= max_y) ) return; int pixel_index = num; hiprand_init((seed << 20) + pixel_index, 0, 0, &rand_state[pixel_index]); } __global__ void initLeafNodes(Node *leafNodes, int objs, Triangle *d_list) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if(idx >= objs) return; leafNodes[idx].obj = &d_list[idx]; leafNodes[idx].box = d_list[idx].getBox(); } __global__ void boundingBoxBVH(Node *d_internalNodes, Node *d_leafNodes, int objs, int *nodeCounter) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if(idx >= objs) return; Node *leaf = d_leafNodes + idx; Node* current = leaf->parent; int currentIdx = current - d_internalNodes; int res = atomicAdd(nodeCounter + currentIdx, 1); while (true) { if(res == 0) return; aabb leftBoundingBox = current->left->box; aabb rightBoundingBox = current->right->box; current->box = surrounding_box(leftBoundingBox, rightBoundingBox); if (current == d_internalNodes) { return; } current = current->parent; currentIdx = current - d_internalNodes; res = atomicAdd(nodeCounter + currentIdx, 1); } } __global__ void constructBVH(Node *d_internalNodes, Node *leafNodes, int objs, Triangle *d_list) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if(idx >= objs) return; int2 range = determineRange(d_list, idx, objs+1); int first = range.x; int last = range.y; int split = findSplit(d_list, first, last); if(split == -1){ split = (first+last) >> 1; ++last; } Node *current = d_internalNodes + idx; if(split == first) { current->left = leafNodes + split; current->left->isLeaf = true; current->left->isLeft = true; (leafNodes + split)->parent = current; } else{ current->left = d_internalNodes + split; current->left->isLeft = true; (d_internalNodes + split)->parent = current; } if (split + 1 == last) { current->right = leafNodes + split + 1; current->right->isLeaf = true; current->right->isRight = true; (leafNodes + split + 1)->parent = current; } else{ current->right = d_internalNodes + split + 1; current->right->isRight = true; (d_internalNodes + split + 1)->parent = current; } } __global__ void 
render(Vector3 *fb, int max_x, int max_y, int ns, Camera **cam, Node *world, hiprandState_t *d_rand_state, int depth, bool light, bool skybox, Skybox *sky, bool oneTex, unsigned char ** d_textures, int minY, int maxY) { int num = blockIdx.x*blockDim.x + threadIdx.x; int i = num%max_x; int j = num/max_x + minY; hiprandState_t local_random; int pixel_index = num; local_random = d_rand_state[pixel_index]; Vector3 col(0,0,0); for(int s = 0; s < ns; s++){ float u = float(i + cuRandom) / float(max_x); float v = float(j + cuRandom) / float(max_y); Ray r = (*cam)->get_ray(u, v, &local_random); col += color(r, world, depth, light, skybox, &local_random, sky, oneTex, d_textures); } d_rand_state[pixel_index] = local_random; col /= float(ns); col[0] = sqrt(col[0]); col[1] = sqrt(col[1]); col[2] = sqrt(col[2]); fb[pixel_index] = col; } __global__ void checkBVH(Node *d_internalNodes, Node *d_leaves, int objs){ if (threadIdx.x == 0 && blockIdx.x == 0){ printf("Checking BVH...\n"); for(int i = 0; i < objs; i++){ if(!d_leaves[i].parent){ printf("Leaf without parent %d\n",i); } } for(int i = 0; i < objs-1; i++){ if(!d_internalNodes[i].left){ printf("Internal without left %d\n",i); } if(!d_internalNodes[i].right){ printf("Internal without right %d\n",i); } if(!d_internalNodes[i].parent){ printf("Internal without parent %d\n",i); } } printf("BVH checked!\n"); } } int main(int argc, char **argv) { hipDeviceReset(); float totalTime; int nx, ny, ns, depth, dist, diameterBi, diameterMean, diameterMedian, nthreads, numGPUs; bool light, random, filter, skybox, oneTex; float gs, gr; std::string filename, image; int countG; checkCudaErrors(hipGetDeviceCount(&countG)); parse_argv(argc, argv, nx, ny, ns, depth, dist, image, filename, light, random, filter, diameterBi, gs, gr, diameterMean, diameterMedian, skybox, oneTex, nthreads, numGPUs, countG); properties(numGPUs); /* Seed for CUDA cuRandom */ unsigned long long int seed = 1000; /* #pixels of the image */ int num_pixels = nx*ny; int elementsToJump = num_pixels/numGPUs; int bytesToJump = elementsToJump * sizeof(Vector3); int size = 0; int num_textures = 0; /* Host variables */ float fb_size = num_pixels*sizeof(Vector3); float drand_size = num_pixels*sizeof(hiprandState_t); float cam_size = sizeof(Camera*); Vector3 *h_frameBuffer; int blocks = (nx * ny)/(numGPUs * nthreads); /* Create world */ Scene scene(dist, nx, ny); if(random) scene.loadScene(TRIANGL); else scene.loadScene(FFILE,filename,oneTex); Triangle *h_objects = scene.getObjects(); Skybox *h_skybox = scene.getSkybox(); unsigned char **textures; unsigned char **h_textures; Vector3 *textureSizes; if(oneTex){ textures = scene.getTextures(); textureSizes = scene.getTextureSizes(); num_textures = scene.getNumTextures(); } size = scene.getSize(); float ob_size = size*sizeof(Triangle); int threads = nthreads; while(size < threads) threads /= 2; int blocks2 = (size+threads-1)/(threads); std::cout << "Creating " << image << " with (" << nx << "," << ny << ") pixels with " << nthreads << " threads, using " << numGPUs << " GPUs." << std::endl; std::cout << "With " << ns << " iterations for AntiAliasing and depth of " << depth << "." << std::endl; std::cout << "The world have " << size << " objects." 
<< std::endl; if(light) std::cout << "Ambient light ON" << std::endl; else std::cout << "Ambient light OFF" << std::endl; /* Device variables */ Vector3 **d_frames = (Vector3 **) malloc(numGPUs * sizeof(Vector3)); Triangle **d_objectsGPUs = (Triangle **) malloc(numGPUs * sizeof(Triangle)); Camera ***d_cameras = (Camera ***) malloc(numGPUs * sizeof(Camera)); hiprandState_t **d_randstates = (hiprandState_t **) malloc(numGPUs * sizeof(hiprandState_t)); Node **d_internalNodes = (Node **) malloc(numGPUs * sizeof(Node)); Node **d_leafNodes = (Node **) malloc(numGPUs * sizeof(Node)); int **d_nodeCounters = (int **) malloc(numGPUs * sizeof(int)); Skybox **d_skyboxes = (Skybox **) malloc(numGPUs * sizeof(Skybox)); unsigned char ***d_textures = (unsigned char ***) malloc(numGPUs * sizeof(unsigned char)); float internal_size = (size-1)*sizeof(Node); float leaves_size = size*sizeof(Node); hipSetDevice(0); hipEvent_t E0, E1; hipEventCreate(&E0); hipEventCreate(&E1); /* Allocate Memory Host */ hipHostMalloc((Vector3**)&h_frameBuffer, fb_size); /* Allocate memory on Device */ hipEventRecord(E0,0); hipEventSynchronize(E0); if(num_textures > 0){ int count = 0; for(int i = 0; i < num_textures; i++){ Vector3 p = textureSizes[i]; count += (p[0]*p[1]*p[2]); } for(int j = 0; j < numGPUs; j++){ hipSetDevice(j); h_textures = (unsigned char **) malloc(sizeof(unsigned char)*count); std::cout << "Binding textures" << std::endl; for(int i = 0; i < num_textures; i++){ std::cout << "Texture " << i << std::endl; Vector3 p = textureSizes[i]; unsigned char *image = textures[i]; hipMalloc((void**)&h_textures[i], sizeof(unsigned char)*p[0]*p[1]*p[2]); hipMemcpy(h_textures[i], image, sizeof(unsigned char)*p[0]*p[1]*p[2], hipMemcpyHostToDevice); } hipMalloc(&d_textures[j], sizeof(unsigned char *) * num_textures); hipMemcpy(d_textures[j], h_textures, sizeof(unsigned char*) * num_textures, hipMemcpyHostToDevice); checkCudaErrors(hipGetLastError()); std::cout << "divice: " << j << std::endl; } } if(!oneTex){ for(int i = 0; i < size; i++){ h_objects[i].hostToDevice(i); } } /* Allocate memory on Device */ for(int i = 0; i < numGPUs; i++) { hipSetDevice(i); Vector3 *d_frameBuffer; Triangle *d_objects; Camera **d_cam; hiprandState_t *d_rand_state; Node *d_internals; Node *d_leaves; int *d_nodeCounter; Skybox *d_skybox; hipMallocManaged((void **)&d_frameBuffer, fb_size); hipMalloc((void **)&d_objects, ob_size); hipMalloc((void **)&d_cam, cam_size); hipMalloc((void **)&d_rand_state, drand_size); hipMalloc((void **)&d_internals, internal_size); hipMalloc((void **)&d_leaves, leaves_size); hipMalloc((void **)&d_nodeCounter, sizeof(int)*size); hipMalloc((void **)&d_skybox, sizeof(Skybox)); hipMemset(d_nodeCounter, 0, sizeof(int)*size); hipMemset(d_frameBuffer, 0, fb_size); d_frames[i] = d_frameBuffer; d_objectsGPUs[i] = d_objects; d_cameras[i] = d_cam; d_randstates[i] = d_rand_state; d_internalNodes[i] = d_internals; d_leafNodes[i] = d_leaves; d_nodeCounters[i] = d_nodeCounter; d_skyboxes[i] = d_skybox; } for(int i = 0; i < numGPUs; i++) { hipSetDevice(i); h_skybox->hostToDevice(i); hipMemcpy(d_objectsGPUs[i], h_objects, ob_size, hipMemcpyHostToDevice); checkCudaErrors(hipGetLastError()); hipMemcpy(d_skyboxes[i], h_skybox, sizeof(Skybox), hipMemcpyHostToDevice); checkCudaErrors(hipGetLastError()); } for(int i = 0; i < numGPUs; i++) { hipSetDevice(i); hipLaunchKernelGGL(( setupCamera), dim3(1),dim3(1), 0, 0, d_cameras[i],nx,ny, scene.getCamera()); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( render_init), dim3(blocks), 
dim3(nthreads), 0, 0, nx, ny, d_randstates[i], seed, i*(ny/numGPUs), (i+1)*(ny/numGPUs)); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( initLeafNodes), dim3(blocks2), dim3(threads), 0, 0, d_leafNodes[i], size, d_objectsGPUs[i]); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( constructBVH), dim3(blocks2), dim3(threads), 0, 0, d_internalNodes[i], d_leafNodes[i], size-1, d_objectsGPUs[i]); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( boundingBoxBVH), dim3(blocks2), dim3(threads), 0, 0, d_internalNodes[i], d_leafNodes[i], size, d_nodeCounters[i]); checkCudaErrors(hipGetLastError()); //checkBVH<<<1,1>>>(d_internalNodes[i], d_leafNodes[i], size); //checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( render), dim3(blocks), dim3(nthreads), 0, 0, d_frames[i], nx, ny, ns, d_cameras[i], d_internalNodes[i], d_randstates[i], depth, light, skybox, d_skyboxes[i], oneTex, d_textures[i], i*(ny/numGPUs), (i+1)*(ny/numGPUs)); checkCudaErrors(hipGetLastError()); } /* Copy the framebuffer from Device to Host */ for(int i = 0; i < numGPUs; i++) { hipSetDevice(i); hipMemcpyAsync(&h_frameBuffer[elementsToJump*i], d_frames[i], bytesToJump, hipMemcpyDeviceToHost); checkCudaErrors(hipGetLastError()); } for(int i = 0; i < numGPUs; i++){ hipSetDevice(i); hipDeviceSynchronize(); } hipSetDevice(0); hipEventRecord(E1,0); checkCudaErrors(hipGetLastError()); hipEventSynchronize(E1); checkCudaErrors(hipGetLastError()); hipEventElapsedTime(&totalTime,E0,E1); checkCudaErrors(hipGetLastError()); hipEventDestroy(E0); hipEventDestroy(E1); std::cout << "Total time: " << totalTime << " milliseconds. " << std::endl; std::cout << "Generating file image..." << std::endl; uint8_t *data = new uint8_t[nx*ny*3]; int count = 0; for(int j = ny-1; j >= 0; j--){ for(int i = 0; i < nx; i++){ size_t pixel_index = j*nx + i; Vector3 col = h_frameBuffer[pixel_index]; int ir = int(255.99*col.r()); int ig = int(255.99*col.g()); int ib = int(255.99*col.b()); data[count++] = ir; data[count++] = ig; data[count++] = ib; } } for(int i = 0; i < numGPUs; i++) { hipSetDevice(i); hipFree(d_cameras[i]); hipFree(d_objectsGPUs[i]); hipFree(d_randstates[i]); hipFree(d_frames[i]); hipFree(d_skyboxes[i]); hipFree(d_leafNodes[i]); hipFree(d_internalNodes[i]); hipFree(d_nodeCounters[i]); } image = "../Resources/Images/GPU_BVH_"+std::to_string(numGPUs)+"_GPU/"+image; stbi_write_png(image.c_str(), nx, ny, 3, data, nx*3); if(filter){ std::cout << "Filtering image using bilateral filter with Gs = " << gs << " and Gr = " << gr << " and window of diameter " << diameterBi << std::endl; std::string filenameFiltered = image.substr(0, image.length()-4) + "_bilateral_filter.png"; int sx, sy, sc; unsigned char *imageData = stbi_load(image.c_str(), &sx, &sy, &sc, 0); unsigned char *imageFiltered = new unsigned char[sx*sy*3]; bilateralFilter(diameterBi, sx, sy, imageData, imageFiltered, gs, gr); stbi_write_png(filenameFiltered.c_str(), sx, sy, 3, imageFiltered, sx*3); std::cout << "Filtering image using median filter with window of diameter " << diameterMedian << std::endl; filenameFiltered = image.substr(0, image.length()-4) + "_median_filter.png"; medianFilter(diameterMedian, sx, sy, imageData, imageFiltered); stbi_write_png(filenameFiltered.c_str(), sx, sy, 3, imageFiltered, sx*3); std::cout << "Filtering image using mean filter with window of diameter " << diameterMean << std::endl; filenameFiltered = image.substr(0, image.length()-4) + "_mean_filter.png"; meanFilter(diameterMean,sx, sy, imageData, imageFiltered); stbi_write_png(filenameFiltered.c_str(), sx, sy, 3, imageFiltered, 
sx*3); } }
c989e7c9b43bfd60d99eb41655a5629a23ad1afb.cu
#include <iostream> #include <fstream> #include <string> #include <cfloat> #include <ctime> #include <limits> #include <algorithm> #include <stack> #include <queue> #include <curand.h> #include <curand_kernel.h> #include <device_launch_parameters.h> #include "Camera.cuh" #include "Scene.cuh" #include "Node.cuh" #include "filters.hh" #define STB_IMAGE_STATIC #include "stb_image.h" #define STB_IMAGE_WRITE_IMPLEMENTATION #include "stb_image_write.h" #define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ ) #define cuRandom (curand_uniform(&local_random)) void error(const char *message) { std::cout << message << std::endl; exit(0); } void format() { std::cout << "File format for scene." << std::endl; std::cout << "\t # Comment, skip line." << std::endl; std::cout << "Spheres -> type center material" << std::endl; std::cout << "\t 1 Indicates that the 3D model is a Sphere object." << std::endl; std::cout << "\t Center The center of the Sphere." << std::endl; std::cout << "\t Radius The radius of the Sphere." << std::endl; std::cout << "\t Material -> type albedo [fuzz] [ref_idx]" << std::endl; std::cout << "\t\t 0 LAMBERTIAN" << std::endl; std::cout << "\t\t 1 METAL" << std::endl; std::cout << "\t\t 2 DIELECTRIC" << std::endl; std::cout << "\t\t 3 DIFFUSE LIGHT" << std::endl; std::cout << "\t\t albedo Defines the color." << std::endl; std::cout << "\t\t fuzz Only for METAL." << std::endl; std::cout << "\t\t ref_idx Only for DIELECTRIC." << std::endl; std::cout << "Examples of declaration:\n" << std::endl; std::cout << "# my scene" << std::endl; std::cout << "Object Center Rad Material Albedo Fuzz/ref_idx" << std::endl; std::cout << "1 0 1 0 2 1 0.5 0.78 0.9 " << std::endl; std::cout << "1 0 4 0 2 2 1 0 0.9 2 " << std::endl; std::cout << "1 1 4 1 2 3 0.9 0.9 0.9 1.5 " << std::endl; } void help(){ std::cout << "\n" << std::endl; std::cout << "\t[-d] [--defult] Set the parameters to default values" << std::endl; std::cout << "\t size: (1280x720) | AAit: 50 | depth: 50 | spheres: 11 | nthreads: 32" << std::endl; std::cout << "\t[-sizeX] Size in pixels of coordinate X. Number greater than 0." << std::endl; std::cout << "\t[-sizeY] Size in pixels of coordinate Y. Number greater than 0." << std::endl; std::cout << "\t[-AAit] Number of iterations to calculate color in one pixel. Number greater than 0." << std::endl; std::cout << "\t[-depth] The attenuation of scattered ray. Number greater than 0." << std::endl; std::cout << "\t[-spheres] Factor number to calculate the number of spheres in the scene. Number greater than 0." << std::endl; std::cout << "\t[-light] Turn on/off the ambient light. Values can be ON/OFF" << std::endl; std::cout << "\t[-nthreads] Number of threads to use" << std::endl; std::cout << "\t[-nGPUs] Number of GPUs to distribute the work" << std::endl; std::cout << "\t[-i][--image] File name of pic generated." << std::endl; std::cout << "\t[-f][--file] File name of the scene." << std::endl; std::cout << "\t[-h][--help] Show help." 
<< std::endl; std::cout << "\t #spheres = (2*spheres)*(2*spheres) + 4" << std::endl; std::cout << "\n" << std::endl; std::cout << "Examples of usage:" << std::endl; std::cout << "./path_tracing_NGPUs -d" << std::endl; std::cout << "./path_tracing_NGPUs -nthreads 16 -sizeX 2000"<< std::endl; format(); exit(1); } void parse_argv(int argc, char **argv, int &nx, int &ny, int &ns, int &depth, int &dist, std::string &image, std::string &filename, bool &light, bool &random, bool &filter, int &diameterBi, float &gs, float &gr, int &diameterMean, int &diameterMedian, bool &skybox, bool &oneTex, int &nthreads, int &numGPUs, const int count){ if(argc <= 1) error("Error usage. Use [-h] [--help] to see the usage."); nx = 1280; ny = 720; ns = 50; depth = 50; dist = 11; image = "image"; filter = false; gs = 0; gr = 0; diameterBi = 11; diameterMean = 3; diameterMedian = 3; skybox = false; oneTex = false; light = true; random = true; bool imageName = false; nthreads = 32; numGPUs = 1; bool v_default = false; for(int i = 1; i < argc; i += 2){ if(v_default) error("Error usage. Use [-h] [--help] to see the usage."); if (std::string(argv[i]) == "-d" || std::string(argv[i]) == "--default"){ if((i+1) < argc) error("The default parameter cannot have more arguments."); std::cerr << "Default\n"; v_default = true; } else if (std::string(argv[i]) == "-sizeX"){ if((i+1) >= argc) error("-sizeX value expected"); nx = atoi(argv[i+1]); if(nx == 0) error("-sizeX value expected or cannot be 0"); } else if(std::string(argv[i]) == "-sizeY"){ if((i+1) >= argc) error("-sizeY value expected"); ny = atoi(argv[i+1]); if(ny == 0) error("-sizeY value expected or cannot be 0"); } else if(std::string(argv[i]) == "-AAit"){ if((i+1) >= argc) error("-AAit value expected"); ns = atoi(argv[i+1]); if(ns == 0) error("-AAit value expected or cannot be 0"); } else if(std::string(argv[i]) == "-depth"){ if((i+1) >= argc) error("-depth value expected"); depth = atoi(argv[i+1]); if(depth == 0) error("-depth value expected or cannot be 0"); } else if(std::string(argv[i]) == "-i" || std::string(argv[i]) == "--image"){ if((i+1) >= argc) error("--image / -i file expected"); filename = std::string(argv[i+1]); imageName = true; } else if(std::string(argv[i]) == "-f" || std::string(argv[i]) == "--file"){ if((i+1) >= argc) error("-name file expected"); filename = std::string(argv[i+1]); if(!imageName) image = filename; filename = filename+".txt"; random = false; } else if(std::string(argv[i]) == "-light") { if((i+1) >= argc) error("-light value expected"); if(std::string(argv[i+1]) == "ON") light = true; else if(std::string(argv[i+1]) == "OFF") light = false; } else if (std::string(argv[i]) == "-filter") { filter = true; diameterBi = atoi(argv[i+1]); i += 2; gs = atof(argv[i]); gr = atof(argv[i+1]); i+=2; diameterMean = atoi(argv[i]); diameterMedian = atoi(argv[i+1]); } else if(std::string(argv[i]) == "-skybox") { if((i+1) >= argc) error("-skybox value expected"); if(std::string(argv[i+1]) == "ON") skybox = true; else if(std::string(argv[i+1]) == "OFF") skybox = false; } else if(std::string(argv[i]) == "-oneTex") { if((i+1) >= argc) error("-oneTex value expected"); if(std::string(argv[i+1]) == "ON") oneTex = true; else if(std::string(argv[i+1]) == "OFF") oneTex = false; } else if(std::string(argv[i]) == "-nGPUs"){ if((i+1) >= argc) error("-nGPUs value expected"); numGPUs = atoi(argv[i+1]); if(numGPUs == 0) error("-nGPUs value expected or cannot be 0"); numGPUs = std::min(numGPUs, count); } else if(std::string(argv[i]) == "-nthreads"){ if((i+1) >= argc) 
error("-nthreads value expected"); nthreads = atoi(argv[i+1]); if(nthreads == 0) error("-nthreads value expected or cannot be 0"); } else if(std::string(argv[i]) == "-h" || std::string(argv[i]) == "--help" ){ help(); } else{ error("Error usage. Use [-h] [--help] to see the usage."); } } if(!light) image = image+"_noktem"; image = image+".png"; } void check_cuda(cudaError_t result, char const *const func, const char *const file, int const line){ if(result){ std::cout << "CUDA error = " << static_cast<unsigned int>(result) << " at " << file << ":" << line << " '" << func << std::endl; std::cout << cudaGetErrorString(result) << std::endl; cudaDeviceReset(); exit(99); } } void properties(int numGPUs){ std::cout << "GPU Info " << std::endl; int device; for(int i = 0; i < numGPUs; i++){ cudaSetDevice(i); checkCudaErrors( cudaDeviceSetLimit( cudaLimitMallocHeapSize, 67108864 ) ); checkCudaErrors( cudaDeviceSetLimit( cudaLimitStackSize, 131072 ) ); } cudaSetDevice(0); cudaGetDevice(&device); cudaDeviceProp properties; checkCudaErrors( cudaGetDeviceProperties( &properties, device ) ); cudaGetDevice(&device); size_t limit1; checkCudaErrors( cudaDeviceGetLimit( &limit1, cudaLimitMallocHeapSize ) ); size_t limit2; checkCudaErrors( cudaDeviceGetLimit( &limit2, cudaLimitStackSize ) ); if( properties.major > 3 || ( properties.major == 3 && properties.minor >= 5 ) ) { std::cout << "Running on GPU " << device << " (" << properties.name << ")" << std::endl; std::cout << "Compute mode: " << properties.computeMode << std::endl; std::cout << "Concurrent Kernels: " << properties.concurrentKernels << std::endl; std::cout << "Warp size: " << properties.warpSize << std::endl; std::cout << "Major: " << properties.major << " Minor: " << properties.minor << std::endl; std::cout << "Cuda limit heap size: " << limit1 << std::endl; std::cout << "Cuda limit stack size: " << limit2 << "\n\n" << std::endl; } else std::cout << "GPU " << device << " (" << properties.name << ") does not support CUDA Dynamic Parallelism" << std::endl; } __device__ Vector3 color(const Ray& ray, Node *world, int depth, bool light, bool skybox, curandState *random, Skybox *sky, bool oneTex, unsigned char **d_textures){ Ray cur_ray = ray; Vector3 cur_attenuation = Vector3::One(); for(int i = 0; i < depth; i++){ hit_record rec; if( world->intersect(cur_ray, 0.00001, FLT_MAX, rec) ) { Ray scattered; Vector3 attenuation; Vector3 emitted = rec.mat_ptr.emitted(rec.u, rec.v, oneTex, d_textures); if(rec.mat_ptr.scatter(cur_ray, rec, attenuation, scattered, random, oneTex, d_textures)){ cur_attenuation *= attenuation; cur_attenuation += emitted; cur_ray = scattered; } else return cur_attenuation * emitted; } else { if(skybox && sky->hit(cur_ray, 0.00001, FLT_MAX, rec)){ return cur_attenuation * rec.mat_ptr.emitted(rec.u, rec.v, oneTex, d_textures); } else { if(light) { Vector3 unit_direction = unit_vector(cur_ray.direction()); float t = 0.5*(unit_direction.y() + 1.0); Vector3 c = (1.0 - t)*Vector3::One() + t*Vector3(0.5, 0.7, 1.0); return cur_attenuation * c; } else return Vector3::Zero(); } } } return Vector3::Zero(); } __device__ int LongestCommonPrefix(int i, int j, int numObjects, Triangle *d_list) { if(i < 0 or i > numObjects - 1 or j < 0 or j > numObjects - 1) return -1; int codeI = d_list[i].getMorton(); int codeJ = d_list[j].getMorton(); if(i == j) { printf("Equals Longest\n"); return __clz(codeI ^ codeJ); } else return __clz(codeI ^ codeJ); } __device__ int findSplit(Triangle *d_list, int first, int last) { if(first == last){ return -1; } int 
firstCode = d_list[first].getMorton(); int lastCode = d_list[last].getMorton(); int commonPrefix = __clz(firstCode ^ lastCode); int split = first; int step = last - first; do { step = (step + 1 ) >> 1; int newSplit = split + step; if(newSplit < last){ int splitCode = d_list[newSplit].getMorton(); int splitPrefix = __clz(firstCode ^ splitCode); if(splitPrefix > commonPrefix){ split = newSplit; } } } while (step > 1); return split; } __device__ int2 determineRange(Triangle *d_list, int idx, int objs) { int d = LongestCommonPrefix(idx, idx + 1, objs, d_list) - LongestCommonPrefix(idx, idx - 1, objs, d_list) >= 0 ? 1 : -1; int dmin = LongestCommonPrefix(idx, idx - d, objs, d_list); int lmax = 2; while(LongestCommonPrefix(idx, idx + lmax*d, objs, d_list) > dmin){ lmax <<=1; } int l = 0; int div = 2; for(int t = lmax/div; t >= 1; t >>= 1) { if(LongestCommonPrefix(idx, idx + (l + t) * d, objs, d_list) > dmin) l += t; } int jdx = idx + l * d; if(jdx < idx) return make_int2(jdx,idx); else return make_int2(idx,jdx); } __global__ void setupCamera(Camera **d_cam, int nx, int ny, Camera cam) { if (threadIdx.x == 0 && blockIdx.x == 0) { *d_cam = new Camera(cam.getLookfrom(), cam.getLookat(), cam.getVUP(), cam.getFOV(), float(nx)/float(ny), cam.getAperture(), cam.getFocus(),0.0,0.1); } } __global__ void render_init(int max_x, int max_y, curandState *rand_state,unsigned long long seed, int minY, int maxY) { int num = blockIdx.x*blockDim.x + threadIdx.x; int i = num%max_x; int j = num/max_x + minY; if( (i >= max_x) || (j >= max_y) ) return; int pixel_index = num; curand_init((seed << 20) + pixel_index, 0, 0, &rand_state[pixel_index]); } __global__ void initLeafNodes(Node *leafNodes, int objs, Triangle *d_list) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if(idx >= objs) return; leafNodes[idx].obj = &d_list[idx]; leafNodes[idx].box = d_list[idx].getBox(); } __global__ void boundingBoxBVH(Node *d_internalNodes, Node *d_leafNodes, int objs, int *nodeCounter) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if(idx >= objs) return; Node *leaf = d_leafNodes + idx; Node* current = leaf->parent; int currentIdx = current - d_internalNodes; int res = atomicAdd(nodeCounter + currentIdx, 1); while (true) { if(res == 0) return; aabb leftBoundingBox = current->left->box; aabb rightBoundingBox = current->right->box; current->box = surrounding_box(leftBoundingBox, rightBoundingBox); if (current == d_internalNodes) { return; } current = current->parent; currentIdx = current - d_internalNodes; res = atomicAdd(nodeCounter + currentIdx, 1); } } __global__ void constructBVH(Node *d_internalNodes, Node *leafNodes, int objs, Triangle *d_list) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if(idx >= objs) return; int2 range = determineRange(d_list, idx, objs+1); int first = range.x; int last = range.y; int split = findSplit(d_list, first, last); if(split == -1){ split = (first+last) >> 1; ++last; } Node *current = d_internalNodes + idx; if(split == first) { current->left = leafNodes + split; current->left->isLeaf = true; current->left->isLeft = true; (leafNodes + split)->parent = current; } else{ current->left = d_internalNodes + split; current->left->isLeft = true; (d_internalNodes + split)->parent = current; } if (split + 1 == last) { current->right = leafNodes + split + 1; current->right->isLeaf = true; current->right->isRight = true; (leafNodes + split + 1)->parent = current; } else{ current->right = d_internalNodes + split + 1; current->right->isRight = true; (d_internalNodes + split + 1)->parent = current; } } 
__global__ void render(Vector3 *fb, int max_x, int max_y, int ns, Camera **cam, Node *world, curandState *d_rand_state, int depth, bool light, bool skybox, Skybox *sky, bool oneTex, unsigned char ** d_textures, int minY, int maxY) { int num = blockIdx.x*blockDim.x + threadIdx.x; int i = num%max_x; int j = num/max_x + minY; curandState local_random; int pixel_index = num; local_random = d_rand_state[pixel_index]; Vector3 col(0,0,0); for(int s = 0; s < ns; s++){ float u = float(i + cuRandom) / float(max_x); float v = float(j + cuRandom) / float(max_y); Ray r = (*cam)->get_ray(u, v, &local_random); col += color(r, world, depth, light, skybox, &local_random, sky, oneTex, d_textures); } d_rand_state[pixel_index] = local_random; col /= float(ns); col[0] = sqrt(col[0]); col[1] = sqrt(col[1]); col[2] = sqrt(col[2]); fb[pixel_index] = col; } __global__ void checkBVH(Node *d_internalNodes, Node *d_leaves, int objs){ if (threadIdx.x == 0 && blockIdx.x == 0){ printf("Checking BVH...\n"); for(int i = 0; i < objs; i++){ if(!d_leaves[i].parent){ printf("Leaf without parent %d\n",i); } } for(int i = 0; i < objs-1; i++){ if(!d_internalNodes[i].left){ printf("Internal without left %d\n",i); } if(!d_internalNodes[i].right){ printf("Internal without right %d\n",i); } if(!d_internalNodes[i].parent){ printf("Internal without parent %d\n",i); } } printf("BVH checked!\n"); } } int main(int argc, char **argv) { cudaDeviceReset(); float totalTime; int nx, ny, ns, depth, dist, diameterBi, diameterMean, diameterMedian, nthreads, numGPUs; bool light, random, filter, skybox, oneTex; float gs, gr; std::string filename, image; int countG; checkCudaErrors(cudaGetDeviceCount(&countG)); parse_argv(argc, argv, nx, ny, ns, depth, dist, image, filename, light, random, filter, diameterBi, gs, gr, diameterMean, diameterMedian, skybox, oneTex, nthreads, numGPUs, countG); properties(numGPUs); /* Seed for CUDA cuRandom */ unsigned long long int seed = 1000; /* #pixels of the image */ int num_pixels = nx*ny; int elementsToJump = num_pixels/numGPUs; int bytesToJump = elementsToJump * sizeof(Vector3); int size = 0; int num_textures = 0; /* Host variables */ float fb_size = num_pixels*sizeof(Vector3); float drand_size = num_pixels*sizeof(curandState); float cam_size = sizeof(Camera*); Vector3 *h_frameBuffer; int blocks = (nx * ny)/(numGPUs * nthreads); /* Create world */ Scene scene(dist, nx, ny); if(random) scene.loadScene(TRIANGL); else scene.loadScene(FFILE,filename,oneTex); Triangle *h_objects = scene.getObjects(); Skybox *h_skybox = scene.getSkybox(); unsigned char **textures; unsigned char **h_textures; Vector3 *textureSizes; if(oneTex){ textures = scene.getTextures(); textureSizes = scene.getTextureSizes(); num_textures = scene.getNumTextures(); } size = scene.getSize(); float ob_size = size*sizeof(Triangle); int threads = nthreads; while(size < threads) threads /= 2; int blocks2 = (size+threads-1)/(threads); std::cout << "Creating " << image << " with (" << nx << "," << ny << ") pixels with " << nthreads << " threads, using " << numGPUs << " GPUs." << std::endl; std::cout << "With " << ns << " iterations for AntiAliasing and depth of " << depth << "." << std::endl; std::cout << "The world have " << size << " objects." 
<< std::endl; if(light) std::cout << "Ambient light ON" << std::endl; else std::cout << "Ambient light OFF" << std::endl; /* Device variables */ Vector3 **d_frames = (Vector3 **) malloc(numGPUs * sizeof(Vector3 *)); Triangle **d_objectsGPUs = (Triangle **) malloc(numGPUs * sizeof(Triangle *)); Camera ***d_cameras = (Camera ***) malloc(numGPUs * sizeof(Camera **)); curandState **d_randstates = (curandState **) malloc(numGPUs * sizeof(curandState *)); Node **d_internalNodes = (Node **) malloc(numGPUs * sizeof(Node *)); Node **d_leafNodes = (Node **) malloc(numGPUs * sizeof(Node *)); int **d_nodeCounters = (int **) malloc(numGPUs * sizeof(int *)); Skybox **d_skyboxes = (Skybox **) malloc(numGPUs * sizeof(Skybox *)); unsigned char ***d_textures = (unsigned char ***) malloc(numGPUs * sizeof(unsigned char **)); float internal_size = (size-1)*sizeof(Node); float leaves_size = size*sizeof(Node); cudaSetDevice(0); cudaEvent_t E0, E1; cudaEventCreate(&E0); cudaEventCreate(&E1); /* Allocate Memory Host */ cudaMallocHost((Vector3**)&h_frameBuffer, fb_size); /* Start timing */ cudaEventRecord(E0,0); cudaEventSynchronize(E0); if(num_textures > 0){ int count = 0; for(int i = 0; i < num_textures; i++){ Vector3 p = textureSizes[i]; count += (p[0]*p[1]*p[2]); } for(int j = 0; j < numGPUs; j++){ cudaSetDevice(j); h_textures = (unsigned char **) malloc(sizeof(unsigned char)*count); std::cout << "Binding textures" << std::endl; for(int i = 0; i < num_textures; i++){ std::cout << "Texture " << i << std::endl; Vector3 p = textureSizes[i]; unsigned char *image = textures[i]; cudaMalloc((void**)&h_textures[i], sizeof(unsigned char)*p[0]*p[1]*p[2]); cudaMemcpy(h_textures[i], image, sizeof(unsigned char)*p[0]*p[1]*p[2], cudaMemcpyHostToDevice); } cudaMalloc(&d_textures[j], sizeof(unsigned char *) * num_textures); cudaMemcpy(d_textures[j], h_textures, sizeof(unsigned char*) * num_textures, cudaMemcpyHostToDevice); checkCudaErrors(cudaGetLastError()); std::cout << "Device: " << j << std::endl; } } if(!oneTex){ for(int i = 0; i < size; i++){ h_objects[i].hostToDevice(i); } } /* Allocate memory on Device */ for(int i = 0; i < numGPUs; i++) { cudaSetDevice(i); Vector3 *d_frameBuffer; Triangle *d_objects; Camera **d_cam; curandState *d_rand_state; Node *d_internals; Node *d_leaves; int *d_nodeCounter; Skybox *d_skybox; cudaMallocManaged((void **)&d_frameBuffer, fb_size); cudaMalloc((void **)&d_objects, ob_size); cudaMalloc((void **)&d_cam, cam_size); cudaMalloc((void **)&d_rand_state, drand_size); cudaMalloc((void **)&d_internals, internal_size); cudaMalloc((void **)&d_leaves, leaves_size); cudaMalloc((void **)&d_nodeCounter, sizeof(int)*size); cudaMalloc((void **)&d_skybox, sizeof(Skybox)); cudaMemset(d_nodeCounter, 0, sizeof(int)*size); cudaMemset(d_frameBuffer, 0, fb_size); d_frames[i] = d_frameBuffer; d_objectsGPUs[i] = d_objects; d_cameras[i] = d_cam; d_randstates[i] = d_rand_state; d_internalNodes[i] = d_internals; d_leafNodes[i] = d_leaves; d_nodeCounters[i] = d_nodeCounter; d_skyboxes[i] = d_skybox; } for(int i = 0; i < numGPUs; i++) { cudaSetDevice(i); h_skybox->hostToDevice(i); cudaMemcpy(d_objectsGPUs[i], h_objects, ob_size, cudaMemcpyHostToDevice); checkCudaErrors(cudaGetLastError()); cudaMemcpy(d_skyboxes[i], h_skybox, sizeof(Skybox), cudaMemcpyHostToDevice); checkCudaErrors(cudaGetLastError()); } for(int i = 0; i < numGPUs; i++) { cudaSetDevice(i); setupCamera<<<1,1>>>(d_cameras[i],nx,ny, scene.getCamera()); checkCudaErrors(cudaGetLastError()); render_init<<<blocks, nthreads>>>(nx, ny, d_randstates[i], seed, 
i*(ny/numGPUs), (i+1)*(ny/numGPUs)); checkCudaErrors(cudaGetLastError()); initLeafNodes<<<blocks2, threads>>>(d_leafNodes[i], size, d_objectsGPUs[i]); checkCudaErrors(cudaGetLastError()); constructBVH<<<blocks2, threads>>>(d_internalNodes[i], d_leafNodes[i], size-1, d_objectsGPUs[i]); checkCudaErrors(cudaGetLastError()); boundingBoxBVH<<<blocks2, threads>>>(d_internalNodes[i], d_leafNodes[i], size, d_nodeCounters[i]); checkCudaErrors(cudaGetLastError()); //checkBVH<<<1,1>>>(d_internalNodes[i], d_leafNodes[i], size); //checkCudaErrors(cudaGetLastError()); render<<<blocks, nthreads>>>(d_frames[i], nx, ny, ns, d_cameras[i], d_internalNodes[i], d_randstates[i], depth, light, skybox, d_skyboxes[i], oneTex, d_textures[i], i*(ny/numGPUs), (i+1)*(ny/numGPUs)); checkCudaErrors(cudaGetLastError()); } /* Copy the framebuffer from Device to Host */ for(int i = 0; i < numGPUs; i++) { cudaSetDevice(i); cudaMemcpyAsync(&h_frameBuffer[elementsToJump*i], d_frames[i], bytesToJump, cudaMemcpyDeviceToHost); checkCudaErrors(cudaGetLastError()); } for(int i = 0; i < numGPUs; i++){ cudaSetDevice(i); cudaDeviceSynchronize(); } cudaSetDevice(0); cudaEventRecord(E1,0); checkCudaErrors(cudaGetLastError()); cudaEventSynchronize(E1); checkCudaErrors(cudaGetLastError()); cudaEventElapsedTime(&totalTime,E0,E1); checkCudaErrors(cudaGetLastError()); cudaEventDestroy(E0); cudaEventDestroy(E1); std::cout << "Total time: " << totalTime << " milliseconds. " << std::endl; std::cout << "Generating file image..." << std::endl; uint8_t *data = new uint8_t[nx*ny*3]; int count = 0; for(int j = ny-1; j >= 0; j--){ for(int i = 0; i < nx; i++){ size_t pixel_index = j*nx + i; Vector3 col = h_frameBuffer[pixel_index]; int ir = int(255.99*col.r()); int ig = int(255.99*col.g()); int ib = int(255.99*col.b()); data[count++] = ir; data[count++] = ig; data[count++] = ib; } } for(int i = 0; i < numGPUs; i++) { cudaSetDevice(i); cudaFree(d_cameras[i]); cudaFree(d_objectsGPUs[i]); cudaFree(d_randstates[i]); cudaFree(d_frames[i]); cudaFree(d_skyboxes[i]); cudaFree(d_leafNodes[i]); cudaFree(d_internalNodes[i]); cudaFree(d_nodeCounters[i]); } image = "../Resources/Images/GPU_BVH_"+std::to_string(numGPUs)+"_GPU/"+image; stbi_write_png(image.c_str(), nx, ny, 3, data, nx*3); if(filter){ std::cout << "Filtering image using bilateral filter with Gs = " << gs << " and Gr = " << gr << " and window of diameter " << diameterBi << std::endl; std::string filenameFiltered = image.substr(0, image.length()-4) + "_bilateral_filter.png"; int sx, sy, sc; unsigned char *imageData = stbi_load(image.c_str(), &sx, &sy, &sc, 0); unsigned char *imageFiltered = new unsigned char[sx*sy*3]; bilateralFilter(diameterBi, sx, sy, imageData, imageFiltered, gs, gr); stbi_write_png(filenameFiltered.c_str(), sx, sy, 3, imageFiltered, sx*3); std::cout << "Filtering image using median filter with window of diameter " << diameterMedian << std::endl; filenameFiltered = image.substr(0, image.length()-4) + "_median_filter.png"; medianFilter(diameterMedian, sx, sy, imageData, imageFiltered); stbi_write_png(filenameFiltered.c_str(), sx, sy, 3, imageFiltered, sx*3); std::cout << "Filtering image using mean filter with window of diameter " << diameterMean << std::endl; filenameFiltered = image.substr(0, image.length()-4) + "_mean_filter.png"; meanFilter(diameterMean,sx, sy, imageData, imageFiltered); stbi_write_png(filenameFiltered.c_str(), sx, sy, 3, imageFiltered, sx*3); } }
0f6740ca3fbff520b37cfbc22ac34ccbc0c5aac2.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THHGeneral.h> #include <THH/THHThrustAllocator.cuh> #include <thrust/execution_policy.h> #include <tuple> #include <thrust/unique.h> #include <thrust/sort.h> #include <thrust/scan.h> #include <thrust/scatter.h> namespace at { namespace native{ namespace { template <typename scalar_t> std::tuple<Tensor, Tensor> _unique_cuda_template( const Tensor& self, const bool return_inverse) { hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::hip::par(allocator).on(stream); const Tensor& input = self.contiguous(); int64_t num_inp = input.numel(); const scalar_t* input_data = input.data<scalar_t>(); //sort & unique Tensor output = input.clone(); output = output.view(-1); scalar_t* output_data = output.data<scalar_t>(); Tensor inverse_indices; if (!return_inverse) { inverse_indices = at::empty({0}, self.type().toScalarType(kLong)); thrust::sort(policy, output_data, output_data + num_inp); } else { Tensor sorted_indices = at::arange(0, num_inp, self.type().toScalarType(kLong)); int64_t* sorted_indices_ptr = sorted_indices.data<int64_t>(); thrust::sort_by_key(policy, output_data, output_data + num_inp, sorted_indices_ptr); Tensor inv_loc = at::empty({num_inp}, self.type().toScalarType(kLong)); inverse_indices = at::empty({num_inp}, self.type().toScalarType(kLong)); int64_t* inv_loc_ptr = inv_loc.data<int64_t>(); int64_t* inverse_indices_ptr = inverse_indices.data<int64_t>(); thrust::adjacent_difference(policy, output_data, output_data + num_inp, inv_loc_ptr, [=] __device__ (scalar_t a, scalar_t b) -> int64_t { if (a != b) {return 1;} else { return 0; }}); inv_loc[0] = 0; thrust::inclusive_scan(policy, inv_loc_ptr, inv_loc_ptr + num_inp, inv_loc_ptr); thrust::scatter(policy,inv_loc_ptr, inv_loc_ptr + num_inp, sorted_indices_ptr, inverse_indices_ptr); inverse_indices.resize_(input.sizes()); } int64_t num_out = thrust::unique(policy, output_data, output_data + num_inp) - output_data; output.resize_(num_out); THCudaCheck(hipGetLastError()); return std::tuple<Tensor, Tensor>(output, inverse_indices); } template <typename scalar_t> std::tuple<Tensor, Tensor> _unique_dim_cuda_template( const Tensor& self, const int64_t dim, const bool return_inverse) { hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::hip::par(allocator).on(stream); Tensor input_flat = self.transpose(dim, 0); auto orig_sizes = input_flat.sizes().vec(); input_flat = input_flat.contiguous().view({input_flat.size(0), -1}); scalar_t* input_flat_ptr = input_flat.data<scalar_t>(); Tensor indices = at::arange(0, input_flat.size(0), self.type().toScalarType(kLong)); int64_t* indices_ptr = indices.data<int64_t>(); int64_t numel = input_flat.size(1); // sort indices using data thrust::sort(policy, indices_ptr, indices_ptr + indices.numel(), [=] __device__ (int64_t a, int64_t b) -> bool { for (int64_t i = 0; i < numel; ++i) { scalar_t lhs = input_flat_ptr[i + a * numel]; scalar_t rhs = input_flat_ptr[i + b * numel]; if (lhs < rhs) { return true; } else if (lhs > rhs) { return false; } } return false; }); Tensor input_sorted = input_flat.index_select(0, indices); // get unique tensors scalar_t* input_sorted_ptr = input_sorted.data<scalar_t>(); Tensor input_sorted_indices = at::arange(0, input_sorted.size(0), 
self.type().toScalarType(kLong)); int64_t* input_sorted_indices_ptr = input_sorted_indices.data<int64_t>(); auto last = thrust::unique(policy, input_sorted_indices_ptr, input_sorted_indices_ptr + input_sorted_indices.numel(), [=] __device__ (int64_t a, int64_t b) -> bool { for (int64_t i = 0; i < numel; ++i) { scalar_t lhs = input_sorted_ptr[i + a * numel]; scalar_t rhs = input_sorted_ptr[i + b * numel]; if (lhs != rhs) { return false; } } return true; }); input_sorted_indices.resize_(last - input_sorted_indices_ptr); Tensor output = input_sorted.index_select(0, input_sorted_indices); // reshape back auto new_sizes = std::vector<int64_t>(orig_sizes); new_sizes[0] = -1; output = output.view(new_sizes); output = output.transpose(0, dim); // calculate inverse indices Tensor inverse_indices = at::empty({0}, self.type().toScalarType(kLong)); if (return_inverse) { int64_t size = self.size(dim); inverse_indices.resize_(size); Tensor mask = at::empty(input_sorted.size(0), self.type().toScalarType(kLong)); mask[0] = 1; for (int i = 0; i < input_sorted.size(0) - 1; ++i) { if (!at::equal(input_sorted[i], input_sorted[i+1])) { mask[i+1] = 1; } else { mask[i+1] = 0; } } Tensor imask = at::cumsum(mask, 0) - 1; for (int i = 0; i < indices.size(0); ++i) { inverse_indices[indices[i]] = imask[i]; } } THCudaCheck(hipGetLastError()); return std::tuple<Tensor, Tensor>(output, inverse_indices); } } // namespace std::tuple<Tensor, Tensor> _unique_cuda(const Tensor& self, const bool sorted, const bool return_inverse) { return AT_DISPATCH_ALL_TYPES(self.type(), "unique", [&] { // The current CUDA implementation of unique always sort due to the // lack of hashtable implementation in thrust return _unique_cuda_template<scalar_t>(self, return_inverse); }); } std::tuple<Tensor, Tensor> _unique_dim_cuda(const Tensor& self, const int64_t dim, const bool sorted, const bool return_inverse) { return AT_DISPATCH_ALL_TYPES(self.type(), "unique_dim", [&] { return _unique_dim_cuda_template<scalar_t>(self, dim, return_inverse); }); } } // namespace native } // namespace at
0f6740ca3fbff520b37cfbc22ac34ccbc0c5aac2.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THCGeneral.h> #include <THC/THCThrustAllocator.cuh> #include <thrust/execution_policy.h> #include <tuple> #include <thrust/unique.h> #include <thrust/sort.h> #include <thrust/scan.h> #include <thrust/scatter.h> namespace at { namespace native{ namespace { template <typename scalar_t> std::tuple<Tensor, Tensor> _unique_cuda_template( const Tensor& self, const bool return_inverse) { cudaStream_t stream = at::cuda::getCurrentCUDAStream(); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::cuda::par(allocator).on(stream); const Tensor& input = self.contiguous(); int64_t num_inp = input.numel(); const scalar_t* input_data = input.data<scalar_t>(); //sort & unique Tensor output = input.clone(); output = output.view(-1); scalar_t* output_data = output.data<scalar_t>(); Tensor inverse_indices; if (!return_inverse) { inverse_indices = at::empty({0}, self.type().toScalarType(kLong)); thrust::sort(policy, output_data, output_data + num_inp); } else { Tensor sorted_indices = at::arange(0, num_inp, self.type().toScalarType(kLong)); int64_t* sorted_indices_ptr = sorted_indices.data<int64_t>(); thrust::sort_by_key(policy, output_data, output_data + num_inp, sorted_indices_ptr); Tensor inv_loc = at::empty({num_inp}, self.type().toScalarType(kLong)); inverse_indices = at::empty({num_inp}, self.type().toScalarType(kLong)); int64_t* inv_loc_ptr = inv_loc.data<int64_t>(); int64_t* inverse_indices_ptr = inverse_indices.data<int64_t>(); thrust::adjacent_difference(policy, output_data, output_data + num_inp, inv_loc_ptr, [=] __device__ (scalar_t a, scalar_t b) -> int64_t { if (a != b) {return 1;} else { return 0; }}); inv_loc[0] = 0; thrust::inclusive_scan(policy, inv_loc_ptr, inv_loc_ptr + num_inp, inv_loc_ptr); thrust::scatter(policy,inv_loc_ptr, inv_loc_ptr + num_inp, sorted_indices_ptr, inverse_indices_ptr); inverse_indices.resize_(input.sizes()); } int64_t num_out = thrust::unique(policy, output_data, output_data + num_inp) - output_data; output.resize_(num_out); THCudaCheck(cudaGetLastError()); return std::tuple<Tensor, Tensor>(output, inverse_indices); } template <typename scalar_t> std::tuple<Tensor, Tensor> _unique_dim_cuda_template( const Tensor& self, const int64_t dim, const bool return_inverse) { cudaStream_t stream = at::cuda::getCurrentCUDAStream(); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::cuda::par(allocator).on(stream); Tensor input_flat = self.transpose(dim, 0); auto orig_sizes = input_flat.sizes().vec(); input_flat = input_flat.contiguous().view({input_flat.size(0), -1}); scalar_t* input_flat_ptr = input_flat.data<scalar_t>(); Tensor indices = at::arange(0, input_flat.size(0), self.type().toScalarType(kLong)); int64_t* indices_ptr = indices.data<int64_t>(); int64_t numel = input_flat.size(1); // sort indices using data thrust::sort(policy, indices_ptr, indices_ptr + indices.numel(), [=] __device__ (int64_t a, int64_t b) -> bool { for (int64_t i = 0; i < numel; ++i) { scalar_t lhs = input_flat_ptr[i + a * numel]; scalar_t rhs = input_flat_ptr[i + b * numel]; if (lhs < rhs) { return true; } else if (lhs > rhs) { return false; } } return false; }); Tensor input_sorted = input_flat.index_select(0, indices); // get unique tensors scalar_t* input_sorted_ptr = input_sorted.data<scalar_t>(); Tensor input_sorted_indices = at::arange(0, input_sorted.size(0), self.type().toScalarType(kLong)); int64_t* input_sorted_indices_ptr = 
input_sorted_indices.data<int64_t>(); auto last = thrust::unique(policy, input_sorted_indices_ptr, input_sorted_indices_ptr + input_sorted_indices.numel(), [=] __device__ (int64_t a, int64_t b) -> bool { for (int64_t i = 0; i < numel; ++i) { scalar_t lhs = input_sorted_ptr[i + a * numel]; scalar_t rhs = input_sorted_ptr[i + b * numel]; if (lhs != rhs) { return false; } } return true; }); input_sorted_indices.resize_(last - input_sorted_indices_ptr); Tensor output = input_sorted.index_select(0, input_sorted_indices); // reshape back auto new_sizes = std::vector<int64_t>(orig_sizes); new_sizes[0] = -1; output = output.view(new_sizes); output = output.transpose(0, dim); // calculate inverse indices Tensor inverse_indices = at::empty({0}, self.type().toScalarType(kLong)); if (return_inverse) { int64_t size = self.size(dim); inverse_indices.resize_(size); Tensor mask = at::empty(input_sorted.size(0), self.type().toScalarType(kLong)); mask[0] = 1; for (int i = 0; i < input_sorted.size(0) - 1; ++i) { if (!at::equal(input_sorted[i], input_sorted[i+1])) { mask[i+1] = 1; } else { mask[i+1] = 0; } } Tensor imask = at::cumsum(mask, 0) - 1; for (int i = 0; i < indices.size(0); ++i) { inverse_indices[indices[i]] = imask[i]; } } THCudaCheck(cudaGetLastError()); return std::tuple<Tensor, Tensor>(output, inverse_indices); } } // namespace std::tuple<Tensor, Tensor> _unique_cuda(const Tensor& self, const bool sorted, const bool return_inverse) { return AT_DISPATCH_ALL_TYPES(self.type(), "unique", [&] { // The current CUDA implementation of unique always sort due to the // lack of hashtable implementation in thrust return _unique_cuda_template<scalar_t>(self, return_inverse); }); } std::tuple<Tensor, Tensor> _unique_dim_cuda(const Tensor& self, const int64_t dim, const bool sorted, const bool return_inverse) { return AT_DISPATCH_ALL_TYPES(self.type(), "unique_dim", [&] { return _unique_dim_cuda_template<scalar_t>(self, dim, return_inverse); }); } } // namespace native } // namespace at
09de4aca6ba0063a0701e9ed62c8e4b2eb951571.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2020, Vijay Thakkar ([email protected]). **************************************************************************************************/ ////////////////////////////////////////////////////////////////////// // THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY // ////////////////////////////////////////////////////////////////////// #include "benchmark/benchmark.h" #include "cuasr/gemm/device/default_srgemm_configuration.h" #include "cuasr/gemm/device/srgemm.h" #include "cuasr/functional.h" #include "harness.h" //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 8 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_8x32x8_8x32x1_2x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using 
EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x32x8_16x32x1_4x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x64x8_16x64x1_4x8_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, 
precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x32x8_32x32x1_8x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x64x8_32x64x1_8x8_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x64x8_32x64x1_8x8_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 1 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x32x8_64x32x1_8x8_8x4_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; 
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x32x8_64x32x1_8x8_8x4_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 8 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_8x32x8_8x16x1_2x2_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 8 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = 
cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<8, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"]
      = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}

BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_8x64x8_8x32x1_2x4_4x8_1x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
//    Threads / Warp: 4 x 8
//     Warps / Block: 1 x 2
//       Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<16, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"]
      = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}

BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x32x8_16x16x1_4x2_4x8_1x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
//    Threads / Warp: 4 x 8
//     Warps / Block: 1 x 2
//       Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<16, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"]
      = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}

BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x64x8_16x32x1_4x4_4x8_1x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
//    Threads / Warp: 4 x 8
//     Warps / Block: 1 x 2
//       Threadblock: 16 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<16, 64, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"]
      = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}

BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x128x8_16x64x1_4x8_4x8_1x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
//    Threads / Warp: 8 x 4
//     Warps / Block: 1 x 2
//       Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<32, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"]
      = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}

BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x32x8_32x16x1_4x4_8x4_1x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
//    Threads / Warp: 4 x 8
//     Warps / Block: 1 x 2
//       Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<32, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"]
      = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
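// NOTE: every benchmark in this file repeats the template above; only the tile
// shapes and the CUASR_BENCH_LEVEL guard vary between instantiations. The
// benchmark name suffix appears to encode, in order, the threadblock tile, the
// warp tile, the elements computed per thread, the threads per warp, and the
// warps per threadblock, matching the shape comments that precede each #if
// block. This reading of the naming scheme is an inference from the shapes used
// here, not something taken from cuASR documentation.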
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x64x8_32x32x1_8x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x128x8_32x64x1_8x8_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x128x8_32x64x1_8x8_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x64x8_64x32x1_8x8_8x4_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); 
hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x64x8_64x32x1_8x8_8x4_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x32x8_16x32x1_4x4_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 
2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x32x8_32x32x1_8x4_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x64x8_32x64x1_8x8_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x64x8_32x64x1_8x8_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 1 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x32x8_64x32x1_8x8_8x4_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, 
cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x32x8_64x32x1_8x8_8x4_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x32x8_8x16x1_2x2_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; 
using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x64x8_8x32x1_2x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x32x8_16x16x1_4x2_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // 
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x64x8_16x32x1_4x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x128x8_16x64x1_4x8_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape 
= cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x32x8_32x16x1_4x4_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x64x8_32x32x1_8x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 64 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x128x8_32x64x1_8x8_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using 
SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x128x8_32x64x1_8x8_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x32x8_64x16x1_8x4_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void 
BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x64x8_64x32x1_8x8_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x64x8_64x32x1_8x8_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x64x16_8x16x1_2x2_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // 
Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 128 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x128x16_8x32x1_2x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } 
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x32x8_16x8x1_2x2_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x64x8_16x16x1_4x2_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); 
hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x128x8_16x32x1_4x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x256x8_16x64x1_4x8_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x256x8_16x64x1_4x8_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x64x8_32x16x1_4x4_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 64 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x128x8_32x32x1_8x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x128x8_32x32x1_8x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 64 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x256x8_32x64x1_8x8_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, 
cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x256x8_32x64x1_8x8_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x128x8_64x32x1_8x8_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x128x8_64x32x1_8x8_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = 
Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x32x8_8x16x1_2x2_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x32x8_16x16x1_4x2_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // 
precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x64x8_16x32x1_4x4_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x32x8_32x16x1_4x4_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x64x8_32x32x1_8x4_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = 
cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x64x8_32x32x1_8x4_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x128x8_32x64x1_8x8_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x128x8_32x64x1_8x8_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 256 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_256x32x8_64x16x1_8x4_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using 
precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_256x32x8_64x16x1_8x4_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 256 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_256x64x8_64x32x1_8x8_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_256x64x8_64x32x1_8x8_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 32 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 
2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x64x16_8x16x1_2x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 32 x 128 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x128x16_8x32x1_2x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif 
//////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 64 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x32x16_16x8x1_2x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, 
benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x64x8_16x16x1_4x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x128x8_16x32x1_4x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x128x8_16x32x1_4x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x256x8_16x64x1_4x8_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : 
state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x256x8_16x64x1_4x8_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 128 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x32x16_32x8x1_4x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x64x8_32x16x1_4x4_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x64x8_32x16x1_4x4_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x128x8_32x32x1_8x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x128x8_32x32x1_8x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 256 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_256x64x8_64x16x1_8x4_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // 
      precision, cutlass::layout::ColumnMajor,                           //
      precision, cutlass::layout::ColumnMajor,                           //
      precision, OpClass, SmArch,                                        //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,   //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_256x64x8_64x16x1_8x4_8x4_4x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif
09de4aca6ba0063a0701e9ed62c8e4b2eb951571.cu
/***************************************************************************************************
 * Copyright (c) 2020, Vijay Thakkar ([email protected]).
 **************************************************************************************************/
//////////////////////////////////////////////////////////////////////
// THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY //
//////////////////////////////////////////////////////////////////////
#include "benchmark/benchmark.h"

#include "cuasr/gemm/device/default_srgemm_configuration.h"
#include "cuasr/gemm/device/srgemm.h"
#include "cuasr/functional.h"

#include "harness.h"

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp:    4 x 8
// Warps / Block:     1 x 1
// Threadblock:       8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<8, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;

  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    cudaDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_8x32x8_8x32x1_2x4_4x8_1x1)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp:    4 x 8
// Warps / Block:     1 x 1
// Threadblock:       16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<16, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;

  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm =
cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x32x8_16x32x1_4x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x64x8_16x64x1_4x8_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, 
cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x32x8_32x32x1_8x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x64x8_32x64x1_8x8_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x64x8_32x64x1_8x8_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 1 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x32x8_64x32x1_8x8_8x4_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 
1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x32x8_64x32x1_8x8_8x4_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 8 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_8x32x8_8x16x1_2x2_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 8 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using 
ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_8x64x8_8x32x1_2x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x32x8_16x16x1_4x2_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State 
&state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x64x8_16x32x1_4x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x128x8_16x64x1_4x8_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 2 // Threadblock: 32 x 
32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x32x8_32x16x1_4x4_8x4_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x64x8_32x32x1_8x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif 
//////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x128x8_32x64x1_8x8_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x128x8_32x64x1_8x8_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x64x8_64x32x1_8x8_8x4_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, 
benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x64x8_64x32x1_8x8_8x4_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x32x8_16x32x1_4x4_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) 
{ benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x32x8_32x32x1_8x4_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x64x8_32x64x1_8x8_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x64x8_32x64x1_8x8_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 1 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x32x8_64x32x1_8x8_8x4_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x32x8_64x32x1_8x8_8x4_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x32x8_8x16x1_2x2_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, 
cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x64x8_8x32x1_2x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x32x8_16x16x1_4x2_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; 
using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x64x8_16x32x1_4x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x128x8_16x64x1_4x8_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, 
precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x32x8_32x16x1_4x4_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x64x8_32x32x1_8x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 64 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x128x8_32x64x1_8x8_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = 
cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x128x8_32x64x1_8x8_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x32x8_64x16x1_8x4_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x64x8_64x32x1_8x8_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using 
precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x64x8_64x32x1_8x8_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x64x16_8x16x1_2x2_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 128 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL 
>= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_16x128x16_8x32x1_2x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x32x8_16x8x1_2x2_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif 
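////////////////////////////////////////////////////////////////////////////////
// NOTE (editorial, illustrative sketch -- not part of the generated benchmark set):
// the suffix of each benchmark name encodes
//   ThreadblockShape _ WarpShape _ ElementsPerThread _ ThreadsPerWarp _ WarpsPerBlock
// so the header comment of every block can be cross-checked at compile time.
// The checks below do this for the 32x32x8_16x8x1_2x2_8x4_2x4 configuration
// registered directly above; the namespace and type aliases are hypothetical
// helpers and assume only the CUTLASS headers already pulled in by this
// translation unit.
namespace editorial_tile_check {
using TB   = cutlass::gemm::GemmShape<32, 32, 8>; // threadblock tile
using Warp = cutlass::gemm::GemmShape<16, 8, 8>;  // warp tile
// 8 x 4 threads per warp, each owning a 2 x 2 fragment, must tile the warp shape
static_assert(8 * 2 == Warp::kM && 4 * 2 == Warp::kN,
              "threads-per-warp times elements-per-thread must equal the warp tile");
// 2 x 4 warps per block must tile the threadblock shape
static_assert(2 * Warp::kM == TB::kM && 4 * Warp::kN == TB::kN,
              "warps-per-block times the warp tile must equal the threadblock tile");
} // namespace editorial_tile_check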
//////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x64x8_16x16x1_4x2_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, 
benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x128x8_16x32x1_4x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x256x8_16x64x1_4x8_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x256x8_16x64x1_4x8_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : 
state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x64x8_32x16x1_4x4_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 64 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x128x8_32x32x1_8x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x128x8_32x32x1_8x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 64 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x256x8_32x64x1_8x8_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x256x8_32x64x1_8x8_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x128x8_64x32x1_8x8_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x128x8_64x32x1_8x8_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, 
cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x32x8_8x16x1_2x2_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x32x8_16x16x1_4x2_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; 
using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x64x8_16x32x1_4x4_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x32x8_32x16x1_4x4_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x64x8_32x32x1_8x4_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, 
precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x64x8_32x32x1_8x4_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x128x8_32x64x1_8x8_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x128x8_32x64x1_8x8_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 256 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_256x32x8_64x16x1_8x4_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>; using WarpShape = 
cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_256x32x8_64x16x1_8x4_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 256 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_256x64x8_64x32x1_8x8_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_256x64x8_64x32x1_8x8_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 32 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using 
precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x64x16_8x16x1_2x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 32 x 128 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_32x128x16_8x32x1_2x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 64 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL 
>= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x32x16_16x8x1_2x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x64x8_16x16x1_4x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif 
//////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x128x8_16x32x1_4x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x128x8_16x32x1_4x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x256x8_16x64x1_4x8_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, 
benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_64x256x8_16x64x1_4x8_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 128 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x32x16_32x8x1_4x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x64x8_32x16x1_4x4_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto 
_ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x64x8_32x16x1_4x4_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x128x8_32x32x1_8x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_128x128x8_32x32x1_8x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 256 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_256x64x8_64x16x1_8x4_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, 
// cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nn_n_256x64x8_64x16x1_8x4_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif
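The SM50 benchmark bodies above are identical except for their ThreadblockShape/WarpShape pair. As an editorial illustration only (not part of the generated suite), here is a minimal sketch of how that shared body could be factored into one helper template, using only the types and the BenchHarness interface already used in this file; the helper name bench_or_and_ssrgemm_nn_n is hypothetical.

template <typename ThreadblockShape, typename WarpShape>
static void bench_or_and_ssrgemm_nn_n(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<
      precision, precision, precision, precision, OpClass,
      cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<
      AddOp, MultOp,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, OpClass, SmArch,
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // identical body to the expanded benchmarks above
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    cudaDeviceSynchronize();
  }
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] = benchmark::Counter(
      flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}

Each expanded benchmark would then reduce to instantiating this helper with its tile-shape pair and registering it with the same BENCHMARK(...)->RangeMultiplier(2)->Range(256, 4096) call.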
6ff0178749d38e789513e01aed86b2db3fef95c2.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>

/*
 * Currently, `initializeElementsTo`, if executed in a thread whose
 * `i` is calculated to be greater than `N`, will try to access a value
 * outside the range of `a`.
 *
 * Refactor the kernel definition to prevent out of range accesses.
 */

__global__ void initializeElementsTo(int initialValue, int *a, int N)
{
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  a[i] = initialValue;
}

int main()
{
  /*
   * Do not modify `N`.
   */
  int N = 1000;

  int *a;
  size_t size = N * sizeof(int);

  hipMallocManaged(&a, size);

  /*
   * Assume we have reason to want the number of threads
   * fixed at `256`: do not modify `threads_per_block`.
   */
  int threads_per_block = 256;

  /*
   * Assign a value to `number_of_blocks` that will
   * allow for a working execution configuration given
   * the fixed values for `N` and `threads_per_block`.
   */
  int number_of_blocks = 0;

  int initialValue = 6;

  initializeElementsTo<<<number_of_blocks, threads_per_block>>>(initialValue, a, N);
  hipDeviceSynchronize();

  /*
   * Check to make sure all values in `a` were initialized.
   */
  for (int i = 0; i < N; ++i)
  {
    if (a[i] != initialValue)
    {
      printf("FAILURE: target value: %d\t a[%d]: %d\n", initialValue, i, a[i]);
      exit(1);
    }
  }
  printf("SUCCESS!\n");

  hipFree(a);
}
6ff0178749d38e789513e01aed86b2db3fef95c2.cu
#include <stdio.h>
#include <device_launch_parameters.h>
#include <cuda_runtime.h>

/*
 * Currently, `initializeElementsTo`, if executed in a thread whose
 * `i` is calculated to be greater than `N`, will try to access a value
 * outside the range of `a`.
 *
 * Refactor the kernel definition to prevent out of range accesses.
 */

__global__ void initializeElementsTo(int initialValue, int *a, int N)
{
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  a[i] = initialValue;
}

int main()
{
  /*
   * Do not modify `N`.
   */
  int N = 1000;

  int *a;
  size_t size = N * sizeof(int);

  cudaMallocManaged(&a, size);

  /*
   * Assume we have reason to want the number of threads
   * fixed at `256`: do not modify `threads_per_block`.
   */
  int threads_per_block = 256;

  /*
   * Assign a value to `number_of_blocks` that will
   * allow for a working execution configuration given
   * the fixed values for `N` and `threads_per_block`.
   */
  int number_of_blocks = 0;

  int initialValue = 6;

  initializeElementsTo<<<number_of_blocks, threads_per_block>>>(initialValue, a, N);
  cudaDeviceSynchronize();

  /*
   * Check to make sure all values in `a` were initialized.
   */
  for (int i = 0; i < N; ++i)
  {
    if (a[i] != initialValue)
    {
      printf("FAILURE: target value: %d\t a[%d]: %d\n", initialValue, i, a[i]);
      exit(1);
    }
  }
  printf("SUCCESS!\n");

  cudaFree(a);
}
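The comments in the exercise above ask for a bounds guard and a working block count. A minimal sketch of the usual idiom follows, assuming the file's fixed N = 1000 and threads_per_block = 256; the kernel name initializeElementsToGuarded is hypothetical, and this is not necessarily the intended solution.

// Sketch only: guard + ceiling-division grid sizing.
__global__ void initializeElementsToGuarded(int initialValue, int *a, int N)
{
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i < N)  // threads whose index falls past the end of `a` simply do nothing
  {
    a[i] = initialValue;
  }
}

// Inside main(), enough blocks to cover all N elements:
//   number_of_blocks = ceil(N / threads_per_block)
// With N = 1000 and 256 threads per block that is 4 blocks (1024 threads, 24 of them idle).
int number_of_blocks = (N + threads_per_block - 1) / threads_per_block;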
bf775d5e8d518cc4beb57283fc55b670ed13e489.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * http://github.com/dusty-nv/jetson-inference */ #include "cuda/cudaUtility.h" #include <iostream> // gpuPreImageNet __global__ void gpuPreImageNet( float2 scale, float4* input, int iWidth, float* output, int oWidth, int oHeight ) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; const int n = oWidth * oHeight; if( x >= oWidth || y >= oHeight ) return; const int dx = ((float)x * scale.x); const int dy = ((float)y * scale.y); const float4 px = input[ dy * iWidth + dx ]; const float3 bgr = make_float3(px.z, px.y, px.x); output[n * 0 + y * oWidth + x] = bgr.x; output[n * 1 + y * oWidth + x] = bgr.y; output[n * 2 + y * oWidth + x] = bgr.z; } // cudaPreImageNet hipError_t cudaPreImageNet( float4* input, size_t inputWidth, size_t inputHeight, float* output, size_t outputWidth, size_t outputHeight ) { if( !input || !output ) return hipErrorInvalidDevicePointer; if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 ) return hipErrorInvalidValue; const float2 scale = make_float2( float(inputWidth) / float(outputWidth), float(inputHeight) / float(outputHeight) ); // launch kernel const dim3 blockDim(8, 8); const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y)); hipLaunchKernelGGL(( gpuPreImageNet), dim3(gridDim), dim3(blockDim), 0, 0, scale, input, inputWidth, output, outputWidth, outputHeight); return CUDA(hipGetLastError()); } // gpuPreImageNetMean __global__ void gpuPreImageNetMean( float2 scale, float3* input, int iWidth, float* output, int oWidth, int oHeight, float3 mean_value ) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; const int n = oWidth * oHeight; if( x >= oWidth || y >= oHeight ) return; const int dx = ((float)x * scale.x); const int dy = ((float)y * scale.y); const float3 px = input[ dy * iWidth + dx ]; const float3 bgr = make_float3(px.z - mean_value.x, px.y - mean_value.y, px.x - mean_value.z); output[n * 0 + y * oWidth + x] = bgr.x; output[n * 1 + y * oWidth + x] = bgr.y; output[n * 2 + y * oWidth + x] = bgr.z; } // cudaPreImageNetMean hipError_t cudaPreImageNetMean( float3* input, size_t inputWidth, size_t inputHeight, float* output, size_t outputWidth, size_t outputHeight, const float3& mean_value ) { if( !input || !output ){ std::cout << "error here. "<< std::endl; return hipErrorInvalidDevicePointer; } if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 ){ std::cout << "Or here. 
" << std::endl; return hipErrorInvalidValue; } const float2 scale = make_float2( float(inputWidth) / float(outputWidth), float(inputHeight) / float(outputHeight) ); // launch kernel const dim3 blockDim(8, 8); const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y)); hipLaunchKernelGGL(( gpuPreImageNetMean), dim3(gridDim), dim3(blockDim), 0, 0, scale, input, inputWidth, output, outputWidth, outputHeight, mean_value); return CUDA(hipGetLastError()); } __global__ void kernel_extract_roi(float* input, float* output, char* mean, const int input_w, const int output_w, const int output_h, const int in_plane_r, const int in_plane_g, const int in_plane_b, const int out_plane_r, const int out_plane_g, const int out_plane_b, const int bbox_x, const int bbox_y, const int bbox_w, const int bbox_h) { uint x = blockIdx.x * blockDim.x + threadIdx.x; uint y = blockIdx.y * blockDim.y + threadIdx.y; if( x < output_w && y < output_h) { float r[2] = { float(x) * bbox_w / output_w + bbox_x, float(y) * bbox_h / output_h + bbox_y }; int pos[4][2] = { { int(floor(r[0])), int(floor(r[1])) }, { int( ceil(r[0])), int(floor(r[1])) }, { int(floor(r[0])), int(ceil(r[1])) }, { int( ceil(r[0])), int(ceil(r[1])) } }; float u = r[0]-floor(r[0]); float v = r[1]-floor(r[1]); float s[4] = { (1-u)*(1-v), u*(1-v), (1-u)*v, u*v }; int map[4] = { pos[0][1]*input_w + pos[0][0], pos[1][1]*input_w + pos[1][0], pos[2][1]*input_w + pos[2][0], pos[3][1]*input_w + pos[3][0]}; int idx = y * output_w + x; output[idx+out_plane_r] = round( s[0]*input[map[0]+in_plane_r] + s[1]*input[map[1]+in_plane_r] + s[2]*input[map[2]+in_plane_r] + s[3]*input[map[3]+in_plane_r] );// float(mean[idx+out_plane_r])); output[idx+out_plane_g] = round( s[0]*input[map[0]+in_plane_g] + s[1]*input[map[1]+in_plane_g] + s[2]*input[map[2]+in_plane_g] + s[3]*input[map[3]+in_plane_g] );//float(mean[idx+out_plane_g])); output[idx+out_plane_b] = round( s[0]*input[map[0]+in_plane_b] + s[1]*input[map[1]+in_plane_b] + s[2]*input[map[2]+in_plane_b] + s[3]*input[map[3]+in_plane_b] );//float(mean[idx+out_plane_b])); } } void convertROI(float* input, float* output, char* mean, const int* srcSize, const int* dstSize, const int* roi, hipStream_t stream) { int in_plane_r = 0; int in_plane_g = srcSize[1] * srcSize[2]; int in_plane_b = srcSize[1] * srcSize[2] * 2; int out_plane_r = 0; int out_plane_g = dstSize[1] * dstSize[2]; int out_plane_b = dstSize[1] * dstSize[2] * 2; int bbox_x = min(max(roi[0], 0), srcSize[2]-1); int bbox_y = min(max(roi[1], 0), srcSize[1]-1); int bbox_w = min(max(roi[2]-roi[0], 0), srcSize[2]-bbox_x-1 ); int bbox_h = min(max(roi[3]-roi[1], 0), srcSize[1]-bbox_y-1 ); dim3 dimBlock(32,32); dim3 dimGrid(dstSize[2]/dimBlock.x+1, dstSize[1]/dimBlock.y+1); std::cout << "ROI: " << bbox_x << " " << bbox_y << " " << bbox_w << " " << bbox_h << std::endl; hipLaunchKernelGGL(( kernel_extract_roi) , dim3(dimGrid), dim3(dimBlock), 0, stream , input, output, mean, srcSize[2], dstSize[2], dstSize[1], in_plane_r, in_plane_g, in_plane_b, out_plane_r, out_plane_g, out_plane_b, bbox_x, bbox_y, bbox_w, bbox_h); } __global__ void kernelSoftmax( float* x, int channels, float* y) { extern __shared__ float mem[]; __shared__ float sum_value; float number = *(x + blockDim.x*blockIdx.x + threadIdx.x); float number_exp = __expf(number); // sum_value += number_exp ; /* * * @TODO: Can do with the help of atomicAdd. * */ atomicAdd(&sum_value, number_exp); __syncthreads(); // mem[threadIdx.x] = number_exp; /* * * @TODO: Can do with the help of a for loop. 
Try different methods and find the time taken. * */ // float sum = 0.0f; // for (int i=0;i<channels;i++) // { // sum += mem[i]; // } y[blockDim.x*blockIdx.x + threadIdx.x] = __fdiv_rd(number_exp, sum_value); } void cudaSoftmax(int n, int channels, float* x, float*y) { hipLaunchKernelGGL(( kernelSoftmax), dim3((n/channels)), dim3(channels), channels*sizeof(float), 0, x, channels, y); hipDeviceSynchronize(); }
bf775d5e8d518cc4beb57283fc55b670ed13e489.cu
/* * http://github.com/dusty-nv/jetson-inference */ #include "cuda/cudaUtility.h" #include <iostream> // gpuPreImageNet __global__ void gpuPreImageNet( float2 scale, float4* input, int iWidth, float* output, int oWidth, int oHeight ) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; const int n = oWidth * oHeight; if( x >= oWidth || y >= oHeight ) return; const int dx = ((float)x * scale.x); const int dy = ((float)y * scale.y); const float4 px = input[ dy * iWidth + dx ]; const float3 bgr = make_float3(px.z, px.y, px.x); output[n * 0 + y * oWidth + x] = bgr.x; output[n * 1 + y * oWidth + x] = bgr.y; output[n * 2 + y * oWidth + x] = bgr.z; } // cudaPreImageNet cudaError_t cudaPreImageNet( float4* input, size_t inputWidth, size_t inputHeight, float* output, size_t outputWidth, size_t outputHeight ) { if( !input || !output ) return cudaErrorInvalidDevicePointer; if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 ) return cudaErrorInvalidValue; const float2 scale = make_float2( float(inputWidth) / float(outputWidth), float(inputHeight) / float(outputHeight) ); // launch kernel const dim3 blockDim(8, 8); const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y)); gpuPreImageNet<<<gridDim, blockDim>>>(scale, input, inputWidth, output, outputWidth, outputHeight); return CUDA(cudaGetLastError()); } // gpuPreImageNetMean __global__ void gpuPreImageNetMean( float2 scale, float3* input, int iWidth, float* output, int oWidth, int oHeight, float3 mean_value ) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; const int n = oWidth * oHeight; if( x >= oWidth || y >= oHeight ) return; const int dx = ((float)x * scale.x); const int dy = ((float)y * scale.y); const float3 px = input[ dy * iWidth + dx ]; const float3 bgr = make_float3(px.z - mean_value.x, px.y - mean_value.y, px.x - mean_value.z); output[n * 0 + y * oWidth + x] = bgr.x; output[n * 1 + y * oWidth + x] = bgr.y; output[n * 2 + y * oWidth + x] = bgr.z; } // cudaPreImageNetMean cudaError_t cudaPreImageNetMean( float3* input, size_t inputWidth, size_t inputHeight, float* output, size_t outputWidth, size_t outputHeight, const float3& mean_value ) { if( !input || !output ){ std::cout << "error here. "<< std::endl; return cudaErrorInvalidDevicePointer; } if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 ){ std::cout << "Or here. 
" << std::endl; return cudaErrorInvalidValue; } const float2 scale = make_float2( float(inputWidth) / float(outputWidth), float(inputHeight) / float(outputHeight) ); // launch kernel const dim3 blockDim(8, 8); const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y)); gpuPreImageNetMean<<<gridDim, blockDim>>>(scale, input, inputWidth, output, outputWidth, outputHeight, mean_value); return CUDA(cudaGetLastError()); } __global__ void kernel_extract_roi(float* input, float* output, char* mean, const int input_w, const int output_w, const int output_h, const int in_plane_r, const int in_plane_g, const int in_plane_b, const int out_plane_r, const int out_plane_g, const int out_plane_b, const int bbox_x, const int bbox_y, const int bbox_w, const int bbox_h) { uint x = blockIdx.x * blockDim.x + threadIdx.x; uint y = blockIdx.y * blockDim.y + threadIdx.y; if( x < output_w && y < output_h) { float r[2] = { float(x) * bbox_w / output_w + bbox_x, float(y) * bbox_h / output_h + bbox_y }; int pos[4][2] = { { int(floor(r[0])), int(floor(r[1])) }, { int( ceil(r[0])), int(floor(r[1])) }, { int(floor(r[0])), int(ceil(r[1])) }, { int( ceil(r[0])), int(ceil(r[1])) } }; float u = r[0]-floor(r[0]); float v = r[1]-floor(r[1]); float s[4] = { (1-u)*(1-v), u*(1-v), (1-u)*v, u*v }; int map[4] = { pos[0][1]*input_w + pos[0][0], pos[1][1]*input_w + pos[1][0], pos[2][1]*input_w + pos[2][0], pos[3][1]*input_w + pos[3][0]}; int idx = y * output_w + x; output[idx+out_plane_r] = round( s[0]*input[map[0]+in_plane_r] + s[1]*input[map[1]+in_plane_r] + s[2]*input[map[2]+in_plane_r] + s[3]*input[map[3]+in_plane_r] );// float(mean[idx+out_plane_r])); output[idx+out_plane_g] = round( s[0]*input[map[0]+in_plane_g] + s[1]*input[map[1]+in_plane_g] + s[2]*input[map[2]+in_plane_g] + s[3]*input[map[3]+in_plane_g] );//float(mean[idx+out_plane_g])); output[idx+out_plane_b] = round( s[0]*input[map[0]+in_plane_b] + s[1]*input[map[1]+in_plane_b] + s[2]*input[map[2]+in_plane_b] + s[3]*input[map[3]+in_plane_b] );//float(mean[idx+out_plane_b])); } } void convertROI(float* input, float* output, char* mean, const int* srcSize, const int* dstSize, const int* roi, cudaStream_t stream) { int in_plane_r = 0; int in_plane_g = srcSize[1] * srcSize[2]; int in_plane_b = srcSize[1] * srcSize[2] * 2; int out_plane_r = 0; int out_plane_g = dstSize[1] * dstSize[2]; int out_plane_b = dstSize[1] * dstSize[2] * 2; int bbox_x = min(max(roi[0], 0), srcSize[2]-1); int bbox_y = min(max(roi[1], 0), srcSize[1]-1); int bbox_w = min(max(roi[2]-roi[0], 0), srcSize[2]-bbox_x-1 ); int bbox_h = min(max(roi[3]-roi[1], 0), srcSize[1]-bbox_y-1 ); dim3 dimBlock(32,32); dim3 dimGrid(dstSize[2]/dimBlock.x+1, dstSize[1]/dimBlock.y+1); std::cout << "ROI: " << bbox_x << " " << bbox_y << " " << bbox_w << " " << bbox_h << std::endl; kernel_extract_roi <<< dimGrid, dimBlock, 0, stream >>> (input, output, mean, srcSize[2], dstSize[2], dstSize[1], in_plane_r, in_plane_g, in_plane_b, out_plane_r, out_plane_g, out_plane_b, bbox_x, bbox_y, bbox_w, bbox_h); } __global__ void kernelSoftmax( float* x, int channels, float* y) { extern __shared__ float mem[]; __shared__ float sum_value; float number = *(x + blockDim.x*blockIdx.x + threadIdx.x); float number_exp = __expf(number); // sum_value += number_exp ; /* * * @TODO: Can do with the help of atomicAdd. * */ atomicAdd(&sum_value, number_exp); __syncthreads(); // mem[threadIdx.x] = number_exp; /* * * @TODO: Can do with the help of a for loop. Try different methods and find the time taken. 
* */ // float sum = 0.0f; // for (int i=0;i<channels;i++) // { // sum += mem[i]; // } y[blockDim.x*blockIdx.x + threadIdx.x] = __fdiv_rd(number_exp, sum_value); } void cudaSoftmax(int n, int channels, float* x, float*y) { kernelSoftmax<<< (n/channels), channels, channels*sizeof(float)>>>( x, channels, y); cudaDeviceSynchronize(); }
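The @TODO comments in kernelSoftmax above mention two ways of forming the per-block sum: the atomicAdd into `sum_value` that the kernel currently uses, and the commented-out loop over the dynamically sized shared buffer `mem`. Below is a sketch of that second, loop-based variant, assuming the same launch shape cudaSoftmax uses ((n/channels) blocks of `channels` threads with channels * sizeof(float) bytes of shared memory); the kernel name is hypothetical and plain division stands in for __fdiv_rd.

__global__ void kernelSoftmaxSharedLoop(float* x, int channels, float* y)
{
    extern __shared__ float mem[];

    float number     = x[blockDim.x * blockIdx.x + threadIdx.x];
    float number_exp = __expf(number);

    // each thread publishes its exponential to shared memory
    mem[threadIdx.x] = number_exp;
    __syncthreads();

    // every thread walks the block's exponentials: O(channels) work per thread,
    // but no atomics and no uninitialized accumulator
    float sum = 0.0f;
    for (int i = 0; i < channels; i++)
        sum += mem[i];

    y[blockDim.x * blockIdx.x + threadIdx.x] = number_exp / sum;
}

For the atomicAdd path in the original kernel, `sum_value` would also normally be zeroed by one thread (followed by a __syncthreads()) before the atomics, since __shared__ variables are not zero-initialized.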
83a258ac5d659692656f3f5cef334016624f4a07.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2021, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************<GINKGO LICENSE>*******************************/ #include "core/factorization/par_ilut_kernels.hpp" #include <algorithm> #include <ginkgo/core/base/array.hpp> #include <ginkgo/core/base/math.hpp> #include <ginkgo/core/matrix/coo.hpp> #include <ginkgo/core/matrix/csr.hpp> #include <ginkgo/core/matrix/dense.hpp> #include "core/components/prefix_sum.hpp" #include "core/matrix/coo_builder.hpp" #include "core/matrix/csr_builder.hpp" #include "core/matrix/csr_kernels.hpp" #include "core/synthesizer/implementation_selection.hpp" #include "cuda/base/config.hpp" #include "cuda/base/math.hpp" #include "cuda/base/types.hpp" #include "cuda/components/atomic.cuh" #include "cuda/components/cooperative_groups.cuh" #include "cuda/components/intrinsics.cuh" #include "cuda/components/prefix_sum.cuh" #include "cuda/components/sorting.cuh" #include "cuda/components/thread_ids.cuh" #include "cuda/factorization/par_ilut_select_common.cuh" namespace gko { namespace kernels { namespace cuda { /** * @brief The parallel ILUT factorization namespace. 
* * @ingroup factor */ namespace par_ilut_factorization { // subwarp sizes for filter kernels using compiled_kernels = syn::value_list<int, 1, 2, 4, 8, 16, 32, config::warp_size>; #include "common/cuda_hip/factorization/par_ilut_filter_kernels.hpp.inc" #include "common/cuda_hip/factorization/par_ilut_select_kernels.hpp.inc" template <int subwarp_size, typename ValueType, typename IndexType> void threshold_filter_approx(syn::value_list<int, subwarp_size>, std::shared_ptr<const DefaultExecutor> exec, const matrix::Csr<ValueType, IndexType>* m, IndexType rank, Array<ValueType>* tmp, remove_complex<ValueType>* threshold, matrix::Csr<ValueType, IndexType>* m_out, matrix::Coo<ValueType, IndexType>* m_out_coo) { auto values = m->get_const_values(); IndexType size = m->get_num_stored_elements(); using AbsType = remove_complex<ValueType>; constexpr auto bucket_count = kernel::searchtree_width; auto max_num_threads = ceildiv(size, items_per_thread); auto max_num_blocks = ceildiv(max_num_threads, default_block_size); size_type tmp_size_totals = ceildiv((bucket_count + 1) * sizeof(IndexType), sizeof(ValueType)); size_type tmp_size_partials = ceildiv( bucket_count * max_num_blocks * sizeof(IndexType), sizeof(ValueType)); size_type tmp_size_oracles = ceildiv(size * sizeof(unsigned char), sizeof(ValueType)); size_type tmp_size_tree = ceildiv(kernel::searchtree_size * sizeof(AbsType), sizeof(ValueType)); size_type tmp_size = tmp_size_totals + tmp_size_partials + tmp_size_oracles + tmp_size_tree; tmp->resize_and_reset(tmp_size); auto total_counts = reinterpret_cast<IndexType*>(tmp->get_data()); auto partial_counts = reinterpret_cast<IndexType*>(tmp->get_data() + tmp_size_totals); auto oracles = reinterpret_cast<unsigned char*>( tmp->get_data() + tmp_size_totals + tmp_size_partials); auto tree = reinterpret_cast<AbsType*>(tmp->get_data() + tmp_size_totals + tmp_size_partials + tmp_size_oracles); sampleselect_count(exec, values, size, tree, oracles, partial_counts, total_counts); // determine bucket with correct rank auto bucket = static_cast<unsigned char>( sampleselect_find_bucket(exec, total_counts, rank).idx); *threshold = exec->copy_val_to_host(tree + kernel::searchtree_inner_size + bucket); // we implicitly set the first splitter to -inf, but 0 works as well if (bucket == 0) { *threshold = zero<AbsType>(); } // filter the elements auto old_row_ptrs = m->get_const_row_ptrs(); auto old_col_idxs = m->get_const_col_idxs(); auto old_vals = m->get_const_values(); // compute nnz for each row auto num_rows = static_cast<IndexType>(m->get_size()[0]); auto block_size = default_block_size / subwarp_size; auto num_blocks = ceildiv(num_rows, block_size); auto new_row_ptrs = m_out->get_row_ptrs(); hipLaunchKernelGGL(( kernel::bucket_filter_nnz<subwarp_size>), dim3(num_blocks), dim3(default_block_size), 0, 0, old_row_ptrs, oracles, num_rows, bucket, new_row_ptrs); // build row pointers components::prefix_sum(exec, new_row_ptrs, num_rows + 1); // build matrix auto new_nnz = exec->copy_val_to_host(new_row_ptrs + num_rows); // resize arrays and update aliases matrix::CsrBuilder<ValueType, IndexType> builder{m_out}; builder.get_col_idx_array().resize_and_reset(new_nnz); builder.get_value_array().resize_and_reset(new_nnz); auto new_col_idxs = m_out->get_col_idxs(); auto new_vals = m_out->get_values(); IndexType* new_row_idxs{}; if (m_out_coo) { matrix::CooBuilder<ValueType, IndexType> coo_builder{m_out_coo}; coo_builder.get_row_idx_array().resize_and_reset(new_nnz); coo_builder.get_col_idx_array() = 
Array<IndexType>::view(exec, new_nnz, new_col_idxs); coo_builder.get_value_array() = Array<ValueType>::view(exec, new_nnz, new_vals); new_row_idxs = m_out_coo->get_row_idxs(); } hipLaunchKernelGGL(( kernel::bucket_filter<subwarp_size>), dim3(num_blocks), dim3(default_block_size), 0, 0, old_row_ptrs, old_col_idxs, as_cuda_type(old_vals), oracles, num_rows, bucket, new_row_ptrs, new_row_idxs, new_col_idxs, as_cuda_type(new_vals)); } GKO_ENABLE_IMPLEMENTATION_SELECTION(select_threshold_filter_approx, threshold_filter_approx); template <typename ValueType, typename IndexType> void threshold_filter_approx(std::shared_ptr<const DefaultExecutor> exec, const matrix::Csr<ValueType, IndexType>* m, IndexType rank, Array<ValueType>& tmp, remove_complex<ValueType>& threshold, matrix::Csr<ValueType, IndexType>* m_out, matrix::Coo<ValueType, IndexType>* m_out_coo) { auto num_rows = m->get_size()[0]; auto total_nnz = m->get_num_stored_elements(); auto total_nnz_per_row = total_nnz / num_rows; select_threshold_filter_approx( compiled_kernels(), [&](int compiled_subwarp_size) { return total_nnz_per_row <= compiled_subwarp_size || compiled_subwarp_size == config::warp_size; }, syn::value_list<int>(), syn::type_list<>(), exec, m, rank, &tmp, &threshold, m_out, m_out_coo); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_PAR_ILUT_THRESHOLD_FILTER_APPROX_KERNEL); } // namespace par_ilut_factorization } // namespace cuda } // namespace kernels } // namespace gko
83a258ac5d659692656f3f5cef334016624f4a07.cu
/*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2021, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************<GINKGO LICENSE>*******************************/ #include "core/factorization/par_ilut_kernels.hpp" #include <algorithm> #include <ginkgo/core/base/array.hpp> #include <ginkgo/core/base/math.hpp> #include <ginkgo/core/matrix/coo.hpp> #include <ginkgo/core/matrix/csr.hpp> #include <ginkgo/core/matrix/dense.hpp> #include "core/components/prefix_sum.hpp" #include "core/matrix/coo_builder.hpp" #include "core/matrix/csr_builder.hpp" #include "core/matrix/csr_kernels.hpp" #include "core/synthesizer/implementation_selection.hpp" #include "cuda/base/config.hpp" #include "cuda/base/math.hpp" #include "cuda/base/types.hpp" #include "cuda/components/atomic.cuh" #include "cuda/components/cooperative_groups.cuh" #include "cuda/components/intrinsics.cuh" #include "cuda/components/prefix_sum.cuh" #include "cuda/components/sorting.cuh" #include "cuda/components/thread_ids.cuh" #include "cuda/factorization/par_ilut_select_common.cuh" namespace gko { namespace kernels { namespace cuda { /** * @brief The parallel ILUT factorization namespace. 
* * @ingroup factor */ namespace par_ilut_factorization { // subwarp sizes for filter kernels using compiled_kernels = syn::value_list<int, 1, 2, 4, 8, 16, 32, config::warp_size>; #include "common/cuda_hip/factorization/par_ilut_filter_kernels.hpp.inc" #include "common/cuda_hip/factorization/par_ilut_select_kernels.hpp.inc" template <int subwarp_size, typename ValueType, typename IndexType> void threshold_filter_approx(syn::value_list<int, subwarp_size>, std::shared_ptr<const DefaultExecutor> exec, const matrix::Csr<ValueType, IndexType>* m, IndexType rank, Array<ValueType>* tmp, remove_complex<ValueType>* threshold, matrix::Csr<ValueType, IndexType>* m_out, matrix::Coo<ValueType, IndexType>* m_out_coo) { auto values = m->get_const_values(); IndexType size = m->get_num_stored_elements(); using AbsType = remove_complex<ValueType>; constexpr auto bucket_count = kernel::searchtree_width; auto max_num_threads = ceildiv(size, items_per_thread); auto max_num_blocks = ceildiv(max_num_threads, default_block_size); size_type tmp_size_totals = ceildiv((bucket_count + 1) * sizeof(IndexType), sizeof(ValueType)); size_type tmp_size_partials = ceildiv( bucket_count * max_num_blocks * sizeof(IndexType), sizeof(ValueType)); size_type tmp_size_oracles = ceildiv(size * sizeof(unsigned char), sizeof(ValueType)); size_type tmp_size_tree = ceildiv(kernel::searchtree_size * sizeof(AbsType), sizeof(ValueType)); size_type tmp_size = tmp_size_totals + tmp_size_partials + tmp_size_oracles + tmp_size_tree; tmp->resize_and_reset(tmp_size); auto total_counts = reinterpret_cast<IndexType*>(tmp->get_data()); auto partial_counts = reinterpret_cast<IndexType*>(tmp->get_data() + tmp_size_totals); auto oracles = reinterpret_cast<unsigned char*>( tmp->get_data() + tmp_size_totals + tmp_size_partials); auto tree = reinterpret_cast<AbsType*>(tmp->get_data() + tmp_size_totals + tmp_size_partials + tmp_size_oracles); sampleselect_count(exec, values, size, tree, oracles, partial_counts, total_counts); // determine bucket with correct rank auto bucket = static_cast<unsigned char>( sampleselect_find_bucket(exec, total_counts, rank).idx); *threshold = exec->copy_val_to_host(tree + kernel::searchtree_inner_size + bucket); // we implicitly set the first splitter to -inf, but 0 works as well if (bucket == 0) { *threshold = zero<AbsType>(); } // filter the elements auto old_row_ptrs = m->get_const_row_ptrs(); auto old_col_idxs = m->get_const_col_idxs(); auto old_vals = m->get_const_values(); // compute nnz for each row auto num_rows = static_cast<IndexType>(m->get_size()[0]); auto block_size = default_block_size / subwarp_size; auto num_blocks = ceildiv(num_rows, block_size); auto new_row_ptrs = m_out->get_row_ptrs(); kernel::bucket_filter_nnz<subwarp_size><<<num_blocks, default_block_size>>>( old_row_ptrs, oracles, num_rows, bucket, new_row_ptrs); // build row pointers components::prefix_sum(exec, new_row_ptrs, num_rows + 1); // build matrix auto new_nnz = exec->copy_val_to_host(new_row_ptrs + num_rows); // resize arrays and update aliases matrix::CsrBuilder<ValueType, IndexType> builder{m_out}; builder.get_col_idx_array().resize_and_reset(new_nnz); builder.get_value_array().resize_and_reset(new_nnz); auto new_col_idxs = m_out->get_col_idxs(); auto new_vals = m_out->get_values(); IndexType* new_row_idxs{}; if (m_out_coo) { matrix::CooBuilder<ValueType, IndexType> coo_builder{m_out_coo}; coo_builder.get_row_idx_array().resize_and_reset(new_nnz); coo_builder.get_col_idx_array() = Array<IndexType>::view(exec, new_nnz, new_col_idxs); 
coo_builder.get_value_array() = Array<ValueType>::view(exec, new_nnz, new_vals); new_row_idxs = m_out_coo->get_row_idxs(); } kernel::bucket_filter<subwarp_size><<<num_blocks, default_block_size>>>( old_row_ptrs, old_col_idxs, as_cuda_type(old_vals), oracles, num_rows, bucket, new_row_ptrs, new_row_idxs, new_col_idxs, as_cuda_type(new_vals)); } GKO_ENABLE_IMPLEMENTATION_SELECTION(select_threshold_filter_approx, threshold_filter_approx); template <typename ValueType, typename IndexType> void threshold_filter_approx(std::shared_ptr<const DefaultExecutor> exec, const matrix::Csr<ValueType, IndexType>* m, IndexType rank, Array<ValueType>& tmp, remove_complex<ValueType>& threshold, matrix::Csr<ValueType, IndexType>* m_out, matrix::Coo<ValueType, IndexType>* m_out_coo) { auto num_rows = m->get_size()[0]; auto total_nnz = m->get_num_stored_elements(); auto total_nnz_per_row = total_nnz / num_rows; select_threshold_filter_approx( compiled_kernels(), [&](int compiled_subwarp_size) { return total_nnz_per_row <= compiled_subwarp_size || compiled_subwarp_size == config::warp_size; }, syn::value_list<int>(), syn::type_list<>(), exec, m, rank, &tmp, &threshold, m_out, m_out_coo); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_PAR_ILUT_THRESHOLD_FILTER_APPROX_KERNEL); } // namespace par_ilut_factorization } // namespace cuda } // namespace kernels } // namespace gko
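The dispatch at the end of threshold_filter_approx above hands the predicate lambda to select_threshold_filter_approx, which appears to pick the first compiled subwarp size that satisfies it (average nonzeros per row <= subwarp size, falling back to a full warp). A standalone sketch of that selection rule, using the same candidate list as compiled_kernels; select_subwarp_size is a hypothetical helper for illustration, not Ginkgo API.

#include <array>

int select_subwarp_size(long total_nnz, long num_rows, int warp_size)
{
    // mirrors syn::value_list<int, 1, 2, 4, 8, 16, 32, config::warp_size>
    const std::array<int, 7> candidates{1, 2, 4, 8, 16, 32, warp_size};
    const long nnz_per_row = total_nnz / num_rows;
    for (int subwarp : candidates) {
        if (nnz_per_row <= subwarp || subwarp == warp_size) {
            return subwarp;  // smallest subwarp wide enough for a typical row
        }
    }
    return warp_size;
}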
f4b6a4ff45eaf647dc549389fcb34cc0b84864ab.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifndef PADDLE_WITH_HIP // To-do(qili93): fix this after issue resolved // https://github.com/ROCmSoftwarePlatform/rocPRIM/issues/202 #include <thrust/execution_policy.h> #include <thrust/random.h> #include <thrust/scan.h> #include <thrust/transform.h> #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/operators/multinomial_op.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/transform.h" namespace paddle { namespace operators { template <typename T> __global__ void NormalizeProbability(T* norm_probs, const T* in_data, T* sum_rows) { int id = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; PADDLE_ENFORCE( in_data[id] >= 0.0, "The input of multinomial distribution should be >= 0, but got %f.", in_data[id]); PADDLE_ENFORCE(sum_rows[blockIdx.y] > 0.0, "The sum of one multinomial distribution probability should " "be > 0, but got %f.", sum_rows[blockIdx.y]); norm_probs[id] = in_data[id] / sum_rows[blockIdx.y]; } template <typename T> __global__ void GetCumulativeProbs(T* norm_probs_data, int64_t num_distributions, int64_t num_categories, T* cumulative_probs) { for (int id = blockIdx.x; id < num_distributions; id += gridDim.x) { thrust::inclusive_scan(thrust::device, norm_probs_data + id * num_categories, norm_probs_data + (id + 1) * num_categories, cumulative_probs + id * num_categories); } } template <typename T> struct RandomGeneratorCudaFunctor { unsigned int seed_; __host__ __device__ RandomGeneratorCudaFunctor(int seed) : seed_(seed) {} __host__ __device__ T operator()(const unsigned int n) const { thrust::minstd_rand rng; rng.seed(seed_); thrust::uniform_real_distribution<T> dist(0.0, 1.0); rng.discard(n); return dist(rng); } }; template <typename T> __device__ int binarySearchFunctor(T* cumulative_probs, T* norm_probs_data, int num_categories, T rng_number) { int left = 0; int right = num_categories; while (right - left > 0) { int mid = left + (right - left) / 2; T temp_prob = cumulative_probs[mid]; if (temp_prob < rng_number) { left = mid + 1; } else { right = mid; } } if (left == num_categories) { left = num_categories - 1; } while (left >= 1 && norm_probs_data[left] == 0) left--; return left; } template <typename T> __global__ void sampleMultinomialWithReplacement( T* rng_data, const int64_t num_samples, int64_t* out_data, const int64_t num_distributions, const int64_t num_categories, T* cumulative_probs, T* norm_probs_data) { // use binary search to get the selected category sample id. // let cumulative_probs[id-1] < rng_data < cumulative_probs[id]. 
int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; // for every distribution for (int dist = blockIdx.y; dist < num_distributions; dist += gridDim.y) { // for every sample for (int sample = blockIdx.x * blockDim.x + threadIdx.x; sample < num_samples; sample += blockDim.x * gridDim.x) { T rng_number = rng_data[sample + dist * num_samples]; // Find the bucket that a uniform random number lies in int selected_category = binarySearchFunctor<T>( cumulative_probs + dist * num_categories, norm_probs_data + dist * num_categories, num_categories, rng_number); out_data[sample + dist * num_samples] = selected_category; } } } template <typename T> class MultinomialOpKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const auto x = ctx.Input<framework::Tensor>("X"); auto out = ctx.Output<framework::Tensor>("Out"); const int64_t num_samples = ctx.Attr<int>("num_samples"); const bool replacement = ctx.Attr<bool>("replacement"); auto* in_data = x->data<T>(); int64_t* out_data = out->mutable_data<int64_t>(ctx.GetPlace()); auto in_dims = x->dims(); int64_t in_rank = in_dims.size(); const int64_t num_categories = in_dims[in_rank - 1]; const int64_t num_distributions = in_rank > 1 ? in_dims[in_rank - 2] : 1; // If replacement is False, it's not a replaceable sample. Every category // can // be used only once. So after every sample, probability of the distribution // will change. The implementation can't be parallelizable. Thus, call CPU // implementation ``MultinomialFunctor`` to sample the distribution. if (!replacement) { int64_t in_data_numel = x->numel(); int64_t out_data_numel = out->numel(); T* cpu_in_data = new T[in_data_numel]; int64_t* cpu_out_data = new int64_t[out_data_numel]; #ifdef PADDLE_WITH_HIP hipMemcpy(cpu_in_data, in_data, in_data_numel * sizeof(T), hipMemcpyDeviceToHost); #else hipMemcpy(cpu_in_data, in_data, in_data_numel * sizeof(T), hipMemcpyDeviceToHost); #endif MultinomialFunctor<T>(cpu_out_data, cpu_in_data, num_samples, replacement, num_categories, num_distributions); #ifdef PADDLE_WITH_HIP hipMemcpy(out_data, cpu_out_data, out_data_numel * sizeof(int64_t), hipMemcpyHostToDevice); #else hipMemcpy(out_data, cpu_out_data, out_data_numel * sizeof(int64_t), hipMemcpyHostToDevice); #endif delete[] cpu_in_data; delete[] cpu_out_data; return; } // Sum of input may not be 1. To get probability in range [0, 1], calculate // sum of each row of input, and then use the sum to normalize the input. // sum_row_data: sum of each row framework::Tensor sum_rows_tensor; auto* sum_rows_data = sum_rows_tensor.mutable_data<T>({num_distributions}, ctx.GetPlace()); auto& place = *ctx.template device_context<platform::CUDADeviceContext>() .eigen_device(); if (num_distributions == 1) { auto eigen_input = framework::EigenVector<T>::Flatten(*x); auto eigen_sum_rows = framework::EigenVector<T>::Flatten(sum_rows_tensor); eigen_sum_rows.device(place) = eigen_input.sum(Eigen::DSizes<int, 1>(1)) .eval() .reshape(Eigen::DSizes<int, 1>(sum_rows_tensor.dims()[0])); } else { auto eigen_input = framework::EigenMatrix<T>::From(*x); auto eigen_sum_rows = framework::EigenVector<T>::Flatten(sum_rows_tensor); eigen_sum_rows.device(place) = eigen_input.sum(Eigen::DSizes<int, 1>(1)); } // Normalize row of each distribution to get the probability in range [0, // 1]. 
// norm_probs_data: probability of the distribution framework::Tensor norm_probs_tensor; auto* norm_probs_data = norm_probs_tensor.mutable_data<T>( {num_distributions, num_categories}, ctx.GetPlace()); // number of threads in a block is min(num_categories, 512) dim3 block_norm(num_categories < 512 ? num_categories : 512); dim3 grid_norm((num_categories - 1) / block_norm.x + 1, num_distributions); hipLaunchKernelGGL(( NormalizeProbability< T>), dim3(grid_norm), dim3(block_norm), 0, ctx.cuda_device_context().stream(), norm_probs_data, in_data, sum_rows_data); // Get cumulative probability of each distribution. It's the same function // of // ``cumsum`` op. framework::Tensor cumulative_probs_tensor; auto* cumulative_probs = cumulative_probs_tensor.mutable_data<T>( {num_distributions, num_categories}, ctx.GetPlace()); dim3 block_cumsum(1); dim3 grid_cumsum(num_distributions); hipLaunchKernelGGL(( GetCumulativeProbs<T>), dim3(grid_cumsum), dim3(block_cumsum), 0, ctx.cuda_device_context().stream(), norm_probs_data, num_distributions, num_categories, cumulative_probs); // Generate random number for each sample. std::random_device rd; auto seed = rd(); framework::Tensor rng_data_tensor; auto* rng_data = rng_data_tensor.mutable_data<T>( {num_distributions, num_samples}, ctx.GetPlace()); thrust::counting_iterator<unsigned int> index_sequence_begin(0); platform::Transform<platform::CUDADeviceContext> trans; auto* context = static_cast<const platform::CUDADeviceContext*>(&ctx.device_context()); trans(*context, index_sequence_begin, index_sequence_begin + num_distributions * num_samples, rng_data, RandomGeneratorCudaFunctor<T>(seed)); // Sample the multinomial distributions. dim3 block_sample(128); dim3 grid_sample((num_samples - 1) / block_sample.x + 1, num_distributions); hipLaunchKernelGGL(( sampleMultinomialWithReplacement<T>), dim3(grid_sample), dim3(block_sample), 0, ctx.cuda_device_context().stream(), rng_data, num_samples, out_data, num_distributions, num_categories, cumulative_probs, norm_probs_data); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( multinomial, ops::MultinomialOpKernel<plat::CUDADeviceContext, double>, ops::MultinomialOpKernel<plat::CUDADeviceContext, float>); #endif
f4b6a4ff45eaf647dc549389fcb34cc0b84864ab.cu
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifndef PADDLE_WITH_HIP // To-do(qili93): fix this after issue resolved // https://github.com/ROCmSoftwarePlatform/rocPRIM/issues/202 #include <thrust/execution_policy.h> #include <thrust/random.h> #include <thrust/scan.h> #include <thrust/transform.h> #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/operators/multinomial_op.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/transform.h" namespace paddle { namespace operators { template <typename T> __global__ void NormalizeProbability(T* norm_probs, const T* in_data, T* sum_rows) { int id = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; PADDLE_ENFORCE( in_data[id] >= 0.0, "The input of multinomial distribution should be >= 0, but got %f.", in_data[id]); PADDLE_ENFORCE(sum_rows[blockIdx.y] > 0.0, "The sum of one multinomial distribution probability should " "be > 0, but got %f.", sum_rows[blockIdx.y]); norm_probs[id] = in_data[id] / sum_rows[blockIdx.y]; } template <typename T> __global__ void GetCumulativeProbs(T* norm_probs_data, int64_t num_distributions, int64_t num_categories, T* cumulative_probs) { for (int id = blockIdx.x; id < num_distributions; id += gridDim.x) { thrust::inclusive_scan(thrust::device, norm_probs_data + id * num_categories, norm_probs_data + (id + 1) * num_categories, cumulative_probs + id * num_categories); } } template <typename T> struct RandomGeneratorCudaFunctor { unsigned int seed_; __host__ __device__ RandomGeneratorCudaFunctor(int seed) : seed_(seed) {} __host__ __device__ T operator()(const unsigned int n) const { thrust::minstd_rand rng; rng.seed(seed_); thrust::uniform_real_distribution<T> dist(0.0, 1.0); rng.discard(n); return dist(rng); } }; template <typename T> __device__ int binarySearchFunctor(T* cumulative_probs, T* norm_probs_data, int num_categories, T rng_number) { int left = 0; int right = num_categories; while (right - left > 0) { int mid = left + (right - left) / 2; T temp_prob = cumulative_probs[mid]; if (temp_prob < rng_number) { left = mid + 1; } else { right = mid; } } if (left == num_categories) { left = num_categories - 1; } while (left >= 1 && norm_probs_data[left] == 0) left--; return left; } template <typename T> __global__ void sampleMultinomialWithReplacement( T* rng_data, const int64_t num_samples, int64_t* out_data, const int64_t num_distributions, const int64_t num_categories, T* cumulative_probs, T* norm_probs_data) { // use binary search to get the selected category sample id. // let cumulative_probs[id-1] < rng_data < cumulative_probs[id]. 
int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; // for every distribution for (int dist = blockIdx.y; dist < num_distributions; dist += gridDim.y) { // for every sample for (int sample = blockIdx.x * blockDim.x + threadIdx.x; sample < num_samples; sample += blockDim.x * gridDim.x) { T rng_number = rng_data[sample + dist * num_samples]; // Find the bucket that a uniform random number lies in int selected_category = binarySearchFunctor<T>( cumulative_probs + dist * num_categories, norm_probs_data + dist * num_categories, num_categories, rng_number); out_data[sample + dist * num_samples] = selected_category; } } } template <typename T> class MultinomialOpKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const auto x = ctx.Input<framework::Tensor>("X"); auto out = ctx.Output<framework::Tensor>("Out"); const int64_t num_samples = ctx.Attr<int>("num_samples"); const bool replacement = ctx.Attr<bool>("replacement"); auto* in_data = x->data<T>(); int64_t* out_data = out->mutable_data<int64_t>(ctx.GetPlace()); auto in_dims = x->dims(); int64_t in_rank = in_dims.size(); const int64_t num_categories = in_dims[in_rank - 1]; const int64_t num_distributions = in_rank > 1 ? in_dims[in_rank - 2] : 1; // If replacement is False, it's not a replaceable sample. Every category // can // be used only once. So after every sample, probability of the distribution // will change. The implementation can't be parallelizable. Thus, call CPU // implementation ``MultinomialFunctor`` to sample the distribution. if (!replacement) { int64_t in_data_numel = x->numel(); int64_t out_data_numel = out->numel(); T* cpu_in_data = new T[in_data_numel]; int64_t* cpu_out_data = new int64_t[out_data_numel]; #ifdef PADDLE_WITH_HIP hipMemcpy(cpu_in_data, in_data, in_data_numel * sizeof(T), hipMemcpyDeviceToHost); #else cudaMemcpy(cpu_in_data, in_data, in_data_numel * sizeof(T), cudaMemcpyDeviceToHost); #endif MultinomialFunctor<T>(cpu_out_data, cpu_in_data, num_samples, replacement, num_categories, num_distributions); #ifdef PADDLE_WITH_HIP hipMemcpy(out_data, cpu_out_data, out_data_numel * sizeof(int64_t), hipMemcpyHostToDevice); #else cudaMemcpy(out_data, cpu_out_data, out_data_numel * sizeof(int64_t), cudaMemcpyHostToDevice); #endif delete[] cpu_in_data; delete[] cpu_out_data; return; } // Sum of input may not be 1. To get probability in range [0, 1], calculate // sum of each row of input, and then use the sum to normalize the input. // sum_row_data: sum of each row framework::Tensor sum_rows_tensor; auto* sum_rows_data = sum_rows_tensor.mutable_data<T>({num_distributions}, ctx.GetPlace()); auto& place = *ctx.template device_context<platform::CUDADeviceContext>() .eigen_device(); if (num_distributions == 1) { auto eigen_input = framework::EigenVector<T>::Flatten(*x); auto eigen_sum_rows = framework::EigenVector<T>::Flatten(sum_rows_tensor); eigen_sum_rows.device(place) = eigen_input.sum(Eigen::DSizes<int, 1>(1)) .eval() .reshape(Eigen::DSizes<int, 1>(sum_rows_tensor.dims()[0])); } else { auto eigen_input = framework::EigenMatrix<T>::From(*x); auto eigen_sum_rows = framework::EigenVector<T>::Flatten(sum_rows_tensor); eigen_sum_rows.device(place) = eigen_input.sum(Eigen::DSizes<int, 1>(1)); } // Normalize row of each distribution to get the probability in range [0, // 1]. 
// norm_probs_data: probability of the distribution framework::Tensor norm_probs_tensor; auto* norm_probs_data = norm_probs_tensor.mutable_data<T>( {num_distributions, num_categories}, ctx.GetPlace()); // number of threads in a block is min(num_categories, 512) dim3 block_norm(num_categories < 512 ? num_categories : 512); dim3 grid_norm((num_categories - 1) / block_norm.x + 1, num_distributions); NormalizeProbability< T><<<grid_norm, block_norm, 0, ctx.cuda_device_context().stream()>>>( norm_probs_data, in_data, sum_rows_data); // Get cumulative probability of each distribution. It's the same function // of // ``cumsum`` op. framework::Tensor cumulative_probs_tensor; auto* cumulative_probs = cumulative_probs_tensor.mutable_data<T>( {num_distributions, num_categories}, ctx.GetPlace()); dim3 block_cumsum(1); dim3 grid_cumsum(num_distributions); GetCumulativeProbs<T><<<grid_cumsum, block_cumsum, 0, ctx.cuda_device_context().stream()>>>( norm_probs_data, num_distributions, num_categories, cumulative_probs); // Generate random number for each sample. std::random_device rd; auto seed = rd(); framework::Tensor rng_data_tensor; auto* rng_data = rng_data_tensor.mutable_data<T>( {num_distributions, num_samples}, ctx.GetPlace()); thrust::counting_iterator<unsigned int> index_sequence_begin(0); platform::Transform<platform::CUDADeviceContext> trans; auto* context = static_cast<const platform::CUDADeviceContext*>(&ctx.device_context()); trans(*context, index_sequence_begin, index_sequence_begin + num_distributions * num_samples, rng_data, RandomGeneratorCudaFunctor<T>(seed)); // Sample the multinomial distributions. dim3 block_sample(128); dim3 grid_sample((num_samples - 1) / block_sample.x + 1, num_distributions); sampleMultinomialWithReplacement<T><<<grid_sample, block_sample, 0, ctx.cuda_device_context().stream()>>>( rng_data, num_samples, out_data, num_distributions, num_categories, cumulative_probs, norm_probs_data); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( multinomial, ops::MultinomialOpKernel<plat::CUDADeviceContext, double>, ops::MultinomialOpKernel<plat::CUDADeviceContext, float>); #endif
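The two multinomial files above implement sampling with replacement as: normalize the weights, take an inclusive scan to build a cumulative distribution, draw one uniform number per sample, and binary-search the cumulative array for the selected category. The following is a hypothetical host-side C++ sketch of that same inverse-CDF scheme (the function name sample_multinomial and the example weights are made up for illustration, not part of the Paddle operator); std::lower_bound plays the role that binarySearchFunctor plays in the device code.

#include <algorithm>
#include <cstdio>
#include <numeric>
#include <random>
#include <vector>

// Draw num_samples category indices from unnormalized weights, with replacement.
std::vector<int> sample_multinomial(const std::vector<double>& weights,
                                    int num_samples, unsigned seed) {
  // Normalize and build the cumulative distribution (what GetCumulativeProbs does on the GPU).
  double sum = std::accumulate(weights.begin(), weights.end(), 0.0);
  std::vector<double> cdf(weights.size());
  double running = 0.0;
  for (std::size_t i = 0; i < weights.size(); ++i) {
    running += weights[i] / sum;
    cdf[i] = running;
  }
  std::mt19937 rng(seed);
  std::uniform_real_distribution<double> uniform(0.0, 1.0);
  std::vector<int> out(num_samples);
  for (int s = 0; s < num_samples; ++s) {
    double u = uniform(rng);
    // First bucket whose cumulative probability is >= u, as in binarySearchFunctor.
    std::size_t idx = std::lower_bound(cdf.begin(), cdf.end(), u) - cdf.begin();
    if (idx >= cdf.size()) idx = cdf.size() - 1;  // guard against rounding past the end
    out[s] = static_cast<int>(idx);
  }
  return out;
}

int main() {
  for (int category : sample_multinomial({0.1, 0.2, 0.7}, 8, 42u)) std::printf("%d ", category);
  std::printf("\n");
  return 0;
}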
72f2a4ed6a2ca1ef093bc06dd1c217975b745556.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //#include "../defines.h" #include "../gpu.h" #include "b-l.h" //**************************** //TODO: copy to another place //**************************** // Assignment of initial conditions void data_initialization(ptr_Arrays HostArraysPtr, long int* t, consts def) { *t = 0; for (int i = 0; i < def.locNx; i++) for (int j = 0; j < def.locNy; j++) for (int k = 0; k < def.locNz; k++) if (is_active_point(i, j, k, def)) { int local = i + j * def.locNx + k * def.locNx * def.locNy; HostArraysPtr.m[local]=def.porosity[0]; HostArraysPtr.S_n[local] = def.Background_Sn; double ro_g_dy = (def.ro0_n * HostArraysPtr.S_n[local] + def.ro0_w * (1 - HostArraysPtr.S_n[local])) * (HostArraysPtr.m[local]) * (def.g_const) * (def.hy); if (j == 0) { HostArraysPtr.P_w[local] = def.Background_Pw;//def.P_atm; } else { HostArraysPtr.P_w[local] = HostArraysPtr.P_w[i + (j - 1) * def.locNx + k * def.locNx * def.locNy] + ro_g_dy; } /* // injection well if (is_injection_well(i, j, k, def)) { HostArraysPtr.P_w[local] = Injection_well_P(HostArraysPtr, i, j, k, def); } // production well if (is_output_well(i, j, k, def)) { HostArraysPtr.P_w[local] = Production_well_P(HostArraysPtr, i, j, k, def); } */ HostArraysPtr.ro_w[local] = def.ro0_w * (1. + (def.beta_w) * (HostArraysPtr.P_w[local] - def.P_atm)); HostArraysPtr.ro_n[local] = def.ro0_n * (1. + (def.beta_n) * (HostArraysPtr.P_w[local] - def.P_atm)); test_S(HostArraysPtr.S_n[local], __FILE__, __LINE__); test_positive(HostArraysPtr.P_w[local], __FILE__, __LINE__); test_positive(HostArraysPtr.m[local], __FILE__, __LINE__); } } //**************************** //TODO: copy to another place //**************************** // Calculation of relative permeabilities at a point __device__ void device_assing_k(double* k_w, double* k_n, double S_w) { /* // SPE formulation double S_wc = 0.2; double S_or = 0.2; double S_e = (S_w - S_wc) / (1. - S_wc - S_or); *k_w = S_e * S_e; *k_n = (1. - S_e) * (1. - S_e); if (S_w < S_wc) { *k_w = 0.; *k_n = 1.; } if (S_w > (1 - S_or)) { *k_w = 1.; *k_n = 0.; } */ // IPM formulation double S_sv = 0.1; double S_zv = 0.8; double S_1 = 0.70324; if ((S_sv<=S_w) && (S_w<=S_zv)) *k_n=((S_zv-S_w)/(S_zv-S_sv))*((S_zv-S_w)/(S_zv-S_sv)); else if ((0<=S_w) && (S_w<=S_sv)) *k_n=1.; else //S_zv<S<=1 *k_n=0.; if ((S_sv<=S_w) && (S_w<=S_1)) *k_w=((S_w-S_sv)/(S_zv-S_sv))*((S_w-S_sv)/(S_zv-S_sv)); else if ((0<=S_w) && (S_w<=S_sv)) *k_w=0.; else if ((S_1<=S_w) && (S_w<=S_zv)) *k_w=0.8*pow((S_w-S_sv)/(S_zv-S_sv), 0.5); else//S_zv<S<=1 *k_w=1.; device_test_S(*k_n, __FILE__, __LINE__); device_test_S(*k_w, __FILE__, __LINE__); } // Calculation of densities, NAPL pressure P2 and Xi at every grid point (independently of the other points) __global__ void assign_P_Xi_kernel(ptr_Arrays DevArraysPtr) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; int k = threadIdx.z + blockIdx.z * blockDim.z; if ((i < (gpu_def->locNx)) && (j < (gpu_def->locNy)) && (k < (gpu_def->locNz)) && (device_is_active_point(i, j, k) == 1)) { double k_w=0., k_n=0.; int local = i + j * (gpu_def->locNx) + k * (gpu_def->locNx) * (gpu_def->locNy); device_assing_k(&k_w, &k_n, 1.
- DevArraysPtr.S_n[local]); DevArraysPtr.P_n[local] = DevArraysPtr.P_w[local]; DevArraysPtr.Xi_w[local] = -1 * (DevArraysPtr.K[local]) * k_w / gpu_def->mu_w; DevArraysPtr.Xi_n[local] = -1 * (DevArraysPtr.K[local]) * k_n / gpu_def->mu_n; device_test_positive(DevArraysPtr.P_n[local], __FILE__, __LINE__); device_test_nan(DevArraysPtr.Xi_w[local], __FILE__, __LINE__); device_test_nan(DevArraysPtr.Xi_n[local], __FILE__, __LINE__); } } // Newton's method for every grid point (independently of the other points) __global__ void Newton_method_kernel(ptr_Arrays DevArraysPtr) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; int k = threadIdx.z + blockIdx.z * blockDim.z; if ((i < (gpu_def->locNx) - 1) && (j < gpu_def->locNy - 1) && (k < (gpu_def->locNz)) && (i != 0) && (j != 0) && (((k != 0) && (k != (gpu_def->locNz) - 1)) || ((gpu_def->locNz) < 2))) { int media = 0; double S_n, P_w, F1, F2, F1P, F2P, F1S, F2S, det; int local = i + j * (gpu_def->locNx) + k * (gpu_def->locNx) * (gpu_def->locNy); for (int w = 1; w <= gpu_def->newton_iterations; w++) { S_n = DevArraysPtr.S_n[local]; P_w = DevArraysPtr.P_w[local]; F1 = gpu_def->ro0_w * (1. + (gpu_def->beta_w) * (P_w - gpu_def->P_atm)) * (1. - S_n) - DevArraysPtr.roS_w[local]; F2 = gpu_def->ro0_n * (1. + (gpu_def->beta_n) * (P_w - gpu_def->P_atm)) * S_n - DevArraysPtr.roS_n[local]; F1P = gpu_def->ro0_w * (gpu_def->beta_w) * (1. - S_n); F2P = gpu_def->ro0_n * (gpu_def->beta_n) * S_n; F1S = (-1.) * gpu_def->ro0_w * (1. + (gpu_def->beta_w) * (P_w - gpu_def->P_atm)); F2S = gpu_def->ro0_n * (1. + (gpu_def->beta_n) * (P_w - gpu_def->P_atm)); det = F1P * F2S - F1S * F2P; DevArraysPtr.P_w[local] = P_w - (1. / det) * (F2S * F1 - F1S * F2); DevArraysPtr.S_n[local] = S_n - (1. / det) * (F1P * F2 - F2P * F1); } device_test_positive(DevArraysPtr.P_w[local], __FILE__, __LINE__); device_test_S(DevArraysPtr.S_n[local], __FILE__, __LINE__); } } // Setting boundary conditions with fewer checks, but with additional variables introduced __global__ void Border_S_kernel(ptr_Arrays DevArraysPtr) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; int k = threadIdx.z + blockIdx.z * blockDim.z; if ((i < gpu_def->locNx) && (j < gpu_def->locNy) && (k < gpu_def->locNz) && (device_is_active_point(i, j, k) == 1)) //if (((i == 0) || (i == (gpu_def->locNx) - 1) || (j == 0) || (j == (gpu_def->locNy) - 1) || //(((k == 0) || (k == (gpu_def->locNz) - 1)) && ((gpu_def->locNz) >= 2))) && (device_is_active_point(i, j, k) == 1)) { int local1 = device_set_boundary_basic_coordinate(i, j, k); int local = i + j * (gpu_def->locNx) + k * (gpu_def->locNx) * (gpu_def->locNy); DevArraysPtr.S_n[local] = DevArraysPtr.S_n[local1]; device_test_S(DevArraysPtr.S_n[local], __FILE__, __LINE__); } } __global__ void Border_P_kernel(ptr_Arrays DevArraysPtr) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; int k = threadIdx.z + blockIdx.z * blockDim.z; if ((i < gpu_def->locNx) && (j < gpu_def->locNy) && (k < gpu_def->locNz) && (device_is_active_point(i, j, k) == 1)) //if (((i == 0) || (i == (gpu_def->locNx) - 1) || (j == 0) || (j == (gpu_def->locNy) - 1) || // (((k == 0) || (k == (gpu_def->locNz) - 1)) && ((gpu_def->locNz) >= 2))) && (device_is_active_point(i, j, k) == 1)) { int local1 = device_set_boundary_basic_coordinate(i, j, k); int local = i + j * (gpu_def->locNx) + k * (gpu_def->locNx) * (gpu_def->locNy); if ((j != 0) && (j != (gpu_def->locNy) - 1)) { DevArraysPtr.P_w[local] = DevArraysPtr.P_w[local1]; } //else if(j == 0) // DevArraysPtr.P_w[local] = gpu_def->P_atm; else { double ro_g_dy = (gpu_def->ro0_n *
DevArraysPtr.S_n[local] + gpu_def->ro0_w * (1 - DevArraysPtr.S_n[local])) * (DevArraysPtr.m[ local]) * (gpu_def->g_const) * (gpu_def->hy); DevArraysPtr.P_w[local] = DevArraysPtr.P_w[local1] + ro_g_dy;//DevArraysPtr.ro_w[local1] * (gpu_def->g_const) * (gpu_def->hy); } // An injection well is located at the center of the reservoir if (device_is_injection_well(i, j, k)) //if (((i == 0) && (j == 0)) || ((i == 1) && (j == 0)) || ((i == 0) && (j == 1))) { //DevArraysPtr.P_w[local] = gpu_def->InjWell_Pw; } // A production well is located at the center of the reservoir if (device_is_output_well(i, j, k)) //if (((i == gpu_def->Nx - 1) && (j == gpu_def->Ny - 1)) || ((i == gpu_def->Nx - 1) && (j == gpu_def->Ny - 2)) || ((i == gpu_def->Nx - 2) && (j == gpu_def->Ny - 1))) { //DevArraysPtr.P_w[local] = gpu_def->OutWell_Pw; } device_test_positive(DevArraysPtr.P_w[local], __FILE__, __LINE__); } } // Whether the point is an injection well __device__ int device_is_injection_well(int i, int j, int k) { if (((i == 1) && (j == 1)) || ((i == 0) && (j == 0)) || ((i == 1) && (j == 0)) || ((i == 0) && (j == 1))) return 1; else return 0; } // Whether the point is a production well __device__ int device_is_output_well(int i, int j, int k) { if (((i == gpu_def->Nx - 2) && (j == gpu_def->Ny - 2)) || ((i == gpu_def->Nx - 1) && (j == gpu_def->Ny - 1)) || ((i == gpu_def->Nx - 1) && (j == gpu_def->Ny - 2)) || ((i == gpu_def->Nx - 2) && (j == gpu_def->Ny - 1))) return 1; else return 0; } // Sets the inflow/outflow fluid rates q_i at the wells __device__ void device_wells_q(ptr_Arrays DevArraysPtr, int i, int j, int k, double* q_w, double* q_n, double* q_g) { // injection well if (device_is_injection_well(i, j, k)) { *q_w = gpu_def->Q; *q_n = 0.; *q_g = 0.; } // production well if (device_is_output_well(i, j, k)) { *q_g = 0; double k_w=0., k_n=0.; device_assing_k(&k_w, &k_n, 1. - DevArraysPtr.S_n[i + j * (gpu_def->locNx) + k * (gpu_def->locNx) * (gpu_def->locNy)]); double F_bl = (k_w / gpu_def->mu_w) / (k_w / gpu_def->mu_w + k_n / gpu_def->mu_n); *q_w = -1. * gpu_def->Q * F_bl; *q_n = -1. * gpu_def->Q * (1. - F_bl); } }
72f2a4ed6a2ca1ef093bc06dd1c217975b745556.cu
//#include "../defines.h" #include "../gpu.h" #include "b-l.h" //**************************** //TODO: copy to another place //**************************** // Присвоение начальных условий void data_initialization(ptr_Arrays HostArraysPtr, long int* t, consts def) { *t = 0; for (int i = 0; i < def.locNx; i++) for (int j = 0; j < def.locNy; j++) for (int k = 0; k < def.locNz; k++) if (is_active_point(i, j, k, def)) { int local = i + j * def.locNx + k * def.locNx * def.locNy; HostArraysPtr.m[local]=def.porosity[0]; HostArraysPtr.S_n[local] = def.Background_Sn; double ro_g_dy = (def.ro0_n * HostArraysPtr.S_n[local] + def.ro0_w * (1 - HostArraysPtr.S_n[local])) * (HostArraysPtr.m[local]) * (def.g_const) * (def.hy); if (j == 0) { HostArraysPtr.P_w[local] = def.Background_Pw;//def.P_atm; } else { HostArraysPtr.P_w[local] = HostArraysPtr.P_w[i + (j - 1) * def.locNx + k * def.locNx * def.locNy] + ro_g_dy; } /* // нагнетательная скважина if (is_injection_well(i, j, k, def)) { HostArraysPtr.P_w[local] = Injection_well_P(HostArraysPtr, i, j, k, def); } // добывающая скважина if (is_output_well(i, j, k, def)) { HostArraysPtr.P_w[local] = Production_well_P(HostArraysPtr, i, j, k, def); } */ HostArraysPtr.ro_w[local] = def.ro0_w * (1. + (def.beta_w) * (HostArraysPtr.P_w[local] - def.P_atm)); HostArraysPtr.ro_n[local] = def.ro0_n * (1. + (def.beta_n) * (HostArraysPtr.P_w[local] - def.P_atm)); test_S(HostArraysPtr.S_n[local], __FILE__, __LINE__); test_positive(HostArraysPtr.P_w[local], __FILE__, __LINE__); test_positive(HostArraysPtr.m[local], __FILE__, __LINE__); } } //**************************** //TODO: copy to another place //**************************** // Расчет относительных проницаемостей в точке __device__ void device_assing_k(double* k_w, double* k_n, double S_w) { /* // SPE-постановка double S_wc = 0.2; double S_or = 0.2; double S_e = (S_w - S_wc) / (1. - S_wc - S_or); *k_w = S_e * S_e; *k_n = (1. - S_e) * (1. - S_e); if (S_w < S_wc) { *k_w = 0.; *k_n = 1.; } if (S_w > (1 - S_or)) { *k_w = 1.; *k_n = 0.; } */ // постановка ИПМ double S_sv = 0.1; double S_zv = 0.8; double S_1 = 0.70324; if ((S_sv<=S_w) && (S_w<=S_zv)) *k_n=((S_zv-S_w)/(S_zv-S_sv))*((S_zv-S_w)/(S_zv-S_sv)); else if ((0<=S_w) && (S_w<=S_sv)) *k_n=1.; else //S_zv<S<=1 *k_n=0.; if ((S_sv<=S_w) && (S_w<=S_1)) *k_w=((S_w-S_sv)/(S_zv-S_sv))*((S_w-S_sv)/(S_zv-S_sv)); else if ((0<=S_w) && (S_w<=S_sv)) *k_w=0.; else if ((S_1<=S_w) && (S_w<=S_zv)) *k_w=0.8*pow((S_w-S_sv)/(S_zv-S_sv), 0.5); else//S_zv<S<=1 *k_w=1.; device_test_S(*k_n, __FILE__, __LINE__); device_test_S(*k_w, __FILE__, __LINE__); } // Расчет плотностей, давления NAPL P2 и Xi в каждой точке сетки (независимо от остальных точек) __global__ void assign_P_Xi_kernel(ptr_Arrays DevArraysPtr) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; int k = threadIdx.z + blockIdx.z * blockDim.z; if ((i < (gpu_def->locNx)) && (j < (gpu_def->locNy)) && (k < (gpu_def->locNz)) && (device_is_active_point(i, j, k) == 1)) { double k_w=0., k_n=0.; int local = i + j * (gpu_def->locNx) + k * (gpu_def->locNx) * (gpu_def->locNy); device_assing_k(&k_w, &k_n, 1. 
- DevArraysPtr.S_n[local]); DevArraysPtr.P_n[local] = DevArraysPtr.P_w[local]; DevArraysPtr.Xi_w[local] = -1 * (DevArraysPtr.K[local]) * k_w / gpu_def->mu_w; DevArraysPtr.Xi_n[local] = -1 * (DevArraysPtr.K[local]) * k_n / gpu_def->mu_n; device_test_positive(DevArraysPtr.P_n[local], __FILE__, __LINE__); device_test_nan(DevArraysPtr.Xi_w[local], __FILE__, __LINE__); device_test_nan(DevArraysPtr.Xi_n[local], __FILE__, __LINE__); } } // Newton's method for every grid point (independently of the other points) __global__ void Newton_method_kernel(ptr_Arrays DevArraysPtr) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; int k = threadIdx.z + blockIdx.z * blockDim.z; if ((i < (gpu_def->locNx) - 1) && (j < gpu_def->locNy - 1) && (k < (gpu_def->locNz)) && (i != 0) && (j != 0) && (((k != 0) && (k != (gpu_def->locNz) - 1)) || ((gpu_def->locNz) < 2))) { int media = 0; double S_n, P_w, F1, F2, F1P, F2P, F1S, F2S, det; int local = i + j * (gpu_def->locNx) + k * (gpu_def->locNx) * (gpu_def->locNy); for (int w = 1; w <= gpu_def->newton_iterations; w++) { S_n = DevArraysPtr.S_n[local]; P_w = DevArraysPtr.P_w[local]; F1 = gpu_def->ro0_w * (1. + (gpu_def->beta_w) * (P_w - gpu_def->P_atm)) * (1. - S_n) - DevArraysPtr.roS_w[local]; F2 = gpu_def->ro0_n * (1. + (gpu_def->beta_n) * (P_w - gpu_def->P_atm)) * S_n - DevArraysPtr.roS_n[local]; F1P = gpu_def->ro0_w * (gpu_def->beta_w) * (1. - S_n); F2P = gpu_def->ro0_n * (gpu_def->beta_n) * S_n; F1S = (-1.) * gpu_def->ro0_w * (1. + (gpu_def->beta_w) * (P_w - gpu_def->P_atm)); F2S = gpu_def->ro0_n * (1. + (gpu_def->beta_n) * (P_w - gpu_def->P_atm)); det = F1P * F2S - F1S * F2P; DevArraysPtr.P_w[local] = P_w - (1. / det) * (F2S * F1 - F1S * F2); DevArraysPtr.S_n[local] = S_n - (1. / det) * (F1P * F2 - F2P * F1); } device_test_positive(DevArraysPtr.P_w[local], __FILE__, __LINE__); device_test_S(DevArraysPtr.S_n[local], __FILE__, __LINE__); } } // Setting boundary conditions with fewer checks, but with additional variables introduced __global__ void Border_S_kernel(ptr_Arrays DevArraysPtr) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; int k = threadIdx.z + blockIdx.z * blockDim.z; if ((i < gpu_def->locNx) && (j < gpu_def->locNy) && (k < gpu_def->locNz) && (device_is_active_point(i, j, k) == 1)) //if (((i == 0) || (i == (gpu_def->locNx) - 1) || (j == 0) || (j == (gpu_def->locNy) - 1) || //(((k == 0) || (k == (gpu_def->locNz) - 1)) && ((gpu_def->locNz) >= 2))) && (device_is_active_point(i, j, k) == 1)) { int local1 = device_set_boundary_basic_coordinate(i, j, k); int local = i + j * (gpu_def->locNx) + k * (gpu_def->locNx) * (gpu_def->locNy); DevArraysPtr.S_n[local] = DevArraysPtr.S_n[local1]; device_test_S(DevArraysPtr.S_n[local], __FILE__, __LINE__); } } __global__ void Border_P_kernel(ptr_Arrays DevArraysPtr) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; int k = threadIdx.z + blockIdx.z * blockDim.z; if ((i < gpu_def->locNx) && (j < gpu_def->locNy) && (k < gpu_def->locNz) && (device_is_active_point(i, j, k) == 1)) //if (((i == 0) || (i == (gpu_def->locNx) - 1) || (j == 0) || (j == (gpu_def->locNy) - 1) || // (((k == 0) || (k == (gpu_def->locNz) - 1)) && ((gpu_def->locNz) >= 2))) && (device_is_active_point(i, j, k) == 1)) { int local1 = device_set_boundary_basic_coordinate(i, j, k); int local = i + j * (gpu_def->locNx) + k * (gpu_def->locNx) * (gpu_def->locNy); if ((j != 0) && (j != (gpu_def->locNy) - 1)) {
DevArraysPtr.P_w[local] = DevArraysPtr.P_w[local1]; } //else if(j == 0) // DevArraysPtr.P_w[local] = gpu_def->P_atm; else { double ro_g_dy = (gpu_def->ro0_n * DevArraysPtr.S_n[local] + gpu_def->ro0_w * (1 - DevArraysPtr.S_n[local])) * (DevArraysPtr.m[ local]) * (gpu_def->g_const) * (gpu_def->hy); DevArraysPtr.P_w[local] = DevArraysPtr.P_w[local1] + ro_g_dy;//DevArraysPtr.ro_w[local1] * (gpu_def->g_const) * (gpu_def->hy); } // An injection well is located at the center of the reservoir if (device_is_injection_well(i, j, k)) //if (((i == 0) && (j == 0)) || ((i == 1) && (j == 0)) || ((i == 0) && (j == 1))) { //DevArraysPtr.P_w[local] = gpu_def->InjWell_Pw; } // A production well is located at the center of the reservoir if (device_is_output_well(i, j, k)) //if (((i == gpu_def->Nx - 1) && (j == gpu_def->Ny - 1)) || ((i == gpu_def->Nx - 1) && (j == gpu_def->Ny - 2)) || ((i == gpu_def->Nx - 2) && (j == gpu_def->Ny - 1))) { //DevArraysPtr.P_w[local] = gpu_def->OutWell_Pw; } device_test_positive(DevArraysPtr.P_w[local], __FILE__, __LINE__); } } // Whether the point is an injection well __device__ int device_is_injection_well(int i, int j, int k) { if (((i == 1) && (j == 1)) || ((i == 0) && (j == 0)) || ((i == 1) && (j == 0)) || ((i == 0) && (j == 1))) return 1; else return 0; } // Whether the point is a production well __device__ int device_is_output_well(int i, int j, int k) { if (((i == gpu_def->Nx - 2) && (j == gpu_def->Ny - 2)) || ((i == gpu_def->Nx - 1) && (j == gpu_def->Ny - 1)) || ((i == gpu_def->Nx - 1) && (j == gpu_def->Ny - 2)) || ((i == gpu_def->Nx - 2) && (j == gpu_def->Ny - 1))) return 1; else return 0; } // Sets the inflow/outflow fluid rates q_i at the wells __device__ void device_wells_q(ptr_Arrays DevArraysPtr, int i, int j, int k, double* q_w, double* q_n, double* q_g) { // injection well if (device_is_injection_well(i, j, k)) { *q_w = gpu_def->Q; *q_n = 0.; *q_g = 0.; } // production well if (device_is_output_well(i, j, k)) { *q_g = 0; double k_w=0., k_n=0.; device_assing_k(&k_w, &k_n, 1. - DevArraysPtr.S_n[i + j * (gpu_def->locNx) + k * (gpu_def->locNx) * (gpu_def->locNy)]); double F_bl = (k_w / gpu_def->mu_w) / (k_w / gpu_def->mu_w + k_n / gpu_def->mu_n); *q_w = -1. * gpu_def->Q * F_bl; *q_n = -1. * gpu_def->Q * (1. - F_bl); } }
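Newton_method_kernel in the file above recovers (P_w, S_n) from the conserved products roS_w and roS_n by inverting a 2x2 Jacobian analytically. Below is a hypothetical single-cell host-side sketch of that same update; the fluid constants and target values are illustrative placeholders, not values taken from the project's configuration.

#include <cstdio>

int main() {
  // Illustrative fluid parameters (placeholders, not the project's defaults).
  const double ro0_w = 1000.0, ro0_n = 850.0;
  const double beta_w = 1e-9, beta_n = 1e-9, P_atm = 1e5;
  // Target conserved values and an initial guess for one cell.
  const double roS_w = 600.0, roS_n = 340.0;
  double P_w = P_atm, S_n = 0.5;

  for (int it = 0; it < 10; ++it) {
    // Residuals of the mass-balance relations, as in the kernel.
    double F1 = ro0_w * (1. + beta_w * (P_w - P_atm)) * (1. - S_n) - roS_w;
    double F2 = ro0_n * (1. + beta_n * (P_w - P_atm)) * S_n - roS_n;
    // Analytic Jacobian entries.
    double F1P = ro0_w * beta_w * (1. - S_n);
    double F2P = ro0_n * beta_n * S_n;
    double F1S = -ro0_w * (1. + beta_w * (P_w - P_atm));
    double F2S = ro0_n * (1. + beta_n * (P_w - P_atm));
    double det = F1P * F2S - F1S * F2P;
    // (P_w, S_n) -= J^{-1} * F, written out with the 2x2 inverse.
    P_w -= (F2S * F1 - F1S * F2) / det;
    S_n -= (F1P * F2 - F2P * F1) / det;
  }
  std::printf("P_w = %g, S_n = %g\n", P_w, S_n);
  return 0;
}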
ExampleUpdater.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2009-2018 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. #include "ExampleUpdater_hip.cuh" /*! \file ExampleUpdater.cu \brief CUDA kernels for ExampleUpdater */ // First, the kernel code for zeroing the velocities on the GPU //! Kernel that zeroes velocities on the GPU /*! \param d_vel Velocity-mass array from the ParticleData \param N Number of particles This kernel executes one thread per particle and zeros the velocity of each. It can be run with any 1D block size as long as block_size * num_blocks is >= the number of particles. */ extern "C" __global__ void gpu_zero_velocities_kernel(Scalar4 *d_vel, unsigned int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) { // vel.w is the mass, don't want to modify that Scalar4 vel = d_vel[idx]; vel.x = vel.y = vel.z = 0.0f; d_vel[idx] = vel; } } /*! \param d_vel Velocity-mass array from the ParticleData \param N Number of particles This is just a driver for gpu_zero_velocities_kernel(), see it for the details */ hipError_t gpu_zero_velocities(Scalar4 *d_vel, unsigned int N) { // setup the grid to run the kernel int block_size = 256; dim3 grid( (int)ceil((double)N / (double)block_size), 1, 1); dim3 threads(block_size, 1, 1); // run the kernel hipLaunchKernelGGL(( gpu_zero_velocities_kernel), dim3(grid), dim3(threads) , 0, 0, d_vel, N); // this method always succeeds. If you had a cuda* call in this driver, you could return its error code if not // hipSuccess return hipSuccess; }
ExampleUpdater.cuh
/* Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition (HOOMD-blue) Open Source Software License Copyright 2008-2011 Ames Laboratory Iowa State University and The Regents of the University of Michigan All rights reserved. HOOMD-blue may contain modifications ("Contributions") provided, and to which copyright is held, by various Contributors who have granted The Regents of the University of Michigan the right to modify and/or distribute such Contributions. You may redistribute, use, and create derivate works of HOOMD-blue, in source and binary forms, provided you abide by the following conditions: * Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer both in the code and prominently in any materials provided with the distribution. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. * All publications and presentations based on HOOMD-blue, including any reports or published results obtained, in whole or in part, with HOOMD-blue, will acknowledge its use according to the terms posted at the time of submission on: http://codeblue.umich.edu/hoomd-blue/citations.html * Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website: http://codeblue.umich.edu/hoomd-blue/ * Apart from the above required attributions, neither the name of the copyright holder nor the names of HOOMD-blue's contributors may be used to endorse or promote products derived from this software without specific prior written permission. Disclaimer THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef _EXAMPLE_UPDATER_CUH_ #define _EXAMPLE_UPDATER_CUH_ // there is no convenient header to include all GPU related headers, we need to include those that are needed #include <hoomd/hoomd_config.h> // need to include the particle data definition #include <hoomd/ParticleData.cuh> /*! \file ExampleUpdater.cuh \brief Declaration of CUDA kernels for ExampleUpdater */ // A C API call to run a CUDA kernel is needed for ExampleUpdaterGPU to call //! Zeros velocities on the GPU extern "C" cudaError_t gpu_zero_velocities(const gpu_pdata_arrays &pdata); #endif // _EXAMPLE_UPDATER_CUH_
ExampleUpdater.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2009-2018 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. #include "ExampleUpdater_hip.cuh" /*! \file ExampleUpdater.cu \brief CUDA kernels for ExampleUpdater */ // First, the kernel code for zeroing the velocities on the GPU //! Kernel that zeroes velocities on the GPU /*! \param d_vel Velocity-mass array from the ParticleData \param N Number of particles This kernel executes one thread per particle and zeros the velocity of each. It can be run with any 1D block size as long as block_size * num_blocks is >= the number of particles. */ extern "C" __global__ void gpu_zero_velocities_kernel(Scalar4 *d_vel, unsigned int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) { // vel.w is the mass, don't want to modify that Scalar4 vel = d_vel[idx]; vel.x = vel.y = vel.z = 0.0f; d_vel[idx] = vel; } } /*! \param d_vel Velocity-mass array from the ParticleData \param N Number of particles This is just a driver for gpu_zero_velocities_kernel(), see it for the details */ hipError_t gpu_zero_velocities(Scalar4 *d_vel, unsigned int N) { // setup the grid to run the kernel int block_size = 256; dim3 grid( (int)ceil((double)N / (double)block_size), 1, 1); dim3 threads(block_size, 1, 1); // run the kernel hipLaunchKernelGGL(( gpu_zero_velocities_kernel), dim3(grid), dim3(threads) , 0, 0, d_vel, N); // this method always succeeds. If you had a cuda* call in this driver, you could return its error code if not // hipSuccess return hipSuccess; }
ExampleUpdater.cu
// Copyright (c) 2009-2018 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. #include "ExampleUpdater.cuh" /*! \file ExampleUpdater.cu \brief CUDA kernels for ExampleUpdater */ // First, the kernel code for zeroing the velocities on the GPU //! Kernel that zeroes velocities on the GPU /*! \param d_vel Velocity-mass array from the ParticleData \param N Number of particles This kernel executes one thread per particle and zeros the velocity of each. It can be run with any 1D block size as long as block_size * num_blocks is >= the number of particles. */ extern "C" __global__ void gpu_zero_velocities_kernel(Scalar4 *d_vel, unsigned int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) { // vel.w is the mass, don't want to modify that Scalar4 vel = d_vel[idx]; vel.x = vel.y = vel.z = 0.0f; d_vel[idx] = vel; } } /*! \param d_vel Velocity-mass array from the ParticleData \param N Number of particles This is just a driver for gpu_zero_velocities_kernel(), see it for the details */ cudaError_t gpu_zero_velocities(Scalar4 *d_vel, unsigned int N) { // setup the grid to run the kernel int block_size = 256; dim3 grid( (int)ceil((double)N / (double)block_size), 1, 1); dim3 threads(block_size, 1, 1); // run the kernel gpu_zero_velocities_kernel<<< grid, threads >>>(d_vel, N); // this method always succeeds. If you had a cuda* call in this driver, you could return its error code if not // cudaSuccess return cudaSuccess; }
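The kernel documentation above notes that any 1D block size works as long as grid * block covers the particle count, with an index guard inside the kernel. The standalone CUDA sketch below (a hypothetical file, not part of the HOOMD-blue plugin; zero_array_kernel is a made-up name) shows that ceil-division launch pattern in isolation.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void zero_array_kernel(float *data, unsigned int N) {
  unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < N)  // guard the padding threads of the last block
    data[idx] = 0.0f;
}

int main() {
  const unsigned int N = 1000;
  float *d_data = NULL;
  cudaMalloc((void **)&d_data, N * sizeof(float));

  // Any block size works; the grid is sized by ceil(N / block_size).
  int block_size = 256;
  int num_blocks = (int)((N + block_size - 1) / block_size);
  zero_array_kernel<<< num_blocks, block_size >>>(d_data, N);
  std::printf("launch status: %s\n", cudaGetErrorString(cudaDeviceSynchronize()));

  cudaFree(d_data);
  return 0;
}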
bb0d64195a2455953af3d02e929346fa010e28e8.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include <algorithm> #include <iostream> #include <metrics/contingencyMatrix.cuh> #include <random> #include "test_utils.h" namespace MLCommon { namespace Metrics { struct ContingencyMatrixParam { int nElements; int minClass; int maxClass; bool calcCardinality; bool skipLabels; float tolerance; }; template <typename T> class ContingencyMatrixTest : public ::testing::TestWithParam<ContingencyMatrixParam> { protected: void SetUp() override { params = ::testing::TestWithParam<ContingencyMatrixParam>::GetParam(); int numElements = params.nElements; int lowerLabelRange = params.minClass; int upperLabelRange = params.maxClass; std::vector<int> y(numElements, 0); std::vector<int> y_hat(numElements, 0); std::random_device rd; std::default_random_engine dre(rd()); std::uniform_int_distribution<int> intGenerator(lowerLabelRange, upperLabelRange); std::generate(y.begin(), y.end(), [&]() { return intGenerator(dre); }); std::generate(y_hat.begin(), y_hat.end(), [&]() { return intGenerator(dre); }); if (params.skipLabels) { // remove two label value from input arrays int y1 = (upperLabelRange - lowerLabelRange) / 2; int y2 = y1 + (upperLabelRange - lowerLabelRange) / 4; // replacement values int y1_R = y1 + 1; int y2_R = y2 + 1; std::replace(y.begin(), y.end(), y1, y1_R); std::replace(y.begin(), y.end(), y2, y2_R); std::replace(y_hat.begin(), y_hat.end(), y1, y1_R); std::replace(y_hat.begin(), y_hat.end(), y2, y2_R); } CUDA_CHECK(hipStreamCreate(&stream)); MLCommon::allocate(dY, numElements); MLCommon::allocate(dYHat, numElements); MLCommon::updateDevice(dYHat, &y_hat[0], numElements, stream); MLCommon::updateDevice(dY, &y[0], numElements, stream); if (params.calcCardinality) { MLCommon::Metrics::getInputClassCardinality(dY, numElements, stream, minLabel, maxLabel); } else { minLabel = lowerLabelRange; maxLabel = upperLabelRange; } numUniqueClasses = maxLabel - minLabel + 1; MLCommon::allocate(dComputedOutput, numUniqueClasses * numUniqueClasses); MLCommon::allocate(dGoldenOutput, numUniqueClasses * numUniqueClasses); // generate golden output on CPU size_t sizeOfMat = numUniqueClasses * numUniqueClasses * sizeof(int); hGoldenOutput = (int *)malloc(sizeOfMat); memset(hGoldenOutput, 0, sizeOfMat); for (int i = 0; i < numElements; i++) { auto row = y[i] - minLabel; auto column = y_hat[i] - minLabel; hGoldenOutput[row * numUniqueClasses + column] += 1; } MLCommon::updateDevice(dGoldenOutput, hGoldenOutput, numUniqueClasses * numUniqueClasses, stream); workspaceSz = MLCommon::Metrics::getContingencyMatrixWorkspaceSize( numElements, dY, stream, minLabel, maxLabel); if (workspaceSz != 0) MLCommon::allocate(pWorkspace, workspaceSz); } void TearDown() override { CUDA_CHECK(hipStreamSynchronize(stream)); free(hGoldenOutput); CUDA_CHECK(hipStreamDestroy(stream)); CUDA_CHECK(hipFree(dY)); CUDA_CHECK(hipFree(dYHat)); 
CUDA_CHECK(hipFree(dComputedOutput)); CUDA_CHECK(hipFree(dGoldenOutput)); if (pWorkspace) CUDA_CHECK(hipFree(pWorkspace)); } void RunTest() { int numElements = params.nElements; MLCommon::Metrics::contingencyMatrix( dY, dYHat, numElements, dComputedOutput, stream, (void *)pWorkspace, workspaceSz, minLabel, maxLabel); ASSERT_TRUE(devArrMatch(dComputedOutput, dGoldenOutput, numUniqueClasses * numUniqueClasses, Compare<T>())); } ContingencyMatrixParam params; int numUniqueClasses = -1; T *dY = nullptr; T *dYHat = nullptr; T minLabel, maxLabel; int *dComputedOutput = nullptr; int *dGoldenOutput = nullptr; int *hGoldenOutput = nullptr; char *pWorkspace = nullptr; hipStream_t stream; size_t workspaceSz; }; const std::vector<ContingencyMatrixParam> inputs = { {10000, 1, 10, true, false, 0.000001}, {10000, 1, 5000, true, false, 0.000001}, {10000, 1, 10000, true, false, 0.000001}, {10000, 1, 20000, true, false, 0.000001}, {10000, 1, 10, false, false, 0.000001}, {10000, 1, 5000, false, false, 0.000001}, {10000, 1, 10000, false, false, 0.000001}, {10000, 1, 20000, false, false, 0.000001}, {100000, 1, 100, false, false, 0.000001}, {1000000, 1, 1200, true, false, 0.000001}, {1000000, 1, 10000, false, false, 0.000001}, {100000, 1, 100, false, true, 0.000001}, }; typedef ContingencyMatrixTest<int> ContingencyMatrixTestS; TEST_P(ContingencyMatrixTestS, Result) { RunTest(); } INSTANTIATE_TEST_CASE_P(ContingencyMatrix, ContingencyMatrixTestS, ::testing::ValuesIn(inputs)); } // namespace Metrics } // namespace MLCommon
bb0d64195a2455953af3d02e929346fa010e28e8.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include <algorithm> #include <iostream> #include <metrics/contingencyMatrix.cuh> #include <random> #include "test_utils.h" namespace MLCommon { namespace Metrics { struct ContingencyMatrixParam { int nElements; int minClass; int maxClass; bool calcCardinality; bool skipLabels; float tolerance; }; template <typename T> class ContingencyMatrixTest : public ::testing::TestWithParam<ContingencyMatrixParam> { protected: void SetUp() override { params = ::testing::TestWithParam<ContingencyMatrixParam>::GetParam(); int numElements = params.nElements; int lowerLabelRange = params.minClass; int upperLabelRange = params.maxClass; std::vector<int> y(numElements, 0); std::vector<int> y_hat(numElements, 0); std::random_device rd; std::default_random_engine dre(rd()); std::uniform_int_distribution<int> intGenerator(lowerLabelRange, upperLabelRange); std::generate(y.begin(), y.end(), [&]() { return intGenerator(dre); }); std::generate(y_hat.begin(), y_hat.end(), [&]() { return intGenerator(dre); }); if (params.skipLabels) { // remove two label value from input arrays int y1 = (upperLabelRange - lowerLabelRange) / 2; int y2 = y1 + (upperLabelRange - lowerLabelRange) / 4; // replacement values int y1_R = y1 + 1; int y2_R = y2 + 1; std::replace(y.begin(), y.end(), y1, y1_R); std::replace(y.begin(), y.end(), y2, y2_R); std::replace(y_hat.begin(), y_hat.end(), y1, y1_R); std::replace(y_hat.begin(), y_hat.end(), y2, y2_R); } CUDA_CHECK(cudaStreamCreate(&stream)); MLCommon::allocate(dY, numElements); MLCommon::allocate(dYHat, numElements); MLCommon::updateDevice(dYHat, &y_hat[0], numElements, stream); MLCommon::updateDevice(dY, &y[0], numElements, stream); if (params.calcCardinality) { MLCommon::Metrics::getInputClassCardinality(dY, numElements, stream, minLabel, maxLabel); } else { minLabel = lowerLabelRange; maxLabel = upperLabelRange; } numUniqueClasses = maxLabel - minLabel + 1; MLCommon::allocate(dComputedOutput, numUniqueClasses * numUniqueClasses); MLCommon::allocate(dGoldenOutput, numUniqueClasses * numUniqueClasses); // generate golden output on CPU size_t sizeOfMat = numUniqueClasses * numUniqueClasses * sizeof(int); hGoldenOutput = (int *)malloc(sizeOfMat); memset(hGoldenOutput, 0, sizeOfMat); for (int i = 0; i < numElements; i++) { auto row = y[i] - minLabel; auto column = y_hat[i] - minLabel; hGoldenOutput[row * numUniqueClasses + column] += 1; } MLCommon::updateDevice(dGoldenOutput, hGoldenOutput, numUniqueClasses * numUniqueClasses, stream); workspaceSz = MLCommon::Metrics::getContingencyMatrixWorkspaceSize( numElements, dY, stream, minLabel, maxLabel); if (workspaceSz != 0) MLCommon::allocate(pWorkspace, workspaceSz); } void TearDown() override { CUDA_CHECK(cudaStreamSynchronize(stream)); free(hGoldenOutput); CUDA_CHECK(cudaStreamDestroy(stream)); CUDA_CHECK(cudaFree(dY)); CUDA_CHECK(cudaFree(dYHat)); CUDA_CHECK(cudaFree(dComputedOutput)); 
CUDA_CHECK(cudaFree(dGoldenOutput)); if (pWorkspace) CUDA_CHECK(cudaFree(pWorkspace)); } void RunTest() { int numElements = params.nElements; MLCommon::Metrics::contingencyMatrix( dY, dYHat, numElements, dComputedOutput, stream, (void *)pWorkspace, workspaceSz, minLabel, maxLabel); ASSERT_TRUE(devArrMatch(dComputedOutput, dGoldenOutput, numUniqueClasses * numUniqueClasses, Compare<T>())); } ContingencyMatrixParam params; int numUniqueClasses = -1; T *dY = nullptr; T *dYHat = nullptr; T minLabel, maxLabel; int *dComputedOutput = nullptr; int *dGoldenOutput = nullptr; int *hGoldenOutput = nullptr; char *pWorkspace = nullptr; cudaStream_t stream; size_t workspaceSz; }; const std::vector<ContingencyMatrixParam> inputs = { {10000, 1, 10, true, false, 0.000001}, {10000, 1, 5000, true, false, 0.000001}, {10000, 1, 10000, true, false, 0.000001}, {10000, 1, 20000, true, false, 0.000001}, {10000, 1, 10, false, false, 0.000001}, {10000, 1, 5000, false, false, 0.000001}, {10000, 1, 10000, false, false, 0.000001}, {10000, 1, 20000, false, false, 0.000001}, {100000, 1, 100, false, false, 0.000001}, {1000000, 1, 1200, true, false, 0.000001}, {1000000, 1, 10000, false, false, 0.000001}, {100000, 1, 100, false, true, 0.000001}, }; typedef ContingencyMatrixTest<int> ContingencyMatrixTestS; TEST_P(ContingencyMatrixTestS, Result) { RunTest(); } INSTANTIATE_TEST_CASE_P(ContingencyMatrix, ContingencyMatrixTestS, ::testing::ValuesIn(inputs)); } // namespace Metrics } // namespace MLCommon
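The test above validates the GPU contingency matrix against a golden matrix built on the CPU: both labels are shifted by the minimum label and each (y, y_hat) pair increments one cell. The short hypothetical C++ reference below (made-up labels, standalone, not part of the cuML test) shows exactly that binning rule.

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> y     = {1, 2, 2, 3, 3, 3};   // ground-truth labels
  std::vector<int> y_hat = {1, 2, 3, 3, 3, 1};   // predicted labels
  int min_label = *std::min_element(y.begin(), y.end());
  int max_label = *std::max_element(y.begin(), y.end());
  int n = max_label - min_label + 1;

  std::vector<int> matrix(n * n, 0);
  for (std::size_t i = 0; i < y.size(); ++i) {
    int row = y[i] - min_label;      // true label indexes the row
    int col = y_hat[i] - min_label;  // predicted label indexes the column
    matrix[row * n + col] += 1;
  }
  for (int r = 0; r < n; ++r) {
    for (int c = 0; c < n; ++c) std::printf("%d ", matrix[r * n + c]);
    std::printf("\n");
  }
  return 0;
}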
2b7ac76d1a361e3fd31657c403bc30e226f51396.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/detail/KernelUtils.h> #include <c10/util/Exception.h> namespace at { namespace native { using namespace at::cuda::detail; template <typename T> __host__ __device__ __forceinline__ T ceilDiv(T a, T b) { return (a + b - 1) / b; } template <typename T> __global__ void max_unpooling2d_forward_kernel( const int64_t numInputElements, const T* input, const int64_t* indices, const int64_t numChannels, const int64_t inputHeight, const int64_t inputWidth, const int64_t outputHeight, const int64_t outputWidth, T* output) { CUDA_KERNEL_LOOP(linearIndex, numInputElements) { int c = (linearIndex / inputWidth / inputHeight) % numChannels; int n = linearIndex / inputWidth / inputHeight / numChannels; output += (n * numChannels + c) * outputHeight * outputWidth; int maxind = indices[linearIndex]; output[maxind] = input[linearIndex]; } } template <typename T> __global__ void max_unpooling3d_forward_kernel( PackedTensorAccessor<T, 4> input, PackedTensorAccessor<int64_t, 4> indices, T* output, const int64_t oT, const int64_t oH, const int64_t oW, const int64_t offsetZ) { int64_t iColumn = blockIdx.x * blockDim.x + threadIdx.x; int64_t iRow = blockIdx.y * blockDim.y + threadIdx.y; int64_t iFrame = (blockIdx.z + offsetZ) % input.size(1); // input frame/time int64_t slice = (blockIdx.z + offsetZ) / input.size(1); // input slice/feature if (iRow < input.size(2) && iColumn < input.size(3)) { T val = input[slice][iFrame][iRow][iColumn]; int64_t index = indices[slice][iFrame][iRow][iColumn]; output[slice * oT * oH * oW + index] = val; } } template <typename T> __global__ void max_unpooling2d_backward_kernel( const int64_t numInputElements, const T* input, const int64_t* indices, const int64_t numChannels, const int64_t inputHeight, const int64_t inputWidth, const int64_t outputHeight, const int64_t outputWidth, T* output) { CUDA_KERNEL_LOOP(linearIndex, numInputElements) { int c = (linearIndex / inputWidth / inputHeight) % numChannels; int n = linearIndex / inputWidth / inputHeight / numChannels; input += (n * numChannels + c) * outputHeight * outputWidth; int maxind = indices[linearIndex]; output[linearIndex] = input[maxind]; } } template <typename T> __global__ void max_unpooling3d_backward_kernel( T* gradOutputData, int64_t oT, int64_t oH, int64_t oW, PackedTensorAccessor<int64_t, 4> indices, PackedTensorAccessor<T, 4> gradInput, int offsetZ) { int iColumn = blockIdx.x * blockDim.x + threadIdx.x; int iRow = blockIdx.y * blockDim.y + threadIdx.y; int iFrame = (blockIdx.z + offsetZ) % gradInput.size(1); // output frame/time int slice = (blockIdx.z + offsetZ) / gradInput.size(1); // output slice/feature if (iRow < gradInput.size(2) && iColumn < gradInput.size(3)) { int64_t index = indices[slice][iFrame][iRow][iColumn]; T grad_val = gradOutputData[slice * oT * oH * oW + index]; gradInput[slice][iFrame][iRow][iColumn] = grad_val; } } Tensor& max_unpooling2d_forward_out_cuda( Tensor& output, const Tensor& self_, const Tensor& indices_, IntList output_size) { TORCH_CHECK(output.is_contiguous(), "output must be contiguous"); TORCH_CHECK( indices_.scalar_type() == at::ScalarType::Long, "elements in indices should be type int64"); auto oheight = output_size[0]; auto owidth = output_size[1]; TensorArg output_arg{output, "output", 1}, self_arg{self_, "self_", 2}, indices_arg{indices_, "indices_", 3}; 
checkAllSameGPU( "max_unpooling2d_forward_out_cuda", {output_arg, self_arg, indices_arg}); TORCH_CHECK(self_.numel() > 0, "Input must be non-empty tensor"); TORCH_CHECK( (self_.ndimension() == 3 || self_.ndimension() == 4), "Input to max_unpooling2d should be a 3d or 4d Tensor", self_.sizes()); TORCH_CHECK( self_.sizes() == indices_.sizes(), "Shape of input must match shape of indices"); TORCH_CHECK( output_size.size() == 2, "There should be exactly two elements (width, height) in output_size"); int64_t dimw = 2; int64_t dimh = 1; int64_t numBatch = 1; int64_t numChannels; int64_t inputHeight; int64_t inputWidth; auto self = self_.contiguous(); auto indices = indices_.contiguous(); if (self.ndimension() == 4) { numBatch = self.size(0); dimw++; dimh++; } numChannels = self.size(dimh - 1); inputHeight = self.size(dimh); inputWidth = self.size(dimw); output.resize_({numBatch, numChannels, oheight, owidth}); output.zero_(); auto count = self.numel(); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "max_unpooling2d_forward_kernel", ([&] { hipLaunchKernelGGL(( max_unpooling2d_forward_kernel), dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self.numel(), self.data<scalar_t>(), indices.data<int64_t>(), numChannels, inputHeight, inputWidth, oheight, owidth, output.data<scalar_t>()); })); // TORCH_CHECK( // hipGetLastError() == hipSuccess, // "max_unpooling2d_forward_kernel failed with error code ", // hipGetLastError()); if (self.ndimension() == 3) { output.resize_({numChannels, oheight, owidth}); } return output; } Tensor max_unpooling2d_forward_cuda( const Tensor& self, const Tensor& indices, IntList output_size) { auto output = at::empty({0}, self.options()); max_unpooling2d_forward_out_cuda(output, self, indices, output_size); return output; } static void max_unpooling3d_shape_check( const Tensor& input, const Tensor& gradOutput, const Tensor& indices, IntList output_size, IntList stride, IntList padding) { int64_t oT = output_size[0]; int64_t oH = output_size[1]; int64_t oW = output_size[2]; TORCH_CHECK( indices.scalar_type() == at::ScalarType::Long, "elements in indices should be type int64"); TORCH_CHECK( (input.ndimension() == 4 || input.ndimension() == 5), "Input to max_unpooling3d should be a 4d or 5d Tensor", input.sizes()); TORCH_CHECK( output_size.size() == 3, "There should be exactly three elements (depth, height, width) in output_size"); TORCH_CHECK( stride.size() == 3, "There should be exactly three elements (depth, height, width) in stride"); TORCH_CHECK( padding.size() == 3, "There should be exactly three elements (depth, height, width) in padding"); TORCH_CHECK( input.sizes() == indices.sizes(), "Shape of indices should match shape of input"); TORCH_CHECK(input.numel() > 0, "Input must be non-empty"); TORCH_CHECK( stride[0] > 0 && stride[1] > 0 && stride[2] > 0, "strides should be greater than zero, but got stride: ", stride); int dimw = 3; int dimh = 2; int dimt = 1; int dimn = 0; if (input.ndimension() == 5) { dimw++; dimh++; dimt++; dimn++; } int nslices = input.size(dimn); if (gradOutput.defined()) { if (oT != gradOutput.size(dimt) || oH != gradOutput.size(dimh) || oW != gradOutput.size(dimw)) { AT_ERROR( "Inconsistent gradOutput size. oT= ", oT, ", oH= ", oH, ", oW= ", oW, ". 
gradOutput: ", gradOutput.size(dimt), "x", gradOutput.size(dimh), "x", gradOutput.size(dimw)); } TORCH_CHECK( gradOutput.ndimension() == input.ndimension() && gradOutput.size(dimn) == nslices, "gradOutput and input Tensors should have same number of dimensions and also the same number of channels/slices"); } } Tensor& max_unpooling3d_forward_out_cuda( Tensor& output, const Tensor& self_, const Tensor& indices_, IntList output_size, IntList stride, IntList padding) { TORCH_CHECK(output.is_contiguous(), "output must be contiguous"); max_unpooling3d_shape_check( self_, Tensor(), indices_, output_size, stride, padding); int64_t oT = output_size[0]; int64_t oH = output_size[1]; int64_t oW = output_size[2]; TensorArg output_arg{output, "output", 1}, self_arg{self_, "self_", 2}, indices_arg{indices_, "indices_", 3}; checkAllSameGPU( "max_unpooling3d_forward_out_cuda", {output_arg, self_arg, indices_arg}); auto self = self_.contiguous(); auto indices = indices_.contiguous(); int64_t batchSize; int64_t inputSlices; int64_t inputTime; int64_t inputHeight; int64_t inputWidth; if (self.ndimension() == 4) { batchSize = 1; inputSlices = self.size(0); inputTime = self.size(1); inputHeight = self.size(2); inputWidth = self.size(3); output.resize_({inputSlices, oT, oH, oW}); } else { batchSize = self.size(0); inputSlices = self.size(1); inputTime = self.size(2); inputHeight = self.size(3); inputWidth = self.size(4); output.resize_({batchSize, inputSlices, oT, oH, oW}); } output.zero_(); // Collapse batch and feature dimensions if needed if (self.ndimension() == 5) { self = self.reshape({self.size(0) * self.size(1), self.size(2), self.size(3), self.size(4)}); indices = indices.reshape({indices.size(0) * indices.size(1), indices.size(2), indices.size(3), indices.size(4)}); } int totalZ = inputTime * inputSlices * batchSize; int offsetZ = 0; dim3 block(32, 8); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "max_unpooling3d_forward_kernel", ([&] { while (totalZ > 0) { dim3 grid( ceilDiv(inputWidth, static_cast<int64_t>(block.x)), ceilDiv(inputHeight, static_cast<int64_t>(block.y)), totalZ > 65535 ? 
65535 : totalZ); hipLaunchKernelGGL(( max_unpooling3d_forward_kernel), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self.packed_accessor<scalar_t, 4>(), indices.packed_accessor<int64_t, 4>(), output.data<scalar_t>(), oT, oH, oW, offsetZ); // TORCH_CHECK( // hipGetLastError() == hipSuccess, // "max_unpooling3d_forward_kernel failed with error code ", // hipGetLastError()); totalZ -= 65535; offsetZ += 65535; } })); return output; } Tensor max_unpooling3d_forward_cuda( const Tensor& self, const Tensor& indices, IntList output_size, IntList stride, IntList padding) { auto output = at::empty({0}, self.options()); max_unpooling3d_forward_out_cuda( output, self, indices, output_size, stride, padding); return output; } at::Tensor& max_unpooling2d_backward_out_cuda( Tensor& grad_input, const Tensor& grad_output_, const Tensor& self_, const Tensor& indices_, IntList output_size) { int64_t oheight = output_size[0]; int64_t owidth = output_size[1]; TORCH_CHECK(grad_input.is_contiguous(), "grad_input must be contiguous"); TORCH_CHECK( indices_.scalar_type() == at::ScalarType::Long, "elements in indices should be type int64"); TensorArg grad_input_arg{grad_input, "grad_input", 1}, grad_output_arg{grad_output_, "grad_output_", 2}, self_arg{self_, "self_", 3}, indices_arg{indices_, "indices_", 4}; checkAllSameGPU( "max_unpooling2d_backward_out_cuda", {grad_input_arg, grad_output_arg, self_arg, indices_arg}); TORCH_CHECK( (self_.ndimension() == 3 || self_.ndimension() == 4), "Input to max_unpooling2d should be a 3d or 4d Tensor, instead got: ", self_); TORCH_CHECK( self_.sizes() == indices_.sizes(), "Input should have same shape as indices"); TORCH_CHECK(output_size.size() == 2, "output_size must have two elements"); int64_t nInputCols, nInputRows, nInputPlane, batchSize; int dimw = 2; int dimh = 1; auto self = self_.contiguous(); auto indices = indices_.contiguous(); auto grad_output = grad_output_.contiguous(); if (self.ndimension() == 3) { nInputPlane = self.size(0); batchSize = 1; } else { ++dimw; ++dimh; nInputPlane = self.size(1); batchSize = self.size(0); } nInputCols = self.size(dimw); nInputRows = self.size(dimh); if (oheight != grad_output.size(dimh) || owidth != grad_output.size(dimw)) { AT_ERROR( "Inconsistent gradOutput size. 
output height: ", oheight, ", output width= ", owidth, ", gradOutput: ", grad_output.size(dimh), "x", grad_output.size(dimw)); } grad_input.resize_as_(self); grad_input.zero_(); int count = self.numel(); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "max_unpooling2d_backward_kernel", ([&] { hipLaunchKernelGGL(( max_unpooling2d_backward_kernel), dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, grad_output.data<scalar_t>(), indices.data<int64_t>(), nInputPlane, nInputRows, nInputCols, oheight, owidth, grad_input.data<scalar_t>()); })); // TORCH_CHECK( // hipGetLastError() == hipSuccess, // "max_unpooling2d_backward_kernel failed with error code ", // hipGetLastError()); return grad_input; } at::Tensor max_unpooling2d_backward_cuda( const Tensor& grad_output, const Tensor& self, const Tensor& indices, IntList output_size) { auto grad_input = at::empty_like(self); max_unpooling2d_backward_out_cuda( grad_input, grad_output, self, indices, output_size); return grad_input; } at::Tensor& max_unpooling3d_backward_out_cuda( Tensor& grad_input, const Tensor& grad_output_, const Tensor& self_, const Tensor& indices_, IntList output_size, IntList stride, IntList padding) { TORCH_CHECK(grad_input.is_contiguous(), "grad_input must be contiguous"); int64_t oT = output_size[0]; int64_t oH = output_size[1]; int64_t oW = output_size[2]; max_unpooling3d_shape_check( self_, grad_output_, indices_, output_size, stride, padding); int batchSize = 0; int inputSlices = 0; int inputTime = 0; int64_t inputHeight = 0; int64_t inputWidth = 0; TensorArg self_arg{self_, "self_", 1}, indices_arg{indices_, "indices_", 2}, grad_output_arg{grad_output_, "grad_output_", 3}, grad_input_arg{grad_input, "grad_input", 4}; checkAllSameGPU( "max_unpooling3d_backward_out_cuda", {self_arg, indices_arg, grad_output_arg, grad_input_arg}); auto self = self_.contiguous(); auto indices = indices_.contiguous(); auto grad_output = grad_output_.contiguous(); if (self.ndimension() == 4) { batchSize = 1; inputSlices = self.size(0); inputTime = self.size(1); inputHeight = self.size(2); inputWidth = self.size(3); } else { batchSize = self.size(0); inputSlices = self.size(1); inputTime = self.size(2); inputHeight = self.size(3); inputWidth = self.size(4); } grad_input.resize_as_(self); grad_input.zero_(); // Collapse batch and feature dimensions if needed auto grad_input_reshaped = grad_input; if (grad_input.ndimension() == 5) { grad_input_reshaped = grad_input.reshape({grad_input.size(0) * grad_input.size(1), grad_input.size(2), grad_input.size(3), grad_input.size(4)}); indices = indices.reshape({indices.size(0) * indices.size(1), indices.size(2), indices.size(3), indices.size(4)}); } int totalZ = inputTime * inputSlices * batchSize; int offsetZ = 0; dim3 block(32, 8); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "max_unpooling3d_backward_kernel", ([&] { while (totalZ > 0) { dim3 grid( ceilDiv(inputWidth, static_cast<int64_t>(block.x)), ceilDiv(inputHeight, static_cast<int64_t>(block.y)), totalZ > 65535 ? 
65535 : totalZ); hipLaunchKernelGGL(( max_unpooling3d_backward_kernel), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_output.data<scalar_t>(), oT, oH, oW, indices.packed_accessor<int64_t, 4>(), grad_input_reshaped.packed_accessor<scalar_t, 4>(), offsetZ); // TORCH_CHECK( // hipGetLastError() == hipSuccess, // "max_unpooling3d_backward_kernel failed with error code ", // hipGetLastError()); totalZ -= 65535; offsetZ += 65535; } })); return grad_input; } at::Tensor max_unpooling3d_backward_cuda( const Tensor& grad_output, const Tensor& self, const Tensor& indices, IntList output_size, IntList stride, IntList padding) { auto grad_input = at::empty_like(self); max_unpooling3d_backward_out_cuda( grad_input, grad_output, self, indices, output_size, stride, padding); return grad_input; } } // namespace native } // namespace at
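max_unpooling2d_forward_kernel in the pair of files above and below is a pure scatter: each input value is written to the flat output position stored in the indices tensor, and every other output element stays zero. The hypothetical CPU reference below (a made-up 2x2 -> 4x4 example, not part of the ATen source) walks through that index math.

#include <cstdio>
#include <vector>

int main() {
  // One sample, one channel: a 2x2 pooled input unpooled into a 4x4 plane.
  const int outputHeight = 4, outputWidth = 4;
  std::vector<float> input   = {5.f, 6.f, 7.f, 8.f};
  // Flat positions inside the 4x4 output plane (as max_pool2d_with_indices would record).
  std::vector<int>   indices = {0, 3, 12, 15};

  std::vector<float> output(outputHeight * outputWidth, 0.f);
  for (std::size_t i = 0; i < input.size(); ++i)
    output[indices[i]] = input[i];  // scatter, mirroring output[maxind] = input[linearIndex]

  for (int h = 0; h < outputHeight; ++h) {
    for (int w = 0; w < outputWidth; ++w) std::printf("%4.1f ", output[h * outputWidth + w]);
    std::printf("\n");
  }
  return 0;
}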
2b7ac76d1a361e3fd31657c403bc30e226f51396.cu
#include <ATen/ATen.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/detail/KernelUtils.h> #include <c10/util/Exception.h> namespace at { namespace native { using namespace at::cuda::detail; template <typename T> __host__ __device__ __forceinline__ T ceilDiv(T a, T b) { return (a + b - 1) / b; } template <typename T> __global__ void max_unpooling2d_forward_kernel( const int64_t numInputElements, const T* input, const int64_t* indices, const int64_t numChannels, const int64_t inputHeight, const int64_t inputWidth, const int64_t outputHeight, const int64_t outputWidth, T* output) { CUDA_KERNEL_LOOP(linearIndex, numInputElements) { int c = (linearIndex / inputWidth / inputHeight) % numChannels; int n = linearIndex / inputWidth / inputHeight / numChannels; output += (n * numChannels + c) * outputHeight * outputWidth; int maxind = indices[linearIndex]; output[maxind] = input[linearIndex]; } } template <typename T> __global__ void max_unpooling3d_forward_kernel( PackedTensorAccessor<T, 4> input, PackedTensorAccessor<int64_t, 4> indices, T* output, const int64_t oT, const int64_t oH, const int64_t oW, const int64_t offsetZ) { int64_t iColumn = blockIdx.x * blockDim.x + threadIdx.x; int64_t iRow = blockIdx.y * blockDim.y + threadIdx.y; int64_t iFrame = (blockIdx.z + offsetZ) % input.size(1); // input frame/time int64_t slice = (blockIdx.z + offsetZ) / input.size(1); // input slice/feature if (iRow < input.size(2) && iColumn < input.size(3)) { T val = input[slice][iFrame][iRow][iColumn]; int64_t index = indices[slice][iFrame][iRow][iColumn]; output[slice * oT * oH * oW + index] = val; } } template <typename T> __global__ void max_unpooling2d_backward_kernel( const int64_t numInputElements, const T* input, const int64_t* indices, const int64_t numChannels, const int64_t inputHeight, const int64_t inputWidth, const int64_t outputHeight, const int64_t outputWidth, T* output) { CUDA_KERNEL_LOOP(linearIndex, numInputElements) { int c = (linearIndex / inputWidth / inputHeight) % numChannels; int n = linearIndex / inputWidth / inputHeight / numChannels; input += (n * numChannels + c) * outputHeight * outputWidth; int maxind = indices[linearIndex]; output[linearIndex] = input[maxind]; } } template <typename T> __global__ void max_unpooling3d_backward_kernel( T* gradOutputData, int64_t oT, int64_t oH, int64_t oW, PackedTensorAccessor<int64_t, 4> indices, PackedTensorAccessor<T, 4> gradInput, int offsetZ) { int iColumn = blockIdx.x * blockDim.x + threadIdx.x; int iRow = blockIdx.y * blockDim.y + threadIdx.y; int iFrame = (blockIdx.z + offsetZ) % gradInput.size(1); // output frame/time int slice = (blockIdx.z + offsetZ) / gradInput.size(1); // output slice/feature if (iRow < gradInput.size(2) && iColumn < gradInput.size(3)) { int64_t index = indices[slice][iFrame][iRow][iColumn]; T grad_val = gradOutputData[slice * oT * oH * oW + index]; gradInput[slice][iFrame][iRow][iColumn] = grad_val; } } Tensor& max_unpooling2d_forward_out_cuda( Tensor& output, const Tensor& self_, const Tensor& indices_, IntList output_size) { TORCH_CHECK(output.is_contiguous(), "output must be contiguous"); TORCH_CHECK( indices_.scalar_type() == at::ScalarType::Long, "elements in indices should be type int64"); auto oheight = output_size[0]; auto owidth = output_size[1]; TensorArg output_arg{output, "output", 1}, self_arg{self_, "self_", 2}, indices_arg{indices_, "indices_", 3}; checkAllSameGPU( "max_unpooling2d_forward_out_cuda", {output_arg, self_arg, 
indices_arg}); TORCH_CHECK(self_.numel() > 0, "Input must be non-empty tensor"); TORCH_CHECK( (self_.ndimension() == 3 || self_.ndimension() == 4), "Input to max_unpooling2d should be a 3d or 4d Tensor", self_.sizes()); TORCH_CHECK( self_.sizes() == indices_.sizes(), "Shape of input must match shape of indices"); TORCH_CHECK( output_size.size() == 2, "There should be exactly two elements (width, height) in output_size"); int64_t dimw = 2; int64_t dimh = 1; int64_t numBatch = 1; int64_t numChannels; int64_t inputHeight; int64_t inputWidth; auto self = self_.contiguous(); auto indices = indices_.contiguous(); if (self.ndimension() == 4) { numBatch = self.size(0); dimw++; dimh++; } numChannels = self.size(dimh - 1); inputHeight = self.size(dimh); inputWidth = self.size(dimw); output.resize_({numBatch, numChannels, oheight, owidth}); output.zero_(); auto count = self.numel(); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "max_unpooling2d_forward_kernel", ([&] { max_unpooling2d_forward_kernel<<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( self.numel(), self.data<scalar_t>(), indices.data<int64_t>(), numChannels, inputHeight, inputWidth, oheight, owidth, output.data<scalar_t>()); })); // TORCH_CHECK( // cudaGetLastError() == cudaSuccess, // "max_unpooling2d_forward_kernel failed with error code ", // cudaGetLastError()); if (self.ndimension() == 3) { output.resize_({numChannels, oheight, owidth}); } return output; } Tensor max_unpooling2d_forward_cuda( const Tensor& self, const Tensor& indices, IntList output_size) { auto output = at::empty({0}, self.options()); max_unpooling2d_forward_out_cuda(output, self, indices, output_size); return output; } static void max_unpooling3d_shape_check( const Tensor& input, const Tensor& gradOutput, const Tensor& indices, IntList output_size, IntList stride, IntList padding) { int64_t oT = output_size[0]; int64_t oH = output_size[1]; int64_t oW = output_size[2]; TORCH_CHECK( indices.scalar_type() == at::ScalarType::Long, "elements in indices should be type int64"); TORCH_CHECK( (input.ndimension() == 4 || input.ndimension() == 5), "Input to max_unpooling3d should be a 4d or 5d Tensor", input.sizes()); TORCH_CHECK( output_size.size() == 3, "There should be exactly three elements (depth, height, width) in output_size"); TORCH_CHECK( stride.size() == 3, "There should be exactly three elements (depth, height, width) in stride"); TORCH_CHECK( padding.size() == 3, "There should be exactly three elements (depth, height, width) in padding"); TORCH_CHECK( input.sizes() == indices.sizes(), "Shape of indices should match shape of input"); TORCH_CHECK(input.numel() > 0, "Input must be non-empty"); TORCH_CHECK( stride[0] > 0 && stride[1] > 0 && stride[2] > 0, "strides should be greater than zero, but got stride: ", stride); int dimw = 3; int dimh = 2; int dimt = 1; int dimn = 0; if (input.ndimension() == 5) { dimw++; dimh++; dimt++; dimn++; } int nslices = input.size(dimn); if (gradOutput.defined()) { if (oT != gradOutput.size(dimt) || oH != gradOutput.size(dimh) || oW != gradOutput.size(dimw)) { AT_ERROR( "Inconsistent gradOutput size. oT= ", oT, ", oH= ", oH, ", oW= ", oW, ". 
gradOutput: ", gradOutput.size(dimt), "x", gradOutput.size(dimh), "x", gradOutput.size(dimw)); } TORCH_CHECK( gradOutput.ndimension() == input.ndimension() && gradOutput.size(dimn) == nslices, "gradOutput and input Tensors should have same number of dimensions and also the same number of channels/slices"); } } Tensor& max_unpooling3d_forward_out_cuda( Tensor& output, const Tensor& self_, const Tensor& indices_, IntList output_size, IntList stride, IntList padding) { TORCH_CHECK(output.is_contiguous(), "output must be contiguous"); max_unpooling3d_shape_check( self_, Tensor(), indices_, output_size, stride, padding); int64_t oT = output_size[0]; int64_t oH = output_size[1]; int64_t oW = output_size[2]; TensorArg output_arg{output, "output", 1}, self_arg{self_, "self_", 2}, indices_arg{indices_, "indices_", 3}; checkAllSameGPU( "max_unpooling3d_forward_out_cuda", {output_arg, self_arg, indices_arg}); auto self = self_.contiguous(); auto indices = indices_.contiguous(); int64_t batchSize; int64_t inputSlices; int64_t inputTime; int64_t inputHeight; int64_t inputWidth; if (self.ndimension() == 4) { batchSize = 1; inputSlices = self.size(0); inputTime = self.size(1); inputHeight = self.size(2); inputWidth = self.size(3); output.resize_({inputSlices, oT, oH, oW}); } else { batchSize = self.size(0); inputSlices = self.size(1); inputTime = self.size(2); inputHeight = self.size(3); inputWidth = self.size(4); output.resize_({batchSize, inputSlices, oT, oH, oW}); } output.zero_(); // Collapse batch and feature dimensions if needed if (self.ndimension() == 5) { self = self.reshape({self.size(0) * self.size(1), self.size(2), self.size(3), self.size(4)}); indices = indices.reshape({indices.size(0) * indices.size(1), indices.size(2), indices.size(3), indices.size(4)}); } int totalZ = inputTime * inputSlices * batchSize; int offsetZ = 0; dim3 block(32, 8); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "max_unpooling3d_forward_kernel", ([&] { while (totalZ > 0) { dim3 grid( ceilDiv(inputWidth, static_cast<int64_t>(block.x)), ceilDiv(inputHeight, static_cast<int64_t>(block.y)), totalZ > 65535 ? 
65535 : totalZ); max_unpooling3d_forward_kernel<<< grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( self.packed_accessor<scalar_t, 4>(), indices.packed_accessor<int64_t, 4>(), output.data<scalar_t>(), oT, oH, oW, offsetZ); // TORCH_CHECK( // cudaGetLastError() == cudaSuccess, // "max_unpooling3d_forward_kernel failed with error code ", // cudaGetLastError()); totalZ -= 65535; offsetZ += 65535; } })); return output; } Tensor max_unpooling3d_forward_cuda( const Tensor& self, const Tensor& indices, IntList output_size, IntList stride, IntList padding) { auto output = at::empty({0}, self.options()); max_unpooling3d_forward_out_cuda( output, self, indices, output_size, stride, padding); return output; } at::Tensor& max_unpooling2d_backward_out_cuda( Tensor& grad_input, const Tensor& grad_output_, const Tensor& self_, const Tensor& indices_, IntList output_size) { int64_t oheight = output_size[0]; int64_t owidth = output_size[1]; TORCH_CHECK(grad_input.is_contiguous(), "grad_input must be contiguous"); TORCH_CHECK( indices_.scalar_type() == at::ScalarType::Long, "elements in indices should be type int64"); TensorArg grad_input_arg{grad_input, "grad_input", 1}, grad_output_arg{grad_output_, "grad_output_", 2}, self_arg{self_, "self_", 3}, indices_arg{indices_, "indices_", 4}; checkAllSameGPU( "max_unpooling2d_backward_out_cuda", {grad_input_arg, grad_output_arg, self_arg, indices_arg}); TORCH_CHECK( (self_.ndimension() == 3 || self_.ndimension() == 4), "Input to max_unpooling2d should be a 3d or 4d Tensor, instead got: ", self_); TORCH_CHECK( self_.sizes() == indices_.sizes(), "Input should have same shape as indices"); TORCH_CHECK(output_size.size() == 2, "output_size must have two elements"); int64_t nInputCols, nInputRows, nInputPlane, batchSize; int dimw = 2; int dimh = 1; auto self = self_.contiguous(); auto indices = indices_.contiguous(); auto grad_output = grad_output_.contiguous(); if (self.ndimension() == 3) { nInputPlane = self.size(0); batchSize = 1; } else { ++dimw; ++dimh; nInputPlane = self.size(1); batchSize = self.size(0); } nInputCols = self.size(dimw); nInputRows = self.size(dimh); if (oheight != grad_output.size(dimh) || owidth != grad_output.size(dimw)) { AT_ERROR( "Inconsistent gradOutput size. 
output height: ", oheight, ", output width= ", owidth, ", gradOutput: ", grad_output.size(dimh), "x", grad_output.size(dimw)); } grad_input.resize_as_(self); grad_input.zero_(); int count = self.numel(); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "max_unpooling2d_backward_kernel", ([&] { max_unpooling2d_backward_kernel<<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( count, grad_output.data<scalar_t>(), indices.data<int64_t>(), nInputPlane, nInputRows, nInputCols, oheight, owidth, grad_input.data<scalar_t>()); })); // TORCH_CHECK( // cudaGetLastError() == cudaSuccess, // "max_unpooling2d_backward_kernel failed with error code ", // cudaGetLastError()); return grad_input; } at::Tensor max_unpooling2d_backward_cuda( const Tensor& grad_output, const Tensor& self, const Tensor& indices, IntList output_size) { auto grad_input = at::empty_like(self); max_unpooling2d_backward_out_cuda( grad_input, grad_output, self, indices, output_size); return grad_input; } at::Tensor& max_unpooling3d_backward_out_cuda( Tensor& grad_input, const Tensor& grad_output_, const Tensor& self_, const Tensor& indices_, IntList output_size, IntList stride, IntList padding) { TORCH_CHECK(grad_input.is_contiguous(), "grad_input must be contiguous"); int64_t oT = output_size[0]; int64_t oH = output_size[1]; int64_t oW = output_size[2]; max_unpooling3d_shape_check( self_, grad_output_, indices_, output_size, stride, padding); int batchSize = 0; int inputSlices = 0; int inputTime = 0; int64_t inputHeight = 0; int64_t inputWidth = 0; TensorArg self_arg{self_, "self_", 1}, indices_arg{indices_, "indices_", 2}, grad_output_arg{grad_output_, "grad_output_", 3}, grad_input_arg{grad_input, "grad_input", 4}; checkAllSameGPU( "max_unpooling3d_backward_out_cuda", {self_arg, indices_arg, grad_output_arg, grad_input_arg}); auto self = self_.contiguous(); auto indices = indices_.contiguous(); auto grad_output = grad_output_.contiguous(); if (self.ndimension() == 4) { batchSize = 1; inputSlices = self.size(0); inputTime = self.size(1); inputHeight = self.size(2); inputWidth = self.size(3); } else { batchSize = self.size(0); inputSlices = self.size(1); inputTime = self.size(2); inputHeight = self.size(3); inputWidth = self.size(4); } grad_input.resize_as_(self); grad_input.zero_(); // Collapse batch and feature dimensions if needed auto grad_input_reshaped = grad_input; if (grad_input.ndimension() == 5) { grad_input_reshaped = grad_input.reshape({grad_input.size(0) * grad_input.size(1), grad_input.size(2), grad_input.size(3), grad_input.size(4)}); indices = indices.reshape({indices.size(0) * indices.size(1), indices.size(2), indices.size(3), indices.size(4)}); } int totalZ = inputTime * inputSlices * batchSize; int offsetZ = 0; dim3 block(32, 8); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "max_unpooling3d_backward_kernel", ([&] { while (totalZ > 0) { dim3 grid( ceilDiv(inputWidth, static_cast<int64_t>(block.x)), ceilDiv(inputHeight, static_cast<int64_t>(block.y)), totalZ > 65535 ? 
65535 : totalZ); max_unpooling3d_backward_kernel<<< grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( grad_output.data<scalar_t>(), oT, oH, oW, indices.packed_accessor<int64_t, 4>(), grad_input_reshaped.packed_accessor<scalar_t, 4>(), offsetZ); // TORCH_CHECK( // cudaGetLastError() == cudaSuccess, // "max_unpooling3d_backward_kernel failed with error code ", // cudaGetLastError()); totalZ -= 65535; offsetZ += 65535; } })); return grad_input; } at::Tensor max_unpooling3d_backward_cuda( const Tensor& grad_output, const Tensor& self, const Tensor& indices, IntList output_size, IntList stride, IntList padding) { auto grad_input = at::empty_like(self); max_unpooling3d_backward_out_cuda( grad_input, grad_output, self, indices, output_size, stride, padding); return grad_input; } } // namespace native } // namespace at
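A minimal standalone CUDA sketch (an illustration, not part of the ATen file above; the kernel and variable names are invented for the example) of the scatter pattern used by max_unpooling2d_forward_kernel: each input element is written at the flat position stored in indices, inside its own zero-initialized (n, c) output plane.

#include <cstdio>
#include <cuda_runtime.h>

// Simplified scatter with the same pointer arithmetic as max_unpooling2d_forward_kernel:
// locate the (n, c) plane of the output, then write input[i] at plane[indices[i]].
__global__ void unpool2d_scatter(const float* input, const long long* indices,
                                 int numInput, int channels,
                                 int inH, int inW, int outH, int outW,
                                 float* output) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= numInput) return;
  int c = (i / (inW * inH)) % channels;
  int n = i / (inW * inH * channels);
  float* plane = output + (long long)(n * channels + c) * outH * outW;
  plane[indices[i]] = input[i];
}

int main() {
  // One 2x2 input plane unpooled into a 4x4 plane; indices are flat offsets in the plane.
  const int inH = 2, inW = 2, outH = 4, outW = 4, numInput = inH * inW;
  float hIn[numInput] = {1.f, 2.f, 3.f, 4.f};
  long long hIdx[numInput] = {0, 3, 12, 15};
  float *dIn, *dOut;
  long long *dIdx;
  cudaMalloc(&dIn, sizeof(hIn));
  cudaMalloc(&dIdx, sizeof(hIdx));
  cudaMalloc(&dOut, outH * outW * sizeof(float));
  cudaMemcpy(dIn, hIn, sizeof(hIn), cudaMemcpyHostToDevice);
  cudaMemcpy(dIdx, hIdx, sizeof(hIdx), cudaMemcpyHostToDevice);
  cudaMemset(dOut, 0, outH * outW * sizeof(float));
  unpool2d_scatter<<<1, 32>>>(dIn, dIdx, numInput, 1, inH, inW, outH, outW, dOut);
  float hOut[outH * outW];
  cudaMemcpy(hOut, dOut, sizeof(hOut), cudaMemcpyDeviceToHost);
  for (int r = 0; r < outH; ++r) {
    for (int col = 0; col < outW; ++col) printf("%4.1f ", hOut[r * outW + col]);
    printf("\n");
  }
  cudaFree(dIn); cudaFree(dIdx); cudaFree(dOut);
  return 0;
}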
2116144fbbf0f95105f21c690f5a06caf376ac79.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/time.h> #include <float.h> #include <time.h> #include <hip/hip_runtime.h> struct timeval startwtime,endwtime; double seq_time; __device__ double kernel(double a,double *d_sigma); __device__ double distance(int l,int q,double *a, double *b, int *d_d); void printmatrixd(int a, int b, double *matrix); void printmatrixi(int a,int b, int *matrix); void printsolution(int a, int b, double *point, int *kmatrix); void checkfunction(void); __global__ void devMain(double *d_m,int *d_kmatrix,double *d_x,double *d_y,double *d_y2, double *d_epsilon, double *d_sigma, int *d_n, int *d_d, int *d_count, double *d_xtemp, double *d_sum2, int *d_numthreads, double *d_sum); int i,j,s,n,d,len,h,numthreads; int *d_n, *d_d; double *sum2,*m, *xtemp, *sum; double *d_sum2, *d_m, *d_xtemp, *d_sum; int *kmatrix, *count; int *d_kmatrix, *d_count, *d_numthreads; FILE *myfile; double *x, *y, *y2; double *d_x, *d_y, *d_y2; double sigma,epsilon,p; //only use sigma and epsilon double *d_sigma, *d_epsilon; int main(int argc, char **argv){ numthreads=6 ;// you can change this if you want if (argc!=5) { printf("Usage: %s, s, n, d, file.bin, \nwhere s is sigma*0.1, e is sigma*0.0001, \nn is the number of elements, \nd its dimension and file.bin\nthe binary file with the elements \n",argv[0]); exit(1); } s=atoi(argv[1]); sigma=s*0.1; hipMalloc((void **)&d_sigma,sizeof(double)); hipMemcpy(d_sigma,&sigma,sizeof(double),hipMemcpyHostToDevice); hipMalloc((void **)&d_numthreads,sizeof(int)); hipMemcpy(d_numthreads,&numthreads,sizeof(int),hipMemcpyHostToDevice); epsilon=sigma*0.0001; hipMalloc((void **)&d_epsilon,sizeof(double)); hipMemcpy(d_epsilon,&epsilon,sizeof(double),hipMemcpyHostToDevice); n=atoi(argv[2]); hipMalloc((void **)&d_n,sizeof(int)); hipMemcpy(d_n,&n,sizeof(int),hipMemcpyHostToDevice); d=atoi(argv[3]); hipMalloc((void **)&d_d,sizeof(int)); hipMemcpy(d_d,&d,sizeof(int),hipMemcpyHostToDevice); xtemp=(double*)malloc(n*numthreads*sizeof(double)); hipMalloc((void **)&d_xtemp,n*numthreads*sizeof(double)); m=(double*)malloc(n*sizeof(double)); hipMalloc((void **)&d_m,n*sizeof(double)); sum2=(double*)malloc(n*numthreads*sizeof(double)); hipMalloc((void **)&d_sum2,n*numthreads*sizeof(double)); sum=(double*)malloc(n*sizeof(double)); hipMalloc((void **)&d_sum,n*sizeof(double)); kmatrix=(int*)malloc(n*sizeof(int)); hipMalloc((void **)&d_kmatrix,n*sizeof(int)); count=(int*)malloc(n*sizeof(int)); hipMalloc((void **)&d_count,n*sizeof(int)); x=(double*)malloc(n*d*sizeof(double)); hipMalloc((void **)&d_x,n*d*sizeof(double)); y=(double*)malloc(n*d*sizeof(double)); hipMalloc((void **)&d_y,n*d*sizeof(double)); y2=(double*)malloc(n*d*sizeof(double)); hipMalloc((void **)&d_y2,n*d*sizeof(double)); for (i=0;i<n;i++){ m[i]=100; for (j=0;j<numthreads;j++){ xtemp[i*numthreads+j]=0; sum2[i*numthreads+j]=0; } sum[i]=0; kmatrix[i]=0; count[i]=0; } hipMemcpy(d_m,m,n*sizeof(double),hipMemcpyHostToDevice); hipMemcpy(d_sum,sum,n*sizeof(double),hipMemcpyHostToDevice); hipMemcpy(d_sum2,sum2,n*numthreads*sizeof(double),hipMemcpyHostToDevice); hipMemcpy(d_kmatrix,kmatrix,n*sizeof(int),hipMemcpyHostToDevice); hipMemcpy(d_xtemp,xtemp,n*numthreads*sizeof(double),hipMemcpyHostToDevice); hipMemcpy(d_count,count,n*sizeof(int),hipMemcpyHostToDevice); myfile=fopen(argv[4],"rb"); for (i=0;i<n;i++){ for (j=0;j<d;j++){ len=fread(&p,8,1,myfile); x[i*d+j]=p; y[i*d+j]=0; y2[i*d+j]=p; } } fclose(myfile); 
hipMemcpy(d_x,x,n*d*sizeof(double),hipMemcpyHostToDevice); hipMemcpy(d_y,y,n*d*sizeof(double),hipMemcpyHostToDevice); hipMemcpy(d_y2,y2,n*d*sizeof(double),hipMemcpyHostToDevice); /** start of the main part of the program**/ gettimeofday(&startwtime,NULL); hipLaunchKernelGGL(( devMain), dim3(n),dim3(numthreads),n*d*sizeof(double), 0, d_m,d_kmatrix,d_x,d_y,d_y2,d_epsilon,d_sigma,d_n,d_d,d_count,d_xtemp,d_sum2,d_numthreads,d_sum); gettimeofday(&endwtime,NULL); hipMemcpy(kmatrix,d_kmatrix,n*sizeof(int),hipMemcpyDeviceToHost); hipMemcpy(y2,d_y2,n*d*sizeof(double),hipMemcpyDeviceToHost); printsolution(n,d,y2,kmatrix); //checkfunction(); seq_time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6 + endwtime.tv_sec - startwtime.tv_sec); printf("total time: %f secs\n",seq_time); free(x); free(y); free(y2); free(count); free(kmatrix); free (sum); free (sum2); free(m); hipFree(d_x); hipFree(d_y); hipFree(d_y2); hipFree(d_count); hipFree(d_kmatrix); hipFree(d_sum2); hipFree(d_sum); hipFree(m); } __device__ double kernel(double a, double *d_sigma){ double result; result=exp(-a/(2*(*d_sigma)*(*d_sigma))); return result; } __device__ double distance(int l,int q, double *a, double *b, int *d_d){ double sum=0; double root=0; int j; for (j=0;j<(*d_d);j++){ sum+=(a[l*(*d_d)+j]-b[q*(*d_d)+j])*(a[l*(*d_d)+j]-b[q*(*d_d)+j]); } root=sqrt(sum); return root; } void printmatrixd(int a,int b, double *matrix){ int i,j; for (i=0;i<a;i++){ for (j=0;j<b;j++){ printf("%f ",matrix[i*b+j]); } printf("\n"); } } void printmatrixi(int a,int b, int *matrix){ int i,j; for (i=0;i<a;i++){ for (j=0;j<b;j++){ printf("%d ",matrix[i*b+j]); } printf("\n"); } } void printsolution(int a, int b, double *point, int *kmatrix){ int i,j; for (i=0;i<a;i++){ for (j=0;j<b;j++){ printf("%f ",point[i*b+j]); } printf("i=%d, k=%d\n",i, kmatrix[i]); } } void checkfunction(void){ char binary[50]; FILE *myfile2; int count,i,j; double *a; double p,dist; a=(double*)malloc(n*d*sizeof(double)); printf("please give the name of the binary file \n(of double floats-8-bytes) that is a right solution\nto the mean shift problem:\n"); dist=0; count=0; scanf("%s",binary); myfile2=fopen(binary,"rb"); for (i=0;i<n;i++){ //printf("reached here\n"); for (int j=0;j<d;j++){ fread(&p,8,1,myfile); a[i*d+j]=p; } } fclose(myfile2); for (i=0;i<n;i++){ for (j=0;j<d;j++){ dist+=(a[i*d+j]-y2[i*d+j])*(a[i*d+j]-y2[i*d+j]); } if (dist>sigma*sigma/100){ //distance>sigma/10 count++; } dist=0; } free(a); printf("we have problem in %d out of %d points\n",count,n); } __global__ void devMain(double *d_m,int *d_kmatrix,double *d_x,double *d_y,double *d_y2, double *d_epsilon, double *d_sigma, int *d_n, int *d_d, int *d_count, double *d_xtemp, double *d_sum2, int *d_numthreads, double *d_sum){ int j,i; int h; int bid=blockIdx.x; int tid=threadIdx.x; int steps=(*d_n/(*d_numthreads)); extern __shared__ double sharedx[]; for (j=0;j<(*d_d);j++){ sharedx[bid*(*d_d)+j]=d_x[bid*(*d_d)+j]; } __syncthreads(); while ((d_m[bid]>*d_epsilon)&&(d_kmatrix[bid]<15)){ //d_count[bid]=0; for (j=tid*steps;j<(tid+1)*steps;j++){ //printf("j=%d\n",j); if (distance(bid,j,d_y2,d_x,d_d)<(*d_sigma)*(*d_sigma)){ d_xtemp[bid*(*d_numthreads)+tid]=kernel(distance(bid,j,d_y2,d_x,d_d),d_sigma); //d_count[bid*(*d_numthreads)+tid]++; d_sum2[bid*(*d_numthreads)+tid]+=d_xtemp[bid*(*d_numthreads)+tid]; for (h=0;h<(*d_d);h++){ for (i=0;i<(*d_numthreads);i++){ if (i==tid) //to prevent data races d_y[bid*(*d_d)+h]+=d_xtemp[bid*(*d_numthreads)+tid]*d_x[j*(*d_d)+h]; __syncthreads(); } } } } if (1==tid){ for 
(h=0;h<(*d_numthreads);h++){ d_sum[bid]+=d_sum2[bid*(*d_numthreads)+h]; } for (h=0;h<*d_d;h++){ d_y[bid*(*d_d)+h]/=d_sum[bid]; } d_sum[bid]=0; d_m[bid]=distance(bid,bid,d_y,d_y2,d_d); d_kmatrix[bid]++; for (h=0;h<(*d_d);h++){ d_y2[bid*(*d_d)+h]=d_y[bid*(*d_d)+h]; d_y[bid*(*d_d)+h]=0; } } __syncthreads(); d_sum2[bid*(*d_numthreads)+tid]=0; __syncthreads(); } }
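The hipified mean-shift host code above never checks the status returned by hipMalloc / hipMemcpy. A small sketch of a checking wrapper that could be used around those calls; the HIP_CHECK name and the example values in main are assumptions for the illustration, not part of the original file.

#include <cstdio>
#include <cstdlib>
#include <hip/hip_runtime.h>

// Abort with the HIP error string if a runtime call fails.
#define HIP_CHECK(call)                                                     \
  do {                                                                      \
    hipError_t err_ = (call);                                               \
    if (err_ != hipSuccess) {                                               \
      fprintf(stderr, "HIP error %s at %s:%d\n",                            \
              hipGetErrorString(err_), __FILE__, __LINE__);                 \
      exit(EXIT_FAILURE);                                                   \
    }                                                                       \
  } while (0)

int main() {
  // Same allocation/copy pattern as the d_sigma setup in the mean-shift main().
  double sigma = 0.4;
  double *d_sigma = nullptr;
  HIP_CHECK(hipMalloc((void**)&d_sigma, sizeof(double)));
  HIP_CHECK(hipMemcpy(d_sigma, &sigma, sizeof(double), hipMemcpyHostToDevice));
  HIP_CHECK(hipFree(d_sigma));
  return 0;
}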
2116144fbbf0f95105f21c690f5a06caf376ac79.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/time.h> #include <float.h> #include <time.h> #include <cuda.h> struct timeval startwtime,endwtime; double seq_time; __device__ double kernel(double a,double *d_sigma); __device__ double distance(int l,int q,double *a, double *b, int *d_d); void printmatrixd(int a, int b, double *matrix); void printmatrixi(int a,int b, int *matrix); void printsolution(int a, int b, double *point, int *kmatrix); void checkfunction(void); __global__ void devMain(double *d_m,int *d_kmatrix,double *d_x,double *d_y,double *d_y2, double *d_epsilon, double *d_sigma, int *d_n, int *d_d, int *d_count, double *d_xtemp, double *d_sum2, int *d_numthreads, double *d_sum); int i,j,s,n,d,len,h,numthreads; int *d_n, *d_d; double *sum2,*m, *xtemp, *sum; double *d_sum2, *d_m, *d_xtemp, *d_sum; int *kmatrix, *count; int *d_kmatrix, *d_count, *d_numthreads; FILE *myfile; double *x, *y, *y2; double *d_x, *d_y, *d_y2; double sigma,epsilon,p; //only use sigma and epsilon double *d_sigma, *d_epsilon; int main(int argc, char **argv){ numthreads=6 ;// you can change this if you want if (argc!=5) { printf("Usage: %s, s, n, d, file.bin, \nwhere s is sigma*0.1, e is sigma*0.0001, \nn is the number of elements, \nd its dimension and file.bin\nthe binary file with the elements \n",argv[0]); exit(1); } s=atoi(argv[1]); sigma=s*0.1; cudaMalloc((void **)&d_sigma,sizeof(double)); cudaMemcpy(d_sigma,&sigma,sizeof(double),cudaMemcpyHostToDevice); cudaMalloc((void **)&d_numthreads,sizeof(int)); cudaMemcpy(d_numthreads,&numthreads,sizeof(int),cudaMemcpyHostToDevice); epsilon=sigma*0.0001; cudaMalloc((void **)&d_epsilon,sizeof(double)); cudaMemcpy(d_epsilon,&epsilon,sizeof(double),cudaMemcpyHostToDevice); n=atoi(argv[2]); cudaMalloc((void **)&d_n,sizeof(int)); cudaMemcpy(d_n,&n,sizeof(int),cudaMemcpyHostToDevice); d=atoi(argv[3]); cudaMalloc((void **)&d_d,sizeof(int)); cudaMemcpy(d_d,&d,sizeof(int),cudaMemcpyHostToDevice); xtemp=(double*)malloc(n*numthreads*sizeof(double)); cudaMalloc((void **)&d_xtemp,n*numthreads*sizeof(double)); m=(double*)malloc(n*sizeof(double)); cudaMalloc((void **)&d_m,n*sizeof(double)); sum2=(double*)malloc(n*numthreads*sizeof(double)); cudaMalloc((void **)&d_sum2,n*numthreads*sizeof(double)); sum=(double*)malloc(n*sizeof(double)); cudaMalloc((void **)&d_sum,n*sizeof(double)); kmatrix=(int*)malloc(n*sizeof(int)); cudaMalloc((void **)&d_kmatrix,n*sizeof(int)); count=(int*)malloc(n*sizeof(int)); cudaMalloc((void **)&d_count,n*sizeof(int)); x=(double*)malloc(n*d*sizeof(double)); cudaMalloc((void **)&d_x,n*d*sizeof(double)); y=(double*)malloc(n*d*sizeof(double)); cudaMalloc((void **)&d_y,n*d*sizeof(double)); y2=(double*)malloc(n*d*sizeof(double)); cudaMalloc((void **)&d_y2,n*d*sizeof(double)); for (i=0;i<n;i++){ m[i]=100; for (j=0;j<numthreads;j++){ xtemp[i*numthreads+j]=0; sum2[i*numthreads+j]=0; } sum[i]=0; kmatrix[i]=0; count[i]=0; } cudaMemcpy(d_m,m,n*sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy(d_sum,sum,n*sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy(d_sum2,sum2,n*numthreads*sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy(d_kmatrix,kmatrix,n*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(d_xtemp,xtemp,n*numthreads*sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy(d_count,count,n*sizeof(int),cudaMemcpyHostToDevice); myfile=fopen(argv[4],"rb"); for (i=0;i<n;i++){ for (j=0;j<d;j++){ len=fread(&p,8,1,myfile); x[i*d+j]=p; y[i*d+j]=0; y2[i*d+j]=p; } } fclose(myfile); cudaMemcpy(d_x,x,n*d*sizeof(double),cudaMemcpyHostToDevice); 
cudaMemcpy(d_y,y,n*d*sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy(d_y2,y2,n*d*sizeof(double),cudaMemcpyHostToDevice); /** start of the main part of the program**/ gettimeofday(&startwtime,NULL); devMain<<<n,numthreads,n*d*sizeof(double)>>>(d_m,d_kmatrix,d_x,d_y,d_y2,d_epsilon,d_sigma,d_n,d_d,d_count,d_xtemp,d_sum2,d_numthreads,d_sum); gettimeofday(&endwtime,NULL); cudaMemcpy(kmatrix,d_kmatrix,n*sizeof(int),cudaMemcpyDeviceToHost); cudaMemcpy(y2,d_y2,n*d*sizeof(double),cudaMemcpyDeviceToHost); printsolution(n,d,y2,kmatrix); //checkfunction(); seq_time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6 + endwtime.tv_sec - startwtime.tv_sec); printf("total time: %f secs\n",seq_time); free(x); free(y); free(y2); free(count); free(kmatrix); free (sum); free (sum2); free(m); cudaFree(d_x); cudaFree(d_y); cudaFree(d_y2); cudaFree(d_count); cudaFree(d_kmatrix); cudaFree(d_sum2); cudaFree(d_sum); cudaFree(m); } __device__ double kernel(double a, double *d_sigma){ double result; result=exp(-a/(2*(*d_sigma)*(*d_sigma))); return result; } __device__ double distance(int l,int q, double *a, double *b, int *d_d){ double sum=0; double root=0; int j; for (j=0;j<(*d_d);j++){ sum+=(a[l*(*d_d)+j]-b[q*(*d_d)+j])*(a[l*(*d_d)+j]-b[q*(*d_d)+j]); } root=sqrt(sum); return root; } void printmatrixd(int a,int b, double *matrix){ int i,j; for (i=0;i<a;i++){ for (j=0;j<b;j++){ printf("%f ",matrix[i*b+j]); } printf("\n"); } } void printmatrixi(int a,int b, int *matrix){ int i,j; for (i=0;i<a;i++){ for (j=0;j<b;j++){ printf("%d ",matrix[i*b+j]); } printf("\n"); } } void printsolution(int a, int b, double *point, int *kmatrix){ int i,j; for (i=0;i<a;i++){ for (j=0;j<b;j++){ printf("%f ",point[i*b+j]); } printf("i=%d, k=%d\n",i, kmatrix[i]); } } void checkfunction(void){ char binary[50]; FILE *myfile2; int count,i,j; double *a; double p,dist; a=(double*)malloc(n*d*sizeof(double)); printf("please give the name of the binary file \n(of double floats-8-bytes) that is a right solution\nto the mean shift problem:\n"); dist=0; count=0; scanf("%s",binary); myfile2=fopen(binary,"rb"); for (i=0;i<n;i++){ //printf("reached here\n"); for (int j=0;j<d;j++){ fread(&p,8,1,myfile); a[i*d+j]=p; } } fclose(myfile2); for (i=0;i<n;i++){ for (j=0;j<d;j++){ dist+=(a[i*d+j]-y2[i*d+j])*(a[i*d+j]-y2[i*d+j]); } if (dist>sigma*sigma/100){ //distance>sigma/10 count++; } dist=0; } free(a); printf("we have problem in %d out of %d points\n",count,n); } __global__ void devMain(double *d_m,int *d_kmatrix,double *d_x,double *d_y,double *d_y2, double *d_epsilon, double *d_sigma, int *d_n, int *d_d, int *d_count, double *d_xtemp, double *d_sum2, int *d_numthreads, double *d_sum){ int j,i; int h; int bid=blockIdx.x; int tid=threadIdx.x; int steps=(*d_n/(*d_numthreads)); extern __shared__ double sharedx[]; for (j=0;j<(*d_d);j++){ sharedx[bid*(*d_d)+j]=d_x[bid*(*d_d)+j]; } __syncthreads(); while ((d_m[bid]>*d_epsilon)&&(d_kmatrix[bid]<15)){ //d_count[bid]=0; for (j=tid*steps;j<(tid+1)*steps;j++){ //printf("j=%d\n",j); if (distance(bid,j,d_y2,d_x,d_d)<(*d_sigma)*(*d_sigma)){ d_xtemp[bid*(*d_numthreads)+tid]=kernel(distance(bid,j,d_y2,d_x,d_d),d_sigma); //d_count[bid*(*d_numthreads)+tid]++; d_sum2[bid*(*d_numthreads)+tid]+=d_xtemp[bid*(*d_numthreads)+tid]; for (h=0;h<(*d_d);h++){ for (i=0;i<(*d_numthreads);i++){ if (i==tid) //to prevent data races d_y[bid*(*d_d)+h]+=d_xtemp[bid*(*d_numthreads)+tid]*d_x[j*(*d_d)+h]; __syncthreads(); } } } } if (1==tid){ for (h=0;h<(*d_numthreads);h++){ d_sum[bid]+=d_sum2[bid*(*d_numthreads)+h]; } for (h=0;h<*d_d;h++){ 
d_y[bid*(*d_d)+h]/=d_sum[bid]; } d_sum[bid]=0; d_m[bid]=distance(bid,bid,d_y,d_y2,d_d); d_kmatrix[bid]++; for (h=0;h<(*d_d);h++){ d_y2[bid*(*d_d)+h]=d_y[bid*(*d_d)+h]; d_y[bid*(*d_d)+h]=0; } } __syncthreads(); d_sum2[bid*(*d_numthreads)+tid]=0; __syncthreads(); } }
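In both versions above, endwtime is taken with gettimeofday immediately after the kernel launch; launches are asynchronous, so the printed "total time" mostly measures the launch itself, not the kernel (the device-to-host copy synchronizes only after the second timestamp). A sketch of event-based timing for the same launch; the function name is illustrative and the actual devMain launch is left as a comment since its arguments live in the file above.

#include <cstdio>
#include <cuda_runtime.h>

// Time a kernel with CUDA events so the measurement includes device execution.
void timed_launch_example() {
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start);
  // devMain<<<n, numthreads, n * d * sizeof(double)>>>(...);  // same launch as above
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);          // block until the recorded work has finished
  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);
  printf("kernel time: %f secs\n", ms / 1000.0f);
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
}

int main() {
  timed_launch_example();
  return 0;
}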
95e869927fba6e19838109bcb80339abf18e7e48.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "THHUNN.h" #include "common.h" // Kernel for fast unfold+copy // Borrowed from Theano // Authors: Arjun Jain, Frdric Bastien, Jan Schlter, Nicolas Ballas __global__ void im3d2col_kernel(hipLaunchParm lp, const int n, const float* data_im, const int height, const int width, const int depth, const int kernel_h, const int kernel_w, const int kernel_d, const int pad_h, const int pad_w, const int pad_d, const int stride_h, const int stride_w, const int stride_d, const int height_col, const int width_col, const int depth_col, float* data_col) { CUDA_KERNEL_LOOP(index, n) { int d_out = index % depth_col; int w_index = index / depth_col; int w_out = w_index % width_col; int h_index = w_index / width_col; int h_out = h_index % height_col; int channel_in = h_index / height_col; //channel_in = 1; int channel_out = channel_in * kernel_h * kernel_w * kernel_d; int h_in = h_out * stride_h - pad_h; int w_in = w_out * stride_w - pad_w; int d_in = d_out * stride_d - pad_d; float* data_col_ptr = data_col; data_col_ptr += channel_out * (height_col * width_col * depth_col) + h_out * (width_col * depth_col) + w_out * depth_col + d_out; const float* data_im_ptr = data_im; data_im_ptr += channel_in * (height * width * depth) + h_in * (width * depth) + w_in * depth + d_in; for (int i = 0; i < kernel_h; ++i) { int h = h_in + i; for (int j = 0; j < kernel_w; ++j) { int w = w_in + j; for (int k = 0; k < kernel_d; ++k) { int d = d_in + k; *data_col_ptr = (h >= 0 && w >= 0 && d >= 0 && h < height && w < width && d < depth) ? data_im_ptr[i * (width * depth) + j *depth + k] : 0; data_col_ptr += height_col * width_col * depth_col; } } } } } void im3d2col(hipStream_t stream, const float* data_im, const int channels, const int height, const int width, const int depth, const int kernel_h, const int kernel_w, const int kernel_d, const int pad_h, const int pad_w, const int pad_d, const int stride_h, const int stride_w, const int stride_d, float* data_col) { // We are going to launch channels * height_col * width_col * depth_col kernels, each // kernel responsible for copying a single-channel grid. int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1; int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1; int depth_col = (depth + 2 * pad_d - kernel_d) / stride_d + 1; int num_kernels = channels * height_col * width_col * depth_col; hipLaunchKernel(HIP_KERNEL_NAME(im3d2col_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_im, height, width, depth, kernel_h, kernel_w, kernel_d, pad_h, pad_w, pad_d, stride_h, stride_w, stride_d, height_col, width_col, depth_col, data_col); THCudaCheck(hipGetLastError()); } __global__ void col2im3d_kernel(hipLaunchParm lp, const int n, const float* data_col, const int height, const int width, const int depth, const int channels, const int patch_h, const int patch_w, const int patch_d, const int pad_h, const int pad_w, const int pad_d, const int stride_h, const int stride_w, const int stride_d, const int height_col, const int width_col, const int depth_col, float* data_im) { CUDA_KERNEL_LOOP(index, n) { float val = 0; int d = index % depth + pad_d; int w_index = index / depth; int w = w_index % width + pad_w; int h_index = w_index / width; int h = h_index % height + pad_h; int c = h_index / height; // compute the start and end of the output int d_col_start = (d < patch_d) ? 
0 : (d - patch_d) / stride_d + 1; int d_col_end = min(d / stride_d + 1, depth_col); int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1; int w_col_end = min(w / stride_w + 1, width_col); int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1; int h_col_end = min(h / stride_h + 1, height_col); int offset = (c * patch_h * patch_w * patch_d + h * patch_w * patch_d + w * patch_d + d) * height_col * width_col * depth_col; int coeff_h_col = (1 - stride_h * patch_w * patch_d * height_col) * width_col * depth_col; int coeff_w_col = (1 - stride_w * patch_d * height_col * width_col) * depth_col; int coeff_d_col = (1 - stride_d * height_col * width_col * depth_col); for (int d_col = d_col_start; d_col < d_col_end; ++d_col) for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col + d_col * coeff_d_col]; } } data_im[index] = val; } } void col2im3d(hipStream_t stream, const float* data_col, const int channels, const int height, const int width, const int depth, const int patch_h, const int patch_w, const int patch_d, const int pad_h, const int pad_w, const int pad_d, const int stride_h, const int stride_w, const int stride_d, float* data_im) { int height_col = (height + 2 * pad_h - patch_h) / stride_h + 1; int width_col = (width + 2 * pad_w - patch_w) / stride_w + 1; int depth_col = (depth + 2 * pad_d - patch_d) / stride_d + 1; int num_kernels = channels * height * width * depth; // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. hipLaunchKernel(HIP_KERNEL_NAME(col2im3d_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_col, height, width, depth, channels, patch_h, patch_w, patch_d, pad_h, pad_w, pad_d, stride_h, stride_w, stride_d, height_col, width_col, depth_col, data_im); THCudaCheck(hipGetLastError()); } void THNN_CudaVolumetricConvolution_updateOutput( THCState *state, THCudaTensor *input, THCudaTensor *output, THCudaTensor *weight, THCudaTensor *bias, THCudaTensor *finput, THCudaTensor *fgradInput, int dT, int dW, int dH, int padT, int padW, int padH) { THCudaTensor *columns = finput; THCudaTensor *ones = fgradInput; THCUNN_assertSameGPU(state, 6, input, output, weight, bias, columns, ones); THArgCheck(input->nDimension == 4 || input->nDimension == 5, 2, "4D or 5D (batch mode) tensor is expected" ); THArgCheck(weight->nDimension == 5, 4, "5D weight tensor is expected (nOutputPlane x nInputPlane x kT x kH x kW)" ); int nOutputPlane = (int)weight->size[0]; int nInputPlane = (int)weight->size[1]; int kT = (int)weight->size[2]; int kH = (int)weight->size[3]; int kW = (int)weight->size[4]; int batch = 1; if (input->nDimension == 4) { // Force batch batch = 0; THCudaTensor_resize5d(state, input, 1, input->size[0], input->size[1], input->size[2], input->size[3]); } long inputWidth = input->size[3]; long inputHeight = input->size[2]; long inputDepth = input->size[4]; long outputWidth = (inputWidth + 2*padH - kH) / dH + 1; long outputHeight = (inputHeight + 2*padT - kT) / dT + 1; long outputDepth = (inputDepth + 2*padW - kW) / dW + 1; // Batch size + input planes long batchSize = input->size[0]; // Resize output THCudaTensor_resize5d(state, output, batchSize, nOutputPlane, outputHeight, outputWidth, outputDepth); // Resize temporary columns THCudaTensor_resize2d(state, columns, nInputPlane*kW*kH*kT, 
outputDepth*outputHeight*outputWidth); // Define a buffer of ones, for bias accumulation // Note: this buffer can be shared with other modules, it only ever gets increased, // and always contains ones. if (ones->nDimension != 3 || ones->size[0]*ones->size[1]*ones->size[2] < outputDepth*outputHeight*outputWidth) { // Resize plane and fill with ones... THCudaTensor_resize3d(state, ones, outputHeight, outputWidth, outputDepth); THCudaTensor_fill(state, ones, 1); } // Helpers THCudaTensor *input_n = THCudaTensor_new(state); THCudaTensor *output_n = THCudaTensor_new(state); // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per output: THCudaTensor_select(state, input_n, input, 0, elt); THCudaTensor_select(state, output_n, output, 0, elt); // Do Bias first: // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m_ = nOutputPlane; long n_ = outputDepth * outputHeight * outputWidth; long k_ = 1; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) THCudaBlas_Sgemm( state, 't', 'n', n_, m_, k_, 1, THCudaTensor_data(state, ones), k_, THCudaTensor_data(state, bias), k_, 0, THCudaTensor_data(state, output_n), n_ ); // Extract columns: im3d2col( THCState_getCurrentStream(state), THCudaTensor_data(state, input_n), nInputPlane, inputHeight, inputWidth, inputDepth, kT, kH, kW, padT, padH, padW, dT, dH, dW, THCudaTensor_data(state, columns) ); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m = weight->size[0]; long n = columns->size[1]; long k = weight->size[1]*weight->size[2]*weight->size[3]*weight->size[4]; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) THCudaBlas_Sgemm( state, 'n', 'n', n, m, k, 1, THCudaTensor_data(state, columns), n, THCudaTensor_data(state, weight), k, 1, THCudaTensor_data(state, output_n), n ); } // Free THCudaTensor_free(state, input_n); THCudaTensor_free(state, output_n); // Resize output if (batch == 0) { THCudaTensor_resize4d(state, output, nOutputPlane, outputHeight, outputWidth, outputDepth); THCudaTensor_resize4d(state, input, nInputPlane, inputHeight, inputWidth, inputDepth); } } void THNN_CudaVolumetricConvolution_updateGradInput( THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, THCudaTensor *weight, THCudaTensor *finput, int dT, int dW, int dH, int padT, int padW, int padH) { THArgCheck(weight->nDimension == 5, 4, "5D weight tensor is expected (nOutputPlane x nInputPlane x kT x kH x kW)" ); int nOutputPlane = (int)weight->size[0]; int nInputPlane = (int)weight->size[1]; int kT = (int)weight->size[2]; int kH = (int)weight->size[3]; int kW = (int)weight->size[4]; THCudaTensor *gradColumns = finput; THCUNN_assertSameGPU(state, 5, input, gradOutput, weight, gradColumns, gradInput); THArgCheck(input->nDimension == 4 || input->nDimension == 5, 2, "4D or 5D (batch mode) tensor is expected" ); int batch = 1; if (input->nDimension == 4) { // Force batch batch = 0; THCudaTensor_resize5d(state, input, 1, input->size[0], input->size[1], input->size[2], input->size[3]); THCudaTensor_resize5d(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2], gradOutput->size[3]); } long inputWidth = input->size[3]; long inputHeight = input->size[2]; long inputDepth = input->size[4]; long outputWidth = (inputWidth + 2*padH - kH) / dH + 1; long outputHeight = (inputHeight + 2*padT - kT) / dT + 1; long 
outputDepth = (inputDepth + 2*padW - kW) / dW + 1; // Batch size + input planes long batchSize = input->size[0]; // Resize output THCudaTensor_resize5d(state, gradInput, batchSize, nInputPlane, inputHeight, inputWidth, inputDepth); // Resize temporary columns THCudaTensor_resize2d(state, gradColumns, nInputPlane*kH*kT*kW, outputDepth*outputHeight*outputWidth); // Helpers THCudaTensor *input_n = THCudaTensor_new(state); THCudaTensor *gradInput_n = THCudaTensor_new(state); THCudaTensor *gradOutput_n = THCudaTensor_new(state); // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per sample: THCudaTensor_select(state, input_n, input, 0, elt); THCudaTensor_select(state, gradInput_n, gradInput, 0, elt); THCudaTensor_select(state, gradOutput_n, gradOutput, 0, elt); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m = weight->size[1]*weight->size[2]*weight->size[3]*weight->size[4]; long n = gradColumns->size[1]; long k = weight->size[0]; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) THCudaBlas_Sgemm( state, 'n', 't', n, m, k, 1, THCudaTensor_data(state, gradOutput_n), n, THCudaTensor_data(state, weight), m, 0, THCudaTensor_data(state, gradColumns), n ); // Unpack columns back into input: col2im3d( THCState_getCurrentStream(state), THCudaTensor_data(state, gradColumns), nInputPlane, inputHeight, inputWidth, inputDepth, kT, kH, kW, padT, padH, padW, dT, dH, dW, THCudaTensor_data(state, gradInput_n) ); } // Free THCudaTensor_free(state, input_n); THCudaTensor_free(state, gradInput_n); THCudaTensor_free(state, gradOutput_n); // Resize output if (batch == 0) { THCudaTensor_resize4d(state, gradOutput, nOutputPlane, outputHeight, outputWidth, outputDepth); THCudaTensor_resize4d(state, input, nInputPlane, inputHeight, inputWidth, inputDepth); THCudaTensor_resize4d(state, gradInput, nInputPlane, inputHeight, inputWidth, inputDepth); } } void THNN_CudaVolumetricConvolution_accGradParameters( THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradWeight, THCudaTensor *gradBias, THCudaTensor *finput, THCudaTensor *fgradInput, int dT, int dW, int dH, int padT, int padW, int padH, float scale) { THCudaTensor *columns = finput; THCudaTensor *ones = fgradInput; THCUNN_assertSameGPU(state, 6, input, gradOutput, gradWeight, gradBias, columns, ones); THArgCheck(gradWeight->nDimension == 5, 4, "5D gradWeight tensor is expected (nOutputPlane x nInputPlane x kT x kH x kW)" ); int nOutputPlane = (int)gradWeight->size[0]; int nInputPlane = (int)gradWeight->size[1]; int kT = (int)gradWeight->size[2]; int kH = (int)gradWeight->size[3]; int kW = (int)gradWeight->size[4]; THArgCheck( input->nDimension == 4 || input->nDimension == 5, 2, "3D or 4D (batch mode) tensor is expected" ); int batch = 1; if (input->nDimension == 4) { // Force batch batch = 0; THCudaTensor_resize5d(state, input, 1, input->size[0], input->size[1], input->size[2], input->size[3]); THCudaTensor_resize5d(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2], gradOutput->size[3]); } long inputWidth = input->size[3]; long inputHeight = input->size[2]; long inputDepth = input->size[4]; long outputWidth = (inputWidth + 2*padH - kH) / dH + 1; long outputHeight = (inputHeight + 2*padT - kT) / dT + 1; long outputDepth = (inputDepth + 2*padW - kW) / dW + 1; // Batch size + input planes long batchSize = input->size[0]; // Define a buffer of ones, for bias accumulation if 
(ones->nDimension != 3 || ones->size[0]*ones->size[1]*ones->size[2] < outputDepth*outputHeight*outputWidth) { // Resize plane and fill with ones... THCudaTensor_resize3d(state, ones, outputHeight, outputWidth, outputDepth); THCudaTensor_fill(state, ones, 1); } // Resize temporary columns THCudaTensor_resize2d(state, columns, nInputPlane*kH*kT*kW, outputDepth*outputHeight*outputWidth); // Helpers THCudaTensor *input_n = THCudaTensor_new(state); THCudaTensor *gradOutput_n = THCudaTensor_new(state); // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per output: THCudaTensor_select(state, input_n, input, 0, elt); THCudaTensor_select(state, gradOutput_n, gradOutput, 0, elt); // Extract columns: im3d2col( THCState_getCurrentStream(state), THCudaTensor_data(state, input_n), nInputPlane, inputHeight, inputWidth, inputDepth, kT, kH, kW, padT, padH, padW, dT, dH, dW, THCudaTensor_data(state, columns) ); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m = gradWeight->size[0]; long n = gradWeight->size[1]*gradWeight->size[2]*gradWeight->size[3]*gradWeight->size[4]; long k = columns->size[1]; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) THCudaBlas_Sgemm( state, 't', 'n', n, m, k, scale, THCudaTensor_data(state, columns), k, THCudaTensor_data(state, gradOutput_n), k, 1, THCudaTensor_data(state, gradWeight), n ); // Do Bias: // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m_ = nOutputPlane; long k_ = outputDepth * outputHeight * outputWidth; // Do GEMV (note: this is a bit confusing because gemv assumes column-major matrices) THCudaBlas_Sgemv( state, 't', k_, m_, scale, THCudaTensor_data(state, gradOutput_n), k_, THCudaTensor_data(state, ones), 1, 1, THCudaTensor_data(state, gradBias), 1 ); } // Free THCudaTensor_free(state, input_n); THCudaTensor_free(state, gradOutput_n); // Resize if (batch == 0) { THCudaTensor_resize4d(state, gradOutput, nOutputPlane, outputHeight, outputWidth, outputDepth); THCudaTensor_resize4d(state, input, nInputPlane, inputHeight, inputWidth, inputDepth); } }
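An illustrative host-side helper (not taken from THCUNN; the sizes in main are assumptions) that reproduces the output-extent arithmetic, e.g. (inputWidth + 2*padH - kH) / dH + 1, and the column-buffer shape allocated by the THCudaTensor_resize2d calls above.

#include <cstdio>

// Output extent of a convolution along one axis.
long conv_out_size(long in, long pad, long kernel, long stride) {
  return (in + 2 * pad - kernel) / stride + 1;
}

int main() {
  // Assumed example sizes; in the code above they come from the input and weight tensors.
  long inT = 16, inH = 32, inW = 32;
  long kT = 3, kH = 3, kW = 3;
  long padT = 1, padH = 1, padW = 1;
  long dT = 1, dH = 1, dW = 1;
  long nInputPlane = 4;
  long oT = conv_out_size(inT, padT, kT, dT);
  long oH = conv_out_size(inH, padH, kH, dH);
  long oW = conv_out_size(inW, padW, kW, dW);
  // im3d2col unfolds the input into a (nInputPlane*kT*kH*kW) x (oT*oH*oW) column matrix.
  printf("output: %ld x %ld x %ld, columns: %ld x %ld\n",
         oT, oH, oW, nInputPlane * kT * kH * kW, oT * oH * oW);
  return 0;
}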
95e869927fba6e19838109bcb80339abf18e7e48.cu
#include "hip/hip_runtime.h" #include "THCUNN.h" #include "common.h" // Kernel for fast unfold+copy // Borrowed from Theano // Authors: Arjun Jain, Frédéric Bastien, Jan Schlüter, Nicolas Ballas __global__ void im3d2col_kernel(hipLaunchParm lp, const int n, const float* data_im, const int height, const int width, const int depth, const int kernel_h, const int kernel_w, const int kernel_d, const int pad_h, const int pad_w, const int pad_d, const int stride_h, const int stride_w, const int stride_d, const int height_col, const int width_col, const int depth_col, float* data_col) { CUDA_KERNEL_LOOP(index, n) { int d_out = index % depth_col; int w_index = index / depth_col; int w_out = w_index % width_col; int h_index = w_index / width_col; int h_out = h_index % height_col; int channel_in = h_index / height_col; //channel_in = 1; int channel_out = channel_in * kernel_h * kernel_w * kernel_d; int h_in = h_out * stride_h - pad_h; int w_in = w_out * stride_w - pad_w; int d_in = d_out * stride_d - pad_d; float* data_col_ptr = data_col; data_col_ptr += channel_out * (height_col * width_col * depth_col) + h_out * (width_col * depth_col) + w_out * depth_col + d_out; const float* data_im_ptr = data_im; data_im_ptr += channel_in * (height * width * depth) + h_in * (width * depth) + w_in * depth + d_in; for (int i = 0; i < kernel_h; ++i) { int h = h_in + i; for (int j = 0; j < kernel_w; ++j) { int w = w_in + j; for (int k = 0; k < kernel_d; ++k) { int d = d_in + k; *data_col_ptr = (h >= 0 && w >= 0 && d >= 0 && h < height && w < width && d < depth) ? data_im_ptr[i * (width * depth) + j *depth + k] : 0; data_col_ptr += height_col * width_col * depth_col; } } } } } void im3d2col(hipStream_t stream, const float* data_im, const int channels, const int height, const int width, const int depth, const int kernel_h, const int kernel_w, const int kernel_d, const int pad_h, const int pad_w, const int pad_d, const int stride_h, const int stride_w, const int stride_d, float* data_col) { // We are going to launch channels * height_col * width_col * depth_col kernels, each // kernel responsible for copying a single-channel grid. int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1; int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1; int depth_col = (depth + 2 * pad_d - kernel_d) / stride_d + 1; int num_kernels = channels * height_col * width_col * depth_col; hipLaunchKernel(HIP_KERNEL_NAME(im3d2col_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_im, height, width, depth, kernel_h, kernel_w, kernel_d, pad_h, pad_w, pad_d, stride_h, stride_w, stride_d, height_col, width_col, depth_col, data_col); THCudaCheck(hipGetLastError()); } __global__ void col2im3d_kernel(hipLaunchParm lp, const int n, const float* data_col, const int height, const int width, const int depth, const int channels, const int patch_h, const int patch_w, const int patch_d, const int pad_h, const int pad_w, const int pad_d, const int stride_h, const int stride_w, const int stride_d, const int height_col, const int width_col, const int depth_col, float* data_im) { CUDA_KERNEL_LOOP(index, n) { float val = 0; int d = index % depth + pad_d; int w_index = index / depth; int w = w_index % width + pad_w; int h_index = w_index / width; int h = h_index % height + pad_h; int c = h_index / height; // compute the start and end of the output int d_col_start = (d < patch_d) ? 0 : (d - patch_d) / stride_d + 1; int d_col_end = min(d / stride_d + 1, depth_col); int w_col_start = (w < patch_w) ? 
0 : (w - patch_w) / stride_w + 1; int w_col_end = min(w / stride_w + 1, width_col); int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1; int h_col_end = min(h / stride_h + 1, height_col); int offset = (c * patch_h * patch_w * patch_d + h * patch_w * patch_d + w * patch_d + d) * height_col * width_col * depth_col; int coeff_h_col = (1 - stride_h * patch_w * patch_d * height_col) * width_col * depth_col; int coeff_w_col = (1 - stride_w * patch_d * height_col * width_col) * depth_col; int coeff_d_col = (1 - stride_d * height_col * width_col * depth_col); for (int d_col = d_col_start; d_col < d_col_end; ++d_col) for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col + d_col * coeff_d_col]; } } data_im[index] = val; } } void col2im3d(hipStream_t stream, const float* data_col, const int channels, const int height, const int width, const int depth, const int patch_h, const int patch_w, const int patch_d, const int pad_h, const int pad_w, const int pad_d, const int stride_h, const int stride_w, const int stride_d, float* data_im) { int height_col = (height + 2 * pad_h - patch_h) / stride_h + 1; int width_col = (width + 2 * pad_w - patch_w) / stride_w + 1; int depth_col = (depth + 2 * pad_d - patch_d) / stride_d + 1; int num_kernels = channels * height * width * depth; // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. hipLaunchKernel(HIP_KERNEL_NAME(col2im3d_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_col, height, width, depth, channels, patch_h, patch_w, patch_d, pad_h, pad_w, pad_d, stride_h, stride_w, stride_d, height_col, width_col, depth_col, data_im); THCudaCheck(hipGetLastError()); } void THNN_CudaVolumetricConvolution_updateOutput( THCState *state, THCudaTensor *input, THCudaTensor *output, THCudaTensor *weight, THCudaTensor *bias, THCudaTensor *finput, THCudaTensor *fgradInput, int dT, int dW, int dH, int padT, int padW, int padH) { THCudaTensor *columns = finput; THCudaTensor *ones = fgradInput; THCUNN_assertSameGPU(state, 6, input, output, weight, bias, columns, ones); THArgCheck(input->nDimension == 4 || input->nDimension == 5, 2, "4D or 5D (batch mode) tensor is expected" ); THArgCheck(weight->nDimension == 5, 4, "5D weight tensor is expected (nOutputPlane x nInputPlane x kT x kH x kW)" ); int nOutputPlane = (int)weight->size[0]; int nInputPlane = (int)weight->size[1]; int kT = (int)weight->size[2]; int kH = (int)weight->size[3]; int kW = (int)weight->size[4]; int batch = 1; if (input->nDimension == 4) { // Force batch batch = 0; THCudaTensor_resize5d(state, input, 1, input->size[0], input->size[1], input->size[2], input->size[3]); } long inputWidth = input->size[3]; long inputHeight = input->size[2]; long inputDepth = input->size[4]; long outputWidth = (inputWidth + 2*padH - kH) / dH + 1; long outputHeight = (inputHeight + 2*padT - kT) / dT + 1; long outputDepth = (inputDepth + 2*padW - kW) / dW + 1; // Batch size + input planes long batchSize = input->size[0]; // Resize output THCudaTensor_resize5d(state, output, batchSize, nOutputPlane, outputHeight, outputWidth, outputDepth); // Resize temporary columns THCudaTensor_resize2d(state, columns, nInputPlane*kW*kH*kT, outputDepth*outputHeight*outputWidth); // Define a buffer of ones, for bias accumulation // Note: this buffer can be shared with other 
modules, it only ever gets increased, // and always contains ones. if (ones->nDimension != 3 || ones->size[0]*ones->size[1]*ones->size[2] < outputDepth*outputHeight*outputWidth) { // Resize plane and fill with ones... THCudaTensor_resize3d(state, ones, outputHeight, outputWidth, outputDepth); THCudaTensor_fill(state, ones, 1); } // Helpers THCudaTensor *input_n = THCudaTensor_new(state); THCudaTensor *output_n = THCudaTensor_new(state); // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per output: THCudaTensor_select(state, input_n, input, 0, elt); THCudaTensor_select(state, output_n, output, 0, elt); // Do Bias first: // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m_ = nOutputPlane; long n_ = outputDepth * outputHeight * outputWidth; long k_ = 1; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) THCudaBlas_Sgemm( state, 't', 'n', n_, m_, k_, 1, THCudaTensor_data(state, ones), k_, THCudaTensor_data(state, bias), k_, 0, THCudaTensor_data(state, output_n), n_ ); // Extract columns: im3d2col( THCState_getCurrentStream(state), THCudaTensor_data(state, input_n), nInputPlane, inputHeight, inputWidth, inputDepth, kT, kH, kW, padT, padH, padW, dT, dH, dW, THCudaTensor_data(state, columns) ); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m = weight->size[0]; long n = columns->size[1]; long k = weight->size[1]*weight->size[2]*weight->size[3]*weight->size[4]; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) THCudaBlas_Sgemm( state, 'n', 'n', n, m, k, 1, THCudaTensor_data(state, columns), n, THCudaTensor_data(state, weight), k, 1, THCudaTensor_data(state, output_n), n ); } // Free THCudaTensor_free(state, input_n); THCudaTensor_free(state, output_n); // Resize output if (batch == 0) { THCudaTensor_resize4d(state, output, nOutputPlane, outputHeight, outputWidth, outputDepth); THCudaTensor_resize4d(state, input, nInputPlane, inputHeight, inputWidth, inputDepth); } } void THNN_CudaVolumetricConvolution_updateGradInput( THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, THCudaTensor *weight, THCudaTensor *finput, int dT, int dW, int dH, int padT, int padW, int padH) { THArgCheck(weight->nDimension == 5, 4, "5D weight tensor is expected (nOutputPlane x nInputPlane x kT x kH x kW)" ); int nOutputPlane = (int)weight->size[0]; int nInputPlane = (int)weight->size[1]; int kT = (int)weight->size[2]; int kH = (int)weight->size[3]; int kW = (int)weight->size[4]; THCudaTensor *gradColumns = finput; THCUNN_assertSameGPU(state, 5, input, gradOutput, weight, gradColumns, gradInput); THArgCheck(input->nDimension == 4 || input->nDimension == 5, 2, "4D or 5D (batch mode) tensor is expected" ); int batch = 1; if (input->nDimension == 4) { // Force batch batch = 0; THCudaTensor_resize5d(state, input, 1, input->size[0], input->size[1], input->size[2], input->size[3]); THCudaTensor_resize5d(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2], gradOutput->size[3]); } long inputWidth = input->size[3]; long inputHeight = input->size[2]; long inputDepth = input->size[4]; long outputWidth = (inputWidth + 2*padH - kH) / dH + 1; long outputHeight = (inputHeight + 2*padT - kT) / dT + 1; long outputDepth = (inputDepth + 2*padW - kW) / dW + 1; // Batch size + input planes long batchSize = input->size[0]; // Resize output 
THCudaTensor_resize5d(state, gradInput, batchSize, nInputPlane, inputHeight, inputWidth, inputDepth); // Resize temporary columns THCudaTensor_resize2d(state, gradColumns, nInputPlane*kH*kT*kW, outputDepth*outputHeight*outputWidth); // Helpers THCudaTensor *input_n = THCudaTensor_new(state); THCudaTensor *gradInput_n = THCudaTensor_new(state); THCudaTensor *gradOutput_n = THCudaTensor_new(state); // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per sample: THCudaTensor_select(state, input_n, input, 0, elt); THCudaTensor_select(state, gradInput_n, gradInput, 0, elt); THCudaTensor_select(state, gradOutput_n, gradOutput, 0, elt); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m = weight->size[1]*weight->size[2]*weight->size[3]*weight->size[4]; long n = gradColumns->size[1]; long k = weight->size[0]; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) THCudaBlas_Sgemm( state, 'n', 't', n, m, k, 1, THCudaTensor_data(state, gradOutput_n), n, THCudaTensor_data(state, weight), m, 0, THCudaTensor_data(state, gradColumns), n ); // Unpack columns back into input: col2im3d( THCState_getCurrentStream(state), THCudaTensor_data(state, gradColumns), nInputPlane, inputHeight, inputWidth, inputDepth, kT, kH, kW, padT, padH, padW, dT, dH, dW, THCudaTensor_data(state, gradInput_n) ); } // Free THCudaTensor_free(state, input_n); THCudaTensor_free(state, gradInput_n); THCudaTensor_free(state, gradOutput_n); // Resize output if (batch == 0) { THCudaTensor_resize4d(state, gradOutput, nOutputPlane, outputHeight, outputWidth, outputDepth); THCudaTensor_resize4d(state, input, nInputPlane, inputHeight, inputWidth, inputDepth); THCudaTensor_resize4d(state, gradInput, nInputPlane, inputHeight, inputWidth, inputDepth); } } void THNN_CudaVolumetricConvolution_accGradParameters( THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradWeight, THCudaTensor *gradBias, THCudaTensor *finput, THCudaTensor *fgradInput, int dT, int dW, int dH, int padT, int padW, int padH, float scale) { THCudaTensor *columns = finput; THCudaTensor *ones = fgradInput; THCUNN_assertSameGPU(state, 6, input, gradOutput, gradWeight, gradBias, columns, ones); THArgCheck(gradWeight->nDimension == 5, 4, "5D gradWeight tensor is expected (nOutputPlane x nInputPlane x kT x kH x kW)" ); int nOutputPlane = (int)gradWeight->size[0]; int nInputPlane = (int)gradWeight->size[1]; int kT = (int)gradWeight->size[2]; int kH = (int)gradWeight->size[3]; int kW = (int)gradWeight->size[4]; THArgCheck( input->nDimension == 4 || input->nDimension == 5, 2, "3D or 4D (batch mode) tensor is expected" ); int batch = 1; if (input->nDimension == 4) { // Force batch batch = 0; THCudaTensor_resize5d(state, input, 1, input->size[0], input->size[1], input->size[2], input->size[3]); THCudaTensor_resize5d(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2], gradOutput->size[3]); } long inputWidth = input->size[3]; long inputHeight = input->size[2]; long inputDepth = input->size[4]; long outputWidth = (inputWidth + 2*padH - kH) / dH + 1; long outputHeight = (inputHeight + 2*padT - kT) / dT + 1; long outputDepth = (inputDepth + 2*padW - kW) / dW + 1; // Batch size + input planes long batchSize = input->size[0]; // Define a buffer of ones, for bias accumulation if (ones->nDimension != 3 || ones->size[0]*ones->size[1]*ones->size[2] < outputDepth*outputHeight*outputWidth) { // Resize plane and 
fill with ones... THCudaTensor_resize3d(state, ones, outputHeight, outputWidth, outputDepth); THCudaTensor_fill(state, ones, 1); } // Resize temporary columns THCudaTensor_resize2d(state, columns, nInputPlane*kH*kT*kW, outputDepth*outputHeight*outputWidth); // Helpers THCudaTensor *input_n = THCudaTensor_new(state); THCudaTensor *gradOutput_n = THCudaTensor_new(state); // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per output: THCudaTensor_select(state, input_n, input, 0, elt); THCudaTensor_select(state, gradOutput_n, gradOutput, 0, elt); // Extract columns: im3d2col( THCState_getCurrentStream(state), THCudaTensor_data(state, input_n), nInputPlane, inputHeight, inputWidth, inputDepth, kT, kH, kW, padT, padH, padW, dT, dH, dW, THCudaTensor_data(state, columns) ); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m = gradWeight->size[0]; long n = gradWeight->size[1]*gradWeight->size[2]*gradWeight->size[3]*gradWeight->size[4]; long k = columns->size[1]; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) THCudaBlas_Sgemm( state, 't', 'n', n, m, k, scale, THCudaTensor_data(state, columns), k, THCudaTensor_data(state, gradOutput_n), k, 1, THCudaTensor_data(state, gradWeight), n ); // Do Bias: // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m_ = nOutputPlane; long k_ = outputDepth * outputHeight * outputWidth; // Do GEMV (note: this is a bit confusing because gemv assumes column-major matrices) THCudaBlas_Sgemv( state, 't', k_, m_, scale, THCudaTensor_data(state, gradOutput_n), k_, THCudaTensor_data(state, ones), 1, 1, THCudaTensor_data(state, gradBias), 1 ); } // Free THCudaTensor_free(state, input_n); THCudaTensor_free(state, gradOutput_n); // Resize if (batch == 0) { THCudaTensor_resize4d(state, gradOutput, nOutputPlane, outputHeight, outputWidth, outputDepth); THCudaTensor_resize4d(state, input, nInputPlane, inputHeight, inputWidth, inputDepth); } }
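The forward pass above reduces the 3-D convolution to an im3d2col unfold followed by a single SGEMM per sample, with the bias applied first as a rank-1 GEMM against a buffer of ones. The shape bookkeeping is easy to lose inside the GEMM calls, so here is a minimal host-side sketch of it; it is plain C++ and the concrete sizes are invented for illustration, not taken from this file.

#include <cstdio>

// Output extent of one convolved dimension, matching the expressions used in
// THNN_CudaVolumetricConvolution_updateOutput: (in + 2*pad - k) / stride + 1.
static long convOut(long in, long pad, long k, long d) { return (in + 2*pad - k) / d + 1; }

int main() {
    // Hypothetical sizes, for illustration only.
    long nInputPlane = 3, nOutputPlane = 16;
    long inT = 8,  inH = 32, inW = 32;     // input depth / height / width
    long kT = 3,   kH = 3,  kW = 3;        // kernel
    long dT = 1,   dH = 1,  dW = 1;        // stride
    long padT = 1, padH = 1, padW = 1;     // padding

    long outT = convOut(inT, padT, kT, dT);
    long outH = convOut(inH, padH, kH, dH);
    long outW = convOut(inW, padW, kW, dW);

    // columns buffer: (nInputPlane*kT*kH*kW) rows x (outT*outH*outW) columns
    long colRows = nInputPlane * kT * kH * kW;
    long colCols = outT * outH * outW;

    printf("output volume : %ld x %ld x %ld x %ld\n", nOutputPlane, outT, outH, outW);
    // Convolution GEMM: (nOutputPlane x colRows) * (colRows x colCols)
    printf("conv GEMM     : m=%ld n=%ld k=%ld\n", nOutputPlane, colCols, colRows);
    // Bias GEMM: outer product of bias (nOutputPlane x 1) with ones (1 x colCols)
    printf("bias GEMM     : m_=%ld n_=%ld k_=1\n", nOutputPlane, colCols);
    return 0;
}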
6bf95a22a6ece734acf5c234ebff8204e5d9caa9.hip
// !!! This is a file automatically generated by hipify!!!
#include "Convergence_GPU_ship.hpp"
#include "kernel_GPU_ship.cuh"
#include "hip/hip_runtime.h"

inline bool CUDA_MALLOC( void ** devPtr, size_t size ) {
    hipError_t cudaStatus;
    cudaStatus = hipMalloc( devPtr, size );
    if ( cudaStatus != hipSuccess ) {
        printf( "error: unable to allocate buffer\n");
        return false;
    }
    return true;
}

inline bool CUDA_MEMCPY( void * dst, const void * src, size_t count, enum hipMemcpyKind kind ) {
    hipError_t cudaStatus;
    cudaStatus = hipMemcpy( dst, src, count, kind );
    if ( cudaStatus != hipSuccess ) {
        printf( "error: unable to copy buffer\n");
        return false;
    }
    return true;
}

Convergence_GPU_ship::Convergence_GPU_ship() : Convergence("GPU_double_ship") {
}

Convergence_GPU_ship::Convergence_GPU_ship(ColorMap* _colors, int _max_iters) : Convergence("GPU_double_ship") {
    colors = _colors;
    max_iters = _max_iters;
    hostTab = nullptr;
    deviceTab = nullptr;

    hipError_t cudaStatus;
    cudaStatus = hipSetDevice(0);
    if ( cudaStatus != hipSuccess ) {
        printf( "error: unable to setup cuda device\n");
        exit(0);
    }
}

Convergence_GPU_ship::~Convergence_GPU_ship( ){
    if ( deviceTab != nullptr )
        hipFree(deviceTab);   // device buffer comes from hipMalloc, so release with hipFree
    delete[] hostTab;         // host buffer comes from new[], so release with delete[]
    hipDeviceReset();         // reset only after all buffers have been released
}

void Convergence_GPU_ship::updateImage(const long double _zoom, const long double _offsetX, const long double _offsetY, const int IMAGE_WIDTH, const int IMAGE_HEIGHT, sf::Image& image)
{
    int nb_point = IMAGE_WIDTH*IMAGE_HEIGHT;
    dim3 grid(80,50,1);   //nbr bloc
    dim3 block(16,16,1);  //nbr threads

    if(hostTab == nullptr)
        hostTab = new uint32_t[nb_point];

    if(deviceTab == nullptr)
        CUDA_MALLOC((void**)&deviceTab, nb_point * sizeof(uint32_t));

    double offsetX = _offsetX;
    double offsetY = _offsetY;
    double zoom = _zoom;

    hipLaunchKernelGGL(( kernel_updateImage_GPU_ship), dim3(grid), dim3(block), 0, 0, zoom, offsetX, offsetY, IMAGE_WIDTH, IMAGE_HEIGHT, deviceTab, max_iters);

    CUDA_MEMCPY(hostTab, deviceTab, nb_point*sizeof(uint32_t), hipMemcpyDeviceToHost);

    for(int y = 0; y < IMAGE_HEIGHT; y++) {
        for(int x = 0; x < IMAGE_WIDTH; x++) {
            image.setPixel(x, y, colors->getColor(hostTab[x+y*IMAGE_WIDTH]));
        }
    }
}
6bf95a22a6ece734acf5c234ebff8204e5d9caa9.cu
#include "Convergence_GPU_ship.hpp" #include "kernel_GPU_ship.cuh" #include "cuda_runtime.h" inline bool CUDA_MALLOC( void ** devPtr, size_t size ) { cudaError_t cudaStatus; cudaStatus = cudaMalloc( devPtr, size ); if ( cudaStatus != cudaSuccess ) { printf( "error: unable to allocate buffer\n"); return false; } return true; } inline bool CUDA_MEMCPY( void * dst, const void * src, size_t count, enum cudaMemcpyKind kind ) { cudaError_t cudaStatus; cudaStatus = cudaMemcpy( dst, src, count, kind ); if ( cudaStatus != cudaSuccess ) { printf( "error: unable to copy buffer\n"); return false; } return true; } Convergence_GPU_ship::Convergence_GPU_ship() : Convergence("GPU_double_ship") { } Convergence_GPU_ship::Convergence_GPU_ship(ColorMap* _colors, int _max_iters) : Convergence("GPU_double_ship") { colors = _colors; max_iters = _max_iters; hostTab = nullptr; deviceTab = nullptr; cudaError_t cudaStatus; cudaStatus = cudaSetDevice(0); if ( cudaStatus != cudaSuccess ) { printf( "error: unable to setup cuda device\n"); exit(0); } } Convergence_GPU_ship::~Convergence_GPU_ship( ){ cudaError_t cudaStatus = cudaDeviceReset(); free(hostTab); free(deviceTab); } void Convergence_GPU_ship::updateImage(const long double _zoom, const long double _offsetX, const long double _offsetY, const int IMAGE_WIDTH, const int IMAGE_HEIGHT, sf::Image& image) { int nb_point = IMAGE_WIDTH*IMAGE_HEIGHT; dim3 grid(80,50,1); //nbr bloc dim3 block(16,16,1); //nbr threads if(hostTab == nullptr) hostTab = new uint32_t[nb_point]; if(deviceTab == nullptr) CUDA_MALLOC((void**)&deviceTab, nb_point * sizeof(uint32_t)); double offsetX = _offsetX; double offsetY = _offsetX; double zoom = _zoom; kernel_updateImage_GPU_ship<<<grid, block>>>(zoom, offsetX, offsetY, IMAGE_WIDTH, IMAGE_HEIGHT, deviceTab, max_iters); CUDA_MEMCPY(hostTab, deviceTab, nb_point*sizeof(uint32_t), cudaMemcpyDeviceToHost); for(int y = 0; y < IMAGE_HEIGHT; y++) { for(int x = 0; x < IMAGE_WIDTH; x++) { image.setPixel(x, y, colors->getColor(hostTab[x+y*IMAGE_WIDTH])); } } }
7057fbf3f04e0d5463390b61ea1c219de86a32ad.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 dsymv_upper.cu is nearly identical to dsymv_upper.cu, just change names and drop MAGMA_D_CONJ. dsymv_kernel_U (upper) in dsymv_upper.cu is very similar to dsymv_kernel_L (lower) in dsymv.cu; diff the two files to compare. @generated from magmablas/zhemv_mgpu_upper.cu, normal z -> d, Wed Jan 2 14:18:51 2019 @author Mark Gates */ #include "magma_internal.h" #include "commonblas_d.h" #define PRECISION_d #define NB_X 64 #define NB_Y 4 #define bank_shift 33 #define quarter_NB_X 16 #define half_NB_X 32 /***************************************************************************//** Upper case, compute block multiply, work = A*x, for any size n: [ (A11*x1 + A12*x2 + A13*x3) --- --- ] [ A11 A12 A13 ] [ x1 ] work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] = [ A12^H A22 A23 ] * [ x2 ] [ (A13^H*x1) (A23^H*x2) (A33*x3) ] [ A13^H A23^H A33 ] [ x3 ] The order is different from the lower case, because the upper case processes a block row from the diagonal to the right, whereas the lower case processes a block row from the diagonal to the left. Uses a 64x4 thread block. For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed). For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles. In both cases, each thread multiplies 4 elements. For rows past the bottom of the matrix, the A pointer is adjusted to be the last valid row of A, which multiple threads will read. Extra rows are ignored when saving results to work. Columns past the right edge are explicitly ignored when loading. x values past the bottom are set to zero, thus, extra columns are zeroed when multiplying. *******************************************************************************/ __global__ void dsymv_kernel_U_mgpu( int n, double const * __restrict__ A, int lda, double const * __restrict__ x, int incx, double * __restrict__ work, int my_gpu_id, int ngpu, int block_offset) { #if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200) // treats sA as 16x64 block #define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ] // treats sA as 32x32 block #define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)]) // 64x4 thread block const int tx = threadIdx.x; const int ty = threadIdx.y; const int blk = blockIdx.x; const int blk_ind = NB_X * blk; const int td = NB_X * ty + tx; // 32x8 thread block const int tx2 = td % half_NB_X; const int ty2 = td / half_NB_X; // If this blk has fewer than NB_X rows, partial is the number of valid rows, // so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid. // Else, partial == 0. int partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0); double psum, psum_t; double total = MAGMA_D_ZERO; // sA is used as a 32x32 block, sA32(i,j), // and as a 16x64 block, sA16(i,j), in different parts of the code. // sA must be at least half_NB_X*bank_shift = 32x33 = 1056; // quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056 __shared__ double sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? 
*/ __shared__ double sx_blk[NB_X]; // for x[ blk ] __shared__ double sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks right of diag double rA[4]; double psums_t[4]; // -------------------- // load 64x1 block x(blk_ind + 0:63) into sx_blk x += (blk_ind + tx)*incx; // x is x(blk_ind + tx) if ( ty == 0 ) { if ( (partial && tx >= partial) || (blk == 0 && tx < block_offset) ) { sx_blk[tx] = MAGMA_D_ZERO; } else { sx_blk[tx] = x[0]; } } // -------------------- // move to block row work += blk*lda; // work is work(0, blk) A += blk_ind; // A is A(blk_ind, 0) A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2) if ( blk % ngpu == my_gpu_id ) { // this GPU owns this diagonal block, so // move to 32x32 diag block A += (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2) // load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA, // as four 32x8 sections one after another: // columns 0:7, then 8:15, then 16:23, then 24:31 if ( partial ) { if ( tx2 >= partial ) { A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row } #pragma unroll for (int j=0; j < half_NB_X; j += 8) { if ( ty2+j < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } else { sA32(tx2, ty2 + j) = MAGMA_D_ZERO; } } if ( tx2 >= partial ) { A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2) } } else { #pragma unroll for (int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // symmetrize 32x32 diag block, copying upper to lower triangle, // as four 32x8 sections in parallel: // columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31 #pragma unroll for (int j=ty2*4; j < ty2*4 + 4; j++) { if ( j > tx2 ) { sA32(j, tx2) = MAGMA_D_CONJ( sA32(tx2, j) ); } } __syncthreads(); // multiply 32x32 diag block * x // each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3) psum = MAGMA_D_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j]; } __syncthreads(); // store partial row sums sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2) if ( ty2 == 0 ) { total = sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to next 32x32 diag block, then repeat steps from first diag block A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2) // load 32x32 diag block A[block + 0:31, block + 0:31] into sA if ( partial ) { if ( tx2 + half_NB_X >= partial ) { A = A - (tx2 + half_NB_X) + (partial - 1); } #pragma unroll for (int j=0; j < half_NB_X; j += 8) { if ( ty2+j + half_NB_X < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } else { sA32(tx2, ty2 + j) = MAGMA_D_ZERO; } } if ( tx2 + half_NB_X >= partial ) { A = A + (tx2 + half_NB_X) - (partial - 1); } } else { #pragma unroll for (int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // symmetrize 32x32 diag block, copying upper to lower triangle #pragma unroll for (int j=ty2*4; j < ty2*4 + 4; j++) { if ( j > tx2 ) { sA32(j, tx2) = MAGMA_D_CONJ( sA32(tx2, j) ); } } __syncthreads(); // multiply 32x32 diag block * x psum = MAGMA_D_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j]; } __syncthreads(); // store partial row sums sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + 
NB/2 + tx2) if ( ty2 == 1 ) { total = sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to off-diag 32x32 block A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + NB/2 + ty2) // load 32x32 block of A into sA, // as four 32x8 sections one after another: // columns 0:7, then 8:15, then 16:23, then 24:31 if ( partial ) { if ( tx2 >= partial ) { A = A - (tx2) + (partial - 1); } #pragma unroll for (int j=0; j < half_NB_X; j += 8) { if ( ty2+j + half_NB_X < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } else { sA32(tx2, ty2 + j) = MAGMA_D_ZERO; } } if ( tx2 >= partial ) { A = A + (tx2) - (partial - 1); } } else { #pragma unroll for (int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // multiply 32x32 block (below diag) psum = MAGMA_D_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum += MAGMA_D_CONJ( sA32(ty2 + j*8, tx2) ) * sx_blk[j*8 + ty2]; } //__syncthreads(); // no sync needed here // multiply transposed 32x32 block (above diag) psum_t = MAGMA_D_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum_t += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j]; } __syncthreads(); // store partial sums for non-transposed 32x32 block sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2) if ( ty2 == 1 ) { total = total + sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // store partial sums for transposed 32x32 block sA32(ty2, tx2) = psum_t; __syncthreads(); // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2) if ( ty2 == 0 ) { total = total + sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to next 64x64 block right of diag in block row, and // switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block A -= half_NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2) A -= (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, ty2) } // finish switching thread offset A -= ty2*lda + tx2; // A is A(blk_ind, 0) A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty) int next = blk + (my_gpu_id + ngpu - 1 - blk % ngpu) % ngpu + 1; A += (next/ngpu)*NB_X*lda; // A is A(blk_ind + tx, next*NB_X + 4*ty) // Unlike lower case, don't adjust A here for partial # of rows. // Since block is right of diagonal, it must have all NB rows, // but can have < NB columns, dealt with when loading below. x -= blk_ind*incx; // x is x(tx) // 16x16 thread block const int tx4 = td % quarter_NB_X; const int ty4 = td / quarter_NB_X; // cycle over blocks jj right of diagonal, in block row blk for (int jj=next; jj < gridDim.x; jj += ngpu) { partial = (jj == gridDim.x - 1 ? 
((n + block_offset) % NB_X) : 0); // load 64x1 block x(jj_ind + 0:63) into sx_jj // block is right of diagonal, so don't need to worry about offset here if ( ty == 0 ) { if ( partial == 0 || tx < partial ) { sx_jj[tx] = x[jj*NB_X*incx]; } else { sx_jj[tx] = MAGMA_D_ZERO; } } __syncthreads(); for (int k=0; k < 4; k++) { // load 64x16 block of A into rA, 4 elements per thread, // as four 64x4 sections in parallel: // columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15 if ( partial ) { #pragma unroll for (int j=0; j < 4; j++) { if ( 4*ty + j + k*quarter_NB_X < partial ) { rA[j] = A[j*lda]; } else { rA[j] = MAGMA_D_ZERO; } } } else { #pragma unroll for (int j=0; j < 4; j++) { rA[j] = A[j*lda]; } } // 1) multiply 64x16 block A_{blk,jj} * x_jj // each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k) // 2) multiply 16x64 block A_{blk,jj} * x_blk, // storing each product Aji*xi to sA(j,i) #pragma unroll for (int j=0; j < 4; j++) { total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj sA16(ty*4 + j, tx) = MAGMA_D_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk } __syncthreads(); // do partial row sums for transposed 16x64 result // use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty) // sum sixteen 16x4 sections in parallel: // columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63 psum_t = MAGMA_D_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum_t += sA16(tx4, ty4*4 + j); } __syncthreads(); // store partial row sums of transposed result, y_jj (locally) psums_t[k] = psum_t; // move right to next 64x16 block A += lda * quarter_NB_X; // A is A(blk_ind + tx, jj*NB_X + (k+1)*NB_X/4 + 4*ty) } // already at next 64x64 block // A is A(blk_ind + tx, (jj+1)*NB_x + 4*ty) // store partial row sums of transposed result, y_jj #pragma unroll for (int k=0; k < 4; k++) { sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k]; } __syncthreads(); // sum up partial row sums of transposed result, y_jj, and store final total to workspace // thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16 if ( ty4 < 4 && (partial == 0 || tx4 + ty4*quarter_NB_X < partial) ) { int ty4_nb4 = ty4*quarter_NB_X; psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4) + sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4) + sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4) + sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4) + sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4) + sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4) + sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4) + sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4); work[jj*NB_X + tx4 + ty4_nb4] = psum_t; //MAGMA_D_MAKE( tx4, blk ); // store at work( jj*NB_X + tx4 + ty4*16, blk ) } __syncthreads(); } // store row sums sA16(ty, tx) = total; __syncthreads(); partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0); // sum up final total, y_blk, for row tx if ( ty == 0 && (partial == 0 || tx < partial) ) { total = sA16(0, tx) + sA16(1, tx) + sA16(2, tx) + sA16(3, tx); work[blk*NB_X + tx] = total; //MAGMA_D_MAKE( tx, blk ); // store at work( blk*NB_X + tx, blk ) } #endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */ } // end dsymv_kernel_U_mgpu /***************************************************************************//** Upper case, sum up partial results per GPU. Each block sums one block row; each thread sums one row. 
On input (for 3 blocks): [ (A11*x1 + A12*x2 + A13*x3) --- --- ] work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] [ (A13^H*x1) (A23^H*x2) (A33*x3) ] On output: [ (A11*x1 + A12*x2 + A13*x3) ] y = alpha*[ (A12^H*x1) + (A22*x2 + A23*x3) ] [ (A13^H*x1) + (A23^H*x2) + (A33*x3) ] Note beta*y is not included here; see magmablas_dsymv_mgpu_sync. The above workspace is distributed over multiple GPUs as diagrammed for 5 blocks: [ * ] blk=0 * data for non-transposed row w_blk = A_{blk,1:nblock} * x_{blk:nblock} work[gpu=0] = [ * ] blk=1 x data for transposed block w_jj = A_{blk,jj}^H * x_{blk} [ x x * ] blk=2 blanks are not set [ * ] blk=3 [ x x x x * ] blk=4 [ * ] blk=0 work[gpu=1] = [ x * ] blk=1 [ * ] blk=2 [ x x x * ] blk=3 [ * ] blk=4 On output, rows across are summed up. Entries right of the diagonal blocks are not accessed. There are no blank lines; work has been set to 0 if a GPU has no data to contribute. [ * ] y[gpu=0] = [ * ] [ x + x + * ] [ * ] [ x + x + x + x + * ] [ * ] y[gpu=1] = [ x + * ] [ * ] [ x + x + x + * ] [ * ] *******************************************************************************/ __global__ void dsymv_kernel_U_mgpu_sum( int n, double alpha, int lda, double * __restrict__ y, int incy, double const * __restrict__ work, int my_gpu_id, int ngpu, int block_offset) { int tx = threadIdx.x; int blk = blockIdx.x; int blk_ind = blk * NB_X; int ind = blk_ind + tx; // Don't write outside [block_offset, ..., n+block_offset) if ( ind >= block_offset && ind < n+block_offset ) { double Ax = MAGMA_D_ZERO; work += ind; // if this GPU owns block-column blk, all blocks j=[0, ..., blk] contain data; // else only block j=blk contains data. int first = 0; if ( blk % ngpu != my_gpu_id ) { first = blk; } for (int j = first; j <= blk; ++j) { Ax += work[j*lda]; } y[ind * incy] = alpha*Ax; // see magmablas_dsymv_sync for beta*y } } // end dsymv_kernel_L_mgpu_sum
7057fbf3f04e0d5463390b61ea1c219de86a32ad.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 dsymv_upper.cu is nearly identical to dsymv_upper.cu, just change names and drop MAGMA_D_CONJ. dsymv_kernel_U (upper) in dsymv_upper.cu is very similar to dsymv_kernel_L (lower) in dsymv.cu; diff the two files to compare. @generated from magmablas/zhemv_mgpu_upper.cu, normal z -> d, Wed Jan 2 14:18:51 2019 @author Mark Gates */ #include "magma_internal.h" #include "commonblas_d.h" #define PRECISION_d #define NB_X 64 #define NB_Y 4 #define bank_shift 33 #define quarter_NB_X 16 #define half_NB_X 32 /***************************************************************************//** Upper case, compute block multiply, work = A*x, for any size n: [ (A11*x1 + A12*x2 + A13*x3) --- --- ] [ A11 A12 A13 ] [ x1 ] work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] = [ A12^H A22 A23 ] * [ x2 ] [ (A13^H*x1) (A23^H*x2) (A33*x3) ] [ A13^H A23^H A33 ] [ x3 ] The order is different from the lower case, because the upper case processes a block row from the diagonal to the right, whereas the lower case processes a block row from the diagonal to the left. Uses a 64x4 thread block. For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed). For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles. In both cases, each thread multiplies 4 elements. For rows past the bottom of the matrix, the A pointer is adjusted to be the last valid row of A, which multiple threads will read. Extra rows are ignored when saving results to work. Columns past the right edge are explicitly ignored when loading. x values past the bottom are set to zero, thus, extra columns are zeroed when multiplying. *******************************************************************************/ __global__ void dsymv_kernel_U_mgpu( int n, double const * __restrict__ A, int lda, double const * __restrict__ x, int incx, double * __restrict__ work, int my_gpu_id, int ngpu, int block_offset) { #if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200) // treats sA as 16x64 block #define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ] // treats sA as 32x32 block #define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)]) // 64x4 thread block const int tx = threadIdx.x; const int ty = threadIdx.y; const int blk = blockIdx.x; const int blk_ind = NB_X * blk; const int td = NB_X * ty + tx; // 32x8 thread block const int tx2 = td % half_NB_X; const int ty2 = td / half_NB_X; // If this blk has fewer than NB_X rows, partial is the number of valid rows, // so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid. // Else, partial == 0. int partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0); double psum, psum_t; double total = MAGMA_D_ZERO; // sA is used as a 32x32 block, sA32(i,j), // and as a 16x64 block, sA16(i,j), in different parts of the code. // sA must be at least half_NB_X*bank_shift = 32x33 = 1056; // quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056 __shared__ double sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? 
*/ __shared__ double sx_blk[NB_X]; // for x[ blk ] __shared__ double sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks right of diag double rA[4]; double psums_t[4]; // -------------------- // load 64x1 block x(blk_ind + 0:63) into sx_blk x += (blk_ind + tx)*incx; // x is x(blk_ind + tx) if ( ty == 0 ) { if ( (partial && tx >= partial) || (blk == 0 && tx < block_offset) ) { sx_blk[tx] = MAGMA_D_ZERO; } else { sx_blk[tx] = x[0]; } } // -------------------- // move to block row work += blk*lda; // work is work(0, blk) A += blk_ind; // A is A(blk_ind, 0) A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2) if ( blk % ngpu == my_gpu_id ) { // this GPU owns this diagonal block, so // move to 32x32 diag block A += (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2) // load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA, // as four 32x8 sections one after another: // columns 0:7, then 8:15, then 16:23, then 24:31 if ( partial ) { if ( tx2 >= partial ) { A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row } #pragma unroll for (int j=0; j < half_NB_X; j += 8) { if ( ty2+j < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } else { sA32(tx2, ty2 + j) = MAGMA_D_ZERO; } } if ( tx2 >= partial ) { A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2) } } else { #pragma unroll for (int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // symmetrize 32x32 diag block, copying upper to lower triangle, // as four 32x8 sections in parallel: // columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31 #pragma unroll for (int j=ty2*4; j < ty2*4 + 4; j++) { if ( j > tx2 ) { sA32(j, tx2) = MAGMA_D_CONJ( sA32(tx2, j) ); } } __syncthreads(); // multiply 32x32 diag block * x // each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3) psum = MAGMA_D_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j]; } __syncthreads(); // store partial row sums sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2) if ( ty2 == 0 ) { total = sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to next 32x32 diag block, then repeat steps from first diag block A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2) // load 32x32 diag block A[block + 0:31, block + 0:31] into sA if ( partial ) { if ( tx2 + half_NB_X >= partial ) { A = A - (tx2 + half_NB_X) + (partial - 1); } #pragma unroll for (int j=0; j < half_NB_X; j += 8) { if ( ty2+j + half_NB_X < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } else { sA32(tx2, ty2 + j) = MAGMA_D_ZERO; } } if ( tx2 + half_NB_X >= partial ) { A = A + (tx2 + half_NB_X) - (partial - 1); } } else { #pragma unroll for (int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // symmetrize 32x32 diag block, copying upper to lower triangle #pragma unroll for (int j=ty2*4; j < ty2*4 + 4; j++) { if ( j > tx2 ) { sA32(j, tx2) = MAGMA_D_CONJ( sA32(tx2, j) ); } } __syncthreads(); // multiply 32x32 diag block * x psum = MAGMA_D_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j]; } __syncthreads(); // store partial row sums sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + 
NB/2 + tx2) if ( ty2 == 1 ) { total = sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to off-diag 32x32 block A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + NB/2 + ty2) // load 32x32 block of A into sA, // as four 32x8 sections one after another: // columns 0:7, then 8:15, then 16:23, then 24:31 if ( partial ) { if ( tx2 >= partial ) { A = A - (tx2) + (partial - 1); } #pragma unroll for (int j=0; j < half_NB_X; j += 8) { if ( ty2+j + half_NB_X < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } else { sA32(tx2, ty2 + j) = MAGMA_D_ZERO; } } if ( tx2 >= partial ) { A = A + (tx2) - (partial - 1); } } else { #pragma unroll for (int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // multiply 32x32 block (below diag) psum = MAGMA_D_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum += MAGMA_D_CONJ( sA32(ty2 + j*8, tx2) ) * sx_blk[j*8 + ty2]; } //__syncthreads(); // no sync needed here // multiply transposed 32x32 block (above diag) psum_t = MAGMA_D_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum_t += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j]; } __syncthreads(); // store partial sums for non-transposed 32x32 block sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2) if ( ty2 == 1 ) { total = total + sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // store partial sums for transposed 32x32 block sA32(ty2, tx2) = psum_t; __syncthreads(); // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2) if ( ty2 == 0 ) { total = total + sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to next 64x64 block right of diag in block row, and // switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block A -= half_NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2) A -= (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, ty2) } // finish switching thread offset A -= ty2*lda + tx2; // A is A(blk_ind, 0) A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty) int next = blk + (my_gpu_id + ngpu - 1 - blk % ngpu) % ngpu + 1; A += (next/ngpu)*NB_X*lda; // A is A(blk_ind + tx, next*NB_X + 4*ty) // Unlike lower case, don't adjust A here for partial # of rows. // Since block is right of diagonal, it must have all NB rows, // but can have < NB columns, dealt with when loading below. x -= blk_ind*incx; // x is x(tx) // 16x16 thread block const int tx4 = td % quarter_NB_X; const int ty4 = td / quarter_NB_X; // cycle over blocks jj right of diagonal, in block row blk for (int jj=next; jj < gridDim.x; jj += ngpu) { partial = (jj == gridDim.x - 1 ? 
((n + block_offset) % NB_X) : 0); // load 64x1 block x(jj_ind + 0:63) into sx_jj // block is right of diagonal, so don't need to worry about offset here if ( ty == 0 ) { if ( partial == 0 || tx < partial ) { sx_jj[tx] = x[jj*NB_X*incx]; } else { sx_jj[tx] = MAGMA_D_ZERO; } } __syncthreads(); for (int k=0; k < 4; k++) { // load 64x16 block of A into rA, 4 elements per thread, // as four 64x4 sections in parallel: // columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15 if ( partial ) { #pragma unroll for (int j=0; j < 4; j++) { if ( 4*ty + j + k*quarter_NB_X < partial ) { rA[j] = A[j*lda]; } else { rA[j] = MAGMA_D_ZERO; } } } else { #pragma unroll for (int j=0; j < 4; j++) { rA[j] = A[j*lda]; } } // 1) multiply 64x16 block A_{blk,jj} * x_jj // each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k) // 2) multiply 16x64 block A_{blk,jj} * x_blk, // storing each product Aji*xi to sA(j,i) #pragma unroll for (int j=0; j < 4; j++) { total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj sA16(ty*4 + j, tx) = MAGMA_D_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk } __syncthreads(); // do partial row sums for transposed 16x64 result // use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty) // sum sixteen 16x4 sections in parallel: // columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63 psum_t = MAGMA_D_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum_t += sA16(tx4, ty4*4 + j); } __syncthreads(); // store partial row sums of transposed result, y_jj (locally) psums_t[k] = psum_t; // move right to next 64x16 block A += lda * quarter_NB_X; // A is A(blk_ind + tx, jj*NB_X + (k+1)*NB_X/4 + 4*ty) } // already at next 64x64 block // A is A(blk_ind + tx, (jj+1)*NB_x + 4*ty) // store partial row sums of transposed result, y_jj #pragma unroll for (int k=0; k < 4; k++) { sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k]; } __syncthreads(); // sum up partial row sums of transposed result, y_jj, and store final total to workspace // thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16 if ( ty4 < 4 && (partial == 0 || tx4 + ty4*quarter_NB_X < partial) ) { int ty4_nb4 = ty4*quarter_NB_X; psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4) + sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4) + sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4) + sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4) + sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4) + sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4) + sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4) + sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4); work[jj*NB_X + tx4 + ty4_nb4] = psum_t; //MAGMA_D_MAKE( tx4, blk ); // store at work( jj*NB_X + tx4 + ty4*16, blk ) } __syncthreads(); } // store row sums sA16(ty, tx) = total; __syncthreads(); partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0); // sum up final total, y_blk, for row tx if ( ty == 0 && (partial == 0 || tx < partial) ) { total = sA16(0, tx) + sA16(1, tx) + sA16(2, tx) + sA16(3, tx); work[blk*NB_X + tx] = total; //MAGMA_D_MAKE( tx, blk ); // store at work( blk*NB_X + tx, blk ) } #endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */ } // end dsymv_kernel_U_mgpu /***************************************************************************//** Upper case, sum up partial results per GPU. Each block sums one block row; each thread sums one row. 
On input (for 3 blocks): [ (A11*x1 + A12*x2 + A13*x3) --- --- ] work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] [ (A13^H*x1) (A23^H*x2) (A33*x3) ] On output: [ (A11*x1 + A12*x2 + A13*x3) ] y = alpha*[ (A12^H*x1) + (A22*x2 + A23*x3) ] [ (A13^H*x1) + (A23^H*x2) + (A33*x3) ] Note beta*y is not included here; see magmablas_dsymv_mgpu_sync. The above workspace is distributed over multiple GPUs as diagrammed for 5 blocks: [ * ] blk=0 * data for non-transposed row w_blk = A_{blk,1:nblock} * x_{blk:nblock} work[gpu=0] = [ * ] blk=1 x data for transposed block w_jj = A_{blk,jj}^H * x_{blk} [ x x * ] blk=2 blanks are not set [ * ] blk=3 [ x x x x * ] blk=4 [ * ] blk=0 work[gpu=1] = [ x * ] blk=1 [ * ] blk=2 [ x x x * ] blk=3 [ * ] blk=4 On output, rows across are summed up. Entries right of the diagonal blocks are not accessed. There are no blank lines; work has been set to 0 if a GPU has no data to contribute. [ * ] y[gpu=0] = [ * ] [ x + x + * ] [ * ] [ x + x + x + x + * ] [ * ] y[gpu=1] = [ x + * ] [ * ] [ x + x + x + * ] [ * ] *******************************************************************************/ __global__ void dsymv_kernel_U_mgpu_sum( int n, double alpha, int lda, double * __restrict__ y, int incy, double const * __restrict__ work, int my_gpu_id, int ngpu, int block_offset) { int tx = threadIdx.x; int blk = blockIdx.x; int blk_ind = blk * NB_X; int ind = blk_ind + tx; // Don't write outside [block_offset, ..., n+block_offset) if ( ind >= block_offset && ind < n+block_offset ) { double Ax = MAGMA_D_ZERO; work += ind; // if this GPU owns block-column blk, all blocks j=[0, ..., blk] contain data; // else only block j=blk contains data. int first = 0; if ( blk % ngpu != my_gpu_id ) { first = blk; } for (int j = first; j <= blk; ++j) { Ax += work[j*lda]; } y[ind * incy] = alpha*Ax; // see magmablas_dsymv_sync for beta*y } } // end dsymv_kernel_L_mgpu_sum
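Both kernels above key their bounds handling off one quantity: partial = (n + block_offset) % NB_X, the number of valid rows in the last 64-row block (0 meaning the last block is full). The launcher is not part of this entry, so the grid size used below is an assumption (ceil((n + block_offset) / NB_X) blocks); the sketch only shows how the block count and the ragged tail relate for an example problem size.

#include <cstdio>

int main() {
    const int NB_X = 64;                                  // matches the #define in the file
    int n = 1000, block_offset = 8;                       // hypothetical size and offset
    int nblocks = (n + block_offset + NB_X - 1) / NB_X;   // assumed grid size, one block per 64 rows
    int partial = (n + block_offset) % NB_X;              // valid rows in the last block; 0 == full
    printf("blocks = %d, last block holds %d valid rows\n",
           nblocks, partial == 0 ? NB_X : partial);
    return 0;
}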
f00dd685456a18b995bb2b4c49ec625d399b733e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <GL/gl.h> #include <GL/glut.h> #include <stdio.h> #include <math.h> #include <stdlib.h> #include <vector> #include <iostream> #include <fstream> #include <string.h> #include <assert.h> #include <map> #define WIDTH 2000 #define HEIGHT 2000 #define TRIANGLES 10000 #define BUNNYHEIGHT 400 #define BUNNYWIDTH 400 using namespace std; extern float image[WIDTH][HEIGHT][3]; float parImage[WIDTH][HEIGHT][3]; GLfloat light_diffuse[] = {0.5, 0.5, 0.5, 1.0}; /* Red diffuse light. */ GLfloat light_position[] = {-0.2, 1.0, -1, 0.0}; /* Infinite light location. */ typedef struct Tri { float x1, y1, z1, x2, y2, z2, x3, y3, z3; float x1_2d, y1_2d, x2_2d, y2_2d, x3_2d, y3_2d; float bboxLeft, bboxTop, bboxWidth, bboxHeight; float R1, G1, B1, R2, G2, B2, R3, G3, B3; float normalX, normalY, normalZ; } Tri; extern void Parse(int type); extern Tri Triangles[TRIANGLES]; Tri *cudaTri; float *Depthbuffer; float *img; int *TriIndex; void imageWrite() { int x, y, w = WIDTH, h = HEIGHT, r, g, b; FILE *f; unsigned char *img = NULL; //int yres = HEIGHT; int filesize = 54 + 3*w*h; //w is your image width, h is image height, both int if( img ) free( img ); img = (unsigned char *)malloc(3*w*h); memset(img,0,sizeof(img)); int i = 0; for(i=0; i<w; i++) { for(int j=0; j<h; j++) { x=i; y = j; r = (int)(image[i][j][0]*255); g = (int)(image[i][j][1]*255); b = (int)(image[i][j][2]*255); if (r > 255) r=255; if (g > 255) g=255; if (b > 255) b=255; img[(x+y*w)*3+2] = (unsigned char)(r); img[(x+y*w)*3+1] = (unsigned char)(g); img[(x+y*w)*3+0] = (unsigned char)(b); } } unsigned char bmpfileheader[14] = {'B','M', 0,0,0,0, 0,0, 0,0, 54,0,0,0}; unsigned char bmpinfoheader[40] = {40,0,0,0, 0,0,0,0, 0,0,0,0, 1,0, 24,0}; unsigned char bmppad[3] = {0,0,0}; bmpfileheader[ 2] = (unsigned char)(filesize ); bmpfileheader[ 3] = (unsigned char)(filesize>> 8); bmpfileheader[ 4] = (unsigned char)(filesize>>16); bmpfileheader[ 5] = (unsigned char)(filesize>>24); bmpinfoheader[ 4] = (unsigned char)( w ); bmpinfoheader[ 5] = (unsigned char)( w>> 8); bmpinfoheader[ 6] = (unsigned char)( w>>16); bmpinfoheader[ 7] = (unsigned char)( w>>24); bmpinfoheader[ 8] = (unsigned char)( h ); bmpinfoheader[ 9] = (unsigned char)( h>> 8); bmpinfoheader[10] = (unsigned char)( h>>16); bmpinfoheader[11] = (unsigned char)( h>>24); f = fopen("img.bmp","wb"); fwrite(bmpfileheader,1,14,f); fwrite(bmpinfoheader,1,40,f); for(i=0; i<h; i++) { fwrite(img+(w*(h-i-1)*3),3,w,f); fwrite(bmppad,1,(4-(w*3)%4)%4,f); } fclose(f); } __forceinline__ __device__ float computeColor(float x, float y, float z, float diffuseColor) { //Color of light float lightPosX = 3; float lightPosY = 15.0; float lightPosZ = 13.0; float size = sqrt(lightPosX * lightPosX + lightPosY * lightPosY + lightPosZ + lightPosZ); lightPosX /= size; lightPosY /= size; lightPosZ /= size; size = sqrt(x * x + y * y + z * z); x /= size; y /= size; z /= size; float diffuseIntensity = 0.75; float diffuseLight = diffuseIntensity * (x * lightPosX + y * lightPosY + z * lightPosZ); float ambientLight = 0; float color = (diffuseLight + ambientLight) * diffuseColor; return color; } __global__ void Rasterizer(Tri *cudaTri, float *img, float *Depthbuffer, int triCounter) { int i = blockIdx.x * 512 + threadIdx.x; if(i > triCounter) return; float xa = cudaTri[i].x1_2d; float xb = cudaTri[i].x2_2d; float xc = cudaTri[i].x3_2d; float ya = cudaTri[i].y1_2d; float yb = cudaTri[i].y2_2d; float yc = cudaTri[i].y3_2d; for(int j = 
cudaTri[i].bboxLeft; j <= cudaTri[i].bboxLeft + cudaTri[i].bboxWidth; ++j) { for(int k = cudaTri[i].bboxTop; k <= cudaTri[i].bboxTop + cudaTri[i].bboxHeight; ++k) { float x = (float)j; float y = (float)k; float beta = (((xa-xc) * (y - yc)) - ((x-xc) * (ya - yc))) / (((xb - xa) * (yc - ya)) - ((xc - xa) * (yb-ya))); float gamma = (((xb-xa) * (y - ya)) - ((x-xa) * (yb - ya))) / (((xb - xa) * (yc - ya)) - ((xc - xa) * (yb-ya))); float alpha = 1 - beta - gamma; if(alpha >= 0 && alpha <= 1 && beta >= 0 && beta <= 1 && gamma >= 0 && gamma <= 1) { float depthP = alpha * cudaTri[i].z1 + beta * cudaTri[i].z2 + gamma * cudaTri[i].z3; // 1 is camera z position float distancefromeye = 1 - depthP; if(distancefromeye <= Depthbuffer[(j * HEIGHT) + k]) { float R = computeColor(cudaTri[i].x1, cudaTri[i].y1, cudaTri[i].z1, cudaTri[i].R1) * alpha + computeColor(cudaTri[i].x2, cudaTri[i].y2, cudaTri[i].z2, cudaTri[i].R2) * beta + computeColor(cudaTri[i].x3, cudaTri[i].y3, cudaTri[i].z3, cudaTri[i].R3) * gamma; float G = computeColor(cudaTri[i].x1, cudaTri[i].y1, cudaTri[i].z1, cudaTri[i].G1) * alpha + computeColor(cudaTri[i].x2, cudaTri[i].y2, cudaTri[i].z2, cudaTri[i].G2) * beta + computeColor(cudaTri[i].x3, cudaTri[i].y3, cudaTri[i].z3, cudaTri[i].G3) * gamma; float B = computeColor(cudaTri[i].x1, cudaTri[i].y1, cudaTri[i].z1, cudaTri[i].B1) * alpha + computeColor(cudaTri[i].x2, cudaTri[i].y2, cudaTri[i].z2, cudaTri[i].B2) * beta + computeColor(cudaTri[i].x3, cudaTri[i].y3, cudaTri[i].z3, cudaTri[i].B3) * gamma; for(int p = 0; p < 5; p++) { for(int l = 0; l < 5; l++) { img[((j + (p*BUNNYWIDTH)) * HEIGHT) + (k + (l*BUNNYHEIGHT))] = R; img[((j + (p*BUNNYWIDTH)) * HEIGHT) + (k + (l*BUNNYHEIGHT)) + (WIDTH * HEIGHT)] = G; img[((j + (p*BUNNYWIDTH)) * HEIGHT) + (k + (l*BUNNYHEIGHT)) + 2 * (WIDTH * HEIGHT)] = B; } } Depthbuffer[(j * HEIGHT) + k] = distancefromeye; } } } } __syncthreads(); } float *img1; int type = 0; // 0 for sequential, 1 for parralelized int main (int argc, char **argv) { Parse(type); dim3 grid, block; grid.x = (TRIANGLES / 512) + (TRIANGLES % 512 ? 1 : 0); block.x = 512; img1 = (float*)malloc(sizeof(float) * WIDTH * HEIGHT * 3); int counter = TRIANGLES; float* zDepth; zDepth = (float*)malloc(sizeof(float) * WIDTH * HEIGHT); for(int i = 0; i < WIDTH; i++) for(int j = 0; j < HEIGHT; j++) zDepth[i * WIDTH + j] = 1000000; hipMalloc(&cudaTri, sizeof(Tri) * TRIANGLES); hipMalloc(&Depthbuffer, sizeof(float) * WIDTH * HEIGHT); hipMalloc(&img, sizeof(float) * WIDTH * HEIGHT * 3); hipMemcpy(cudaTri, Triangles, sizeof(Tri) * TRIANGLES, hipMemcpyHostToDevice); hipMemcpy(Depthbuffer, zDepth, sizeof(float) * WIDTH * HEIGHT , hipMemcpyHostToDevice); hipLaunchKernelGGL(( Rasterizer), dim3(grid), dim3(block), 0, 0, cudaTri, img, Depthbuffer, counter); hipMemcpy(img1, img, sizeof(float) * WIDTH * HEIGHT * 3, hipMemcpyDeviceToHost); hipFree(cudaTri); hipFree(Depthbuffer); hipFree(img); if(type) { for(int i = 0; i < WIDTH; i++) { for(int j = 0; j < HEIGHT; j++) { image[i][j][0] = img1[i * WIDTH + j]; image[i][j][1] = img1[i * WIDTH + j + 1 * (WIDTH * HEIGHT)]; image[i][j][2] = img1[i * WIDTH + j + 2 * (WIDTH * HEIGHT)]; } } } imageWrite(); return 0; }
f00dd685456a18b995bb2b4c49ec625d399b733e.cu
#include <GL/gl.h> #include <GL/glut.h> #include <stdio.h> #include <math.h> #include <stdlib.h> #include <vector> #include <iostream> #include <fstream> #include <string.h> #include <assert.h> #include <map> #define WIDTH 2000 #define HEIGHT 2000 #define TRIANGLES 10000 #define BUNNYHEIGHT 400 #define BUNNYWIDTH 400 using namespace std; extern float image[WIDTH][HEIGHT][3]; float parImage[WIDTH][HEIGHT][3]; GLfloat light_diffuse[] = {0.5, 0.5, 0.5, 1.0}; /* Red diffuse light. */ GLfloat light_position[] = {-0.2, 1.0, -1, 0.0}; /* Infinite light location. */ typedef struct Tri { float x1, y1, z1, x2, y2, z2, x3, y3, z3; float x1_2d, y1_2d, x2_2d, y2_2d, x3_2d, y3_2d; float bboxLeft, bboxTop, bboxWidth, bboxHeight; float R1, G1, B1, R2, G2, B2, R3, G3, B3; float normalX, normalY, normalZ; } Tri; extern void Parse(int type); extern Tri Triangles[TRIANGLES]; Tri *cudaTri; float *Depthbuffer; float *img; int *TriIndex; void imageWrite() { int x, y, w = WIDTH, h = HEIGHT, r, g, b; FILE *f; unsigned char *img = NULL; //int yres = HEIGHT; int filesize = 54 + 3*w*h; //w is your image width, h is image height, both int if( img ) free( img ); img = (unsigned char *)malloc(3*w*h); memset(img,0,sizeof(img)); int i = 0; for(i=0; i<w; i++) { for(int j=0; j<h; j++) { x=i; y = j; r = (int)(image[i][j][0]*255); g = (int)(image[i][j][1]*255); b = (int)(image[i][j][2]*255); if (r > 255) r=255; if (g > 255) g=255; if (b > 255) b=255; img[(x+y*w)*3+2] = (unsigned char)(r); img[(x+y*w)*3+1] = (unsigned char)(g); img[(x+y*w)*3+0] = (unsigned char)(b); } } unsigned char bmpfileheader[14] = {'B','M', 0,0,0,0, 0,0, 0,0, 54,0,0,0}; unsigned char bmpinfoheader[40] = {40,0,0,0, 0,0,0,0, 0,0,0,0, 1,0, 24,0}; unsigned char bmppad[3] = {0,0,0}; bmpfileheader[ 2] = (unsigned char)(filesize ); bmpfileheader[ 3] = (unsigned char)(filesize>> 8); bmpfileheader[ 4] = (unsigned char)(filesize>>16); bmpfileheader[ 5] = (unsigned char)(filesize>>24); bmpinfoheader[ 4] = (unsigned char)( w ); bmpinfoheader[ 5] = (unsigned char)( w>> 8); bmpinfoheader[ 6] = (unsigned char)( w>>16); bmpinfoheader[ 7] = (unsigned char)( w>>24); bmpinfoheader[ 8] = (unsigned char)( h ); bmpinfoheader[ 9] = (unsigned char)( h>> 8); bmpinfoheader[10] = (unsigned char)( h>>16); bmpinfoheader[11] = (unsigned char)( h>>24); f = fopen("img.bmp","wb"); fwrite(bmpfileheader,1,14,f); fwrite(bmpinfoheader,1,40,f); for(i=0; i<h; i++) { fwrite(img+(w*(h-i-1)*3),3,w,f); fwrite(bmppad,1,(4-(w*3)%4)%4,f); } fclose(f); } __forceinline__ __device__ float computeColor(float x, float y, float z, float diffuseColor) { //Color of light float lightPosX = 3; float lightPosY = 15.0; float lightPosZ = 13.0; float size = sqrt(lightPosX * lightPosX + lightPosY * lightPosY + lightPosZ + lightPosZ); lightPosX /= size; lightPosY /= size; lightPosZ /= size; size = sqrt(x * x + y * y + z * z); x /= size; y /= size; z /= size; float diffuseIntensity = 0.75; float diffuseLight = diffuseIntensity * (x * lightPosX + y * lightPosY + z * lightPosZ); float ambientLight = 0; float color = (diffuseLight + ambientLight) * diffuseColor; return color; } __global__ void Rasterizer(Tri *cudaTri, float *img, float *Depthbuffer, int triCounter) { int i = blockIdx.x * 512 + threadIdx.x; if(i > triCounter) return; float xa = cudaTri[i].x1_2d; float xb = cudaTri[i].x2_2d; float xc = cudaTri[i].x3_2d; float ya = cudaTri[i].y1_2d; float yb = cudaTri[i].y2_2d; float yc = cudaTri[i].y3_2d; for(int j = cudaTri[i].bboxLeft; j <= cudaTri[i].bboxLeft + cudaTri[i].bboxWidth; ++j) { for(int k = 
cudaTri[i].bboxTop; k <= cudaTri[i].bboxTop + cudaTri[i].bboxHeight; ++k) { float x = (float)j; float y = (float)k; float beta = (((xa-xc) * (y - yc)) - ((x-xc) * (ya - yc))) / (((xb - xa) * (yc - ya)) - ((xc - xa) * (yb-ya))); float gamma = (((xb-xa) * (y - ya)) - ((x-xa) * (yb - ya))) / (((xb - xa) * (yc - ya)) - ((xc - xa) * (yb-ya))); float alpha = 1 - beta - gamma; if(alpha >= 0 && alpha <= 1 && beta >= 0 && beta <= 1 && gamma >= 0 && gamma <= 1) { float depthP = alpha * cudaTri[i].z1 + beta * cudaTri[i].z2 + gamma * cudaTri[i].z3; // 1 is camera z position float distancefromeye = 1 - depthP; if(distancefromeye <= Depthbuffer[(j * HEIGHT) + k]) { float R = computeColor(cudaTri[i].x1, cudaTri[i].y1, cudaTri[i].z1, cudaTri[i].R1) * alpha + computeColor(cudaTri[i].x2, cudaTri[i].y2, cudaTri[i].z2, cudaTri[i].R2) * beta + computeColor(cudaTri[i].x3, cudaTri[i].y3, cudaTri[i].z3, cudaTri[i].R3) * gamma; float G = computeColor(cudaTri[i].x1, cudaTri[i].y1, cudaTri[i].z1, cudaTri[i].G1) * alpha + computeColor(cudaTri[i].x2, cudaTri[i].y2, cudaTri[i].z2, cudaTri[i].G2) * beta + computeColor(cudaTri[i].x3, cudaTri[i].y3, cudaTri[i].z3, cudaTri[i].G3) * gamma; float B = computeColor(cudaTri[i].x1, cudaTri[i].y1, cudaTri[i].z1, cudaTri[i].B1) * alpha + computeColor(cudaTri[i].x2, cudaTri[i].y2, cudaTri[i].z2, cudaTri[i].B2) * beta + computeColor(cudaTri[i].x3, cudaTri[i].y3, cudaTri[i].z3, cudaTri[i].B3) * gamma; for(int p = 0; p < 5; p++) { for(int l = 0; l < 5; l++) { img[((j + (p*BUNNYWIDTH)) * HEIGHT) + (k + (l*BUNNYHEIGHT))] = R; img[((j + (p*BUNNYWIDTH)) * HEIGHT) + (k + (l*BUNNYHEIGHT)) + (WIDTH * HEIGHT)] = G; img[((j + (p*BUNNYWIDTH)) * HEIGHT) + (k + (l*BUNNYHEIGHT)) + 2 * (WIDTH * HEIGHT)] = B; } } Depthbuffer[(j * HEIGHT) + k] = distancefromeye; } } } } __syncthreads(); } float *img1; int type = 0; // 0 for sequential, 1 for parralelized int main (int argc, char **argv) { Parse(type); dim3 grid, block; grid.x = (TRIANGLES / 512) + (TRIANGLES % 512 ? 1 : 0); block.x = 512; img1 = (float*)malloc(sizeof(float) * WIDTH * HEIGHT * 3); int counter = TRIANGLES; float* zDepth; zDepth = (float*)malloc(sizeof(float) * WIDTH * HEIGHT); for(int i = 0; i < WIDTH; i++) for(int j = 0; j < HEIGHT; j++) zDepth[i * WIDTH + j] = 1000000; cudaMalloc(&cudaTri, sizeof(Tri) * TRIANGLES); cudaMalloc(&Depthbuffer, sizeof(float) * WIDTH * HEIGHT); cudaMalloc(&img, sizeof(float) * WIDTH * HEIGHT * 3); cudaMemcpy(cudaTri, Triangles, sizeof(Tri) * TRIANGLES, cudaMemcpyHostToDevice); cudaMemcpy(Depthbuffer, zDepth, sizeof(float) * WIDTH * HEIGHT , cudaMemcpyHostToDevice); Rasterizer<<<grid, block>>>(cudaTri, img, Depthbuffer, counter); cudaMemcpy(img1, img, sizeof(float) * WIDTH * HEIGHT * 3, cudaMemcpyDeviceToHost); cudaFree(cudaTri); cudaFree(Depthbuffer); cudaFree(img); if(type) { for(int i = 0; i < WIDTH; i++) { for(int j = 0; j < HEIGHT; j++) { image[i][j][0] = img1[i * WIDTH + j]; image[i][j][1] = img1[i * WIDTH + j + 1 * (WIDTH * HEIGHT)]; image[i][j][2] = img1[i * WIDTH + j + 2 * (WIDTH * HEIGHT)]; } } } imageWrite(); return 0; }
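The Rasterizer kernel above performs both its inside-triangle test and its colour/depth interpolation with barycentric weights computed from the projected 2-D vertices. The same formulas, lifted into a standalone helper with an example probe point (plain C++; the triangle and the point are made-up test values, not data from the program above):

#include <cstdio>

struct Bary { float alpha, beta, gamma; };

// Barycentric weights of point (x, y) with respect to triangle (A, B, C),
// using the same expressions as the Rasterizer kernel.
static Bary barycentric(float x, float y,
                        float xa, float ya, float xb, float yb, float xc, float yc) {
    float denom = (xb - xa) * (yc - ya) - (xc - xa) * (yb - ya);
    float beta  = ((xa - xc) * (y - yc) - (x - xc) * (ya - yc)) / denom;
    float gamma = ((xb - xa) * (y - ya) - (x - xa) * (yb - ya)) / denom;
    return { 1.0f - beta - gamma, beta, gamma };
}

int main() {
    // Unit right triangle with a probe point inside it.
    Bary w = barycentric(0.25f, 0.25f, 0.f, 0.f, 1.f, 0.f, 0.f, 1.f);
    bool inside = w.alpha >= 0 && w.alpha <= 1 &&
                  w.beta  >= 0 && w.beta  <= 1 &&
                  w.gamma >= 0 && w.gamma <= 1;
    printf("alpha=%.2f beta=%.2f gamma=%.2f inside=%d\n", w.alpha, w.beta, w.gamma, inside);
    return 0;
}

A pixel is inside the triangle exactly when all three weights lie in [0, 1], which is the test the kernel applies before comparing against the depth buffer.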
06162d79c9d753976fa6a785837776daf8d3f88c.hip
// !!! This is a file automatically generated by hipify!!! #include <gtest/gtest.h> #include <algorithm> #include <string> #include <sstream> #include <stdint.h> #include <fstream> #include <vector> #include "../src/protein_type.h" #include "../src/alphabet_coder.h" #include "../src/score_matrix_reader.h" #include "../src/score_matrix.h" #include "../src/reduced_alphabet_file_reader.h" #include "../src/reduced_alphabet_coder.h" #include "../src/reduced_alphabet_variable_hash_function.h" #include "../src/distance_calculator_gpu.h" #include "../src/cuda_common.h" #include "../src/aligner_gpu_data.h" #include <thrust/device_vector.h> #include <thrust/copy.h> using namespace std; class DistanceCalculatorGpuTest: public ::testing::Test { protected: virtual void SetUp() { ProteinType protein_type; const std::string reduced_alphabet = "A KR EDNQ C G H ILVM FYW P ST"; std::istringstream in(reduced_alphabet); std::vector<std::string> alphabet_sets; ReducedAlphabetFileReader reduced_alphabet_reader; reduced_alphabet_reader.Read(in, alphabet_sets); coder_ = AlphabetCoder(protein_type); ReducedAlphabetCoder reduced_alphabet_coder(protein_type, alphabet_sets); AlphabetCoder::Code max_code = coder_.GetMaxRegularLetterCode(); reduced_code_map_.resize(max_code + 1); for (AlphabetCoder::Code code = coder_.GetMinCode(); code <= max_code; ++code) { char c = coder_.Decode(code); AlphabetCoder::Code reduced_code = reduced_alphabet_coder.Encode(c); reduced_code_map_[code] = reduced_code; } max_code_ = reduced_alphabet_coder.GetMaxRegularLetterCode(); max_length_ = 2; } virtual void TearDown() { } AlphabetCoder::Code max_code_; uint32_t max_length_; int score_threshold_; std::vector<AlphabetCoder::Code> reduced_code_map_; std::vector<int> code_score_; AlphabetCoder coder_; }; TEST_F(DistanceCalculatorGpuTest, CalculateDistances) { size_t subsequence_length = 4; string sequence0 = "IMEHQSIMEHQS"; vector<AlphabetCoder::Code> coded_sequence0(sequence0.length()); coder_.Encode(&sequence0[0], sequence0.length(), &coded_sequence0[0]); string sequence1 = "LMEHQALMEHQA"; vector<AlphabetCoder::Code> coded_sequence1(sequence1.length()); coder_.Encode(&sequence1[0], sequence1.length(), &coded_sequence1[0]); AlignerGpuData gpu_data; gpu_data.SetGpuQueriesSequence(&coded_sequence0[0], coded_sequence0.size()); gpu_data.SetGpuDatabaseSequence(&coded_sequence1[0], coded_sequence1.size()); gpu_data.SetGpuReducedCodeMap(&reduced_code_map_[0], reduced_code_map_.size()); thrust::host_vector<uint32_t, thrust::hip::experimental::pinned_allocator<uint32_t> > sequence0_positions; thrust::host_vector<uint32_t, thrust::hip::experimental::pinned_allocator<uint32_t> > sequence1_positions; thrust::host_vector<DistanceCalculator::Distance, thrust::hip::experimental::pinned_allocator< DistanceCalculator::Distance> > distances(3, 0); sequence0_positions.push_back(2); sequence1_positions.push_back(2); sequence0_positions.push_back(6); sequence1_positions.push_back(6); sequence0_positions.push_back(10); sequence1_positions.push_back(10); DistanceCalculatorGpu distance_calculator; hipStream_t stream; thrust::device_vector<uint32_t> d_sequence0_positions( sequence0_positions.size()); thrust::device_vector<uint32_t> d_sequence1_positions( sequence1_positions.size()); thrust::device_vector<DistanceCalculatorGpu::Distance> d_distances(sequence0_positions.size()); hipStreamCreate(&stream); distance_calculator.SetQueries(gpu_data.GetGpuQueriesSequence()); distance_calculator.SetDatabase( gpu_data.GetGpuDatabaseSequence()); 
distance_calculator.SetReducedCodeMap(gpu_data.GetGpuReducedCodeMap()); distance_calculator.SetSubsequenceLength(subsequence_length); distance_calculator.CalculateDistancesAsync(3, &sequence0_positions[0], &sequence1_positions[0], &distances[0], thrust::raw_pointer_cast(d_sequence0_positions.data()), thrust::raw_pointer_cast(d_sequence1_positions.data()), thrust::raw_pointer_cast(d_distances.data()), stream); hipStreamSynchronize(stream); hipStreamDestroy(stream); EXPECT_EQ(0, distances[0]); EXPECT_EQ(1, distances[1]); EXPECT_EQ(1, distances[2]); }
06162d79c9d753976fa6a785837776daf8d3f88c.cu
#include <gtest/gtest.h>

#include <algorithm>
#include <string>
#include <sstream>
#include <stdint.h>
#include <fstream>
#include <vector>

#include "../src/protein_type.h"
#include "../src/alphabet_coder.h"
#include "../src/score_matrix_reader.h"
#include "../src/score_matrix.h"
#include "../src/reduced_alphabet_file_reader.h"
#include "../src/reduced_alphabet_coder.h"
#include "../src/reduced_alphabet_variable_hash_function.h"
#include "../src/distance_calculator_gpu.h"
#include "../src/cuda_common.h"
#include "../src/aligner_gpu_data.h"

#include <thrust/device_vector.h>
#include <thrust/copy.h>

using namespace std;

class DistanceCalculatorGpuTest: public ::testing::Test {
protected:
  virtual void SetUp() {
    ProteinType protein_type;
    const std::string reduced_alphabet = "A KR EDNQ C G H ILVM FYW P ST";
    std::istringstream in(reduced_alphabet);
    std::vector<std::string> alphabet_sets;
    ReducedAlphabetFileReader reduced_alphabet_reader;
    reduced_alphabet_reader.Read(in, alphabet_sets);
    coder_ = AlphabetCoder(protein_type);
    ReducedAlphabetCoder reduced_alphabet_coder(protein_type, alphabet_sets);
    AlphabetCoder::Code max_code = coder_.GetMaxRegularLetterCode();
    reduced_code_map_.resize(max_code + 1);
    for (AlphabetCoder::Code code = coder_.GetMinCode(); code <= max_code;
        ++code) {
      char c = coder_.Decode(code);
      AlphabetCoder::Code reduced_code = reduced_alphabet_coder.Encode(c);
      reduced_code_map_[code] = reduced_code;
    }
    max_code_ = reduced_alphabet_coder.GetMaxRegularLetterCode();
    max_length_ = 2;
  }

  virtual void TearDown() {
  }

  AlphabetCoder::Code max_code_;
  uint32_t max_length_;
  int score_threshold_;
  std::vector<AlphabetCoder::Code> reduced_code_map_;
  std::vector<int> code_score_;
  AlphabetCoder coder_;
};

TEST_F(DistanceCalculatorGpuTest, CalculateDistances) {
  size_t subsequence_length = 4;
  string sequence0 = "IMEHQSIMEHQS";
  vector<AlphabetCoder::Code> coded_sequence0(sequence0.length());
  coder_.Encode(&sequence0[0], sequence0.length(), &coded_sequence0[0]);
  string sequence1 = "LMEHQALMEHQA";
  vector<AlphabetCoder::Code> coded_sequence1(sequence1.length());
  coder_.Encode(&sequence1[0], sequence1.length(), &coded_sequence1[0]);

  AlignerGpuData gpu_data;
  gpu_data.SetGpuQueriesSequence(&coded_sequence0[0], coded_sequence0.size());
  gpu_data.SetGpuDatabaseSequence(&coded_sequence1[0], coded_sequence1.size());
  gpu_data.SetGpuReducedCodeMap(&reduced_code_map_[0], reduced_code_map_.size());

  thrust::host_vector<uint32_t,
      thrust::cuda::experimental::pinned_allocator<uint32_t> > sequence0_positions;
  thrust::host_vector<uint32_t,
      thrust::cuda::experimental::pinned_allocator<uint32_t> > sequence1_positions;
  thrust::host_vector<DistanceCalculator::Distance,
      thrust::cuda::experimental::pinned_allocator<
          DistanceCalculator::Distance> > distances(3, 0);

  sequence0_positions.push_back(2);
  sequence1_positions.push_back(2);
  sequence0_positions.push_back(6);
  sequence1_positions.push_back(6);
  sequence0_positions.push_back(10);
  sequence1_positions.push_back(10);

  DistanceCalculatorGpu distance_calculator;
  cudaStream_t stream;
  thrust::device_vector<uint32_t> d_sequence0_positions(
      sequence0_positions.size());
  thrust::device_vector<uint32_t> d_sequence1_positions(
      sequence1_positions.size());
  thrust::device_vector<DistanceCalculatorGpu::Distance> d_distances(
      sequence0_positions.size());
  cudaStreamCreate(&stream);
  distance_calculator.SetQueries(gpu_data.GetGpuQueriesSequence());
  distance_calculator.SetDatabase(gpu_data.GetGpuDatabaseSequence());
  distance_calculator.SetReducedCodeMap(gpu_data.GetGpuReducedCodeMap());
  distance_calculator.SetSubsequenceLength(subsequence_length);
  distance_calculator.CalculateDistancesAsync(3, &sequence0_positions[0],
      &sequence1_positions[0], &distances[0],
      thrust::raw_pointer_cast(d_sequence0_positions.data()),
      thrust::raw_pointer_cast(d_sequence1_positions.data()),
      thrust::raw_pointer_cast(d_distances.data()), stream);
  cudaStreamSynchronize(stream);
  cudaStreamDestroy(stream);

  EXPECT_EQ(0, distances[0]);
  EXPECT_EQ(1, distances[1]);
  EXPECT_EQ(1, distances[2]);
}
16a6995dc07cbbc08272d87cf8cbe4e458bdf8e3.hip
// !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <iostream>

#include <unistd.h>

#include <ctime>
#include <sys/time.h>
#include <sys/resource.h>

#include <assert.h>

// includes, project
#include "helper_cuda.h"

// include initial files
#define __MAIN_LOGIC
#include "vegas.h"
#include "gvegas.h"
#undef __MAIN_LOGIC

#include "kernels.h"

double getrusage_sec()
{
   struct rusage t;
   struct timeval tv;
   getrusage(RUSAGE_SELF, &t);
   tv = t.ru_utime;
   return tv.tv_sec + (double)tv.tv_usec*1e-6;
}

int main(int argc, char** argv)
{
   //------------------
   // Initialization
   //------------------
   //
   // program interface:
   // program -n="ncall0" -i="itmx0" -a="nacc" -b="nBlockSize0"
   //
   // parameters:
   // ncall = ncall0
   // itmx = itmx0
   // acc = nacc*0.00001f
   // nBlockSize = nBlockSize0
   //

   int ncall0 = 0;
   int itmx0 = 10;
   int nacc = 1;
   int nBlockSize0 = 256;
   int ndim0 = 6;

   int c;
   while ((c = getopt (argc, argv, "n:i:a:b:d:")) != -1)
      switch (c)
      {
      case 'n':
         ncall0 = atoi(optarg);
         break;
      case 'i':
         itmx0 = atoi(optarg);
         break;
      case 'a':
         nacc = atoi(optarg);
         break;
      case 'b':
         nBlockSize0 = atoi(optarg);
         break;
      case 'd':
         ndim0 = atoi(optarg);
         break;
      case '?':
         if (isprint (optopt))
            fprintf (stderr, "Unknown option `-%c'.\n", optopt);
         else
            fprintf (stderr, "Unknown option character `\\x%x'.\n", optopt);
         return 1;
      default:
         abort ();
      }

   //ncall = (1 << ncall0)*1024;
   ncall = ncall0; // more intuitive to users
   itmx = itmx0;
   acc = (double)nacc*0.000001;
   nBlockSize = nBlockSize0;
   ndim = ndim0;
   assert(ndim <= ndim_max);

   mds = 1;
   ng = 0;
   npg = 0;

   for (int i=0;i<ndim;i++) {
      xl[i] = 0.;
      xu[i] = 1.;
   }

   //If nprn = 1 it prints the whole work, when nprn = 0, just the text in this code
   //If nprn = -1, we can get the grid update information.
   nprn = 0;
   // nprn = -1;
   // nprn = 0;

   double avgi = 0.;
   double sd = 0.;
   double chi2a = 0.;

   gVegas(avgi, sd, chi2a);

   //-------------------------
   // Print out information
   //-------------------------
   std::cout.clear();
   std::cout<<"#==========================="<<std::endl;
   std::cout<<"# No. of Thread Block Size : "<<nBlockSize<<std::endl;
   std::cout<<"#==========================="<<std::endl;
   std::cout<<"# No. of dimensions : "<<ndim<<std::endl;
   std::cout<<"# No. of func calls / iter : "<<ncall<<std::endl;
   std::cout<<"# No. of max. iterations : "<<itmx<<std::endl;
   std::cout<<"# Desired accuracy : "<<acc<<std::endl;
   std::cout<<"#==========================="<<std::endl;
   std::cout<<"# Answer : "<<avgi<<" +- "<<sd<<std::endl;
   std::cout<<"# Chisquare : "<<chi2a<<std::endl;
   std::cout<<"#==========================="<<std::endl;

   hipDeviceReset();

   //Print running times!
   std::cout<<"#==========================="<<std::endl;
   std::cout<<"# Function call time per iteration: "
            <<timeVegasCall/(double)it<<std::endl;
   std::cout<<"# Values moving time per iteration: "
            <<timeVegasMove/(double)it<<std::endl;
   std::cout<<"# Filling (reduce) time per iteration: "
            <<timeVegasFill/(double)it<<std::endl;
   std::cout<<"# Refining time per iteration: "
            <<timeVegasRefine/(double)it<<std::endl;
   std::cout<<"#==========================="<<std::endl;

   return 0;
}
16a6995dc07cbbc08272d87cf8cbe4e458bdf8e3.cu
#include <cstdlib>
#include <iostream>

#include <unistd.h>

#include <ctime>
#include <sys/time.h>
#include <sys/resource.h>

#include <assert.h>

// includes, project
#include "helper_cuda.h"

// include initial files
#define __MAIN_LOGIC
#include "vegas.h"
#include "gvegas.h"
#undef __MAIN_LOGIC

#include "kernels.h"

double getrusage_sec()
{
   struct rusage t;
   struct timeval tv;
   getrusage(RUSAGE_SELF, &t);
   tv = t.ru_utime;
   return tv.tv_sec + (double)tv.tv_usec*1e-6;
}

int main(int argc, char** argv)
{
   //------------------
   // Initialization
   //------------------
   //
   // program interface:
   // program -n="ncall0" -i="itmx0" -a="nacc" -b="nBlockSize0"
   //
   // parameters:
   // ncall = ncall0
   // itmx = itmx0
   // acc = nacc*0.00001f
   // nBlockSize = nBlockSize0
   //

   int ncall0 = 0;
   int itmx0 = 10;
   int nacc = 1;
   int nBlockSize0 = 256;
   int ndim0 = 6;

   int c;
   while ((c = getopt (argc, argv, "n:i:a:b:d:")) != -1)
      switch (c)
      {
      case 'n':
         ncall0 = atoi(optarg);
         break;
      case 'i':
         itmx0 = atoi(optarg);
         break;
      case 'a':
         nacc = atoi(optarg);
         break;
      case 'b':
         nBlockSize0 = atoi(optarg);
         break;
      case 'd':
         ndim0 = atoi(optarg);
         break;
      case '?':
         if (isprint (optopt))
            fprintf (stderr, "Unknown option `-%c'.\n", optopt);
         else
            fprintf (stderr, "Unknown option character `\\x%x'.\n", optopt);
         return 1;
      default:
         abort ();
      }

   //ncall = (1 << ncall0)*1024;
   ncall = ncall0; // more intuitive to users
   itmx = itmx0;
   acc = (double)nacc*0.000001;
   nBlockSize = nBlockSize0;
   ndim = ndim0;
   assert(ndim <= ndim_max);

   mds = 1;
   ng = 0;
   npg = 0;

   for (int i=0;i<ndim;i++) {
      xl[i] = 0.;
      xu[i] = 1.;
   }

   //If nprn = 1 it prints the whole work, when nprn = 0, just the text in this code
   //If nprn = -1, we can get the grid update information.
   nprn = 0;
   // nprn = -1;
   // nprn = 0;

   double avgi = 0.;
   double sd = 0.;
   double chi2a = 0.;

   gVegas(avgi, sd, chi2a);

   //-------------------------
   // Print out information
   //-------------------------
   std::cout.clear();
   std::cout<<"#==========================="<<std::endl;
   std::cout<<"# No. of Thread Block Size : "<<nBlockSize<<std::endl;
   std::cout<<"#==========================="<<std::endl;
   std::cout<<"# No. of dimensions : "<<ndim<<std::endl;
   std::cout<<"# No. of func calls / iter : "<<ncall<<std::endl;
   std::cout<<"# No. of max. iterations : "<<itmx<<std::endl;
   std::cout<<"# Desired accuracy : "<<acc<<std::endl;
   std::cout<<"#==========================="<<std::endl;
   std::cout<<"# Answer : "<<avgi<<" +- "<<sd<<std::endl;
   std::cout<<"# Chisquare : "<<chi2a<<std::endl;
   std::cout<<"#==========================="<<std::endl;

   cudaThreadExit();

   //Print running times!
   std::cout<<"#==========================="<<std::endl;
   std::cout<<"# Function call time per iteration: "
            <<timeVegasCall/(double)it<<std::endl;
   std::cout<<"# Values moving time per iteration: "
            <<timeVegasMove/(double)it<<std::endl;
   std::cout<<"# Filling (reduce) time per iteration: "
            <<timeVegasFill/(double)it<<std::endl;
   std::cout<<"# Refining time per iteration: "
            <<timeVegasRefine/(double)it<<std::endl;
   std::cout<<"#==========================="<<std::endl;

   return 0;
}
df638de75b3146a3b0b6892931c9dbd1fc2b6019.hip
// !!! This is a file automatically generated by hipify!!! #include "device_launch_parameters.h" #include <math.h> #include <ctype.h> #include <stdlib.h> #include <opencv2/core.hpp> #include <opencv2/videoio.hpp> #include <opencv2/highgui.hpp> #include <opencv2/opencv.hpp> #include <opencv2/core/cuda.hpp> #include <opencv2/core/cuda.inl.hpp> #include "hip/hip_runtime.h" #include <iostream> #include <stdio.h> #define FRAMES 3 // Number of frames located within the buffer //#define PIXELS_PER_FRAME 2073600 // Number of pixels per frame //#define PIXELS_PER_FRAME 307200 #define PIXELS_PER_FRAME 327680 #define BLOCK 512 // Size of blocks, best if it is a power of 2. using namespace std; using namespace cv; // Globals unsigned char* currentFrame_GPU; unsigned char* BlockOfFrames_CPU, * BlockOfFrames_GPU; float* MeanFrame_GPU; float* BlockOfLogNormalFrames_GPU; float* MeanLogNormalFrame_GPU; float* MedianLogNormalFrame_GPU; float* StdvLogNormalFrame_GPU; dim3 dimBlock, dimGrid; void AllocateMemory() { hipMalloc((void**)&currentFrame_GPU, PIXELS_PER_FRAME * sizeof(unsigned char)); // This are the set of frames that will be used to generate the log normal frame // and the standard deviation frame BlockOfFrames_CPU = (unsigned char*)malloc(FRAMES * PIXELS_PER_FRAME * sizeof(unsigned char)); hipMalloc((void**)&BlockOfFrames_GPU, FRAMES * PIXELS_PER_FRAME * sizeof(unsigned char)); hipMalloc((void**)&BlockOfLogNormalFrames_GPU, FRAMES * PIXELS_PER_FRAME * sizeof(float)); // Will hold the log normal frame and the standard deviation of the frames minus the log normal hipMalloc((void**)&MeanFrame_GPU, PIXELS_PER_FRAME * sizeof(float)); hipMalloc((void**)&MeanLogNormalFrame_GPU, PIXELS_PER_FRAME * sizeof(float)); hipMalloc((void**)&MedianLogNormalFrame_GPU, PIXELS_PER_FRAME * sizeof(float)); hipMalloc((void**)&StdvLogNormalFrame_GPU, PIXELS_PER_FRAME * sizeof(float)); } void SetUpCudaDevices() { dimBlock.x = BLOCK; dimBlock.y = 1; dimBlock.z = 1; dimGrid.x = ((PIXELS_PER_FRAME - 1) / BLOCK) + 1; dimGrid.y = 1; dimGrid.z = 1; } //This function creates the log-normal frames for comparison to the newest image __global__ void creatingBuffer(float* meanFrame, unsigned char* allFrames, float* allFramesLogNormal, int pixelsPerFrame, float* meanlogNormalFrame, float* medianlogNormalFrame, float* stdvLogNormalFrame, int frames) { int id; //Mean Matrix int pixel = threadIdx.x + blockIdx.x * blockDim.x; if (pixel < pixelsPerFrame) { double sum = 0.0; for (int i = 0; i < frames; i++) { sum += (int)allFrames[pixel + pixelsPerFrame * i]; } meanFrame[pixel] = sum / (float)frames; } //Log-Normal Matrix if (pixel < pixelsPerFrame) { for (int i = 0; i < frames; i++) { //Same screen location (pixel) but moving through frames (i). id = pixel + pixelsPerFrame * i; allFramesLogNormal[id] = (float)allFrames[id] - meanFrame[pixel]; allFramesLogNormal[id] = abs(allFramesLogNormal[id]); //Can't take log of zero so to be safe check and move it off zero. if (allFramesLogNormal[id] == 0.0f) { allFramesLogNormal[id] = 0.000001f; } allFramesLogNormal[id] = logf(allFramesLogNormal[id]); //allFramesLogNormal[id] = (float)allFrames[id]; // Remove after debugging. 
} } //Log Mean Matrix if (pixel < pixelsPerFrame) { double sum = 0.0; for (int i = 0; i < frames; i++) { sum += allFramesLogNormal[pixel + pixelsPerFrame * i]; } meanlogNormalFrame[pixel] = sum / (float)frames; } int used[FRAMES], index, count; float median = 0.0; float small; //Log Median Matrix if (pixel < pixelsPerFrame) { for (int i = 0; i < frames; i++) { used[i] = 0; } if (frames % 2 == 0) { int middle2 = frames / 2; int middle1 = middle2 - 1; index = -1; count = 0; while (count <= middle2) { small = 10000000.0f; //Needs to be a number larger than anything you would get in a log of a pixel. for (int i = 0; i < frames; i++) { if (allFramesLogNormal[pixel + pixelsPerFrame * i] < small && used[i] == 0) { small = allFramesLogNormal[pixel + pixelsPerFrame * i]; index = i; } } if (index == -1) printf("\nError no index found\n"); used[index] = 1; if (count == middle1 || count == middle2) { median += allFramesLogNormal[pixel + pixelsPerFrame * index]; } count++; } median /= 2.0f; } else { int middle = frames / 2; index = -1; count = 0; while (count <= middle) { small = 10000000.0f; //Needs to be a number larger than anything you would get in a log of a pixel. for (int i = 0; i < frames; i++) { if (allFramesLogNormal[pixel + pixelsPerFrame * i] < small) { if (used[i] == 0) { small = allFramesLogNormal[pixel + pixelsPerFrame * i]; index = i; } } } if (index == -1) printf("\nError no index found\n"); used[index] = 1; if (count == middle) { median += allFramesLogNormal[pixel + pixelsPerFrame * index]; } count++; } } medianlogNormalFrame[pixel] = median; } float temp; //Log-Normal STD Matrix if (pixel < pixelsPerFrame) { double sum = 0.0; for (int i = 0; i < frames; i++) { temp = allFramesLogNormal[pixel + pixelsPerFrame * i] - meanlogNormalFrame[pixel]; sum += temp * temp; } stdvLogNormalFrame[pixel] = sqrtf((sum) / (float)(frames - 1)); } } __global__ void CDFfunction(float* median, float* stdvLogNormalFrame, float* MeanLogNormalFrame, unsigned char* currentFrame, int pixelsPerFrame) { int pixel = threadIdx.x + blockIdx.x * blockDim.x; if (pixel < pixelsPerFrame) { float newvalue; float x = currentFrame[pixel]; newvalue = -((logf(x) - median[pixel]) - MeanLogNormalFrame[pixel]) / (sqrtf(2) * stdvLogNormalFrame[pixel]); float summ = 0.5f + 0.5f * erff(newvalue); //Threshold set to 30% if (summ >= 0.3) { currentFrame[pixel] = (unsigned char)255; } else { currentFrame[pixel] = (unsigned char)0; } } } void errorCheck(const char* message) { hipError_t error; error = hipGetLastError(); if (error != hipSuccess) { printf("%s", message); printf("\n CUDA ERROR: %s\n", hipGetErrorString(error)); exit(0); } } void cleanUp() { free(BlockOfFrames_CPU); hipFree(BlockOfFrames_GPU); hipFree(BlockOfLogNormalFrames_GPU); hipFree(MeanFrame_GPU); hipFree(MeanLogNormalFrame_GPU); hipFree(MedianLogNormalFrame_GPU); hipFree(StdvLogNormalFrame_GPU); } int main() { AllocateMemory(); SetUpCudaDevices(); //This option is set for a recorded video VideoCapture cap("video.avi"); //The next 4 lines are for an attached camera //VideoCapture cap; //int DeviceID = 0;//0 is set for the first camera instance //int apiID = CAP_ANY; //cap.open(DeviceID, apiID); Mat frame; cap.read(frame); int frame_width = static_cast<int>(cap.get(CAP_PROP_FRAME_WIDTH)); //get the width of frames of the video int frame_height = static_cast<int>(cap.get(CAP_PROP_FRAME_HEIGHT)); Mat grayimg, Temp; if (!cap.isOpened()) { std::cout << "Error! 
Unable to open camera\n"; cleanUp(); return -1; } unsigned char* TempFrame; for (int i = 0; i < FRAMES; i++) { cap.read(frame); frame.convertTo(Temp, CV_8U); cvtColor(Temp, grayimg, COLOR_RGB2GRAY); TempFrame = grayimg.ptr<unsigned char>(0); memcpy(BlockOfFrames_CPU + i * PIXELS_PER_FRAME, TempFrame, sizeof(unsigned char) * PIXELS_PER_FRAME); } hipMemcpyAsync(BlockOfFrames_GPU, BlockOfFrames_CPU, PIXELS_PER_FRAME * FRAMES * sizeof(unsigned char), hipMemcpyHostToDevice); errorCheck("copyFramessUp"); hipDeviceSynchronize(); creatingBuffer << <dimGrid, dimBlock >> > (MeanFrame_GPU, BlockOfFrames_GPU, BlockOfLogNormalFrames_GPU, PIXELS_PER_FRAME, MeanLogNormalFrame_GPU, MedianLogNormalFrame_GPU, StdvLogNormalFrame_GPU, FRAMES); hipDeviceSynchronize(); int i = 0; Size frame_size(frame_width, frame_height); int frames_per_second = 60; //Create and initialize the VideoWriter object VideoWriter video("E:\\outputVideo.avi", VideoWriter::fourcc('M', 'J', 'P', 'G'), 60, Size(frame_width, frame_height)); while (true) { cap.read(frame); cvtColor(frame, grayimg, COLOR_RGB2GRAY); TempFrame = grayimg.ptr<unsigned char>(0); memcpy(BlockOfFrames_CPU + i * PIXELS_PER_FRAME, TempFrame, sizeof(unsigned char) * (PIXELS_PER_FRAME)); hipMemcpyAsync(BlockOfFrames_GPU, BlockOfFrames_CPU, PIXELS_PER_FRAME * FRAMES * sizeof(unsigned char), hipMemcpyHostToDevice); if (i == 2) { i = -1; } if (i % 1 == 0) { creatingBuffer << <dimGrid, dimBlock >> > (MeanFrame_GPU, BlockOfFrames_GPU, BlockOfLogNormalFrames_GPU, PIXELS_PER_FRAME, MeanLogNormalFrame_GPU, MedianLogNormalFrame_GPU, StdvLogNormalFrame_GPU, FRAMES); } hipMemcpyAsync(currentFrame_GPU, TempFrame, PIXELS_PER_FRAME * sizeof(unsigned char), hipMemcpyHostToDevice); CDFfunction << <dimGrid, dimBlock >> > (MedianLogNormalFrame_GPU, StdvLogNormalFrame_GPU, MeanLogNormalFrame_GPU, currentFrame_GPU, PIXELS_PER_FRAME); errorCheck("CDF function"); hipMemcpyAsync(TempFrame, currentFrame_GPU, PIXELS_PER_FRAME * sizeof(unsigned char), hipMemcpyDeviceToHost); hipDeviceSynchronize(); imshow("image", grayimg); //if a key is pressed break. if (waitKey(5) >= 0) { video.release(); cleanUp(); break; } cvtColor(grayimg, grayimg, COLOR_GRAY2RGB); video.write(grayimg); } cap.release(); printf("\n DONE \n"); }
df638de75b3146a3b0b6892931c9dbd1fc2b6019.cu
#include "device_launch_parameters.h" #include <math.h> #include <ctype.h> #include <stdlib.h> #include <opencv2/core.hpp> #include <opencv2/videoio.hpp> #include <opencv2/highgui.hpp> #include <opencv2/opencv.hpp> #include <opencv2/core/cuda.hpp> #include <opencv2/core/cuda.inl.hpp> #include "cuda_runtime.h" #include <iostream> #include <stdio.h> #define FRAMES 3 // Number of frames located within the buffer //#define PIXELS_PER_FRAME 2073600 // Number of pixels per frame //#define PIXELS_PER_FRAME 307200 #define PIXELS_PER_FRAME 327680 #define BLOCK 512 // Size of blocks, best if it is a power of 2. using namespace std; using namespace cv; // Globals unsigned char* currentFrame_GPU; unsigned char* BlockOfFrames_CPU, * BlockOfFrames_GPU; float* MeanFrame_GPU; float* BlockOfLogNormalFrames_GPU; float* MeanLogNormalFrame_GPU; float* MedianLogNormalFrame_GPU; float* StdvLogNormalFrame_GPU; dim3 dimBlock, dimGrid; void AllocateMemory() { cudaMalloc((void**)&currentFrame_GPU, PIXELS_PER_FRAME * sizeof(unsigned char)); // This are the set of frames that will be used to generate the log normal frame // and the standard deviation frame BlockOfFrames_CPU = (unsigned char*)malloc(FRAMES * PIXELS_PER_FRAME * sizeof(unsigned char)); cudaMalloc((void**)&BlockOfFrames_GPU, FRAMES * PIXELS_PER_FRAME * sizeof(unsigned char)); cudaMalloc((void**)&BlockOfLogNormalFrames_GPU, FRAMES * PIXELS_PER_FRAME * sizeof(float)); // Will hold the log normal frame and the standard deviation of the frames minus the log normal cudaMalloc((void**)&MeanFrame_GPU, PIXELS_PER_FRAME * sizeof(float)); cudaMalloc((void**)&MeanLogNormalFrame_GPU, PIXELS_PER_FRAME * sizeof(float)); cudaMalloc((void**)&MedianLogNormalFrame_GPU, PIXELS_PER_FRAME * sizeof(float)); cudaMalloc((void**)&StdvLogNormalFrame_GPU, PIXELS_PER_FRAME * sizeof(float)); } void SetUpCudaDevices() { dimBlock.x = BLOCK; dimBlock.y = 1; dimBlock.z = 1; dimGrid.x = ((PIXELS_PER_FRAME - 1) / BLOCK) + 1; dimGrid.y = 1; dimGrid.z = 1; } //This function creates the log-normal frames for comparison to the newest image __global__ void creatingBuffer(float* meanFrame, unsigned char* allFrames, float* allFramesLogNormal, int pixelsPerFrame, float* meanlogNormalFrame, float* medianlogNormalFrame, float* stdvLogNormalFrame, int frames) { int id; //Mean Matrix int pixel = threadIdx.x + blockIdx.x * blockDim.x; if (pixel < pixelsPerFrame) { double sum = 0.0; for (int i = 0; i < frames; i++) { sum += (int)allFrames[pixel + pixelsPerFrame * i]; } meanFrame[pixel] = sum / (float)frames; } //Log-Normal Matrix if (pixel < pixelsPerFrame) { for (int i = 0; i < frames; i++) { //Same screen location (pixel) but moving through frames (i). id = pixel + pixelsPerFrame * i; allFramesLogNormal[id] = (float)allFrames[id] - meanFrame[pixel]; allFramesLogNormal[id] = abs(allFramesLogNormal[id]); //Can't take log of zero so to be safe check and move it off zero. if (allFramesLogNormal[id] == 0.0f) { allFramesLogNormal[id] = 0.000001f; } allFramesLogNormal[id] = logf(allFramesLogNormal[id]); //allFramesLogNormal[id] = (float)allFrames[id]; // Remove after debugging. 
} } //Log Mean Matrix if (pixel < pixelsPerFrame) { double sum = 0.0; for (int i = 0; i < frames; i++) { sum += allFramesLogNormal[pixel + pixelsPerFrame * i]; } meanlogNormalFrame[pixel] = sum / (float)frames; } int used[FRAMES], index, count; float median = 0.0; float small; //Log Median Matrix if (pixel < pixelsPerFrame) { for (int i = 0; i < frames; i++) { used[i] = 0; } if (frames % 2 == 0) { int middle2 = frames / 2; int middle1 = middle2 - 1; index = -1; count = 0; while (count <= middle2) { small = 10000000.0f; //Needs to be a number larger than anything you would get in a log of a pixel. for (int i = 0; i < frames; i++) { if (allFramesLogNormal[pixel + pixelsPerFrame * i] < small && used[i] == 0) { small = allFramesLogNormal[pixel + pixelsPerFrame * i]; index = i; } } if (index == -1) printf("\nError no index found\n"); used[index] = 1; if (count == middle1 || count == middle2) { median += allFramesLogNormal[pixel + pixelsPerFrame * index]; } count++; } median /= 2.0f; } else { int middle = frames / 2; index = -1; count = 0; while (count <= middle) { small = 10000000.0f; //Needs to be a number larger than anything you would get in a log of a pixel. for (int i = 0; i < frames; i++) { if (allFramesLogNormal[pixel + pixelsPerFrame * i] < small) { if (used[i] == 0) { small = allFramesLogNormal[pixel + pixelsPerFrame * i]; index = i; } } } if (index == -1) printf("\nError no index found\n"); used[index] = 1; if (count == middle) { median += allFramesLogNormal[pixel + pixelsPerFrame * index]; } count++; } } medianlogNormalFrame[pixel] = median; } float temp; //Log-Normal STD Matrix if (pixel < pixelsPerFrame) { double sum = 0.0; for (int i = 0; i < frames; i++) { temp = allFramesLogNormal[pixel + pixelsPerFrame * i] - meanlogNormalFrame[pixel]; sum += temp * temp; } stdvLogNormalFrame[pixel] = sqrtf((sum) / (float)(frames - 1)); } } __global__ void CDFfunction(float* median, float* stdvLogNormalFrame, float* MeanLogNormalFrame, unsigned char* currentFrame, int pixelsPerFrame) { int pixel = threadIdx.x + blockIdx.x * blockDim.x; if (pixel < pixelsPerFrame) { float newvalue; float x = currentFrame[pixel]; newvalue = -((logf(x) - median[pixel]) - MeanLogNormalFrame[pixel]) / (sqrtf(2) * stdvLogNormalFrame[pixel]); float summ = 0.5f + 0.5f * erff(newvalue); //Threshold set to 30% if (summ >= 0.3) { currentFrame[pixel] = (unsigned char)255; } else { currentFrame[pixel] = (unsigned char)0; } } } void errorCheck(const char* message) { cudaError_t error; error = cudaGetLastError(); if (error != cudaSuccess) { printf("%s", message); printf("\n CUDA ERROR: %s\n", cudaGetErrorString(error)); exit(0); } } void cleanUp() { free(BlockOfFrames_CPU); cudaFree(BlockOfFrames_GPU); cudaFree(BlockOfLogNormalFrames_GPU); cudaFree(MeanFrame_GPU); cudaFree(MeanLogNormalFrame_GPU); cudaFree(MedianLogNormalFrame_GPU); cudaFree(StdvLogNormalFrame_GPU); } int main() { AllocateMemory(); SetUpCudaDevices(); //This option is set for a recorded video VideoCapture cap("video.avi"); //The next 4 lines are for an attached camera //VideoCapture cap; //int DeviceID = 0;//0 is set for the first camera instance //int apiID = CAP_ANY; //cap.open(DeviceID, apiID); Mat frame; cap.read(frame); int frame_width = static_cast<int>(cap.get(CAP_PROP_FRAME_WIDTH)); //get the width of frames of the video int frame_height = static_cast<int>(cap.get(CAP_PROP_FRAME_HEIGHT)); Mat grayimg, Temp; if (!cap.isOpened()) { std::cout << "Error! 
Unable to open camera\n"; cleanUp(); return -1; } unsigned char* TempFrame; for (int i = 0; i < FRAMES; i++) { cap.read(frame); frame.convertTo(Temp, CV_8U); cvtColor(Temp, grayimg, COLOR_RGB2GRAY); TempFrame = grayimg.ptr<unsigned char>(0); memcpy(BlockOfFrames_CPU + i * PIXELS_PER_FRAME, TempFrame, sizeof(unsigned char) * PIXELS_PER_FRAME); } cudaMemcpyAsync(BlockOfFrames_GPU, BlockOfFrames_CPU, PIXELS_PER_FRAME * FRAMES * sizeof(unsigned char), cudaMemcpyHostToDevice); errorCheck("copyFramessUp"); cudaDeviceSynchronize(); creatingBuffer << <dimGrid, dimBlock >> > (MeanFrame_GPU, BlockOfFrames_GPU, BlockOfLogNormalFrames_GPU, PIXELS_PER_FRAME, MeanLogNormalFrame_GPU, MedianLogNormalFrame_GPU, StdvLogNormalFrame_GPU, FRAMES); cudaDeviceSynchronize(); int i = 0; Size frame_size(frame_width, frame_height); int frames_per_second = 60; //Create and initialize the VideoWriter object VideoWriter video("E:\\outputVideo.avi", VideoWriter::fourcc('M', 'J', 'P', 'G'), 60, Size(frame_width, frame_height)); while (true) { cap.read(frame); cvtColor(frame, grayimg, COLOR_RGB2GRAY); TempFrame = grayimg.ptr<unsigned char>(0); memcpy(BlockOfFrames_CPU + i * PIXELS_PER_FRAME, TempFrame, sizeof(unsigned char) * (PIXELS_PER_FRAME)); cudaMemcpyAsync(BlockOfFrames_GPU, BlockOfFrames_CPU, PIXELS_PER_FRAME * FRAMES * sizeof(unsigned char), cudaMemcpyHostToDevice); if (i == 2) { i = -1; } if (i % 1 == 0) { creatingBuffer << <dimGrid, dimBlock >> > (MeanFrame_GPU, BlockOfFrames_GPU, BlockOfLogNormalFrames_GPU, PIXELS_PER_FRAME, MeanLogNormalFrame_GPU, MedianLogNormalFrame_GPU, StdvLogNormalFrame_GPU, FRAMES); } cudaMemcpyAsync(currentFrame_GPU, TempFrame, PIXELS_PER_FRAME * sizeof(unsigned char), cudaMemcpyHostToDevice); CDFfunction << <dimGrid, dimBlock >> > (MedianLogNormalFrame_GPU, StdvLogNormalFrame_GPU, MeanLogNormalFrame_GPU, currentFrame_GPU, PIXELS_PER_FRAME); errorCheck("CDF function"); cudaMemcpyAsync(TempFrame, currentFrame_GPU, PIXELS_PER_FRAME * sizeof(unsigned char), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); imshow("image", grayimg); //if a key is pressed break. if (waitKey(5) >= 0) { video.release(); cleanUp(); break; } cvtColor(grayimg, grayimg, COLOR_GRAY2RGB); video.write(grayimg); } cap.release(); printf("\n DONE \n"); }
deebf5cd8f6052fc79b83809b32ac0b6ba753d2a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> __device__ int _inv(double *m, double *invOut); __device__ void mult(double *A, double *B, double *C); __device__ void copy(double *A, double *B); __device__ void _eye(double *data); // TODO: device level link class // TODO: block >= 2048 error /* * Params * T: double(N, 4, 4) the final transform matrix of all points (shared) * tool: double(N, 4, 4) the tool transform matrix of all points (shared) * nlinks_pt: long(N,): the number of links associated with each (shared) * link_A: double(N, max_nlinks, 4, 4) the transformation matrix of all joints * link_axes: long(max_nlinks, ): axes of all links * link_isjoint: long(max_nlinks, ): 1/0 whether links are joints * N: (int) number of points * njoints: (int) number of joints * out: (N, 6, njoints) */ __global__ void _jacob0(double *T, double *tool, double *etool, double *link_A, long *nlinks_pt, long *link_axes, long *link_isjoint, int N, int max_nlinks, int njoints, double *out) { int tid = blockIdx.x * blockDim.x + threadIdx.x; double *T_i; double *tool_i; double *U; double *temp; double *etool_i; double *invU; double *link_iA; U = (double*) malloc(sizeof(double) * 16); invU = (double*) malloc(sizeof(double) * 16); temp = (double*) malloc(sizeof(double) * 16); int j = 0; tool_i = &tool[tid * 16]; etool_i = &etool[tid * 16]; _eye(U); T_i = &T[tid * 16]; if (tid >= N) { free(U); free(invU); free(temp); return; } long nlinks = nlinks_pt[tid]; double *link_A_tid = &link_A[tid * max_nlinks * 4 * 4]; // printf("Hello from tid %d nlinks %ld\n", tid, nlinks); for (int i = 0; i < nlinks; i++) { // printf("Hello from tid %d link_i %d link_axis %ld isjoint %ld \n", tid, i, link_axes[i], link_isjoint[i]); if (link_isjoint[i] == 1) { link_iA = &link_A_tid[i * 16]; mult(U, link_iA, temp); copy(temp, U); if (i == nlinks - 1) { mult(U, etool_i, temp); copy(temp, U); mult(U, tool_i, temp); copy(temp , U); } _inv(U, invU); mult(invU, T_i, temp); double *out_tid = &out[tid * 6 * njoints]; if (link_axes[i] == 0) { out_tid[0 * njoints + j] = U[0 * 4 + 2] * temp[1 * 4 + 3] - U[0 * 4 + 1] * temp[2 * 4 + 3]; out_tid[1 * njoints + j] = U[1 * 4 + 2] * temp[1 * 4 + 3] - U[1 * 4 + 1] * temp[2 * 4 + 3]; out_tid[2 * njoints + j] = U[2 * 4 + 2] * temp[1 * 4 + 3] - U[2 * 4 + 1] * temp[2 * 4 + 3]; out_tid[3 * njoints + j] = U[0 * 4 + 2]; out_tid[4 * njoints + j] = U[1 * 4 + 2]; out_tid[5 * njoints + j] = U[2 * 4 + 2]; } else if (link_axes[i] == 1) { out_tid[0 * njoints + j] = U[0 * 4 + 0] * temp[2 * 4 + 3] - U[0 * 4 + 2] * temp[0 * 4 + 3]; out_tid[1 * njoints + j] = U[1 * 4 + 0] * temp[2 * 4 + 3] - U[1 * 4 + 2] * temp[0 * 4 + 3]; out_tid[2 * njoints + j] = U[2 * 4 + 0] * temp[2 * 4 + 3] - U[2 * 4 + 2] * temp[0 * 4 + 3]; out_tid[3 * njoints + j] = U[0 * 4 + 1]; out_tid[4 * njoints + j] = U[1 * 4 + 1]; out_tid[5 * njoints + j] = U[2 * 4 + 1]; } else if (link_axes[i] == 2) { out_tid[0 * njoints + j] = U[0 * 4 + 1] * temp[0 * 4 + 3] - U[0 * 4 + 0] * temp[1 * 4 + 3]; out_tid[1 * njoints + j] = U[1 * 4 + 1] * temp[0 * 4 + 3] - U[1 * 4 + 0] * temp[1 * 4 + 3]; out_tid[2 * njoints + j] = U[2 * 4 + 1] * temp[0 * 4 + 3] - U[2 * 4 + 0] * temp[1 * 4 + 3]; out_tid[3 * njoints + j] = U[0 * 4 + 2]; out_tid[4 * njoints + j] = U[1 * 4 + 2]; out_tid[5 * njoints + j] = U[2 * 4 + 2]; } else if (link_axes[i] == 3) { out_tid[0 * njoints + j] = U[0 * 4 + 0]; out_tid[1 * njoints + j] = U[1 * 4 + 0]; out_tid[2 
* njoints + j] = U[2 * 4 + 0]; out_tid[3 * njoints + j] = 0.0; out_tid[4 * njoints + j] = 0.0; out_tid[5 * njoints + j] = 0.0; } else if (link_axes[i] == 4) { out_tid[0 * njoints + j] = U[0 * 4 + 1]; out_tid[1 * njoints + j] = U[1 * 4 + 1]; out_tid[2 * njoints + j] = U[2 * 4 + 1]; out_tid[3 * njoints + j] = 0.0; out_tid[4 * njoints + j] = 0.0; out_tid[5 * njoints + j] = 0.0; } else if (link_axes[i] == 5) { out_tid[0 * njoints + j] = U[0 * 4 + 2]; out_tid[1 * njoints + j] = U[1 * 4 + 2]; out_tid[2 * njoints + j] = U[2 * 4 + 2]; out_tid[3 * njoints + j] = 0.0; out_tid[4 * njoints + j] = 0.0; out_tid[5 * njoints + j] = 0.0; } j++; } else { link_iA = &link_A_tid[i * 16]; mult(U, link_iA, temp); copy(temp, U); } } free(U); free(invU); free(temp); } __device__ void _eye(double *data) { data[0] = 1; data[1] = 0; data[2] = 0; data[3] = 0; data[4] = 0; data[5] = 1; data[6] = 0; data[7] = 0; data[8] = 0; data[9] = 0; data[10] = 1; data[11] = 0; data[12] = 0; data[13] = 0; data[14] = 0; data[15] = 1; } __device__ void copy(double *A, double *B) { // copy A into B B[0] = A[0]; B[1] = A[1]; B[2] = A[2]; B[3] = A[3]; B[4] = A[4]; B[5] = A[5]; B[6] = A[6]; B[7] = A[7]; B[8] = A[8]; B[9] = A[9]; B[10] = A[10]; B[11] = A[11]; B[12] = A[12]; B[13] = A[13]; B[14] = A[14]; B[15] = A[15]; } __device__ void mult(double *A, double *B, double *C) { const int N = 4; int i, j, k; double num; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { num = 0; for (k = 0; k < N; k++) { num += A[i * N + k] * B[k * N + j]; } C[i * N + j] = num; } } } __device__ int _inv(double *m, double *invOut) { double *inv = (double*) malloc(sizeof(double) * 16); double det; int i; inv[0] = m[5] * m[10] * m[15] - m[5] * m[11] * m[14] - m[9] * m[6] * m[15] + m[9] * m[7] * m[14] + m[13] * m[6] * m[11] - m[13] * m[7] * m[10]; inv[4] = -m[4] * m[10] * m[15] + m[4] * m[11] * m[14] + m[8] * m[6] * m[15] - m[8] * m[7] * m[14] - m[12] * m[6] * m[11] + m[12] * m[7] * m[10]; inv[8] = m[4] * m[9] * m[15] - m[4] * m[11] * m[13] - m[8] * m[5] * m[15] + m[8] * m[7] * m[13] + m[12] * m[5] * m[11] - m[12] * m[7] * m[9]; inv[12] = -m[4] * m[9] * m[14] + m[4] * m[10] * m[13] + m[8] * m[5] * m[14] - m[8] * m[6] * m[13] - m[12] * m[5] * m[10] + m[12] * m[6] * m[9]; inv[1] = -m[1] * m[10] * m[15] + m[1] * m[11] * m[14] + m[9] * m[2] * m[15] - m[9] * m[3] * m[14] - m[13] * m[2] * m[11] + m[13] * m[3] * m[10]; inv[5] = m[0] * m[10] * m[15] - m[0] * m[11] * m[14] - m[8] * m[2] * m[15] + m[8] * m[3] * m[14] + m[12] * m[2] * m[11] - m[12] * m[3] * m[10]; inv[9] = -m[0] * m[9] * m[15] + m[0] * m[11] * m[13] + m[8] * m[1] * m[15] - m[8] * m[3] * m[13] - m[12] * m[1] * m[11] + m[12] * m[3] * m[9]; inv[13] = m[0] * m[9] * m[14] - m[0] * m[10] * m[13] - m[8] * m[1] * m[14] + m[8] * m[2] * m[13] + m[12] * m[1] * m[10] - m[12] * m[2] * m[9]; inv[2] = m[1] * m[6] * m[15] - m[1] * m[7] * m[14] - m[5] * m[2] * m[15] + m[5] * m[3] * m[14] + m[13] * m[2] * m[7] - m[13] * m[3] * m[6]; inv[6] = -m[0] * m[6] * m[15] + m[0] * m[7] * m[14] + m[4] * m[2] * m[15] - m[4] * m[3] * m[14] - m[12] * m[2] * m[7] + m[12] * m[3] * m[6]; inv[10] = m[0] * m[5] * m[15] - m[0] * m[7] * m[13] - m[4] * m[1] * m[15] + m[4] * m[3] * m[13] + m[12] * m[1] * m[7] - m[12] * m[3] * m[5]; inv[14] = -m[0] * m[5] * m[14] + m[0] * m[6] * m[13] + m[4] * m[1] * m[14] - m[4] * m[2] * m[13] - m[12] * m[1] * m[6] + m[12] * m[2] * m[5]; inv[3] = -m[1] * m[6] * m[11] + m[1] * m[7] * m[10] + m[5] * m[2] * m[11] - m[5] * m[3] * m[10] - m[9] * m[2] * m[7] + m[9] * m[3] * m[6]; inv[7] = m[0] * m[6] * m[11] - m[0] * 
m[7] * m[10] - m[4] * m[2] * m[11] + m[4] * m[3] * m[10] + m[8] * m[2] * m[7] - m[8] * m[3] * m[6]; inv[11] = -m[0] * m[5] * m[11] + m[0] * m[7] * m[9] + m[4] * m[1] * m[11] - m[4] * m[3] * m[9] - m[8] * m[1] * m[7] + m[8] * m[3] * m[5]; inv[15] = m[0] * m[5] * m[10] - m[0] * m[6] * m[9] - m[4] * m[1] * m[10] + m[4] * m[2] * m[9] + m[8] * m[1] * m[6] - m[8] * m[2] * m[5]; det = m[0] * inv[0] + m[1] * inv[4] + m[2] * inv[8] + m[3] * inv[12]; if (det == 0) { free(inv); return 0; } det = 1.0 / det; for (i = 0; i < 16; i++) invOut[i] = inv[i] * det; free(inv); return 1; } extern "C"{ /* * Params * T: double(N, 4, 4) the final transform matrix of all points (shared) * tool: double(N, 4, 4) the tool transform matrix of all points (shared) * nlinks_pt: long(N,): the number of links associated with each (shared) * link_A: double(N, max_nlinks, 4, 4) the transformation matrix of all joints * link_axes: long(max_nlinks, ): axes of all links * link_isjoint: long(max_nlinks, ): 1/0 whether links are joints * N: (int) number of points * max_nlinks: (int) max number of links on the path * njoints: (int) number of joints * out: (N, 6, njoints) */ void jacob0(double *T, double *tool, double *etool, double *link_A, long *nlinks_pt, long *link_axes, long *link_isjoint, int N, int max_nlinks, int njoints, double *out) { int block_size = 768; int grid_size = ((N + block_size) / block_size); // printf("Block size %d N %d gid size %d\n", block_size, N, grid_size); double *d_T, *d_tool, *d_etool, *d_link_A; long *d_link_axes, *d_link_isjoint, *d_nlinks_pt; double *d_out; hipMalloc((void**)&d_T, sizeof(double) * N * 16); hipMalloc((void**)&d_tool, sizeof(double) * N * 16); hipMalloc((void**)&d_etool, sizeof(double) * N * 16); hipMalloc((void**)&d_link_A, sizeof(double) * N * max_nlinks * 16); hipMalloc((void**)&d_nlinks_pt, sizeof(long) * N); hipMalloc((void**)&d_link_axes, sizeof(long) * max_nlinks); hipMalloc((void**)&d_link_isjoint, sizeof(long) * max_nlinks); hipMalloc((void**)&d_out, sizeof(double) * N * 6 * njoints); // Transfer data from host to device memory hipMemcpy(d_T, T, sizeof(double) * N * 16, hipMemcpyHostToDevice); hipMemcpy(d_tool, tool, sizeof(double) * N * 16, hipMemcpyHostToDevice); hipMemcpy(d_etool, etool, sizeof(double) * N * 16, hipMemcpyHostToDevice); hipMemcpy(d_link_A, link_A, sizeof(double) * N * max_nlinks * 16, hipMemcpyHostToDevice); hipMemcpy(d_nlinks_pt, nlinks_pt, sizeof(long) * N, hipMemcpyHostToDevice); hipMemcpy(d_link_axes, link_axes, sizeof(long) * max_nlinks, hipMemcpyHostToDevice); hipMemcpy(d_link_isjoint, link_isjoint, sizeof(long) * max_nlinks, hipMemcpyHostToDevice); hipMemcpy(d_out, out, sizeof(double) * N * 6 * njoints, hipMemcpyHostToDevice); hipLaunchKernelGGL(( _jacob0), dim3(grid_size),dim3(block_size), 0, 0, d_T, d_tool, d_etool, d_link_A, d_nlinks_pt, d_link_axes, d_link_isjoint, N, max_nlinks, njoints, d_out); hipError_t cudaerr = hipDeviceSynchronize(); // if (cudaerr != hipSuccess) // printf("kernel launch failed with error \"%s\".\n", // hipGetErrorString(cudaerr)); // memset(out, 1, N * 6 * njoints); // out[0] = 1; hipMemcpy(out, d_out, sizeof(double) * N * 6 * njoints, hipMemcpyDeviceToHost); // Deallocate device memory hipFree(d_T); hipFree(d_tool); hipFree(d_nlinks_pt); hipFree(d_etool); hipFree(d_link_A); hipFree(d_link_axes); hipFree(d_link_isjoint); hipFree(d_out); } }//extern "C"
deebf5cd8f6052fc79b83809b32ac0b6ba753d2a.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <cuda.h> #include <cuda_runtime.h> __device__ int _inv(double *m, double *invOut); __device__ void mult(double *A, double *B, double *C); __device__ void copy(double *A, double *B); __device__ void _eye(double *data); // TODO: device level link class // TODO: block >= 2048 error /* * Params * T: double(N, 4, 4) the final transform matrix of all points (shared) * tool: double(N, 4, 4) the tool transform matrix of all points (shared) * nlinks_pt: long(N,): the number of links associated with each (shared) * link_A: double(N, max_nlinks, 4, 4) the transformation matrix of all joints * link_axes: long(max_nlinks, ): axes of all links * link_isjoint: long(max_nlinks, ): 1/0 whether links are joints * N: (int) number of points * njoints: (int) number of joints * out: (N, 6, njoints) */ __global__ void _jacob0(double *T, double *tool, double *etool, double *link_A, long *nlinks_pt, long *link_axes, long *link_isjoint, int N, int max_nlinks, int njoints, double *out) { int tid = blockIdx.x * blockDim.x + threadIdx.x; double *T_i; double *tool_i; double *U; double *temp; double *etool_i; double *invU; double *link_iA; U = (double*) malloc(sizeof(double) * 16); invU = (double*) malloc(sizeof(double) * 16); temp = (double*) malloc(sizeof(double) * 16); int j = 0; tool_i = &tool[tid * 16]; etool_i = &etool[tid * 16]; _eye(U); T_i = &T[tid * 16]; if (tid >= N) { free(U); free(invU); free(temp); return; } long nlinks = nlinks_pt[tid]; double *link_A_tid = &link_A[tid * max_nlinks * 4 * 4]; // printf("Hello from tid %d nlinks %ld\n", tid, nlinks); for (int i = 0; i < nlinks; i++) { // printf("Hello from tid %d link_i %d link_axis %ld isjoint %ld \n", tid, i, link_axes[i], link_isjoint[i]); if (link_isjoint[i] == 1) { link_iA = &link_A_tid[i * 16]; mult(U, link_iA, temp); copy(temp, U); if (i == nlinks - 1) { mult(U, etool_i, temp); copy(temp, U); mult(U, tool_i, temp); copy(temp , U); } _inv(U, invU); mult(invU, T_i, temp); double *out_tid = &out[tid * 6 * njoints]; if (link_axes[i] == 0) { out_tid[0 * njoints + j] = U[0 * 4 + 2] * temp[1 * 4 + 3] - U[0 * 4 + 1] * temp[2 * 4 + 3]; out_tid[1 * njoints + j] = U[1 * 4 + 2] * temp[1 * 4 + 3] - U[1 * 4 + 1] * temp[2 * 4 + 3]; out_tid[2 * njoints + j] = U[2 * 4 + 2] * temp[1 * 4 + 3] - U[2 * 4 + 1] * temp[2 * 4 + 3]; out_tid[3 * njoints + j] = U[0 * 4 + 2]; out_tid[4 * njoints + j] = U[1 * 4 + 2]; out_tid[5 * njoints + j] = U[2 * 4 + 2]; } else if (link_axes[i] == 1) { out_tid[0 * njoints + j] = U[0 * 4 + 0] * temp[2 * 4 + 3] - U[0 * 4 + 2] * temp[0 * 4 + 3]; out_tid[1 * njoints + j] = U[1 * 4 + 0] * temp[2 * 4 + 3] - U[1 * 4 + 2] * temp[0 * 4 + 3]; out_tid[2 * njoints + j] = U[2 * 4 + 0] * temp[2 * 4 + 3] - U[2 * 4 + 2] * temp[0 * 4 + 3]; out_tid[3 * njoints + j] = U[0 * 4 + 1]; out_tid[4 * njoints + j] = U[1 * 4 + 1]; out_tid[5 * njoints + j] = U[2 * 4 + 1]; } else if (link_axes[i] == 2) { out_tid[0 * njoints + j] = U[0 * 4 + 1] * temp[0 * 4 + 3] - U[0 * 4 + 0] * temp[1 * 4 + 3]; out_tid[1 * njoints + j] = U[1 * 4 + 1] * temp[0 * 4 + 3] - U[1 * 4 + 0] * temp[1 * 4 + 3]; out_tid[2 * njoints + j] = U[2 * 4 + 1] * temp[0 * 4 + 3] - U[2 * 4 + 0] * temp[1 * 4 + 3]; out_tid[3 * njoints + j] = U[0 * 4 + 2]; out_tid[4 * njoints + j] = U[1 * 4 + 2]; out_tid[5 * njoints + j] = U[2 * 4 + 2]; } else if (link_axes[i] == 3) { out_tid[0 * njoints + j] = U[0 * 4 + 0]; out_tid[1 * njoints + j] = U[1 * 4 + 0]; out_tid[2 * njoints + j] = U[2 * 4 + 0]; out_tid[3 * njoints + j] = 0.0; out_tid[4 
* njoints + j] = 0.0; out_tid[5 * njoints + j] = 0.0; } else if (link_axes[i] == 4) { out_tid[0 * njoints + j] = U[0 * 4 + 1]; out_tid[1 * njoints + j] = U[1 * 4 + 1]; out_tid[2 * njoints + j] = U[2 * 4 + 1]; out_tid[3 * njoints + j] = 0.0; out_tid[4 * njoints + j] = 0.0; out_tid[5 * njoints + j] = 0.0; } else if (link_axes[i] == 5) { out_tid[0 * njoints + j] = U[0 * 4 + 2]; out_tid[1 * njoints + j] = U[1 * 4 + 2]; out_tid[2 * njoints + j] = U[2 * 4 + 2]; out_tid[3 * njoints + j] = 0.0; out_tid[4 * njoints + j] = 0.0; out_tid[5 * njoints + j] = 0.0; } j++; } else { link_iA = &link_A_tid[i * 16]; mult(U, link_iA, temp); copy(temp, U); } } free(U); free(invU); free(temp); } __device__ void _eye(double *data) { data[0] = 1; data[1] = 0; data[2] = 0; data[3] = 0; data[4] = 0; data[5] = 1; data[6] = 0; data[7] = 0; data[8] = 0; data[9] = 0; data[10] = 1; data[11] = 0; data[12] = 0; data[13] = 0; data[14] = 0; data[15] = 1; } __device__ void copy(double *A, double *B) { // copy A into B B[0] = A[0]; B[1] = A[1]; B[2] = A[2]; B[3] = A[3]; B[4] = A[4]; B[5] = A[5]; B[6] = A[6]; B[7] = A[7]; B[8] = A[8]; B[9] = A[9]; B[10] = A[10]; B[11] = A[11]; B[12] = A[12]; B[13] = A[13]; B[14] = A[14]; B[15] = A[15]; } __device__ void mult(double *A, double *B, double *C) { const int N = 4; int i, j, k; double num; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { num = 0; for (k = 0; k < N; k++) { num += A[i * N + k] * B[k * N + j]; } C[i * N + j] = num; } } } __device__ int _inv(double *m, double *invOut) { double *inv = (double*) malloc(sizeof(double) * 16); double det; int i; inv[0] = m[5] * m[10] * m[15] - m[5] * m[11] * m[14] - m[9] * m[6] * m[15] + m[9] * m[7] * m[14] + m[13] * m[6] * m[11] - m[13] * m[7] * m[10]; inv[4] = -m[4] * m[10] * m[15] + m[4] * m[11] * m[14] + m[8] * m[6] * m[15] - m[8] * m[7] * m[14] - m[12] * m[6] * m[11] + m[12] * m[7] * m[10]; inv[8] = m[4] * m[9] * m[15] - m[4] * m[11] * m[13] - m[8] * m[5] * m[15] + m[8] * m[7] * m[13] + m[12] * m[5] * m[11] - m[12] * m[7] * m[9]; inv[12] = -m[4] * m[9] * m[14] + m[4] * m[10] * m[13] + m[8] * m[5] * m[14] - m[8] * m[6] * m[13] - m[12] * m[5] * m[10] + m[12] * m[6] * m[9]; inv[1] = -m[1] * m[10] * m[15] + m[1] * m[11] * m[14] + m[9] * m[2] * m[15] - m[9] * m[3] * m[14] - m[13] * m[2] * m[11] + m[13] * m[3] * m[10]; inv[5] = m[0] * m[10] * m[15] - m[0] * m[11] * m[14] - m[8] * m[2] * m[15] + m[8] * m[3] * m[14] + m[12] * m[2] * m[11] - m[12] * m[3] * m[10]; inv[9] = -m[0] * m[9] * m[15] + m[0] * m[11] * m[13] + m[8] * m[1] * m[15] - m[8] * m[3] * m[13] - m[12] * m[1] * m[11] + m[12] * m[3] * m[9]; inv[13] = m[0] * m[9] * m[14] - m[0] * m[10] * m[13] - m[8] * m[1] * m[14] + m[8] * m[2] * m[13] + m[12] * m[1] * m[10] - m[12] * m[2] * m[9]; inv[2] = m[1] * m[6] * m[15] - m[1] * m[7] * m[14] - m[5] * m[2] * m[15] + m[5] * m[3] * m[14] + m[13] * m[2] * m[7] - m[13] * m[3] * m[6]; inv[6] = -m[0] * m[6] * m[15] + m[0] * m[7] * m[14] + m[4] * m[2] * m[15] - m[4] * m[3] * m[14] - m[12] * m[2] * m[7] + m[12] * m[3] * m[6]; inv[10] = m[0] * m[5] * m[15] - m[0] * m[7] * m[13] - m[4] * m[1] * m[15] + m[4] * m[3] * m[13] + m[12] * m[1] * m[7] - m[12] * m[3] * m[5]; inv[14] = -m[0] * m[5] * m[14] + m[0] * m[6] * m[13] + m[4] * m[1] * m[14] - m[4] * m[2] * m[13] - m[12] * m[1] * m[6] + m[12] * m[2] * m[5]; inv[3] = -m[1] * m[6] * m[11] + m[1] * m[7] * m[10] + m[5] * m[2] * m[11] - m[5] * m[3] * m[10] - m[9] * m[2] * m[7] + m[9] * m[3] * m[6]; inv[7] = m[0] * m[6] * m[11] - m[0] * m[7] * m[10] - m[4] * m[2] * m[11] + m[4] * m[3] * m[10] + m[8] * m[2] * 
m[7] - m[8] * m[3] * m[6]; inv[11] = -m[0] * m[5] * m[11] + m[0] * m[7] * m[9] + m[4] * m[1] * m[11] - m[4] * m[3] * m[9] - m[8] * m[1] * m[7] + m[8] * m[3] * m[5]; inv[15] = m[0] * m[5] * m[10] - m[0] * m[6] * m[9] - m[4] * m[1] * m[10] + m[4] * m[2] * m[9] + m[8] * m[1] * m[6] - m[8] * m[2] * m[5]; det = m[0] * inv[0] + m[1] * inv[4] + m[2] * inv[8] + m[3] * inv[12]; if (det == 0) { free(inv); return 0; } det = 1.0 / det; for (i = 0; i < 16; i++) invOut[i] = inv[i] * det; free(inv); return 1; } extern "C"{ /* * Params * T: double(N, 4, 4) the final transform matrix of all points (shared) * tool: double(N, 4, 4) the tool transform matrix of all points (shared) * nlinks_pt: long(N,): the number of links associated with each (shared) * link_A: double(N, max_nlinks, 4, 4) the transformation matrix of all joints * link_axes: long(max_nlinks, ): axes of all links * link_isjoint: long(max_nlinks, ): 1/0 whether links are joints * N: (int) number of points * max_nlinks: (int) max number of links on the path * njoints: (int) number of joints * out: (N, 6, njoints) */ void jacob0(double *T, double *tool, double *etool, double *link_A, long *nlinks_pt, long *link_axes, long *link_isjoint, int N, int max_nlinks, int njoints, double *out) { int block_size = 768; int grid_size = ((N + block_size) / block_size); // printf("Block size %d N %d gid size %d\n", block_size, N, grid_size); double *d_T, *d_tool, *d_etool, *d_link_A; long *d_link_axes, *d_link_isjoint, *d_nlinks_pt; double *d_out; cudaMalloc((void**)&d_T, sizeof(double) * N * 16); cudaMalloc((void**)&d_tool, sizeof(double) * N * 16); cudaMalloc((void**)&d_etool, sizeof(double) * N * 16); cudaMalloc((void**)&d_link_A, sizeof(double) * N * max_nlinks * 16); cudaMalloc((void**)&d_nlinks_pt, sizeof(long) * N); cudaMalloc((void**)&d_link_axes, sizeof(long) * max_nlinks); cudaMalloc((void**)&d_link_isjoint, sizeof(long) * max_nlinks); cudaMalloc((void**)&d_out, sizeof(double) * N * 6 * njoints); // Transfer data from host to device memory cudaMemcpy(d_T, T, sizeof(double) * N * 16, cudaMemcpyHostToDevice); cudaMemcpy(d_tool, tool, sizeof(double) * N * 16, cudaMemcpyHostToDevice); cudaMemcpy(d_etool, etool, sizeof(double) * N * 16, cudaMemcpyHostToDevice); cudaMemcpy(d_link_A, link_A, sizeof(double) * N * max_nlinks * 16, cudaMemcpyHostToDevice); cudaMemcpy(d_nlinks_pt, nlinks_pt, sizeof(long) * N, cudaMemcpyHostToDevice); cudaMemcpy(d_link_axes, link_axes, sizeof(long) * max_nlinks, cudaMemcpyHostToDevice); cudaMemcpy(d_link_isjoint, link_isjoint, sizeof(long) * max_nlinks, cudaMemcpyHostToDevice); cudaMemcpy(d_out, out, sizeof(double) * N * 6 * njoints, cudaMemcpyHostToDevice); _jacob0<<<grid_size,block_size>>>(d_T, d_tool, d_etool, d_link_A, d_nlinks_pt, d_link_axes, d_link_isjoint, N, max_nlinks, njoints, d_out); cudaError_t cudaerr = cudaDeviceSynchronize(); // if (cudaerr != cudaSuccess) // printf("kernel launch failed with error \"%s\".\n", // cudaGetErrorString(cudaerr)); // memset(out, 1, N * 6 * njoints); // out[0] = 1; cudaMemcpy(out, d_out, sizeof(double) * N * 6 * njoints, cudaMemcpyDeviceToHost); // Deallocate device memory cudaFree(d_T); cudaFree(d_tool); cudaFree(d_nlinks_pt); cudaFree(d_etool); cudaFree(d_link_A); cudaFree(d_link_axes); cudaFree(d_link_isjoint); cudaFree(d_out); } }//extern "C"
de62d99bb87b198566519ff6137a508f66efa268.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/inference/tensorrt/plugin/transformer_input_convert_plugin.h"

namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {

__global__ void TransformerInputConvertKernel(const int64_t* input,
                                              int32_t* output0) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  __shared__ int32_t shared_data;
  if (threadIdx.x == static_cast<int>(input[tid])) {
    atomicAdd(&shared_data, 1);
  }

  output0[0] = 0;
  output0[blockIdx.x + 1] = shared_data;
  __syncthreads();

  for (int i = 0; i < blockDim.x; ++i) {
    output0[i + 1] += output0[i];
  }
}

nvinfer1::DataType TransformerInputConvertPlugin::getOutputDataType(
    int index,
    const nvinfer1::DataType* input_types,
    int nb_inputs) const TRT_NOEXCEPT {
  return nvinfer1::DataType::kINT32;
}

nvinfer1::DimsExprs TransformerInputConvertPlugin::getOutputDimensions(
    int outputIndex,
    const nvinfer1::DimsExprs* inputs,
    int nbInputs,
    nvinfer1::IExprBuilder& exprBuilder) TRT_NOEXCEPT {
  nvinfer1::DimsExprs output_dims{};
  output_dims.nbDims = 1;
  if (outputIndex == 0) {  // PosId
    const auto* one = exprBuilder.constant(1);
    output_dims.d[0] = exprBuilder.operation(
        nvinfer1::DimensionOperation::kSUM, *inputs[0].d[0], *one);
  } else {  // MaxSeqlen
    output_dims.d[0] = inputs[0].d[1];
  }
  return output_dims;
}

bool TransformerInputConvertPlugin::supportsFormatCombination(
    int pos,
    const nvinfer1::PluginTensorDesc* inOut,
    int nbInputs,
    int nbOutputs) TRT_NOEXCEPT {
  PADDLE_ENFORCE_EQ(nbInputs,
                    1,
                    platform::errors::InvalidArgument("Must have 1 inputs, "
                                                      "but got %d input(s). ",
                                                      nbInputs));
  PADDLE_ENFORCE_EQ(nbOutputs,
                    getNbOutputs(),
                    platform::errors::InvalidArgument("Must have 2 output, "
                                                      "but got %d output(s). ",
                                                      nbOutputs));

  if (pos == 0) {  // input
    return inOut[pos].format == nvinfer1::TensorFormat::kLINEAR;
  } else {  // output0, output1
    return inOut[pos].type == nvinfer1::DataType::kINT32 &&
           inOut[pos].format == nvinfer1::TensorFormat::kLINEAR;
  }
}

void TransformerInputConvertPlugin::configurePlugin(
    const nvinfer1::DynamicPluginTensorDesc* inputs,
    int nbInputs,
    const nvinfer1::DynamicPluginTensorDesc* outputs,
    int nbOutputs) TRT_NOEXCEPT {}

void TransformerInputConvertPlugin::attachToContext(
    cudnnContext* cudnnContext,
    cublasContext* cublasContext,
    nvinfer1::IGpuAllocator* gpuAllocator) TRT_NOEXCEPT {}

void TransformerInputConvertPlugin::detachFromContext() TRT_NOEXCEPT {}

void TransformerInputConvertPlugin::terminate() TRT_NOEXCEPT {}

int TransformerInputConvertPlugin::enqueue(
    const nvinfer1::PluginTensorDesc* inputDesc,
    const nvinfer1::PluginTensorDesc* outputDesc,
    const void* const* inputs,
    void* const* outputs,
    void* workspace,
    hipStream_t stream) TRT_NOEXCEPT {
  const auto input_desc = inputDesc[0];
  const int64_t* input = static_cast<const int64_t*>(inputs[0]);
  int32_t* output0 = static_cast<int32_t*>(outputs[0]);  // PosId
  // int32_t* output1 = static_cast<int32_t*>(outputs[1]);  // MaxSeqlen

  const int32_t num_blocks = input_desc.dims.d[0];  // batchs
  const int32_t num_threads = input_desc.dims.d[1];  // max sequnce length

  hipLaunchKernelGGL(( TransformerInputConvertKernel), dim3(num_blocks), dim3(num_threads), 0, stream, input, output0);
  return hipGetLastError() != hipSuccess;
}

} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
de62d99bb87b198566519ff6137a508f66efa268.cu
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/inference/tensorrt/plugin/transformer_input_convert_plugin.h"

namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {

__global__ void TransformerInputConvertKernel(const int64_t* input,
                                              int32_t* output0) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  __shared__ int32_t shared_data;
  if (threadIdx.x == static_cast<int>(input[tid])) {
    atomicAdd(&shared_data, 1);
  }

  output0[0] = 0;
  output0[blockIdx.x + 1] = shared_data;
  __syncthreads();

  for (int i = 0; i < blockDim.x; ++i) {
    output0[i + 1] += output0[i];
  }
}

nvinfer1::DataType TransformerInputConvertPlugin::getOutputDataType(
    int index,
    const nvinfer1::DataType* input_types,
    int nb_inputs) const TRT_NOEXCEPT {
  return nvinfer1::DataType::kINT32;
}

nvinfer1::DimsExprs TransformerInputConvertPlugin::getOutputDimensions(
    int outputIndex,
    const nvinfer1::DimsExprs* inputs,
    int nbInputs,
    nvinfer1::IExprBuilder& exprBuilder) TRT_NOEXCEPT {
  nvinfer1::DimsExprs output_dims{};
  output_dims.nbDims = 1;
  if (outputIndex == 0) {  // PosId
    const auto* one = exprBuilder.constant(1);
    output_dims.d[0] = exprBuilder.operation(
        nvinfer1::DimensionOperation::kSUM, *inputs[0].d[0], *one);
  } else {  // MaxSeqlen
    output_dims.d[0] = inputs[0].d[1];
  }
  return output_dims;
}

bool TransformerInputConvertPlugin::supportsFormatCombination(
    int pos,
    const nvinfer1::PluginTensorDesc* inOut,
    int nbInputs,
    int nbOutputs) TRT_NOEXCEPT {
  PADDLE_ENFORCE_EQ(nbInputs,
                    1,
                    platform::errors::InvalidArgument("Must have 1 inputs, "
                                                      "but got %d input(s). ",
                                                      nbInputs));
  PADDLE_ENFORCE_EQ(nbOutputs,
                    getNbOutputs(),
                    platform::errors::InvalidArgument("Must have 2 output, "
                                                      "but got %d output(s). ",
                                                      nbOutputs));

  if (pos == 0) {  // input
    return inOut[pos].format == nvinfer1::TensorFormat::kLINEAR;
  } else {  // output0, output1
    return inOut[pos].type == nvinfer1::DataType::kINT32 &&
           inOut[pos].format == nvinfer1::TensorFormat::kLINEAR;
  }
}

void TransformerInputConvertPlugin::configurePlugin(
    const nvinfer1::DynamicPluginTensorDesc* inputs,
    int nbInputs,
    const nvinfer1::DynamicPluginTensorDesc* outputs,
    int nbOutputs) TRT_NOEXCEPT {}

void TransformerInputConvertPlugin::attachToContext(
    cudnnContext* cudnnContext,
    cublasContext* cublasContext,
    nvinfer1::IGpuAllocator* gpuAllocator) TRT_NOEXCEPT {}

void TransformerInputConvertPlugin::detachFromContext() TRT_NOEXCEPT {}

void TransformerInputConvertPlugin::terminate() TRT_NOEXCEPT {}

int TransformerInputConvertPlugin::enqueue(
    const nvinfer1::PluginTensorDesc* inputDesc,
    const nvinfer1::PluginTensorDesc* outputDesc,
    const void* const* inputs,
    void* const* outputs,
    void* workspace,
    cudaStream_t stream) TRT_NOEXCEPT {
  const auto input_desc = inputDesc[0];
  const int64_t* input = static_cast<const int64_t*>(inputs[0]);
  int32_t* output0 = static_cast<int32_t*>(outputs[0]);  // PosId
  // int32_t* output1 = static_cast<int32_t*>(outputs[1]);  // MaxSeqlen

  const int32_t num_blocks = input_desc.dims.d[0];  // batchs
  const int32_t num_threads = input_desc.dims.d[1];  // max sequnce length

  TransformerInputConvertKernel<<<num_blocks, num_threads, 0, stream>>>(
      input, output0);
  return cudaGetLastError() != cudaSuccess;
}

} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
5c50f4b6b0324b41fcd0e951fb87ebbc6bba7be2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <thrust/device_vector.h>
#include "rism3d.h"

double RISM3D :: cal_euv () {
  __global__ void euv(double * ds, double2 * dhuv, double * du,
                      double * de, double q);

  double euv0 = 0.0 ;
  for (int iv = 0; iv < sv -> natv; ++iv) {
    hipLaunchKernelGGL(( euv) , dim3(g), dim3(b), b.x * sizeof(double) , 0, ds,
        dguv + (iv * ce -> ngrid), du + (iv * ce -> ngrid), de, sv -> qv[iv]);
    thrust::device_ptr<double> ds_ptr(ds);
    double s = thrust::reduce(ds_ptr, ds_ptr + g.x * g.y);
    euv0 += s * sv -> rhov[iv];
  }
  euv0 *= ce -> dv;

  return (euv0);
}

__global__ void euv(double * ds, double2 * dguv, double * du,
                    double * de, double q) {
  extern __shared__ double sdata[];
  unsigned int ip = threadIdx.x + blockIdx.x * blockDim.x
                    + blockIdx.y * blockDim.x * gridDim.x;
  sdata[threadIdx.x] = dguv[ip].x * (du[ip] + de[ip] * q);
  __syncthreads();

  for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1) {
    if (threadIdx.x < s) {
      sdata[threadIdx.x] += sdata[threadIdx.x + s];
    }
    __syncthreads();
  }

  if (threadIdx.x < 32) {
    volatile double *smem = sdata;
    smem[threadIdx.x] += smem[threadIdx.x + 32];
    __syncwarp();
    smem[threadIdx.x] += smem[threadIdx.x + 16];
    __syncwarp();
    smem[threadIdx.x] += smem[threadIdx.x + 8];
    __syncwarp();
    smem[threadIdx.x] += smem[threadIdx.x + 4];
    __syncwarp();
    smem[threadIdx.x] += smem[threadIdx.x + 2];
    __syncwarp();
    smem[threadIdx.x] += smem[threadIdx.x + 1];
  }
  if (threadIdx.x == 0) ds[blockIdx.x + blockIdx.y * gridDim.x] = sdata[0];
}
5c50f4b6b0324b41fcd0e951fb87ebbc6bba7be2.cu
#include <thrust/device_vector.h>
#include "rism3d.h"

double RISM3D :: cal_euv () {
  __global__ void euv(double * ds, double2 * dhuv, double * du,
                      double * de, double q);

  double euv0 = 0.0 ;
  for (int iv = 0; iv < sv -> natv; ++iv) {
    euv <<< g, b, b.x * sizeof(double) >>>
      (ds, dguv + (iv * ce -> ngrid), du + (iv * ce -> ngrid), de, sv -> qv[iv]);
    thrust::device_ptr<double> ds_ptr(ds);
    double s = thrust::reduce(ds_ptr, ds_ptr + g.x * g.y);
    euv0 += s * sv -> rhov[iv];
  }
  euv0 *= ce -> dv;

  return (euv0);
}

__global__ void euv(double * ds, double2 * dguv, double * du,
                    double * de, double q) {
  extern __shared__ double sdata[];
  unsigned int ip = threadIdx.x + blockIdx.x * blockDim.x
                    + blockIdx.y * blockDim.x * gridDim.x;
  sdata[threadIdx.x] = dguv[ip].x * (du[ip] + de[ip] * q);
  __syncthreads();

  for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1) {
    if (threadIdx.x < s) {
      sdata[threadIdx.x] += sdata[threadIdx.x + s];
    }
    __syncthreads();
  }

  if (threadIdx.x < 32) {
    volatile double *smem = sdata;
    smem[threadIdx.x] += smem[threadIdx.x + 32];
    __syncwarp();
    smem[threadIdx.x] += smem[threadIdx.x + 16];
    __syncwarp();
    smem[threadIdx.x] += smem[threadIdx.x + 8];
    __syncwarp();
    smem[threadIdx.x] += smem[threadIdx.x + 4];
    __syncwarp();
    smem[threadIdx.x] += smem[threadIdx.x + 2];
    __syncwarp();
    smem[threadIdx.x] += smem[threadIdx.x + 1];
  }
  if (threadIdx.x == 0) ds[blockIdx.x + blockIdx.y * gridDim.x] = sdata[0];
}
4ea54f1ebc8304027be6da1438ac43079a4ff5d6.hip
// !!! This is a file automatically generated by hipify!!! /** * Name : Veerakumar Natarajan * Student Id: 200208042 * * 2d convolution program */ #include <stdio.h> #include <fstream> #include <sstream> #include <stdlib.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <hip/hip_runtime.h> /** * CUDA Kernel Device code * * Computes the 2d convolution of A and B into C. */ __global__ void convol(float *A, float *B, float *C, int row_a, int row_b, int row_c, int col_a, int col_b, int col_c) { int x = blockDim.y * blockIdx.y + threadIdx.y; int y = blockDim.x * blockIdx.x + threadIdx.x; C[x * col_c + y] = 0.0; for(int i = 0; i < row_b; i++) { for(int j = 0; j < col_b; j++) { if(((x - i) < row_a && (x - i) >= 0) && ((y - j) < col_a && (y - j) >= 0)) C[x * col_c + y] += B[i * col_b + j] * A[(x - i) * col_a + (y - j)]; } } } /** * Host main routine */ int main(int argc, char *argv[]) { // Error code to check return values for CUDA calls hipError_t err = hipSuccess; float *h_A, *h_B, *h_C, tmp; int row_a, row_b, row_c, col_a, col_b, col_c; int a_matrix = 1; int i, j; int size_a, size_b, size_c; std::ifstream file(argv[1]); std::string row; row_a=row_b=row_c=col_a=col_b=col_c=0; // Finding size of matrix A and matrix B while(std::getline(file, row)) { if(row.empty()) a_matrix = 0; std::istringstream iss(row); if(a_matrix == 1) { col_a=0; while(iss.good()) { iss >> tmp; col_a++; } row_a++; } else { if(!row.empty()) { col_b=0; while(iss.good()) { iss >> tmp; col_b++; } row_b++; } } } row_c = row_a + row_b - 1; col_c = col_a + col_b - 1; // Calculating size of matrix A, B and C size_a = row_a * col_a; size_b = row_b * col_b; size_c = row_c * col_c; // Allocate the host input vector A, B h_A = (float *)malloc(size_a * sizeof(float)); h_B = (float *)malloc(size_b * sizeof(float)); // Allocate the host output vector h_C = (float *)malloc(size_c * sizeof(float)); // Verify that allocations succeeded if (h_A == NULL || h_B == NULL || h_C == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } // Reading value of matrix A and B from input file std::ifstream file1(argv[1]); a_matrix = 1; i = j = 0; while(std::getline(file1, row)) { if(row.empty()) a_matrix = 0; std::istringstream iss1(row); if(a_matrix == 1){ while(iss1.good()) { iss1 >> tmp; h_A[i] = tmp; i++; } } else { if(!row.empty()) { while(iss1.good()) { iss1 >> tmp; h_B[j] = tmp; j++; } } } } // Allocate the device input vector A float *d_A = NULL; err = hipMalloc((void **)&d_A, size_a * sizeof(float)); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device input vector B float *d_B = NULL; err = hipMalloc((void **)&d_B, size_b * sizeof(float)); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device output vector C float *d_C = NULL; err = hipMalloc((void **)&d_C, size_c * sizeof(float)); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the host input vectors A and B in host memory to the device input vectors in // device memory err = hipMemcpy(d_A, h_A, size_a * sizeof(float), hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(d_B, 
h_B, size_b * sizeof(float), hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Launch the Vector 2dconvol CUDA Kernel dim3 dimBlock(row_c, col_c, 1); dim3 dimGrid(2, 2, 1); hipLaunchKernelGGL(( convol), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C, row_a, row_b, row_c, col_a, col_b, col_c); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the device result vector in device memory to the host result vector // in host memory. err = hipMemcpy(h_C, d_C, size_c * sizeof(float), hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Free device global memory err = hipFree(d_A); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_B); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_C); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } for(i = 0; i < row_c; i++) { for(j = 0; j < col_c; j++) { printf("%.3f ", h_C[i * col_c + j]); } printf("\n"); } // Free host memory free(h_A); free(h_B); free(h_C); // Reset the device and exit // hipDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling hipDeviceReset causes all profile data to be // flushed before the application exits err = hipDeviceReset(); if (err != hipSuccess) { fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } return 0; }
4ea54f1ebc8304027be6da1438ac43079a4ff5d6.cu
/** * Name : Veerakumar Natarajan * Student Id: 200208042 * * 2d convolution program */ #include <stdio.h> #include <fstream> #include <sstream> #include <stdlib.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <cuda_runtime.h> /** * CUDA Kernel Device code * * Computes the 2d convolution of A and B into C. */ __global__ void convol(float *A, float *B, float *C, int row_a, int row_b, int row_c, int col_a, int col_b, int col_c) { int x = blockDim.y * blockIdx.y + threadIdx.y; int y = blockDim.x * blockIdx.x + threadIdx.x; C[x * col_c + y] = 0.0; for(int i = 0; i < row_b; i++) { for(int j = 0; j < col_b; j++) { if(((x - i) < row_a && (x - i) >= 0) && ((y - j) < col_a && (y - j) >= 0)) C[x * col_c + y] += B[i * col_b + j] * A[(x - i) * col_a + (y - j)]; } } } /** * Host main routine */ int main(int argc, char *argv[]) { // Error code to check return values for CUDA calls cudaError_t err = cudaSuccess; float *h_A, *h_B, *h_C, tmp; int row_a, row_b, row_c, col_a, col_b, col_c; int a_matrix = 1; int i, j; int size_a, size_b, size_c; std::ifstream file(argv[1]); std::string row; row_a=row_b=row_c=col_a=col_b=col_c=0; // Finding size of matrix A and matrix B while(std::getline(file, row)) { if(row.empty()) a_matrix = 0; std::istringstream iss(row); if(a_matrix == 1) { col_a=0; while(iss.good()) { iss >> tmp; col_a++; } row_a++; } else { if(!row.empty()) { col_b=0; while(iss.good()) { iss >> tmp; col_b++; } row_b++; } } } row_c = row_a + row_b - 1; col_c = col_a + col_b - 1; // Calculating size of matrix A, B and C size_a = row_a * col_a; size_b = row_b * col_b; size_c = row_c * col_c; // Allocate the host input vector A, B h_A = (float *)malloc(size_a * sizeof(float)); h_B = (float *)malloc(size_b * sizeof(float)); // Allocate the host output vector h_C = (float *)malloc(size_c * sizeof(float)); // Verify that allocations succeeded if (h_A == NULL || h_B == NULL || h_C == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } // Reading value of matrix A and B from input file std::ifstream file1(argv[1]); a_matrix = 1; i = j = 0; while(std::getline(file1, row)) { if(row.empty()) a_matrix = 0; std::istringstream iss1(row); if(a_matrix == 1){ while(iss1.good()) { iss1 >> tmp; h_A[i] = tmp; i++; } } else { if(!row.empty()) { while(iss1.good()) { iss1 >> tmp; h_B[j] = tmp; j++; } } } } // Allocate the device input vector A float *d_A = NULL; err = cudaMalloc((void **)&d_A, size_a * sizeof(float)); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device input vector B float *d_B = NULL; err = cudaMalloc((void **)&d_B, size_b * sizeof(float)); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device output vector C float *d_C = NULL; err = cudaMalloc((void **)&d_C, size_c * sizeof(float)); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the host input vectors A and B in host memory to the device input vectors in // device memory err = cudaMemcpy(d_A, h_A, size_a * sizeof(float), cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(d_B, h_B, size_b * sizeof(float), 
cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Launch the Vector 2dconvol CUDA Kernel dim3 dimBlock(row_c, col_c, 1); dim3 dimGrid(2, 2, 1); convol<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, row_a, row_b, row_c, col_a, col_b, col_c); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the device result vector in device memory to the host result vector // in host memory. err = cudaMemcpy(h_C, d_C, size_c * sizeof(float), cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Free device global memory err = cudaFree(d_A); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_B); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_C); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } for(i = 0; i < row_c; i++) { for(j = 0; j < col_c; j++) { printf("%.3f ", h_C[i * col_c + j]); } printf("\n"); } // Free host memory free(h_A); free(h_B); free(h_C); // Reset the device and exit // cudaDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling cudaDeviceReset causes all profile data to be // flushed before the application exits err = cudaDeviceReset(); if (err != cudaSuccess) { fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } return 0; }
b4182d61c37c19ee5fce9a6d18dd637bf3a82fbd.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // This example shows how to use the clock function to measure the performance of // a kernel accurately. // // Blocks are executed in parallel and out of order. Since there's no synchronization // mechanism between blocks, we measure the clock once for each block. The clock // samples are written to device memory. // System includes #include <stdio.h> #include <stdint.h> #include <assert.h> // CUDA runtime #include <hip/hip_runtime.h> // helper functions and utilities to work with CUDA #include <helper_functions.h> #include <helper_cuda.h> // This kernel computes a standard parallel reduction and evaluates the // time it takes to do that for each block. The timing results are stored // in device memory. __global__ static void timedReduction(const float *input, float *output, clock_t *timer) { // __shared__ float shared[2 * blockDim.x]; extern __shared__ float shared[]; const int tid = threadIdx.x; const int bid = blockIdx.x; if (tid == 0) timer[bid] = clock(); // Copy input. shared[tid] = input[tid]; shared[tid + blockDim.x] = input[tid + blockDim.x]; // Perform reduction to find minimum. for (int d = blockDim.x; d > 0; d /= 2) { __syncthreads(); if (tid < d) { float f0 = shared[tid]; float f1 = shared[tid + d]; if (f1 < f0) { shared[tid] = f1; } } } // Write result. if (tid == 0) output[bid] = shared[0]; __syncthreads(); if (tid == 0) timer[bid+gridDim.x] = clock(); } // This example shows how to use the clock function to measure the performance of // a kernel accurately. // // Blocks are executed in parallel and out of order. Since there's no synchronization // mechanism between blocks, we measure the clock once for each block. The clock // samples are written to device memory. #define NUM_BLOCKS 64 #define NUM_THREADS 256 // It's interesting to change the number of blocks and the number of threads to // understand how to keep the hardware busy. // // Here are some numbers I get on my G80: // blocks - clocks // 1 - 3096 // 8 - 3232 // 16 - 3364 // 32 - 4615 // 64 - 9981 // // With less than 16 blocks some of the multiprocessors of the device are idle. With // more than 16 you are using all the multiprocessors, but there's only one block per // multiprocessor and that doesn't allow you to hide the latency of the memory. With // more than 32 the speed scales linearly. 
// Start the main CUDA Sample here int main(int argc, char **argv) { printf("CUDA Clock sample\n"); // This will pick the best possible CUDA capable device int dev = findCudaDevice(argc, (const char **)argv); float *dinput = NULL; float *doutput = NULL; clock_t *dtimer = NULL; clock_t timer[NUM_BLOCKS * 2]; float input[NUM_THREADS * 2]; for (int i = 0; i < NUM_THREADS * 2; i++) { input[i] = (float)i; } checkCudaErrors(hipMalloc((void **)&dinput, sizeof(float) * NUM_THREADS * 2)); checkCudaErrors(hipMalloc((void **)&doutput, sizeof(float) * NUM_BLOCKS)); checkCudaErrors(hipMalloc((void **)&dtimer, sizeof(clock_t) * NUM_BLOCKS * 2)); checkCudaErrors(hipMemcpy(dinput, input, sizeof(float) * NUM_THREADS * 2, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( timedReduction), dim3(NUM_BLOCKS), dim3(NUM_THREADS), sizeof(float) * 2 *NUM_THREADS, 0, dinput, doutput, dtimer); checkCudaErrors(hipMemcpy(timer, dtimer, sizeof(clock_t) * NUM_BLOCKS * 2, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(dinput)); checkCudaErrors(hipFree(doutput)); checkCudaErrors(hipFree(dtimer)); // Compute the difference between the last block end and the first block start. clock_t minStart = timer[0]; clock_t maxEnd = timer[NUM_BLOCKS]; for (int i = 1; i < NUM_BLOCKS; i++) { minStart = timer[i] < minStart ? timer[i] : minStart; maxEnd = timer[NUM_BLOCKS+i] > maxEnd ? timer[NUM_BLOCKS+i] : maxEnd; } printf("Total clocks = %Lf\n", (long double)(maxEnd - minStart)); // hipDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling hipDeviceReset causes all profile data to be // flushed before the application exits hipDeviceReset(); return EXIT_SUCCESS; }
b4182d61c37c19ee5fce9a6d18dd637bf3a82fbd.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // This example shows how to use the clock function to measure the performance of // a kernel accurately. // // Blocks are executed in parallel and out of order. Since there's no synchronization // mechanism between blocks, we measure the clock once for each block. The clock // samples are written to device memory. // System includes #include <stdio.h> #include <stdint.h> #include <assert.h> // CUDA runtime #include <cuda_runtime.h> // helper functions and utilities to work with CUDA #include <helper_functions.h> #include <helper_cuda.h> // This kernel computes a standard parallel reduction and evaluates the // time it takes to do that for each block. The timing results are stored // in device memory. __global__ static void timedReduction(const float *input, float *output, clock_t *timer) { // __shared__ float shared[2 * blockDim.x]; extern __shared__ float shared[]; const int tid = threadIdx.x; const int bid = blockIdx.x; if (tid == 0) timer[bid] = clock(); // Copy input. shared[tid] = input[tid]; shared[tid + blockDim.x] = input[tid + blockDim.x]; // Perform reduction to find minimum. for (int d = blockDim.x; d > 0; d /= 2) { __syncthreads(); if (tid < d) { float f0 = shared[tid]; float f1 = shared[tid + d]; if (f1 < f0) { shared[tid] = f1; } } } // Write result. if (tid == 0) output[bid] = shared[0]; __syncthreads(); if (tid == 0) timer[bid+gridDim.x] = clock(); } // This example shows how to use the clock function to measure the performance of // a kernel accurately. // // Blocks are executed in parallel and out of order. Since there's no synchronization // mechanism between blocks, we measure the clock once for each block. The clock // samples are written to device memory. #define NUM_BLOCKS 64 #define NUM_THREADS 256 // It's interesting to change the number of blocks and the number of threads to // understand how to keep the hardware busy. // // Here are some numbers I get on my G80: // blocks - clocks // 1 - 3096 // 8 - 3232 // 16 - 3364 // 32 - 4615 // 64 - 9981 // // With less than 16 blocks some of the multiprocessors of the device are idle. With // more than 16 you are using all the multiprocessors, but there's only one block per // multiprocessor and that doesn't allow you to hide the latency of the memory. With // more than 32 the speed scales linearly. 
// Start the main CUDA Sample here int main(int argc, char **argv) { printf("CUDA Clock sample\n"); // This will pick the best possible CUDA capable device int dev = findCudaDevice(argc, (const char **)argv); float *dinput = NULL; float *doutput = NULL; clock_t *dtimer = NULL; clock_t timer[NUM_BLOCKS * 2]; float input[NUM_THREADS * 2]; for (int i = 0; i < NUM_THREADS * 2; i++) { input[i] = (float)i; } checkCudaErrors(cudaMalloc((void **)&dinput, sizeof(float) * NUM_THREADS * 2)); checkCudaErrors(cudaMalloc((void **)&doutput, sizeof(float) * NUM_BLOCKS)); checkCudaErrors(cudaMalloc((void **)&dtimer, sizeof(clock_t) * NUM_BLOCKS * 2)); checkCudaErrors(cudaMemcpy(dinput, input, sizeof(float) * NUM_THREADS * 2, cudaMemcpyHostToDevice)); timedReduction<<<NUM_BLOCKS, NUM_THREADS, sizeof(float) * 2 *NUM_THREADS>>>(dinput, doutput, dtimer); checkCudaErrors(cudaMemcpy(timer, dtimer, sizeof(clock_t) * NUM_BLOCKS * 2, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(dinput)); checkCudaErrors(cudaFree(doutput)); checkCudaErrors(cudaFree(dtimer)); // Compute the difference between the last block end and the first block start. clock_t minStart = timer[0]; clock_t maxEnd = timer[NUM_BLOCKS]; for (int i = 1; i < NUM_BLOCKS; i++) { minStart = timer[i] < minStart ? timer[i] : minStart; maxEnd = timer[NUM_BLOCKS+i] > maxEnd ? timer[NUM_BLOCKS+i] : maxEnd; } printf("Total clocks = %Lf\n", (long double)(maxEnd - minStart)); // cudaDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling cudaDeviceReset causes all profile data to be // flushed before the application exits cudaDeviceReset(); return EXIT_SUCCESS; }
517d660374b9a006fe20e95a4b922c52742451bc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <iostream> #include <string> #include <fstream> #include <sstream> #include <hip/hip_complex.h> #include "kernel.h" #define _USE_MATH_DEFINES #include <math.h> #include <time.h> #include "device_launch_parameters.h" const int SIZE = 1024; const double MINFREQ = 1e-6; using namespace std; bool debug; struct dataInfo { double freqHigh; double freqLow; int NFreq; int NPorts; }; __global__ void VectorAdd(double *freq, hipComplex *Ahat, hipComplex *data, hipComplex *Poles, dataInfo *frequencyInfo, int *Apattern, int Ahat_size, int NComplexPoles, int NRealPoles) { double real=0; double imag=0; double denum=0; int poleNumb = 0; int NRow = (*frequencyInfo).NFreq; double s; int test = 0; int col = blockIdx.x; int row = threadIdx.x; if (col > 17) { poleNumb = col - 17; } else { poleNumb = col; } real = Poles[poleNumb].x; imag = Poles[poleNumb].y; s = 2 * M_PI*freq[row]; //real if (Apattern[col] == 1) { Ahat[col*NRow + row].x = -real / (pow(real, 2) + pow(s, 2)); Ahat[col*NRow + row].y = -s / (pow(real, 2) + pow(s, 2)); } else if (Apattern[col] == 2) { denum = (pow(real, 2)*(pow(real, 2) + 2 * pow(s, 2) + 2 * pow(imag, 2)) + pow(imag, 4) - 2 * pow(imag, 2)*pow(s, 2) + pow(s, 4)); Ahat[col*NRow + row].x = -2 * (real*(pow(real, 2) + pow(s, 2) + pow(imag, 2))) / denum; Ahat[col*NRow + row].y = -2 * (s *(pow(real, 2) + pow(s, 2) - pow(imag, 2))) / denum; } else if (Apattern[col] == 3) { denum = (pow(real, 2)*(pow(real, 2) + 2 * pow(s, 2) + 2 * pow(imag, 2)) + pow(imag, 4) - 2 * pow(imag, 2)*pow(s, 2) + pow(s, 4)); Ahat[(col)*NRow + row].x = (-2 * imag*(pow(real, 2) - pow(s, 2) + pow(imag, 2))) / denum; Ahat[(col)*NRow + row].y = (-4 * real*imag*s) / denum; } else if (Apattern[col] == 4) { Ahat[col*NRow + row].x = 1; Ahat[col*NRow + row].y = 0; } else if (Apattern[col] == -1) { denum = pow(real, 2) + pow(imag, 2) - 2 * imag*s + pow(s, 2); Ahat[col*NRow + row].x = (real*data[row].x - data[row].y*s + data[row].y*imag) / denum; Ahat[col*NRow + row].y = (s*data[row].x - data[row].x*imag + data[row].y*real) / denum; } else if (Apattern[col] == -2) { denum = (pow(real, 2)*(pow(real, 2) + 2 * pow(s, 2) + 2 * pow(imag, 2)) + pow(imag, 4) - 2 * pow(imag, 2)*pow(s, 2) + pow(s, 4)); Ahat[col*NRow + row].x = 2 * (pow(real, 3)*data[row].x - pow(real, 2)*data[row].y*s + real* data[row].x*pow(s, 2) + real* data[row].x*pow(imag, 2) + s*data[row].y*pow(imag, 2) - data[row].y*pow(s, 3)) / denum; Ahat[col*NRow + row].y = 2 * (pow(real, 3)*data[row].y + pow(real, 2)*data[row].x*s + real* data[row].y*pow(s, 2) + real* data[row].y*pow(imag, 2) - s*data[row].x*pow(imag, 2) + data[row].x*pow(s, 3)) / denum; } else if (Apattern[col] == -3) { denum = (pow(real, 2)*(pow(real, 2) + 2 * pow(s, 2) + 2 * pow(imag, 2)) + pow(imag, 4) - 2 * pow(imag, 2)*pow(s, 2) + pow(s, 4)); Ahat[col*NRow + row].x = 2 * imag*(pow(real, 2)*data[row].x - data[row].x* pow(s, 2) + data[row].x*pow(imag, 2) - 2 * real*data[row].y*s) / denum; Ahat[col*NRow + row].y = 2 * imag*(2 * data[row].x*real*s + pow(real, 2)*data[row].y - data[row].y*pow(s, 2) + data[row].y*pow(imag, 2)) / denum; } } //__global__ void AMatrixFormulationGPU(double *freq, hipComplex *Ahat, hipComplex *data, hipComplex *Poles, dataInfo *frequencyInfo, int *Apattern, int Ahat_size, int NComplexPoles, int NRealPoles) { // for (int blocky = 0; blocky <= NColToTake; blocky++) { // for (int blockx = 0; blockx < NColToTake; blockx++) { // if (blocky == NColToTake) { // for (int col 
= 0; col < baseMatrix_NCol; col++) { // for (int row = 0; row < baseMatrix_NRow; row++) { // // realBase = baseMatrix[col*NRow + row].x; // imagBase = baseMatrix[col*NRow + row].y; // // realData = data[row + NFreq*blockx].x; // imagData = data[row + NFreq*blockx].y; // // int memPosition = blocky*(baseMatrix_NCol)*(NFreq)*(NColToTake)+col*NFreq*NColToTake + blockx*NFreq + row; // // Ahat[memPosition].x = (realBase*realData - imagBase*imagData); // Ahat[memPosition].y = (realBase*imagData - imagBase*realData); // } // }//end for col // } // else if (blocky == blockx) { // for (int col = 0; col < baseMatrix_NCol; col++) { // for (int row = 0; row < baseMatrix_NRow; row++) { // int memPosition = blocky*(baseMatrix_NCol)*(NFreq)*(NColToTake)+col*NFreq*NColToTake + blockx*NFreq + row; // Ahat[memPosition].x = baseMatrix[col*NRow + row].x; // Ahat[memPosition].y = baseMatrix[col*NRow + row].y; // }//end for row // }//end for col // } // else { // for (int col = 0; col < baseMatrix_NCol; col++) { // // for (int row = 0; row < baseMatrix_NCol; row++) { // // int memPosition = blocky*(baseMatrix_NCol)*(NFreq)*(NColToTake)+col*NFreq*NColToTake + blockx*NFreq + row; // Ahat[memPosition].x = 0; // Ahat[memPosition].y = 0; // }//end for row // }//end for col // }//endif // } // } // //} // void readFile(string fileName, double *freq, hipComplex *data, dataInfo *dataInfo) { std::string::size_type sz; ifstream infile(fileName); //Ali: Attempt to open data file if (!infile) { std::cout << "While opening data file an error was encountered" << std::endl; } else { string line; int fileColumn, freqCount = 0; bool skipline; string word; int currentFileDataCol = 0, currentPole = 0, startofPoleCol = 0, endofPoleCol = 0, dataType = 0; //Ali: iterate through each line while (getline(infile, line)) { istringstream stringOfLine(line); fileColumn = 0; int dataColCount = 0; //Ali: Iterrate through each element of line which is refered to as a column while (stringOfLine) { //Ali: escape after last pole if (fileColumn == 2 * pow((*dataInfo).NPorts, 2) + 1) { break; } //load new element into a string variable called "word" stringOfLine >> word; skipline = false; //Ali: if comment ("%#") is detected then like is skipped if (word.compare("%#") == 0) { skipline = true; break; } //Ali: if it is the first column then data is stored as the frequency if (fileColumn == 0) { //Ali: translate string to int freq[freqCount] = stod(word, &sz); //Ali: if it is the forst freq reading then initialize max and min if (freqCount == 0) { (*dataInfo).freqHigh = freq[freqCount]; (*dataInfo).freqLow = freq[freqCount]; } else { //Ali: if current freq is greater than highest then update if ((*dataInfo).freqHigh < freq[freqCount]) { (*dataInfo).freqHigh = freq[freqCount]; } //Ali: if current freq is smaller than lowest then update if ((*dataInfo).freqLow > freq[freqCount]) { (*dataInfo).freqLow = freq[freqCount]; } } fileColumn++; } else { //Ali: stores the data coloumn number without freq coloumn currentFileDataCol = fileColumn - 1; //Ali: calculate the current port we are reading currentPole = (int)(currentFileDataCol / ((*dataInfo).NPorts * 2)); //Ali: calculate the start coloumn and end coloumn to start and stop storing data for port startofPoleCol = currentPole * 2 * (*dataInfo).NPorts; endofPoleCol = startofPoleCol + (currentPole + 1) * 2 - 1; //Ali: check if it should store the current coloumn if (currentFileDataCol >= startofPoleCol && currentFileDataCol <= endofPoleCol) { //Ali: The second column is the real part of the response if 
(dataType == 0) { data[freqCount + (*dataInfo).NFreq*(dataColCount)].x = stod(word, &sz); }//endif //Ali: The third column is the imag part of the response if (dataType == 1) { data[freqCount + (*dataInfo).NFreq*(dataColCount)].y = stod(word, &sz); dataColCount++; }//endif dataType ^= 1; } //endif fileColumn++; }//endif }//endwhile //Ali: If line is skipped then dont add to freq count if (!skipline) { freqCount++; } } //Ali: make sure the min is atleast 1e-6 if ((*dataInfo).freqLow < MINFREQ) { (*dataInfo).freqLow = MINFREQ; } printf("freqcount %d\n", (*dataInfo).NFreq); } }//enfunction int main() { debug = true; string dataFileName = "radial_stub^S.txt"; int NRealPoles = 1; int NComplexPoles = 2; int NPorts = 2; int NFreq = 1001; int NColOfData = 0; //###########################Reading File######################################## //Ali: Find out how many col we wil have to store based on number of ports for (int i = 1; i <= NPorts; i++) { NColOfData += i; } //Ali: var to store freq points in data (upto 1024 data points) double *freq; hipMallocManaged(&freq, NFreq * sizeof(double)); //Ali: store collected data in complex form (upto 1024 data points) hipComplex *data; hipMallocManaged(&data, NColOfData * NFreq * sizeof(hipComplex)); //Ali: store info about the stroed data // -lowest freq // -highest freq // -number of freq points dataInfo *dataInfo; hipMallocManaged(&dataInfo, sizeof(dataInfo)); (*dataInfo).NFreq = NFreq; (*dataInfo).NPorts = NPorts; //Ali: extract data form file readFile(dataFileName, freq, data, dataInfo); if (debug) { FILE * fp; fp = fopen("1_extractedData.txt", "w+"); fprintf(fp, "********************************************************\n"); fprintf(fp, "extracted data\n"); fprintf(fp, "********************************************************\n"); for (int i = 0; i < NColOfData; i++) { fprintf(fp, "\n********************************************************\n"); fprintf(fp, "col: %d \n", i); for (int z = 0; z < (*dataInfo).NFreq; z++) { fprintf(fp, "Z: %d FREQ: %f %f(%f) \n", z, freq[z], data[i*NFreq + z].x, data[i*NFreq + z].y); } } fclose(fp); printf("^^^^^^^^^^^^^^^^^^^\n"); printf("HiegestFREQ: %f GHz \n LowestFREQ: %f GHz \n FreqPoints %d\n", (*dataInfo).freqHigh, (*dataInfo).freqLow, (*dataInfo).NFreq); } //###########################Initial Pole Guess######################################## //This constant is predetermined in a paper int Real_part_Divisor = 100; int NumberOfPoles = NRealPoles + (NComplexPoles / 2); double *Poles_imag_part; hipMallocManaged(&Poles_imag_part, NumberOfPoles * sizeof(double)); double *Poles_real_part; hipMallocManaged(&Poles_real_part, NumberOfPoles * sizeof(double)); double *Real_Poles; hipMallocManaged(&Real_Poles, NRealPoles * sizeof(double)); hipComplex *Complex_Poles; hipMallocManaged(&Complex_Poles, NComplexPoles * sizeof(hipComplex)); hipComplex *Poles; hipMallocManaged(&Poles, (NRealPoles + NComplexPoles) * sizeof(hipComplex)); int B[2] = { 1, 1 }; int C[2] = { 1, -1 }; double poleSpacing = ((*dataInfo).freqHigh - (*dataInfo).freqLow) / (NumberOfPoles - 1); for (int z = 0; z < NumberOfPoles; z++) { Poles_imag_part[z] = (*dataInfo).freqLow + poleSpacing*z; Poles_real_part[z] = -Poles_imag_part[z] / Real_part_Divisor; } //Set Real Poles for (int z = 0; z < NRealPoles; z++) { Real_Poles[z] = 2 * M_PI*Poles_real_part[z]; } //Set Complex Poles int poleIndex = 0; for (int z = 0; z < NComplexPoles / 2; z++) { for (int i = 0; i < 2; i++) { Complex_Poles[poleIndex].x = 2 * M_PI*(Poles_real_part[NRealPoles + z] * B[i]); 
Complex_Poles[poleIndex].y = 2 * M_PI*(Poles_imag_part[NRealPoles + z] * C[i]); poleIndex++; } } //merge Poles into one matrix for (int z = 0; z < (NComplexPoles + NRealPoles); z++) { if (z < NRealPoles) { Poles[z].x = Real_Poles[z]; } else { Poles[z].x = Complex_Poles[z - NRealPoles].x; Poles[z].y = Complex_Poles[z - NRealPoles].y; } } printf("\n\n********************************************************\n"); printf("Initial Poles\n"); printf("********************************************************\n"); for (int z = 0; z < NRealPoles; z++) { printf("Real Pole[%d]: %f(%f) \n", z, Real_Poles[z]); } for (int z = 0; z < NComplexPoles; z++) { printf("Complex Pole[%d]: %f(%f) \n", z, Complex_Poles[z].x, Complex_Poles[z].y); } printf("^^^^^^^^^^^^^\n"); for (int z = 0; z < NComplexPoles + NRealPoles; z++) { printf("merged Pole[%d]: %f(%f) \n", z, Poles[z].x, Poles[z].y); } printf("^^^^^^^^^^^^^\n"); printf("NumberOfPoles: %d \n poleSpacing: %f \n", NumberOfPoles, poleSpacing); //########################### Base Matrix Setup ######################################## // NCol is equal to the number of real poles + number of imaginary poles + 1 (d col) int baseMatrix_NCol = NComplexPoles + NRealPoles + 1; int baseMatrix_NRow = (*dataInfo).NFreq; int NRow = (*dataInfo).NFreq; hipComplex *baseMatrix; hipMallocManaged(&baseMatrix, baseMatrix_NRow * baseMatrix_NCol * sizeof(double)); int CPUenable = 1; if (CPUenable == 1 || CPUenable == 2) { clock_t tStart = clock(); /* Do your stuff here */ double real = 0, imag = 0, denum = 0, s=0; int poleNumb = 0, isReal = 1; //for loop to generate base matrix for (int col = 0; col < baseMatrix_NCol; col++) { if (poleNumb < NRealPoles + NComplexPoles) { real = Poles[poleNumb].x; imag = Poles[poleNumb].y; } for (int row = 0; row < baseMatrix_NRow; row++) { s = 2 * M_PI*freq[row]; //real pole if (col < NRealPoles) { baseMatrix[col*NRow + row].x = -real / (pow(real, 2) + pow(s, 2)); baseMatrix[col*NRow + row].y = -s / (pow(real, 2) + pow(s, 2)); } //imag pole real part else if (col < NRealPoles + NComplexPoles && isReal) { denum = (pow(real, 2)*(pow(real, 2) + 2 * pow(s, 2) + 2 * pow(imag, 2)) + pow(imag, 4) - 2 * pow(imag, 2)*pow(s, 2) + pow(s, 4)); baseMatrix[col*NRow + row].x = -2 * (real*(pow(real, 2) + pow(s, 2) + pow(imag, 2))) / denum; baseMatrix[col*NRow + row].y = -2 * (s *(pow(real, 2) + pow(s, 2) - pow(imag, 2))) / denum; } //imag pole imag part else if (col < NRealPoles + NComplexPoles && !isReal) { denum = (pow(real, 2)*(pow(real, 2) + 2 * pow(s, 2) + 2 * pow(imag, 2)) + pow(imag, 4) - 2 * pow(imag, 2)*pow(s, 2) + pow(s, 4)); baseMatrix[col*NRow + row].x = (-2 * imag*(pow(real, 2) - pow(s, 2) + pow(imag, 2))) / denum; baseMatrix[col*NRow + row].y = (-4 * real*imag*s) / denum; } //d col else if (col == NRealPoles + NComplexPoles) { baseMatrix[col*NRow + row].x = 1; baseMatrix[col*NRow + row].y = 0; }//endif };//end row for loop poleNumb++; if(col>= NRealPoles && col < NRealPoles + NComplexPoles) isReal ^= 1; }; //end col for loop //Write base b matrix to file FILE * fp; fp = fopen("2_Basematrix.txt", "w+"); for (int row = 0; row < baseMatrix_NRow; row++) { for (int col = 0; col < baseMatrix_NCol; col++) { fprintf(fp, " %.4e(%.4e)", baseMatrix[col*NRow + row].x, baseMatrix[col*NRow + row].y); }; fprintf(fp, "\n"); }; fclose(fp); //###########################Ahat set up######################################## //Ali: generate the AhatMatrix double realBase, imagBase, realData, imagData; int dim = NColOfData; int memPosition = 0; poleNumb = 0; int NumbAYBlocks= 
NColOfData+1; int NumbAXBlocks = NColOfData; int NElementsAMatrix = NumbAXBlocks*baseMatrix_NRow*baseMatrix_NCol*NumbAYBlocks - NumbAXBlocks*baseMatrix_NRow; hipComplex *Ahat; hipMallocManaged(&Ahat, NElementsAMatrix * sizeof(double)); //cycle trough col blocks. each block is the dimentions of the the base matrix //exept blocks in the last block column b/c thye dont include the constants for d for (int blocky = 0; blocky <= NColOfData; blocky++) { for (int blockx = 0; blockx < NColOfData; blockx++) { //check if it is the last blocky coloumn to multiply with data if (blocky == NColOfData) { for (int col = 0; col < baseMatrix_NCol-1; col++) { for (int row = 0; row < baseMatrix_NRow; row++) { //extract component if each base matrix element realBase = baseMatrix[col*NRow + row].x; imagBase = baseMatrix[col*NRow + row].y; //extract components of each data element realData = data[row + NFreq*blockx].x; imagData = data[row + NFreq*blockx].y; //calc posiiton in A matrix (memory offset) memPosition = blocky*(baseMatrix_NCol)*(NFreq)*(NColOfData)+col*NFreq*NColOfData + blockx*NFreq + row; //assign Ahat[memPosition].x = (realBase*realData - imagBase*imagData); Ahat[memPosition].y = (realBase*imagData - imagBase*realData); }//end for row }//end for col } //Check if it is diaginal block elemnt that is just a reprint of base matrix else if (blocky == blockx) { for (int col = 0; col < baseMatrix_NCol; col++) { for (int row = 0; row < baseMatrix_NRow; row++) { //calc posiiton in A matrix (memory offset) memPosition = blocky*(baseMatrix_NCol)*(NFreq)*(NColOfData)+col*NFreq*NColOfData + blockx*NFreq + row; //assign Ahat[memPosition].x = baseMatrix[col*NRow + row].x; Ahat[memPosition].y = baseMatrix[col*NRow + row].y; }//end for row }//end for col } //Otherwise set to zero else { for (int col = 0; col < baseMatrix_NCol; col++) { for (int row = 0; row < baseMatrix_NCol; row++) { //calc posiiton in A matrix (memory offset) memPosition = blocky*(baseMatrix_NCol)*(NFreq)*(NColOfData)+col*NFreq*NColOfData + blockx*NFreq + row; //assign Ahat[memPosition].x = 0; Ahat[memPosition].y = 0; } } }//endif }//endblockX }//endblocky clock_t tStop = clock(); printf("CPU Time taken: %.6fs\n", (double)(tStop - tStart) / CLOCKS_PER_SEC); //clock_t start = clock(); //VectorAdd <<<Ahat_size,(*dataInfo).NFreq >>> (freq, Ahat, data, Poles, dataInfo, Apattern, Ahat_size, NComplexPoles, NRealPoles); //hipDeviceSynchronize(); //clock_t stop = clock(); //printf("GPU Time taken: %.6fs\n", (double)(stop - start) / CLOCKS_PER_SEC); //Ali: Write A matrix to file fp = fopen("3_Amatrix.txt", "w+"); for (int row = 0; row < NColOfData*NFreq; row++) { for (int col = 0; col < baseMatrix_NCol*(NColOfData+1)-1; col++) { fprintf(fp, " %.4e(%.4e)", Ahat[col*NRow*NColOfData + row].x, Ahat[col*NRow*NColOfData + row].y); } fprintf(fp, "\n"); } fclose(fp); } hipFree(freq); hipFree(data); hipFree(dataInfo); hipFree(Poles_imag_part); hipFree(Poles_real_part); hipFree(Real_Poles); hipFree(Complex_Poles); return 0; }
517d660374b9a006fe20e95a4b922c52742451bc.cu
#include <stdio.h> #include <iostream> #include <string> #include <fstream> #include <sstream> #include <cuComplex.h> #include "kernel.h" #define _USE_MATH_DEFINES #include <math.h> #include <time.h> #include "device_launch_parameters.h" const int SIZE = 1024; const double MINFREQ = 1e-6; using namespace std; bool debug; struct dataInfo { double freqHigh; double freqLow; int NFreq; int NPorts; }; __global__ void VectorAdd(double *freq, cuComplex *Ahat, cuComplex *data, cuComplex *Poles, dataInfo *frequencyInfo, int *Apattern, int Ahat_size, int NComplexPoles, int NRealPoles) { double real=0; double imag=0; double denum=0; int poleNumb = 0; int NRow = (*frequencyInfo).NFreq; double s; int test = 0; int col = blockIdx.x; int row = threadIdx.x; if (col > 17) { poleNumb = col - 17; } else { poleNumb = col; } real = Poles[poleNumb].x; imag = Poles[poleNumb].y; s = 2 * M_PI*freq[row]; //real if (Apattern[col] == 1) { Ahat[col*NRow + row].x = -real / (pow(real, 2) + pow(s, 2)); Ahat[col*NRow + row].y = -s / (pow(real, 2) + pow(s, 2)); } else if (Apattern[col] == 2) { denum = (pow(real, 2)*(pow(real, 2) + 2 * pow(s, 2) + 2 * pow(imag, 2)) + pow(imag, 4) - 2 * pow(imag, 2)*pow(s, 2) + pow(s, 4)); Ahat[col*NRow + row].x = -2 * (real*(pow(real, 2) + pow(s, 2) + pow(imag, 2))) / denum; Ahat[col*NRow + row].y = -2 * (s *(pow(real, 2) + pow(s, 2) - pow(imag, 2))) / denum; } else if (Apattern[col] == 3) { denum = (pow(real, 2)*(pow(real, 2) + 2 * pow(s, 2) + 2 * pow(imag, 2)) + pow(imag, 4) - 2 * pow(imag, 2)*pow(s, 2) + pow(s, 4)); Ahat[(col)*NRow + row].x = (-2 * imag*(pow(real, 2) - pow(s, 2) + pow(imag, 2))) / denum; Ahat[(col)*NRow + row].y = (-4 * real*imag*s) / denum; } else if (Apattern[col] == 4) { Ahat[col*NRow + row].x = 1; Ahat[col*NRow + row].y = 0; } else if (Apattern[col] == -1) { denum = pow(real, 2) + pow(imag, 2) - 2 * imag*s + pow(s, 2); Ahat[col*NRow + row].x = (real*data[row].x - data[row].y*s + data[row].y*imag) / denum; Ahat[col*NRow + row].y = (s*data[row].x - data[row].x*imag + data[row].y*real) / denum; } else if (Apattern[col] == -2) { denum = (pow(real, 2)*(pow(real, 2) + 2 * pow(s, 2) + 2 * pow(imag, 2)) + pow(imag, 4) - 2 * pow(imag, 2)*pow(s, 2) + pow(s, 4)); Ahat[col*NRow + row].x = 2 * (pow(real, 3)*data[row].x - pow(real, 2)*data[row].y*s + real* data[row].x*pow(s, 2) + real* data[row].x*pow(imag, 2) + s*data[row].y*pow(imag, 2) - data[row].y*pow(s, 3)) / denum; Ahat[col*NRow + row].y = 2 * (pow(real, 3)*data[row].y + pow(real, 2)*data[row].x*s + real* data[row].y*pow(s, 2) + real* data[row].y*pow(imag, 2) - s*data[row].x*pow(imag, 2) + data[row].x*pow(s, 3)) / denum; } else if (Apattern[col] == -3) { denum = (pow(real, 2)*(pow(real, 2) + 2 * pow(s, 2) + 2 * pow(imag, 2)) + pow(imag, 4) - 2 * pow(imag, 2)*pow(s, 2) + pow(s, 4)); Ahat[col*NRow + row].x = 2 * imag*(pow(real, 2)*data[row].x - data[row].x* pow(s, 2) + data[row].x*pow(imag, 2) - 2 * real*data[row].y*s) / denum; Ahat[col*NRow + row].y = 2 * imag*(2 * data[row].x*real*s + pow(real, 2)*data[row].y - data[row].y*pow(s, 2) + data[row].y*pow(imag, 2)) / denum; } } //__global__ void AMatrixFormulationGPU(double *freq, cuComplex *Ahat, cuComplex *data, cuComplex *Poles, dataInfo *frequencyInfo, int *Apattern, int Ahat_size, int NComplexPoles, int NRealPoles) { // for (int blocky = 0; blocky <= NColToTake; blocky++) { // for (int blockx = 0; blockx < NColToTake; blockx++) { // if (blocky == NColToTake) { // for (int col = 0; col < baseMatrix_NCol; col++) { // for (int row = 0; row < baseMatrix_NRow; row++) { // // 
realBase = baseMatrix[col*NRow + row].x; // imagBase = baseMatrix[col*NRow + row].y; // // realData = data[row + NFreq*blockx].x; // imagData = data[row + NFreq*blockx].y; // // int memPosition = blocky*(baseMatrix_NCol)*(NFreq)*(NColToTake)+col*NFreq*NColToTake + blockx*NFreq + row; // // Ahat[memPosition].x = (realBase*realData - imagBase*imagData); // Ahat[memPosition].y = (realBase*imagData - imagBase*realData); // } // }//end for col // } // else if (blocky == blockx) { // for (int col = 0; col < baseMatrix_NCol; col++) { // for (int row = 0; row < baseMatrix_NRow; row++) { // int memPosition = blocky*(baseMatrix_NCol)*(NFreq)*(NColToTake)+col*NFreq*NColToTake + blockx*NFreq + row; // Ahat[memPosition].x = baseMatrix[col*NRow + row].x; // Ahat[memPosition].y = baseMatrix[col*NRow + row].y; // }//end for row // }//end for col // } // else { // for (int col = 0; col < baseMatrix_NCol; col++) { // // for (int row = 0; row < baseMatrix_NCol; row++) { // // int memPosition = blocky*(baseMatrix_NCol)*(NFreq)*(NColToTake)+col*NFreq*NColToTake + blockx*NFreq + row; // Ahat[memPosition].x = 0; // Ahat[memPosition].y = 0; // }//end for row // }//end for col // }//endif // } // } // //} // void readFile(string fileName, double *freq, cuComplex *data, dataInfo *dataInfo) { std::string::size_type sz; ifstream infile(fileName); //Ali: Attempt to open data file if (!infile) { std::cout << "While opening data file an error was encountered" << std::endl; } else { string line; int fileColumn, freqCount = 0; bool skipline; string word; int currentFileDataCol = 0, currentPole = 0, startofPoleCol = 0, endofPoleCol = 0, dataType = 0; //Ali: iterate through each line while (getline(infile, line)) { istringstream stringOfLine(line); fileColumn = 0; int dataColCount = 0; //Ali: Iterrate through each element of line which is refered to as a column while (stringOfLine) { //Ali: escape after last pole if (fileColumn == 2 * pow((*dataInfo).NPorts, 2) + 1) { break; } //load new element into a string variable called "word" stringOfLine >> word; skipline = false; //Ali: if comment ("%#") is detected then like is skipped if (word.compare("%#") == 0) { skipline = true; break; } //Ali: if it is the first column then data is stored as the frequency if (fileColumn == 0) { //Ali: translate string to int freq[freqCount] = stod(word, &sz); //Ali: if it is the forst freq reading then initialize max and min if (freqCount == 0) { (*dataInfo).freqHigh = freq[freqCount]; (*dataInfo).freqLow = freq[freqCount]; } else { //Ali: if current freq is greater than highest then update if ((*dataInfo).freqHigh < freq[freqCount]) { (*dataInfo).freqHigh = freq[freqCount]; } //Ali: if current freq is smaller than lowest then update if ((*dataInfo).freqLow > freq[freqCount]) { (*dataInfo).freqLow = freq[freqCount]; } } fileColumn++; } else { //Ali: stores the data coloumn number without freq coloumn currentFileDataCol = fileColumn - 1; //Ali: calculate the current port we are reading currentPole = (int)(currentFileDataCol / ((*dataInfo).NPorts * 2)); //Ali: calculate the start coloumn and end coloumn to start and stop storing data for port startofPoleCol = currentPole * 2 * (*dataInfo).NPorts; endofPoleCol = startofPoleCol + (currentPole + 1) * 2 - 1; //Ali: check if it should store the current coloumn if (currentFileDataCol >= startofPoleCol && currentFileDataCol <= endofPoleCol) { //Ali: The second column is the real part of the response if (dataType == 0) { data[freqCount + (*dataInfo).NFreq*(dataColCount)].x = stod(word, &sz); }//endif 
//Ali: The third column is the imag part of the response if (dataType == 1) { data[freqCount + (*dataInfo).NFreq*(dataColCount)].y = stod(word, &sz); dataColCount++; }//endif dataType ^= 1; } //endif fileColumn++; }//endif }//endwhile //Ali: If line is skipped then dont add to freq count if (!skipline) { freqCount++; } } //Ali: make sure the min is atleast 1e-6 if ((*dataInfo).freqLow < MINFREQ) { (*dataInfo).freqLow = MINFREQ; } printf("freqcount %d\n", (*dataInfo).NFreq); } }//enfunction int main() { debug = true; string dataFileName = "radial_stub^S.txt"; int NRealPoles = 1; int NComplexPoles = 2; int NPorts = 2; int NFreq = 1001; int NColOfData = 0; //###########################Reading File######################################## //Ali: Find out how many col we wil have to store based on number of ports for (int i = 1; i <= NPorts; i++) { NColOfData += i; } //Ali: var to store freq points in data (upto 1024 data points) double *freq; cudaMallocManaged(&freq, NFreq * sizeof(double)); //Ali: store collected data in complex form (upto 1024 data points) cuComplex *data; cudaMallocManaged(&data, NColOfData * NFreq * sizeof(cuComplex)); //Ali: store info about the stroed data // -lowest freq // -highest freq // -number of freq points dataInfo *dataInfo; cudaMallocManaged(&dataInfo, sizeof(dataInfo)); (*dataInfo).NFreq = NFreq; (*dataInfo).NPorts = NPorts; //Ali: extract data form file readFile(dataFileName, freq, data, dataInfo); if (debug) { FILE * fp; fp = fopen("1_extractedData.txt", "w+"); fprintf(fp, "********************************************************\n"); fprintf(fp, "extracted data\n"); fprintf(fp, "********************************************************\n"); for (int i = 0; i < NColOfData; i++) { fprintf(fp, "\n********************************************************\n"); fprintf(fp, "col: %d \n", i); for (int z = 0; z < (*dataInfo).NFreq; z++) { fprintf(fp, "Z: %d FREQ: %f %f(%f) \n", z, freq[z], data[i*NFreq + z].x, data[i*NFreq + z].y); } } fclose(fp); printf("^^^^^^^^^^^^^^^^^^^\n"); printf("HiegestFREQ: %f GHz \n LowestFREQ: %f GHz \n FreqPoints %d\n", (*dataInfo).freqHigh, (*dataInfo).freqLow, (*dataInfo).NFreq); } //###########################Initial Pole Guess######################################## //This constant is predetermined in a paper int Real_part_Divisor = 100; int NumberOfPoles = NRealPoles + (NComplexPoles / 2); double *Poles_imag_part; cudaMallocManaged(&Poles_imag_part, NumberOfPoles * sizeof(double)); double *Poles_real_part; cudaMallocManaged(&Poles_real_part, NumberOfPoles * sizeof(double)); double *Real_Poles; cudaMallocManaged(&Real_Poles, NRealPoles * sizeof(double)); cuComplex *Complex_Poles; cudaMallocManaged(&Complex_Poles, NComplexPoles * sizeof(cuComplex)); cuComplex *Poles; cudaMallocManaged(&Poles, (NRealPoles + NComplexPoles) * sizeof(cuComplex)); int B[2] = { 1, 1 }; int C[2] = { 1, -1 }; double poleSpacing = ((*dataInfo).freqHigh - (*dataInfo).freqLow) / (NumberOfPoles - 1); for (int z = 0; z < NumberOfPoles; z++) { Poles_imag_part[z] = (*dataInfo).freqLow + poleSpacing*z; Poles_real_part[z] = -Poles_imag_part[z] / Real_part_Divisor; } //Set Real Poles for (int z = 0; z < NRealPoles; z++) { Real_Poles[z] = 2 * M_PI*Poles_real_part[z]; } //Set Complex Poles int poleIndex = 0; for (int z = 0; z < NComplexPoles / 2; z++) { for (int i = 0; i < 2; i++) { Complex_Poles[poleIndex].x = 2 * M_PI*(Poles_real_part[NRealPoles + z] * B[i]); Complex_Poles[poleIndex].y = 2 * M_PI*(Poles_imag_part[NRealPoles + z] * C[i]); poleIndex++; } } //merge Poles 
into one matrix for (int z = 0; z < (NComplexPoles + NRealPoles); z++) { if (z < NRealPoles) { Poles[z].x = Real_Poles[z]; } else { Poles[z].x = Complex_Poles[z - NRealPoles].x; Poles[z].y = Complex_Poles[z - NRealPoles].y; } } printf("\n\n********************************************************\n"); printf("Initial Poles\n"); printf("********************************************************\n"); for (int z = 0; z < NRealPoles; z++) { printf("Real Pole[%d]: %f(%f) \n", z, Real_Poles[z]); } for (int z = 0; z < NComplexPoles; z++) { printf("Complex Pole[%d]: %f(%f) \n", z, Complex_Poles[z].x, Complex_Poles[z].y); } printf("^^^^^^^^^^^^^\n"); for (int z = 0; z < NComplexPoles + NRealPoles; z++) { printf("merged Pole[%d]: %f(%f) \n", z, Poles[z].x, Poles[z].y); } printf("^^^^^^^^^^^^^\n"); printf("NumberOfPoles: %d \n poleSpacing: %f \n", NumberOfPoles, poleSpacing); //########################### Base Matrix Setup ######################################## // NCol is equal to the number of real poles + number of imaginary poles + 1 (d col) int baseMatrix_NCol = NComplexPoles + NRealPoles + 1; int baseMatrix_NRow = (*dataInfo).NFreq; int NRow = (*dataInfo).NFreq; cuComplex *baseMatrix; cudaMallocManaged(&baseMatrix, baseMatrix_NRow * baseMatrix_NCol * sizeof(double)); int CPUenable = 1; if (CPUenable == 1 || CPUenable == 2) { clock_t tStart = clock(); /* Do your stuff here */ double real = 0, imag = 0, denum = 0, s=0; int poleNumb = 0, isReal = 1; //for loop to generate base matrix for (int col = 0; col < baseMatrix_NCol; col++) { if (poleNumb < NRealPoles + NComplexPoles) { real = Poles[poleNumb].x; imag = Poles[poleNumb].y; } for (int row = 0; row < baseMatrix_NRow; row++) { s = 2 * M_PI*freq[row]; //real pole if (col < NRealPoles) { baseMatrix[col*NRow + row].x = -real / (pow(real, 2) + pow(s, 2)); baseMatrix[col*NRow + row].y = -s / (pow(real, 2) + pow(s, 2)); } //imag pole real part else if (col < NRealPoles + NComplexPoles && isReal) { denum = (pow(real, 2)*(pow(real, 2) + 2 * pow(s, 2) + 2 * pow(imag, 2)) + pow(imag, 4) - 2 * pow(imag, 2)*pow(s, 2) + pow(s, 4)); baseMatrix[col*NRow + row].x = -2 * (real*(pow(real, 2) + pow(s, 2) + pow(imag, 2))) / denum; baseMatrix[col*NRow + row].y = -2 * (s *(pow(real, 2) + pow(s, 2) - pow(imag, 2))) / denum; } //imag pole imag part else if (col < NRealPoles + NComplexPoles && !isReal) { denum = (pow(real, 2)*(pow(real, 2) + 2 * pow(s, 2) + 2 * pow(imag, 2)) + pow(imag, 4) - 2 * pow(imag, 2)*pow(s, 2) + pow(s, 4)); baseMatrix[col*NRow + row].x = (-2 * imag*(pow(real, 2) - pow(s, 2) + pow(imag, 2))) / denum; baseMatrix[col*NRow + row].y = (-4 * real*imag*s) / denum; } //d col else if (col == NRealPoles + NComplexPoles) { baseMatrix[col*NRow + row].x = 1; baseMatrix[col*NRow + row].y = 0; }//endif };//end row for loop poleNumb++; if(col>= NRealPoles && col < NRealPoles + NComplexPoles) isReal ^= 1; }; //end col for loop //Write base b matrix to file FILE * fp; fp = fopen("2_Basematrix.txt", "w+"); for (int row = 0; row < baseMatrix_NRow; row++) { for (int col = 0; col < baseMatrix_NCol; col++) { fprintf(fp, " %.4e(%.4e)", baseMatrix[col*NRow + row].x, baseMatrix[col*NRow + row].y); }; fprintf(fp, "\n"); }; fclose(fp); //###########################Ahat set up######################################## //Ali: generate the AhatMatrix double realBase, imagBase, realData, imagData; int dim = NColOfData; int memPosition = 0; poleNumb = 0; int NumbAYBlocks= NColOfData+1; int NumbAXBlocks = NColOfData; int NElementsAMatrix = 
NumbAXBlocks*baseMatrix_NRow*baseMatrix_NCol*NumbAYBlocks - NumbAXBlocks*baseMatrix_NRow; cuComplex *Ahat; cudaMallocManaged(&Ahat, NElementsAMatrix * sizeof(double)); //cycle trough col blocks. each block is the dimentions of the the base matrix //exept blocks in the last block column b/c thye dont include the constants for d for (int blocky = 0; blocky <= NColOfData; blocky++) { for (int blockx = 0; blockx < NColOfData; blockx++) { //check if it is the last blocky coloumn to multiply with data if (blocky == NColOfData) { for (int col = 0; col < baseMatrix_NCol-1; col++) { for (int row = 0; row < baseMatrix_NRow; row++) { //extract component if each base matrix element realBase = baseMatrix[col*NRow + row].x; imagBase = baseMatrix[col*NRow + row].y; //extract components of each data element realData = data[row + NFreq*blockx].x; imagData = data[row + NFreq*blockx].y; //calc posiiton in A matrix (memory offset) memPosition = blocky*(baseMatrix_NCol)*(NFreq)*(NColOfData)+col*NFreq*NColOfData + blockx*NFreq + row; //assign Ahat[memPosition].x = (realBase*realData - imagBase*imagData); Ahat[memPosition].y = (realBase*imagData - imagBase*realData); }//end for row }//end for col } //Check if it is diaginal block elemnt that is just a reprint of base matrix else if (blocky == blockx) { for (int col = 0; col < baseMatrix_NCol; col++) { for (int row = 0; row < baseMatrix_NRow; row++) { //calc posiiton in A matrix (memory offset) memPosition = blocky*(baseMatrix_NCol)*(NFreq)*(NColOfData)+col*NFreq*NColOfData + blockx*NFreq + row; //assign Ahat[memPosition].x = baseMatrix[col*NRow + row].x; Ahat[memPosition].y = baseMatrix[col*NRow + row].y; }//end for row }//end for col } //Otherwise set to zero else { for (int col = 0; col < baseMatrix_NCol; col++) { for (int row = 0; row < baseMatrix_NCol; row++) { //calc posiiton in A matrix (memory offset) memPosition = blocky*(baseMatrix_NCol)*(NFreq)*(NColOfData)+col*NFreq*NColOfData + blockx*NFreq + row; //assign Ahat[memPosition].x = 0; Ahat[memPosition].y = 0; } } }//endif }//endblockX }//endblocky clock_t tStop = clock(); printf("CPU Time taken: %.6fs\n", (double)(tStop - tStart) / CLOCKS_PER_SEC); //clock_t start = clock(); //VectorAdd <<<Ahat_size,(*dataInfo).NFreq >>> (freq, Ahat, data, Poles, dataInfo, Apattern, Ahat_size, NComplexPoles, NRealPoles); //cudaDeviceSynchronize(); //clock_t stop = clock(); //printf("GPU Time taken: %.6fs\n", (double)(stop - start) / CLOCKS_PER_SEC); //Ali: Write A matrix to file fp = fopen("3_Amatrix.txt", "w+"); for (int row = 0; row < NColOfData*NFreq; row++) { for (int col = 0; col < baseMatrix_NCol*(NColOfData+1)-1; col++) { fprintf(fp, " %.4e(%.4e)", Ahat[col*NRow*NColOfData + row].x, Ahat[col*NRow*NColOfData + row].y); } fprintf(fp, "\n"); } fclose(fp); } cudaFree(freq); cudaFree(data); cudaFree(dataInfo); cudaFree(Poles_imag_part); cudaFree(Poles_real_part); cudaFree(Real_Poles); cudaFree(Complex_Poles); return 0; }
22384fe98613597e1851cda432678a562fbc079f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Microsoft Corporation. // SPDX-License-Identifier: Apache-2.0 // DeepSpeed Team #include <cassert> #include "custom_cuda_layers.h" #include "memory_access_utils.h" namespace cg = cooperative_groups; namespace td_sort { constexpr int threads = 512; constexpr int granularity = 16; constexpr int mem_vals = granularity / sizeof(int32_t); constexpr int max_buffer_size = (threads + 1) * mem_vals; #ifdef __HIP_PLATFORM_HCC__ constexpr int warp_size = 64; #else constexpr int warp_size = 32; #endif constexpr int max_warps = threads / warp_size; } // namespace td_sort template <int VALS_PER_THREAD> __global__ void scan_sort(int32_t* data, int reserved_tokens, int original_tokens) { cg::thread_block tb = cg::this_thread_block(); cg::thread_block_tile<td_sort::warp_size> warp = cg::tiled_partition<td_sort::warp_size>(tb); __shared__ int32_t indices_buffer[td_sort::max_buffer_size]; __shared__ int32_t intermediate_buffer[td_sort::max_warps]; __shared__ int32_t sorted_indices_buffer[td_sort::max_buffer_size]; for (int i = tb.thread_index().x * td_sort::mem_vals; i < original_tokens + 1; i += tb.group_dim().x * td_sort::mem_vals) { uint32_t zeros[td_sort::mem_vals] = {0, 0, 0, 0}; mem_access::store_shared<td_sort::granularity>(indices_buffer + i, zeros); } int32_t local_vals[VALS_PER_THREAD]; // We flatten layers/batch into a single indexing dimension int32_t* data_block = data + tb.group_index().x * reserved_tokens; // The next two loops really could be fused for a more logical code layout, but don't want to // move the barrier forward #pragma unroll for (int i = 0; i < VALS_PER_THREAD; i++) { const int iter_idx = i * td_sort::threads + tb.thread_index().x; if (iter_idx < reserved_tokens) { mem_access::load_global<sizeof(int32_t)>(local_vals + i, data_block + iter_idx); } else { local_vals[i] = 0; } } tb.sync(); #pragma unroll for (int i = 0; i < VALS_PER_THREAD; i++) { const int iter_idx = i * td_sort::threads + tb.thread_index().x; if (iter_idx < reserved_tokens) { const int32_t one = 1; mem_access::store_shared<sizeof(int32_t)>(indices_buffer + local_vals[i], &one); } } tb.sync(); int32_t local_input[td_sort::mem_vals]; mem_access::load_shared<td_sort::granularity>( local_input, indices_buffer + tb.thread_index().x * td_sort::mem_vals); int32_t reduce_vals[td_sort::mem_vals]; reduce_vals[0] = local_input[0]; #pragma unroll for (int i = 1; i < td_sort::mem_vals; i++) { reduce_vals[i] = local_input[i] + reduce_vals[i - 1]; } int32_t step_1_val = reduce_vals[td_sort::mem_vals - 1]; // Short span exclusive scan algorithm (less work efficient) #pragma unroll for (int i = 1; i < td_sort::warp_size; i *= 2) { int32_t step_val = warp.shfl_up(step_1_val, i); step_1_val = (warp.thread_rank() < i) ? step_1_val : step_1_val + step_val; } if (warp.thread_rank() == td_sort::warp_size - 1) { mem_access::store_shared<sizeof(int32_t)>(intermediate_buffer + warp.meta_group_rank(), &step_1_val); } tb.sync(); if (warp.meta_group_rank() == 0) { int32_t step_2_val = 0; if (warp.thread_rank() < td_sort::max_warps) { mem_access::load_shared<sizeof(int32_t)>(&step_2_val, intermediate_buffer + warp.thread_rank()); } #pragma unroll for (int i = 1; i < td_sort::warp_size; i *= 2) { int32_t step_val = warp.shfl_up(step_2_val, i); step_2_val = (warp.thread_rank() < i) ? 
step_2_val : step_2_val + step_val; } if (warp.thread_rank() < td_sort::max_warps) { mem_access::store_shared<sizeof(int32_t)>(intermediate_buffer + warp.thread_rank(), &step_2_val); } } tb.sync(); int step_2_val = 0; if (warp.meta_group_rank() > 0) { mem_access::load_shared<sizeof(int32_t)>(&step_2_val, intermediate_buffer + warp.meta_group_rank() - 1); } const int thread_offset = reduce_vals[td_sort::mem_vals - 1]; #pragma unroll for (int i = 0; i < td_sort::mem_vals; i++) { reduce_vals[i] += step_1_val + step_2_val - thread_offset; } mem_access::store_shared<td_sort::granularity>( indices_buffer + tb.thread_index().x * td_sort::mem_vals, reduce_vals); if (tb.thread_index().x == 0) { indices_buffer[original_tokens] = original_tokens - indices_buffer[original_tokens]; } tb.sync(); for (int i = 0; i < VALS_PER_THREAD; i++) { const int iter_idx = i * td_sort::threads + tb.thread_index().x; if (iter_idx < reserved_tokens) { if (local_vals[i] == 0) { int zero = 0; mem_access::store_shared<sizeof(int32_t)>(sorted_indices_buffer, &zero); } else { int sorted_idx; mem_access::load_shared<sizeof(int32_t)>(&sorted_idx, indices_buffer + local_vals[i] - 1); mem_access::store_shared<sizeof(int32_t)>(sorted_indices_buffer + sorted_idx, local_vals + i); } } } tb.sync(); #pragma unroll for (int i = 0; i < VALS_PER_THREAD; i++) { const int iter_idx = i * td_sort::threads + tb.thread_index().x; if (iter_idx < reserved_tokens) { int32_t store_val; mem_access::load_shared<sizeof(int32_t)>(&store_val, sorted_indices_buffer + iter_idx); mem_access::store_global<sizeof(int32_t)>(data_block + iter_idx, &store_val); } } } void launch_token_sort(int32_t* indices, int layers, int batch_size, int reserved_size, int original_tokens, hipStream_t stream) { // Each sort is completely independent, can flatten this dimension dim3 grid(layers * batch_size); dim3 block(td_sort::threads); const int vals_per_thread = (reserved_size + td_sort::threads - 1) / td_sort::threads; if (vals_per_thread == 1) { hipLaunchKernelGGL(( scan_sort<1>), dim3(grid), dim3(block), 0, stream, indices, reserved_size, original_tokens); } else if (vals_per_thread == 2) { hipLaunchKernelGGL(( scan_sort<2>), dim3(grid), dim3(block), 0, stream, indices, reserved_size, original_tokens); } else if (vals_per_thread == 3) { hipLaunchKernelGGL(( scan_sort<3>), dim3(grid), dim3(block), 0, stream, indices, reserved_size, original_tokens); } else if (vals_per_thread == 4) { hipLaunchKernelGGL(( scan_sort<4>), dim3(grid), dim3(block), 0, stream, indices, reserved_size, original_tokens); } else { assert(false); } }
22384fe98613597e1851cda432678a562fbc079f.cu
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

#include <cassert>
#include "custom_cuda_layers.h"
#include "memory_access_utils.h"

namespace cg = cooperative_groups;

namespace td_sort {
constexpr int threads = 512;
constexpr int granularity = 16;
constexpr int mem_vals = granularity / sizeof(int32_t);
constexpr int max_buffer_size = (threads + 1) * mem_vals;
#ifdef __HIP_PLATFORM_HCC__
constexpr int warp_size = 64;
#else
constexpr int warp_size = 32;
#endif
constexpr int max_warps = threads / warp_size;
}  // namespace td_sort

template <int VALS_PER_THREAD>
__global__ void scan_sort(int32_t* data, int reserved_tokens, int original_tokens)
{
    cg::thread_block tb = cg::this_thread_block();
    cg::thread_block_tile<td_sort::warp_size> warp = cg::tiled_partition<td_sort::warp_size>(tb);

    __shared__ int32_t indices_buffer[td_sort::max_buffer_size];
    __shared__ int32_t intermediate_buffer[td_sort::max_warps];
    __shared__ int32_t sorted_indices_buffer[td_sort::max_buffer_size];

    for (int i = tb.thread_index().x * td_sort::mem_vals; i < original_tokens + 1;
         i += tb.group_dim().x * td_sort::mem_vals) {
        uint32_t zeros[td_sort::mem_vals] = {0, 0, 0, 0};
        mem_access::store_shared<td_sort::granularity>(indices_buffer + i, zeros);
    }

    int32_t local_vals[VALS_PER_THREAD];

    // We flatten layers/batch into a single indexing dimension
    int32_t* data_block = data + tb.group_index().x * reserved_tokens;

    // The next two loops really could be fused for a more logical code layout, but don't want to
    // move the barrier forward
#pragma unroll
    for (int i = 0; i < VALS_PER_THREAD; i++) {
        const int iter_idx = i * td_sort::threads + tb.thread_index().x;
        if (iter_idx < reserved_tokens) {
            mem_access::load_global<sizeof(int32_t)>(local_vals + i, data_block + iter_idx);
        } else {
            local_vals[i] = 0;
        }
    }

    tb.sync();

#pragma unroll
    for (int i = 0; i < VALS_PER_THREAD; i++) {
        const int iter_idx = i * td_sort::threads + tb.thread_index().x;
        if (iter_idx < reserved_tokens) {
            const int32_t one = 1;
            mem_access::store_shared<sizeof(int32_t)>(indices_buffer + local_vals[i], &one);
        }
    }

    tb.sync();

    int32_t local_input[td_sort::mem_vals];
    mem_access::load_shared<td_sort::granularity>(
        local_input, indices_buffer + tb.thread_index().x * td_sort::mem_vals);

    int32_t reduce_vals[td_sort::mem_vals];
    reduce_vals[0] = local_input[0];
#pragma unroll
    for (int i = 1; i < td_sort::mem_vals; i++) {
        reduce_vals[i] = local_input[i] + reduce_vals[i - 1];
    }

    int32_t step_1_val = reduce_vals[td_sort::mem_vals - 1];

    // Short span exclusive scan algorithm (less work efficient)
#pragma unroll
    for (int i = 1; i < td_sort::warp_size; i *= 2) {
        int32_t step_val = warp.shfl_up(step_1_val, i);
        step_1_val = (warp.thread_rank() < i) ? step_1_val : step_1_val + step_val;
    }

    if (warp.thread_rank() == td_sort::warp_size - 1) {
        mem_access::store_shared<sizeof(int32_t)>(intermediate_buffer + warp.meta_group_rank(),
                                                  &step_1_val);
    }

    tb.sync();

    if (warp.meta_group_rank() == 0) {
        int32_t step_2_val = 0;
        if (warp.thread_rank() < td_sort::max_warps) {
            mem_access::load_shared<sizeof(int32_t)>(&step_2_val,
                                                     intermediate_buffer + warp.thread_rank());
        }

#pragma unroll
        for (int i = 1; i < td_sort::warp_size; i *= 2) {
            int32_t step_val = warp.shfl_up(step_2_val, i);
            step_2_val = (warp.thread_rank() < i) ? step_2_val : step_2_val + step_val;
        }

        if (warp.thread_rank() < td_sort::max_warps) {
            mem_access::store_shared<sizeof(int32_t)>(intermediate_buffer + warp.thread_rank(),
                                                      &step_2_val);
        }
    }

    tb.sync();

    int step_2_val = 0;
    if (warp.meta_group_rank() > 0) {
        mem_access::load_shared<sizeof(int32_t)>(&step_2_val,
                                                 intermediate_buffer + warp.meta_group_rank() - 1);
    }

    const int thread_offset = reduce_vals[td_sort::mem_vals - 1];
#pragma unroll
    for (int i = 0; i < td_sort::mem_vals; i++) {
        reduce_vals[i] += step_1_val + step_2_val - thread_offset;
    }
    mem_access::store_shared<td_sort::granularity>(
        indices_buffer + tb.thread_index().x * td_sort::mem_vals, reduce_vals);

    if (tb.thread_index().x == 0) {
        indices_buffer[original_tokens] = original_tokens - indices_buffer[original_tokens];
    }
    tb.sync();

    for (int i = 0; i < VALS_PER_THREAD; i++) {
        const int iter_idx = i * td_sort::threads + tb.thread_index().x;
        if (iter_idx < reserved_tokens) {
            if (local_vals[i] == 0) {
                int zero = 0;
                mem_access::store_shared<sizeof(int32_t)>(sorted_indices_buffer, &zero);
            } else {
                int sorted_idx;
                mem_access::load_shared<sizeof(int32_t)>(&sorted_idx,
                                                         indices_buffer + local_vals[i] - 1);
                mem_access::store_shared<sizeof(int32_t)>(sorted_indices_buffer + sorted_idx,
                                                          local_vals + i);
            }
        }
    }

    tb.sync();

#pragma unroll
    for (int i = 0; i < VALS_PER_THREAD; i++) {
        const int iter_idx = i * td_sort::threads + tb.thread_index().x;
        if (iter_idx < reserved_tokens) {
            int32_t store_val;
            mem_access::load_shared<sizeof(int32_t)>(&store_val, sorted_indices_buffer + iter_idx);
            mem_access::store_global<sizeof(int32_t)>(data_block + iter_idx, &store_val);
        }
    }
}

void launch_token_sort(int32_t* indices,
                       int layers,
                       int batch_size,
                       int reserved_size,
                       int original_tokens,
                       cudaStream_t stream)
{
    // Each sort is completely independent, can flatten this dimension
    dim3 grid(layers * batch_size);
    dim3 block(td_sort::threads);

    const int vals_per_thread = (reserved_size + td_sort::threads - 1) / td_sort::threads;

    if (vals_per_thread == 1) {
        scan_sort<1><<<grid, block, 0, stream>>>(indices, reserved_size, original_tokens);
    } else if (vals_per_thread == 2) {
        scan_sort<2><<<grid, block, 0, stream>>>(indices, reserved_size, original_tokens);
    } else if (vals_per_thread == 3) {
        scan_sort<3><<<grid, block, 0, stream>>>(indices, reserved_size, original_tokens);
    } else if (vals_per_thread == 4) {
        scan_sort<4><<<grid, block, 0, stream>>>(indices, reserved_size, original_tokens);
    } else {
        assert(false);
    }
}
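Comparing the two token_sort versions above, the only substantive rewrite hipify performs in this pair is the kernel-launch syntax: the CUDA triple-chevron launch becomes a hipLaunchKernelGGL call, with the (possibly templated) kernel name parenthesized and the grid/block arguments wrapped in dim3. A minimal sketch of that mapping follows; the copy_kernel name, block size, and launch_copy wrapper are illustrative assumptions, not code from the dataset.

#include <hip/hip_runtime.h>
#include <cstdint>

// Illustrative kernel, not taken from the dataset files.
__global__ void copy_kernel(const int32_t* in, int32_t* out, int n)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n) out[idx] = in[idx];
}

void launch_copy(const int32_t* in, int32_t* out, int n, hipStream_t stream)
{
    dim3 grid((n + 255) / 256);
    dim3 block(256);

    // CUDA original:  copy_kernel<<<grid, block, 0, stream>>>(in, out, n);
    // hipify rewrite: kernel name first (parenthesized when templated), then grid,
    //                 block, dynamic shared-memory bytes, stream, and the arguments.
    hipLaunchKernelGGL((copy_kernel), dim3(grid), dim3(block), 0, stream, in, out, n);
}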
60eded67fe5a9fec98f55e2fd90a189844311250.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Implements the math functions for GPU. #include "caffe2/utils/math.h" #include <cstring> #include <limits> #include <numeric> #include <vector> #include <hipcub/hipcub.hpp> #include <hipcub/hipcub.hpp> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/functional.h> #include "caffe2/core/context_gpu.h" #include "caffe2/utils/GpuAtomics.cuh" #include "caffe2/utils/conversions.h" #include "caffe2/utils/fixed_divisor.h" // TODO: Move this to fixed_divisor.h #ifdef __HIP_PLATFORM_HCC__ #define FIXED_DIVISOR int32_t #define FIXED_DIVISOR_DIV(d, n) (n / d) #define FIXED_DIVISOR_MOD(d, n) (n % d) #define FIXED_DIVISOR_DIV_MOD(d, n, q, r) \ do { \ const auto n_copy = n; \ *q = n_copy / d; \ *r = n_copy % d; \ } while (0) #else // __HIP_PLATFORM_HCC__ #define FIXED_DIVISOR FixedDivisor<int32_t> #define FIXED_DIVISOR_DIV(d, n) (d.Div(n)) #define FIXED_DIVISOR_MOD(d, n) (d.Mod(n)) #define FIXED_DIVISOR_DIV_MOD(d, n, q, r) (d.DivMod(n, q, r)) #endif // __HIP_PLATFORM_HCC__ #ifdef __HIP_PLATFORM_HCC__ #include <hip/hip_version.h> using CUBLAS_HALF_TYPE = rocblas_half; #else // __HIP_PLATFORM_HCC using CUBLAS_HALF_TYPE = __half; #endif // __HIP_PLATFORM_HCC #include "caffe2/utils/math/utils.h" #if THRUST_VERSION >= 100800 #define THRUST_SUPPORTS_PER_THREAD #endif // THRUST_VERSION >= 100800 namespace caffe2 { namespace math { namespace { #define DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Func, expr) \ template <typename T> \ struct Func##Functor { \ inline __host__ __device__ T \ operator()(const T& lhs, const T& rhs) const { \ return lhs expr rhs; \ } \ }; \ template <> \ struct Func##Functor<at::Half> { \ inline __host__ __device__ at::Half operator()( \ const at::Half& lhs, \ const at::Half& rhs) const { \ return convert::To<float, at::Half>(convert::To<at::Half, float>( \ lhs) expr convert::To<at::Half, float>(rhs)); \ } \ }; DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Add, +) DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Sub, -) DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Mul, *) DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Div, /) #undef DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR template <typename TIn, typename TOut, class BinaryOperator> __global__ void SimpleBinaryOpCUDAKernel( const int N, const BinaryOperator op, const TIn* A, const TIn* B, TOut* C) { CUDA_1D_KERNEL_LOOP(i, N) { C[i] = op(A[i], B[i]); } } template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st> __global__ void RowwiseBinaryOpCUDAKenel( const int size, const FIXED_DIVISOR cols, const BinaryOperator op, const TIn* A, const TIn* B, TOut* C) { CUDA_1D_KERNEL_LOOP(C_index, size) { const int j = FIXED_DIVISOR_MOD(cols, C_index); const int A_index = broadcast_1st ? j : C_index; const int B_index = broadcast_1st ? C_index : j; C[C_index] = op(A[A_index], B[B_index]); } } template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st> __global__ void ColwiseBinaryOpCUDAKenel( const int size, const FIXED_DIVISOR cols, const BinaryOperator op, const TIn* A, const TIn* B, TOut* C) { CUDA_1D_KERNEL_LOOP(C_index, size) { const int i = FIXED_DIVISOR_DIV(cols, C_index); const int A_index = broadcast_1st ? i : C_index; const int B_index = broadcast_1st ? 
C_index : i; C[C_index] = op(A[A_index], B[B_index]); } } template <typename TIn, typename TOut, class BinaryOperator, int D> __global__ void BroadcastBinaryOpCUDAKernel( const int size, const SimpleArray<int, D> A_strides, const SimpleArray<int, D> B_strides, const SimpleArray<FIXED_DIVISOR, D> C_dims, const BinaryOperator op, const TIn* A, const TIn* B, TOut* C) { CUDA_1D_KERNEL_LOOP(C_index, size) { int A_index = 0; int B_index = 0; int C_index_val = C_index; #pragma unroll for (int i = D - 1; i >= 0; --i) { int d; FIXED_DIVISOR_DIV_MOD(C_dims.data[i], C_index_val, &C_index_val, &d); A_index += d * A_strides.data[i]; B_index += d * B_strides.data[i]; } C[C_index] = op(A[A_index], B[B_index]); } } template <typename TIn, typename TOut, class BinaryOperator> CAFFE2_CUDA_EXPORT void BinaryOpWith2DBroadcasting( const int rows, const int cols, const bool rowwise_broadcast, const bool broadcast_1st, const BinaryOperator& op, const TIn* A, const TIn* B, TOut* C, CUDAContext* context) { if (rows == 0 || cols == 0) { return; } const int size = rows * cols; const FIXED_DIVISOR cols_div(cols); if (rowwise_broadcast) { if (broadcast_1st) { hipLaunchKernelGGL(( RowwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, true>) , dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), size, cols_div, op, A, B, C); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( RowwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, false>) , dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), size, cols_div, op, A, B, C); C10_HIP_KERNEL_LAUNCH_CHECK(); } } else { if (broadcast_1st) { hipLaunchKernelGGL(( ColwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, true>) , dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), size, cols_div, op, A, B, C); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( ColwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, false>) , dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), size, cols_div, op, A, B, C); C10_HIP_KERNEL_LAUNCH_CHECK(); } } } template <typename TIn, typename TOut, class BinaryOperator, int D> CAFFE2_CUDA_EXPORT void BroadcastBinaryOpImpl( const int* A_dims, const int* B_dims, const int* C_dims, const BinaryOperator& op, const TIn* A, const TIn* B, TOut* C, CUDAContext* context) { SimpleArray<int, D> A_strides_array; SimpleArray<int, D> B_strides_array; SimpleArray<FIXED_DIVISOR, D> C_dims_array; int A_stride = 1; int B_stride = 1; for (int i = D - 1; i >= 0; --i) { if (C_dims[i] == 0) { return; } A_strides_array.data[i] = A_dims[i] == 1 ? 0 : A_stride; B_strides_array.data[i] = B_dims[i] == 1 ? 
0 : B_stride; A_stride *= A_dims[i]; B_stride *= B_dims[i]; C_dims_array.data[i] = FIXED_DIVISOR(C_dims[i]); } const int size = std::accumulate(C_dims, C_dims + D, 1, std::multiplies<int>()); hipLaunchKernelGGL(( BroadcastBinaryOpCUDAKernel<TIn, TOut, BinaryOperator, D>) , dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), size, A_strides_array, B_strides_array, C_dims_array, op, A, B, C); C10_HIP_KERNEL_LAUNCH_CHECK(); } template <typename TIn, typename TOut, class BinaryOperator> CAFFE2_CUDA_EXPORT void BroadcastBinaryOp( const int A_ndim, const int* A_dims, const int B_ndim, const int* B_dims, const BinaryOperator& op, const TIn* A, const TIn* B, TOut* C, CUDAContext* context) { const int ndim = ::max(A_ndim, B_ndim); std::vector<int> A_dims_array(ndim); std::vector<int> B_dims_array(ndim); std::vector<int> C_dims_array(ndim); utils::ComputeBroadcastBinaryOpDims( A_ndim, A_dims, B_ndim, B_dims, A_dims_array.data(), B_dims_array.data(), C_dims_array.data()); if (A_dims_array == B_dims_array) { const int size = std::accumulate( C_dims_array.cbegin(), C_dims_array.cend(), 1, std::multiplies<int>()); hipLaunchKernelGGL(( SimpleBinaryOpCUDAKernel<TIn, TOut, BinaryOperator>) , dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), size, op, A, B, C); C10_HIP_KERNEL_LAUNCH_CHECK(); return; } int rows; int cols; bool broadcast_1st; if (utils::IsRowwiseBroadcastBinaryOp( ndim, A_dims_array.data(), B_dims_array.data(), &rows, &cols, &broadcast_1st)) { BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>( rows, cols, true, broadcast_1st, op, A, B, C, context); return; } if (utils::IsColwiseBroadcastBinaryOp( ndim, A_dims_array.data(), B_dims_array.data(), &rows, &cols, &broadcast_1st)) { BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>( rows, cols, false, broadcast_1st, op, A, B, C, context); return; } DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_3( ndim, BroadcastBinaryOpImpl, TIn, TOut, BinaryOperator, A_dims_array.data(), B_dims_array.data(), C_dims_array.data(), op, A, B, C, context); } } // namespace #define DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \ template <> \ CAFFE2_CUDA_EXPORT void Rowwise##Func<TIn, CUDAContext, true>( \ const int rows, \ const int cols, \ const TIn* A, \ const TIn* B, \ TOut* C, \ CUDAContext* context) { \ if (rows == 0 || cols == 0) { \ return; \ } \ const int size = rows * cols; \ const FIXED_DIVISOR cols_div(cols); \ hipLaunchKernelGGL(( RowwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, true>) \ , dim3(CAFFE_GET_BLOCKS(size)), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C); \ C10_HIP_KERNEL_LAUNCH_CHECK(); \ } \ template <> \ CAFFE2_CUDA_EXPORT void Rowwise##Func<TIn, CUDAContext, false>( \ const int rows, \ const int cols, \ const TIn* A, \ const TIn* B, \ TOut* C, \ CUDAContext* context) { \ if (rows == 0 || cols == 0) { \ return; \ } \ const int size = rows * cols; \ const FIXED_DIVISOR cols_div(cols); \ hipLaunchKernelGGL(( RowwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, false>) \ , dim3(CAFFE_GET_BLOCKS(size)), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C); \ C10_HIP_KERNEL_LAUNCH_CHECK(); \ } \ template <> \ CAFFE2_CUDA_EXPORT void Colwise##Func<TIn, CUDAContext, true>( \ const int rows, \ const int cols, \ const TIn* A, \ const TIn* B, \ TOut* C, \ CUDAContext* context) { \ if (rows == 0 || cols == 0) { \ return; \ } \ const int size = rows * cols; \ const FIXED_DIVISOR cols_div(cols); \ 
hipLaunchKernelGGL(( ColwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, true>) \ , dim3(CAFFE_GET_BLOCKS(size)), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C); \ C10_HIP_KERNEL_LAUNCH_CHECK(); \ } \ template <> \ CAFFE2_CUDA_EXPORT void Colwise##Func<TIn, CUDAContext, false>( \ const int rows, \ const int cols, \ const TIn* A, \ const TIn* B, \ TOut* C, \ CUDAContext* context) { \ if (rows == 0 || cols == 0) { \ return; \ } \ const int size = rows * cols; \ const FIXED_DIVISOR cols_div(cols); \ hipLaunchKernelGGL(( ColwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, false>) \ , dim3(CAFFE_GET_BLOCKS(size)), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C); \ C10_HIP_KERNEL_LAUNCH_CHECK(); \ } #define DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to) DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to) DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less) DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal) DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater) DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal) #undef DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION #define DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \ std::int32_t, std::int32_t, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \ std::int64_t, std::int64_t, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(at::Half, at::Half, Func, Op) DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor) DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor) DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor) DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor) #undef DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and) DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or) DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor) #define DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \ std::int32_t, std::int32_t, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \ std::int64_t, std::int64_t, Func, Op) DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and) DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or) DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor) #undef DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION #undef DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION #define DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \ template <> \ CAFFE2_CUDA_EXPORT void Func<TIn, CUDAContext>( \ const int A_ndim, \ const int* A_dims, \ const int B_ndim, \ const int* B_dims, \ const TIn* A, \ const TIn* B, \ TOut* C, \ CUDAContext* context) { \ BroadcastBinaryOp<TIn, TOut, 
Op<TIn>>( \ A_ndim, A_dims, B_ndim, B_dims, Op<TIn>(), A, B, C, context); \ } #define DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to) DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to) DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less) DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal) DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater) DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal) #undef DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION #define DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \ std::int32_t, std::int32_t, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \ std::int64_t, std::int64_t, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(at::Half, at::Half, Func, Op) DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor) DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor) DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor) DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor) #undef DEFINE_BROADCAST_CUDA_BINARY_FUNCTION DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and) DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or) DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor) #define DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \ std::int32_t, std::int32_t, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op) DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and) DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or) DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor) #undef DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION #undef DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION #define DELEGATE_REDUCTION_FUNCTION(T, Funcname, func) \ template <> \ CAFFE2_CUDA_EXPORT void Funcname<T, CUDAContext>( \ const int N, \ const T* src, \ T* dst, \ Tensor* scratch_ptr, \ CUDAContext* context) { \ size_t memRequired = 0; \ hipcub::DeviceReduce::func( \ nullptr, memRequired, src, dst, N, context->cuda_stream()); \ auto buffer_size = \ static_cast<int64_t>((memRequired + sizeof(T) - 1) / sizeof(T)); \ scratch_ptr->Resize(std::vector<int64_t>{buffer_size}); \ hipcub::DeviceReduce::func( \ static_cast<void*>(scratch_ptr->mutable_data<T>()), \ memRequired, \ src, \ dst, \ N, \ context->cuda_stream()); \ } DELEGATE_REDUCTION_FUNCTION(float, ReduceMin, Min) DELEGATE_REDUCTION_FUNCTION(float, ReduceMax, Max) DELEGATE_REDUCTION_FUNCTION(int32_t, ReduceMax, Max) DELEGATE_REDUCTION_FUNCTION(int64_t, ReduceMax, Max) #undef DELEGATE_REDUCTION_FUNCTION // Caffe2 gemm provides a simpler interface to the gemm functions, with the // limitation that the data has to be contiguous in memory. 
template <> CAFFE2_CUDA_EXPORT void Gemm<float, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C, CUDAContext* context, TensorProto::DataType math_type) { // Note that cublas follows fortran order, so the order is different from // the cblas convention. const int lda = (trans_A == CblasNoTrans) ? K : M; const int ldb = (trans_B == CblasNoTrans) ? N : K; const hipblasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; const hipblasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_ENFORCE( hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(hipblasSgemm( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> CAFFE2_CUDA_EXPORT void Gemm<at::Half, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int M, const int N, const int K, const float alpha, const at::Half* A, const at::Half* B, const float beta, at::Half* C, CUDAContext* context, TensorProto::DataType math_type) { // Note that cublas follows fortran order, so the order is different from // the cblas convention. const int lda = (trans_A == CblasNoTrans) ? K : M; const int ldb = (trans_B == CblasNoTrans) ? N : K; const hipblasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; const hipblasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; if (math_type == TensorProto_DataType_FLOAT) { CUBLAS_ENFORCE(hipblasSetPointerMode( context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); #ifdef __HIP_PLATFORM_HCC__ // rocblas doesn't support cublasSgemmEx type API yet. 
// It has more general rocblas_gemm_ex API which is more close to // hipblasGemmEx rocblas_gemm_ex does D = alpha*op( A )*op( B ) + beta*C, // whereas cublasgemmEx does C = alpha*op( A )*op( B ) + beta*C ROCBLAS_ENFORCE(rocblas_gemm_ex( context->rocblashandle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, rocblas_datatype_f16_r, ldb, A, rocblas_datatype_f16_r, lda, &beta, C, rocblas_datatype_f16_r, N, C, // D rocblas_datatype_f16_r, // D type N, // ldd rocblas_datatype_f32_r, // compute type rocblas_gemm_algo_standard, // rocblas_gemm_algo 0, // solution index, reserved for future use 0)); // flags, reserved for future use #else CUBLAS_ENFORCE(cublasSgemmEx( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, HIP_R_16F, ldb, A, HIP_R_16F, lda, &beta, C, HIP_R_16F, N)); #endif // __HIP_PLATFORM_HCC__ } else if (math_type == TensorProto_DataType_FLOAT16) { // convert alpha, beta from float -> __half const __half alpha_fp16 = at::Half(alpha); const __half beta_fp16 = at::Half(beta); // call hipblasHgemm CUBLAS_ENFORCE(hipblasSetPointerMode( context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(hipblasHgemm( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, reinterpret_cast<const CUBLAS_HALF_TYPE*>(&alpha_fp16), reinterpret_cast<const CUBLAS_HALF_TYPE*>(B), ldb, reinterpret_cast<const CUBLAS_HALF_TYPE*>(A), lda, reinterpret_cast<const CUBLAS_HALF_TYPE*>(&beta_fp16), reinterpret_cast<CUBLAS_HALF_TYPE*>(C), N)); } else { // fail CAFFE_THROW("Unsupported math type"); } } template <> CAFFE2_CUDA_EXPORT void BiasCHW<float, CUDAContext>( const float* bias, const float* bias_multiplier, const int bias_channels, const int image_size, float* image, CUDAContext* context) { Gemm<float, CUDAContext>( CblasNoTrans, CblasNoTrans, bias_channels, image_size, 1, 1, bias, bias_multiplier, 1, image, context); } template <> CAFFE2_CUDA_EXPORT void GemmBatched<float, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int batch_size, const int M, const int N, const int K, const float alpha, const float** A, const float** B, const float beta, float** C, CUDAContext* context, TensorProto::DataType math_type) { #if __CUDACC_VER_MAJOR__ < 8 || defined(__HIP_PLATFORM_HCC__) // loop over matrices in the batch for (int i = 0; i < batch_size; ++i) { Gemm<float, CUDAContext>( trans_A, trans_B, M, N, K, alpha, A[i], B[i], beta, C[i], context, math_type); } #else // Note that cublas follows fortran order, so the order is different from // the cblas convention. const int lda = (trans_A == CblasNoTrans) ? K : M; const int ldb = (trans_B == CblasNoTrans) ? N : K; const int ldc = N; const hipblasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; const hipblasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? 
HIPBLAS_OP_N : HIPBLAS_OP_T; thrust::device_vector<const float*> A_device(A, A + batch_size); thrust::device_vector<const float*> B_device(B, B + batch_size); thrust::device_vector<float*> C_device(C, C + batch_size); CUBLAS_ENFORCE( hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(hipblasSgemmBatched( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B_device.data().get(), ldb, A_device.data().get(), lda, &beta, C_device.data().get(), ldc, batch_size)); #endif } template <> CAFFE2_CUDA_EXPORT void GemmStridedBatched<float, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int batch_size, const int M, const int N, const int K, const float alpha, const float* A, const int A_stride, const float* B, const int B_stride, const float beta, float* C, const int C_stride, CUDAContext* context, TensorProto::DataType math_type) { #if __CUDACC_VER_MAJOR__ < 8 && !defined(__HIP_PLATFORM_HCC__) // loop over matrices in the batch for (int i = 0; i < batch_size; ++i) { Gemm<float, CUDAContext>( trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type); A += A_stride; B += B_stride; C += C_stride; } #else // Note that cublas follows fortran order, so the order is different from // the cblas convention. const int lda = (trans_A == CblasNoTrans) ? K : M; const int ldb = (trans_B == CblasNoTrans) ? N : K; const int ldc = N; const hipblasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; const hipblasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_ENFORCE( hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(hipblasSgemmStridedBatched( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, ldb, B_stride, A, lda, A_stride, &beta, C, ldc, C_stride, batch_size)); #endif } template <> CAFFE2_CUDA_EXPORT void GemmBatched<at::Half, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int batch_size, const int M, const int N, const int K, const float alpha, const at::Half** A, const at::Half** B, const float beta, at::Half** C, CUDAContext* context, TensorProto::DataType math_type) { #if __CUDACC_VER_MAJOR__ < 9 // loop over matrices in the batch for (int i = 0; i < batch_size; ++i) { Gemm<at::Half, CUDAContext>( trans_A, trans_B, M, N, K, alpha, A[i], B[i], beta, C[i], context, math_type); } #else // Note that cublas follows fortran order, so the order is different from // the cblas convention. const int lda = (trans_A == CblasNoTrans) ? K : M; const int ldb = (trans_B == CblasNoTrans) ? N : K; const int ldc = N; const hipblasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; const hipblasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? 
HIPBLAS_OP_N : HIPBLAS_OP_T; if (math_type == TensorProto_DataType_FLOAT) { thrust::device_vector<const void*> A_device(A, A + batch_size); thrust::device_vector<const void*> B_device(B, B + batch_size); thrust::device_vector<void*> C_device(C, C + batch_size); CUBLAS_ENFORCE(hipblasSetPointerMode( context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(hipblasGemmBatchedEx( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B_device.data().get(), HIP_R_16F, ldb, A_device.data().get(), HIP_R_16F, lda, &beta, C_device.data().get(), HIP_R_16F, ldc, batch_size, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); } else if (math_type == TensorProto_DataType_FLOAT16) { // Convert alpha, beta from float -> __half const __half alpha_fp16 = at::Half(alpha); const __half beta_fp16 = at::Half(beta); thrust::host_vector<const __half*> A_array(batch_size); thrust::host_vector<const __half*> B_array(batch_size); thrust::host_vector<__half*> C_array(batch_size); for (int i = 0; i < batch_size; ++i) { A_array[i] = reinterpret_cast<const __half*>(A[i]); B_array[i] = reinterpret_cast<const __half*>(B[i]); C_array[i] = reinterpret_cast<__half*>(C[i]); } thrust::device_vector<const __half*> A_device( A_array.cbegin(), A_array.cend()); thrust::device_vector<const __half*> B_device( B_array.cbegin(), B_array.cend()); thrust::device_vector<__half*> C_device(C_array.cbegin(), C_array.cend()); CUBLAS_ENFORCE(hipblasSetPointerMode( context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(hipblasHgemmBatched( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha_fp16, B_device.data().get(), ldb, A_device.data().get(), lda, &beta_fp16, C_device.data().get(), ldc, batch_size)); } else { CAFFE_THROW("Unsupported math type"); } #endif } template <> CAFFE2_CUDA_EXPORT void GemmStridedBatched<at::Half, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int batch_size, const int M, const int N, const int K, const float alpha, const at::Half* A, const int A_stride, const at::Half* B, const int B_stride, const float beta, at::Half* C, const int C_stride, CUDAContext* context, TensorProto::DataType math_type) { #if __CUDACC_VER_MAJOR__ < 8 && !defined(__HIP_PLATFORM_HCC__) // loop over matrices in the batch for (int i = 0; i < batch_size; ++i) { Gemm<at::Half, CUDAContext>( trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type); A += A_stride; B += B_stride; C += C_stride; } #else // Note that cublas follows fortran order, so the order is different from // the cblas convention. const int lda = (trans_A == CblasNoTrans) ? K : M; const int ldb = (trans_B == CblasNoTrans) ? N : K; const int ldc = N; const hipblasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; const hipblasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? 
HIPBLAS_OP_N : HIPBLAS_OP_T; if (math_type == TensorProto_DataType_FLOAT) { CUBLAS_ENFORCE(hipblasSetPointerMode( context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); #ifdef __HIP_PLATFORM_HCC__ // D[i*stride_d] = alpha*op(A[i*stride_a])*op(B[i*stride_b]) + // beta*C[i*stride_c], for i in [0,batch_count-1] ROCBLAS_ENFORCE(rocblas_gemm_strided_batched_ex( context->rocblashandle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, rocblas_datatype_f16_r, ldb, B_stride, A, rocblas_datatype_f16_r, lda, A_stride, &beta, C, rocblas_datatype_f16_r, ldc, C_stride, C, // D rocblas_datatype_f16_r, // D type ldc, // ldd C_stride, // D stride batch_size, rocblas_datatype_f32_r, // compute type rocblas_gemm_algo_standard, // rocblas_gemm_algo 0, // solution index, reserved for future use 0)); // flags, reserved for future use #else CUBLAS_ENFORCE(hipblasGemmStridedBatchedEx( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, HIP_R_16F, ldb, B_stride, A, HIP_R_16F, lda, A_stride, &beta, C, HIP_R_16F, ldc, C_stride, batch_size, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #endif // __HIP_PLATFORM_HCC__ } else if (math_type == TensorProto_DataType_FLOAT16) { // Convert alpha, beta from float -> __half const __half alpha_fp16 = at::Half(alpha); const __half beta_fp16 = at::Half(beta); CUBLAS_ENFORCE(hipblasSetPointerMode( context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(hipblasHgemmStridedBatched( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, reinterpret_cast<const CUBLAS_HALF_TYPE*>(&alpha_fp16), reinterpret_cast<const CUBLAS_HALF_TYPE*>(B), ldb, B_stride, reinterpret_cast<const CUBLAS_HALF_TYPE*>(A), lda, A_stride, reinterpret_cast<const CUBLAS_HALF_TYPE*>(&beta_fp16), reinterpret_cast<CUBLAS_HALF_TYPE*>(C), ldc, C_stride, batch_size)); } else { CAFFE_THROW("Unsupported math type"); } #endif } template <> CAFFE2_CUDA_EXPORT void Gemv<float, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y, CUDAContext* context, TensorProto::DataType math_type) { const hipblasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_ENFORCE( hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(hipblasSgemv( context->cublas_handle(), cu_trans_A, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> CAFFE2_CUDA_EXPORT void Gemv<at::Half, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const int M, const int N, const float alpha, const at::Half* A, const at::Half* x, const float beta, at::Half* y, CUDAContext* context, TensorProto::DataType math_type) { const hipblasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N; // sort out what we need to call cublasSgemmEx / hipblasHgemm const int m = (cu_trans_A == HIPBLAS_OP_N) ? N : M; const int k = (cu_trans_A == HIPBLAS_OP_N) ? M : N; const int lda = (cu_trans_A == HIPBLAS_OP_N) ? m : k; const int ldc = m; if (math_type == TensorProto_DataType_FLOAT) { CUBLAS_ENFORCE(hipblasSetPointerMode( context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); #ifdef __HIP_PLATFORM_HCC__ // rocblas doesn't support cublasSgemmEx type API yet. 
// It has more general rocblas_gemm_ex API which is more close to // hipblasGemmEx rocblas_gemm_ex does D = alpha*op( A )*op( B ) + beta*C, // whereas cublasgemmEx does C = alpha*op( A )*op( B ) + beta*C ROCBLAS_ENFORCE(rocblas_gemm_ex( context->rocblashandle(), cu_trans_A, rocblas_operation_none, m, 1, k, &alpha, A, rocblas_datatype_f16_r, lda, x, rocblas_datatype_f16_r, k, &beta, y, rocblas_datatype_f16_r, ldc, y, // D rocblas_datatype_f16_r, // D type ldc, // ldd rocblas_datatype_f32_r, // compute type rocblas_gemm_algo_standard, // rocblas_gemm_algo 0, // solution index, reserved for future use 0)); // flags, reserved for future use #else CUBLAS_ENFORCE(cublasSgemmEx( context->cublas_handle(), cu_trans_A, HIPBLAS_OP_N, m, 1, k, &alpha, A, HIP_R_16F, lda, x, HIP_R_16F, k, &beta, y, HIP_R_16F, ldc)); #endif // __HIP_PLATFORM_HCC__ } else if (math_type == TensorProto_DataType_FLOAT16) { const __half alpha_fp16 = at::Half(alpha); const __half beta_fp16 = at::Half(beta); CUBLAS_ENFORCE(hipblasSetPointerMode( context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(hipblasHgemm( context->cublas_handle(), cu_trans_A, HIPBLAS_OP_N, m, 1, k, reinterpret_cast<const CUBLAS_HALF_TYPE*>(&alpha_fp16), reinterpret_cast<const CUBLAS_HALF_TYPE*>(A), lda, reinterpret_cast<const CUBLAS_HALF_TYPE*>(x), k, reinterpret_cast<const CUBLAS_HALF_TYPE*>(&beta_fp16), reinterpret_cast<CUBLAS_HALF_TYPE*>(y), ldc)); } else { // fail CAFFE_THROW("Unsupported math type"); } } #ifndef __HIP_PLATFORM_HCC__ // No change, but required. Defer to default CUDA engine template <> CAFFE2_CUDA_EXPORT void Gemm<float, CUDAContext, TensorCoreEngine>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C, CUDAContext* context, TensorProto::DataType math_type) { return Gemm<float, CUDAContext>( trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type); } template <> CAFFE2_CUDA_EXPORT void Gemm<at::Half, CUDAContext, TensorCoreEngine>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int M, const int N, const int K, const float alpha, const at::Half* A, const at::Half* B, const float beta, at::Half* C, CUDAContext* context, TensorProto::DataType math_type) { // Note that cublas follows fortran order, so the order is different from // the cblas convention. const int lda = (trans_A == CblasNoTrans) ? K : M; const int ldb = (trans_B == CblasNoTrans) ? N : K; const hipblasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; const hipblasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? 
HIPBLAS_OP_N : HIPBLAS_OP_T; // enable TensorCore for this call on this handle if (TensorCoreAvailable()) { CUBLAS_ENFORCE( cublasSetMathMode(context->cublas_handle(), CUBLAS_TENSOR_OP_MATH)); } CUBLAS_ENFORCE( hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(hipblasGemmEx( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, HIP_R_16F, ldb, A, HIP_R_16F, lda, &beta, C, HIP_R_16F, N, HIP_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP)); // Now disable TensorCore math for subsequent calls to this handle if (TensorCoreAvailable()) { CUBLAS_ENFORCE( cublasSetMathMode(context->cublas_handle(), CUBLAS_DEFAULT_MATH)); } } template <> CAFFE2_CUDA_EXPORT void GemmBatched<float, CUDAContext, TensorCoreEngine>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int batch_size, const int M, const int N, const int K, const float alpha, const float** A, const float** B, const float beta, float** C, CUDAContext* context, TensorProto::DataType math_type) { GemmBatched<float, CUDAContext, DefaultEngine>( trans_A, trans_B, batch_size, M, N, K, alpha, A, B, beta, C, context, math_type); } template <> CAFFE2_CUDA_EXPORT void GemmBatched<at::Half, CUDAContext, TensorCoreEngine>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int batch_size, const int M, const int N, const int K, const float alpha, const at::Half** A, const at::Half** B, const float beta, at::Half** C, CUDAContext* context, TensorProto::DataType math_type) { GemmBatched<at::Half, CUDAContext, DefaultEngine>( trans_A, trans_B, batch_size, M, N, K, alpha, A, B, beta, C, context, math_type); } template <> CAFFE2_CUDA_EXPORT void GemmStridedBatched<float, CUDAContext, TensorCoreEngine>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int batch_size, const int M, const int N, const int K, const float alpha, const float* A, const int A_stride, const float* B, const int B_stride, const float beta, float* C, const int C_stride, CUDAContext* context, TensorProto::DataType math_type) { GemmStridedBatched<float, CUDAContext, DefaultEngine>( trans_A, trans_B, batch_size, M, N, K, alpha, A, A_stride, B, B_stride, beta, C, C_stride, context, math_type); } template <> CAFFE2_CUDA_EXPORT void GemmStridedBatched<at::Half, CUDAContext, TensorCoreEngine>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int batch_size, const int M, const int N, const int K, const float alpha, const at::Half* A, const int A_stride, const at::Half* B, const int B_stride, const float beta, at::Half* C, const int C_stride, CUDAContext* context, TensorProto::DataType math_type) { GemmStridedBatched<at::Half, CUDAContext, DefaultEngine>( trans_A, trans_B, batch_size, M, N, K, alpha, A, A_stride, B, B_stride, beta, C, C_stride, context, math_type); } template <> CAFFE2_CUDA_EXPORT void Gemv<float, CUDAContext, TensorCoreEngine>( const CBLAS_TRANSPOSE trans_A, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y, CUDAContext* context, TensorProto::DataType math_type) { Gemv<float, CUDAContext, DefaultEngine>( trans_A, M, N, alpha, A, x, beta, y, context, math_type); } template <> CAFFE2_CUDA_EXPORT void Gemv<at::Half, CUDAContext, TensorCoreEngine>( const CBLAS_TRANSPOSE trans_A, const int M, const int N, const float alpha, const at::Half* A, const at::Half* x, const float beta, at::Half* y, CUDAContext* context, TensorProto::DataType math_type) { Gemv<at::Half, CUDAContext, DefaultEngine>( trans_A, M, N, alpha, 
A, x, beta, y, context, math_type); } #endif template <> CAFFE2_CUDA_EXPORT void GemmEx<float, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int M, const int N, const int K, const float alpha, const float* A, const int lda, const float* B, const int ldb, const float beta, float* C, const int ldc, CUDAContext* context) { // Note that cublas follows fortran order, so the order is different from // the cblas convention. const hipblasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; const hipblasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_ENFORCE( hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(hipblasSgemm( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, ldb, A, lda, &beta, C, ldc)); } // Batched Add variants namespace { template <typename T> __global__ void AddStripedBatchKernel( const int N, const T* first, T* Y, const int stripe, const int batch) { for (int j = 0; j < batch; j++) { const T* x = first + j * stripe; CUDA_1D_KERNEL_LOOP(i, N) { float tmpY = convert::To<T, float>(Y[i]); tmpY += convert::To<T, float>(x[i]); Y[i] = convert::To<float, T>(tmpY); } } } } // namespace #define CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(T) \ template <> \ CAFFE2_CUDA_EXPORT void AddStripedBatch<T, CUDAContext>( \ const int N, \ const T* first, \ T* Y, \ const int stripe, \ const int batch, \ CUDAContext* context) { \ hipLaunchKernelGGL(( AddStripedBatchKernel<T>) \ , dim3(CAFFE_GET_BLOCKS(N)), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream(), N, first, Y, stripe, batch); \ C10_HIP_KERNEL_LAUNCH_CHECK(); \ } CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float); CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(at::Half); #undef CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH namespace { template <typename T> __global__ void UniformShift(const size_t N, const float min, const float max, T* x) { float scale = max - min; CUDA_1D_KERNEL_LOOP(i, N) { x[i] = convert::To<float, T>(convert::To<T, float>(x[i]) * scale + min); } } __global__ void UniformIntFit(const size_t N, const int min, const int max, unsigned int* x) { int* x_int = reinterpret_cast<int*>(x); int range = (max - min + 1); CUDA_1D_KERNEL_LOOP(i, N) { x_int[i] = min + static_cast<int>(x[i] % range); } } } // namespace template <> CAFFE2_CUDA_EXPORT void RandUniform<float, CUDAContext>( const size_t n, const float min, const float max, float* r, CUDAContext* context) { CURAND_ENFORCE(hiprandGenerateUniform(context->curand_generator(), r, n)); hipLaunchKernelGGL(( UniformShift<float>) , dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), n, min, max, r); C10_HIP_KERNEL_LAUNCH_CHECK(); } template <> CAFFE2_CUDA_EXPORT void RandUniform<double, CUDAContext>( const size_t n, const double min, const double max, double* r, CUDAContext* context) { CURAND_ENFORCE( hiprandGenerateUniformDouble(context->curand_generator(), r, n)); hipLaunchKernelGGL(( UniformShift<double>) , dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), n, min, max, r); C10_HIP_KERNEL_LAUNCH_CHECK(); } template <> CAFFE2_CUDA_EXPORT void RandUniform<int, CUDAContext>( const size_t n, const int min, const int max, int* r, CUDAContext* context) { CURAND_ENFORCE(hiprandGenerate( context->curand_generator(), reinterpret_cast<unsigned int*>(r), n)); hipLaunchKernelGGL(( UniformIntFit), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 
context->cuda_stream(), n, min, max, reinterpret_cast<unsigned int*>(r)); C10_HIP_KERNEL_LAUNCH_CHECK(); } template <typename T> size_t HandleOddLengthRandGaussian( const size_t n, const T mean, const T std, T* r, CUDAContext* context) { if (n % 2 == 1) { std::default_random_engine generator; std::normal_distribution<T> distribution(mean, std); const T random_value = distribution(generator); Set<T, CUDAContext>(1, random_value, r + (n - 1), context); return n - 1; } return n; } template <> CAFFE2_CUDA_EXPORT void RandGaussian<float, CUDAContext>( const size_t n, const float mean, const float std, float* r, CUDAContext* context) { // If n is odd, we add a random Gaussian value at the end manually // and generate n-1 random values using hiprandGenerateNormal. // hiprandGenerateNormal requires n to be even. const size_t even_n = HandleOddLengthRandGaussian<float>(n, mean, std, r, context); CURAND_ENFORCE( hiprandGenerateNormal(context->curand_generator(), r, even_n, mean, std)); } template <> CAFFE2_CUDA_EXPORT void RandGaussian<double, CUDAContext>( const size_t n, const double mean, const double std, double* r, CUDAContext* context) { const size_t even_n = HandleOddLengthRandGaussian<double>(n, mean, std, r, context); CURAND_ENFORCE(hiprandGenerateNormalDouble( context->curand_generator(), r, even_n, mean, std)); } template <> CAFFE2_CUDA_EXPORT void Dot<float, CUDAContext>( const int n, const float* a, const float* b, float* y, CUDAContext* context) { CUBLAS_ENFORCE(hipblasSetPointerMode( context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE)); CUBLAS_ENFORCE(hipblasSdot(context->cublas_handle(), n, a, 1, b, 1, y)); } template <> CAFFE2_CUDA_EXPORT void Dot<at::Half, CUDAContext>( const int n, const at::Half* a, const at::Half* b, at::Half* y, CUDAContext* context) { #if defined __HIP_PLATFORM_HCC__ && HIP_VERSION < 210 CAFFE_THROW("HIP currently does not support FP16 completely yet."); #elif defined __HIP_PLATFORM_HCC__ && HIP_VERSION >= 210 CUBLAS_ENFORCE(hipblasSetPointerMode( context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE)); CUBLAS_ENFORCE(rocblas_hdot( context->cublas_handle(), n, reinterpret_cast<const rocblas_half*>(a), 1, reinterpret_cast<const rocblas_half*>(b), 1, reinterpret_cast<rocblas_half*>(y))); #else // execute with 32-bit math CUBLAS_ENFORCE(hipblasSetPointerMode( context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE)); CUBLAS_ENFORCE(hipblasDotEx_v2( context->cublas_handle(), n, a, HIP_R_16F, 1, b, HIP_R_16F, 1, y, HIP_R_16F, HIP_R_32F)); #endif } // A previous version of caffe2 used Thrust but it turns out that thrust // reduction has an implicit scratch space allocation and deallocation, which // may interfere with NCCL and create a deadlock. Hence we are using a custom // reduction here. #define SUM_KERNEL_NTHREADS 128 template <typename T> __global__ void SumKernel(const int N, const T* X, T* Y, bool square) { const int idx = threadIdx.x; __shared__ float reduction_buffer[SUM_KERNEL_NTHREADS]; reduction_buffer[idx] = 0; // A multilevel reduction. 
// N -> 128 if (!square) { for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) { reduction_buffer[idx] += convert::To<T, float>(X[i]); } } else { for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) { float Xi = convert::To<T, float>(X[i]); reduction_buffer[idx] += Xi * Xi; } } __syncthreads(); // 128 -> 32 if (idx < 32) { reduction_buffer[idx] += reduction_buffer[idx + 32] + reduction_buffer[idx + 64] + reduction_buffer[idx + 96]; } __syncthreads(); // 32 -> 1 if (idx == 0) { float tmp = 0; for (int i = 0; i < 32; ++i) { tmp += reduction_buffer[i]; } *Y = convert::To<float, T>(tmp); } } // According to the benchmarks script // caffe2/caffe2/experiments/python/device_reduce_sum_bench.py, // device reduce is slower for N <= 10000. #define DEVICE_REDUCE_SIZE_THRESHOLD 10000 namespace { template <typename T> __global__ void SumConvertKernel(float* sum, T* dest) { *dest = convert::To<float, T>(*sum); } template <typename T, typename IterT> CAFFE2_CUDA_EXPORT void SumGenericIter( const int N, IterT it, T*& dest, CUDAContext* context, Tensor* scratch_ptr) { size_t memRequired = 0; hipcub::DeviceReduce::Sum( nullptr, memRequired, it, dest, N, context->cuda_stream()); auto buffer_size = static_cast<int64_t>((memRequired + sizeof(T) - 1) / sizeof(T)); if (!dest) { // allocate one more T at the end of scratch for dest scratch_ptr->Resize(std::vector<int64_t>{buffer_size + 1}); dest = scratch_ptr->template mutable_data<T>() + buffer_size; } else { scratch_ptr->Resize(std::vector<int64_t>{buffer_size}); } hipcub::DeviceReduce::Sum( static_cast<void*>(scratch_ptr->template mutable_data<T>()), memRequired, it, dest, N, context->cuda_stream()); } } // namespace template <> CAFFE2_CUDA_EXPORT void Sum<float, CUDAContext>( const int N, const float* x, float* y, CUDAContext* context, Tensor* scratch_ptr) { if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { SumGenericIter<float>(N, x, y, context, scratch_ptr); } else { hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), N, x, y, false); C10_HIP_KERNEL_LAUNCH_CHECK(); } } template <> CAFFE2_CUDA_EXPORT void Sum<int32_t, CUDAContext>( const int N, const int32_t* x, int32_t* y, CUDAContext* context, Tensor* scratch_ptr) { if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { SumGenericIter<int32_t>(N, x, y, context, scratch_ptr); } else { hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), N, x, y, false); C10_HIP_KERNEL_LAUNCH_CHECK(); } } namespace { template <typename T> struct FloatTransform { inline __host__ __device__ float operator()(const T v) const { return convert::To<T, float>(v); } }; } // namespace #define CAFFE2_MATH_SUM_FUNC(T) \ template <> \ CAFFE2_CUDA_EXPORT void Sum<T, CUDAContext>( \ const int N, \ const T* x, \ T* y, \ CUDAContext* context, \ Tensor* scratch_ptr) { \ if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \ FloatTransform<T> transform; \ hipcub::TransformInputIterator<float, FloatTransform<T>, const T*> it( \ x, transform); \ float* sum = nullptr; \ SumGenericIter<float>(N, it, sum, context, scratch_ptr); \ hipLaunchKernelGGL(( SumConvertKernel), dim3(1), dim3(1), 0, context->cuda_stream(), sum, y); \ C10_HIP_KERNEL_LAUNCH_CHECK(); \ } else { \ hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), \ N, x, y, false); \ C10_HIP_KERNEL_LAUNCH_CHECK(); \ } \ } CAFFE2_MATH_SUM_FUNC(at::Half) #undef CAFFE2_MATH_SUM_FUNC namespace { template <typename T> struct SqrTransform { inline __host__ 
__device__ T operator()(const T v) const { return v * v; } }; } // namespace template <> CAFFE2_CUDA_EXPORT void SumSqr<float, CUDAContext>( const int N, const float* x, float* y, CUDAContext* context, Tensor* scratch_ptr) { if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { SqrTransform<float> transform; hipcub::TransformInputIterator<float, SqrTransform<float>, const float*> it( x, transform); SumGenericIter<float>(N, it, y, context, scratch_ptr); } else { hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), N, x, y, true); C10_HIP_KERNEL_LAUNCH_CHECK(); } } #define CAFFE2_MATH_SUMSQR_FUNC(T) \ template <> \ CAFFE2_CUDA_EXPORT void SumSqr<T, CUDAContext>( \ const int N, \ const T* x, \ T* y, \ CUDAContext* context, \ Tensor* scratch_ptr) { \ if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \ FloatTransform<T> float_transform; \ hipcub::TransformInputIterator<float, FloatTransform<T>, const T*> \ float_it(x, float_transform); \ SqrTransform<float> sqr_transform; \ hipcub::TransformInputIterator< \ float, \ SqrTransform<float>, \ decltype(float_it)> \ it(float_it, sqr_transform); \ float* sum = nullptr; \ SumGenericIter<float>(N, it, sum, context, scratch_ptr); \ hipLaunchKernelGGL(( SumConvertKernel), dim3(1), dim3(1), 0, context->cuda_stream(), sum, y); \ C10_HIP_KERNEL_LAUNCH_CHECK(); \ } else { \ hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), \ N, x, y, true); \ C10_HIP_KERNEL_LAUNCH_CHECK(); \ } \ } CAFFE2_MATH_SUMSQR_FUNC(at::Half) #undef CAFFE2_MATH_SUMSQR_FUNC #undef DEVICE_REDUCE_SIZE_THRESHOLD namespace { template <typename T> __global__ void SelectKernel(const int N, const int D, const T* x, const int* idx, T* y) { CUDA_1D_KERNEL_LOOP(i, N) { y[i] = x[i * D + idx[i]]; } } } // namespace template <> CAFFE2_CUDA_EXPORT void Select<float, CUDAContext>( const int N, const int D, const float* x, const int* idx, float* y, CUDAContext* context) { hipLaunchKernelGGL(( SelectKernel<float>) , dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), N, D, x, idx, y); C10_HIP_KERNEL_LAUNCH_CHECK(); } template <> CAFFE2_CUDA_EXPORT void Select<at::Half, CUDAContext>( const int N, const int D, const at::Half* x, const int* idx, at::Half* y, CUDAContext* context) { hipLaunchKernelGGL(( SelectKernel<at::Half>) , dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), N, D, x, idx, y); C10_HIP_KERNEL_LAUNCH_CHECK(); } namespace { template <typename T> __global__ void Im2ColNCHWCUDAKernel( const int n, const int input_h, const int input_w, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int stride_h, const int stride_w, const int output_h, const int output_w, const T* img_data, T* col_data) { CUDA_1D_KERNEL_LOOP(index, n) { const int w_out = index % output_w; const int h_index = index / output_w; const int h_out = h_index % output_h; const int channel_in = h_index / output_h; const int channel_out = channel_in * kernel_h * kernel_w; const int h_in = h_out * stride_h - pad_t; const int w_in = w_out * stride_w - pad_l; const int output_size = output_h * output_w; T* col_data_ptr = col_data + (channel_out * output_h + h_out) * output_w + w_out; const T* img_data_ptr = img_data + (channel_in * input_h + h_in) * input_w + w_in; int dh = 0; for (int i = 0; i < kernel_h; ++i) { int dw = 0; for (int j = 0; j < kernel_w; ++j) { const int h = h_in + dh; const int w = w_in + dw; 
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) && utils::IsAGeZeroAndALtB(w, input_w) ? __ldg(img_data_ptr + dh * input_w + dw) : 0; #else *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) && utils::IsAGeZeroAndALtB(w, input_w) ? img_data_ptr[dh * input_w + dw] : 0; #endif col_data_ptr += output_size; dw += dilation_w; } dh += dilation_h; } } } template <typename T> __global__ void Im2ColNHWCCUDAKernel( const int n, const int input_h, const int input_w, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int stride_h, const int stride_w, const int output_w, const int channels, const T* img_data, T* col_data) { CUDA_1D_KERNEL_LOOP(index, n) { const int channel_in = index % channels; const int w_out = index / channels % output_w; const int h_out = index / channels / output_w; const int h_in = h_out * stride_h - pad_t; const int w_in = w_out * stride_w - pad_l; T* col_data_ptr = col_data + (h_out * output_w + w_out) * channels * kernel_h * kernel_w + channel_in; int dh = 0; for (int i = 0; i < kernel_h; ++i) { int dw = 0; for (int j = 0; j < kernel_w; ++j) { const int h = h_in + dh; const int w = w_in + dw; #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) && utils::IsAGeZeroAndALtB(w, input_w) ? __ldg(img_data + (h * input_w + w) * channels + channel_in) : 0; #else *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) && utils::IsAGeZeroAndALtB(w, input_w) ? img_data[(h * input_w + w) * channels + channel_in] : 0; #endif col_data_ptr += channels; dw += dilation_w; } dh += dilation_h; } } } template <typename T> __global__ void Col2ImNCHWCUDAKernel( const int n, const int input_h, const int input_w, const int patch_h, const int patch_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int stride_h, const int stride_w, const int output_h, const int output_w, const T* col_data, T* img_data) { const int dpatch_h = dilation_h * (patch_h - 1) + 1; const int dpatch_w = dilation_w * (patch_w - 1) + 1; CUDA_1D_KERNEL_LOOP(index, n) { T val = 0; const int w = index % input_w + pad_l; const int h = index / input_w % input_h + pad_t; const int c = index / (input_h * input_w); // compute the start and end of the output const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1; const int w_col_end = min(w / stride_w + 1, output_w); const int h_col_start = (h < dpatch_h) ? 
0 : (h - dpatch_h) / stride_h + 1; const int h_col_end = min(h / stride_h + 1, output_h); for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { int h_k = (h - h_col * stride_h); int w_k = (w - w_col * stride_w); if (h_k % dilation_h == 0 && w_k % dilation_w == 0) { h_k /= dilation_h; w_k /= dilation_w; const int col_data_index = (((c * patch_h + h_k) * patch_w + w_k) * output_h + h_col) * output_w + w_col; #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) val += __ldg(col_data + col_data_index); #else val += col_data[col_data_index]; #endif } } } img_data[index] = val; } } template <typename T> __global__ void Col2ImNHWCCUDAKernel( const int n, const int input_w, const int channels, const int patch_h, const int patch_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int stride_h, const int stride_w, const int output_h, const int output_w, const T* col_data, T* img_data) { const int dpatch_h = dilation_h * (patch_h - 1) + 1; const int dpatch_w = dilation_w * (patch_w - 1) + 1; CUDA_1D_KERNEL_LOOP(index, n) { T val = 0; const int c = index % channels; const int w = index / channels % input_w + pad_l; const int h = index / channels / input_w + pad_t; // compute the start and end of the output const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1; const int w_col_end = min(w / stride_w + 1, output_w); const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1; const int h_col_end = min(h / stride_h + 1, output_h); const int channels_col = patch_h * patch_w * channels; for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { int h_k = h - h_col * stride_h; int w_k = w - w_col * stride_w; if (h_k % dilation_h == 0 && w_k % dilation_w == 0) { h_k /= dilation_h; w_k /= dilation_w; const int c_col = (h_k * patch_w + w_k) * channels + c; #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) val += __ldg( col_data + (h_col * output_w + w_col) * channels_col + c_col); #else val += col_data[(h_col * output_w + w_col) * channels_col + c_col]; #endif } } } img_data[index] = val; } } template <typename T, int N, bool kCol2Im> __global__ void Im2ColNdNCHWCUDAKernel( const int outer_size, const int inner_size, const int kernel_size, SimpleArray<int, N + 1> img_shape, SimpleArray<int, N + 1> col_shape, SimpleArray<int, N> kernel_shape, SimpleArray<int, N> stride, SimpleArray<int, N> dilation, SimpleArray<int, N> pad, const T* X_data, T* Y_data) { int d_offset[N]; int d_iter[N]; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { int offset_i = i; #pragma unroll for (int d_i = N - 1; d_i >= 0; --d_i) { d_offset[d_i] = offset_i % kernel_shape.data[d_i]; offset_i /= kernel_shape.data[d_i]; } for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { int offset_j = j; #pragma unroll for (int d_i = N - 1; d_i >= 0; --d_i) { d_iter[d_i] = offset_j % col_shape.data[d_i + 1]; offset_j /= col_shape.data[d_i + 1]; } const int col_index = i * inner_size + j; int img_index = i / kernel_size; bool is_padding = false; #pragma unroll for (int d_i = 0; d_i < N; ++d_i) { const int d_img = d_iter[d_i] * stride.data[d_i] - pad.data[d_i] + d_offset[d_i] * dilation.data[d_i]; is_padding |= !utils::IsAGeZeroAndALtB(d_img, img_shape.data[d_i + 1]); img_index = img_index * img_shape.data[d_i + 1] + d_img; } #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) if (!kCol2Im) { Y_data[col_index] = is_padding ? 
0 : __ldg(X_data + img_index); } else if (!is_padding) { gpu_atomic_add(Y_data + img_index, __ldg(X_data + col_index)); } #else if (!kCol2Im) { Y_data[col_index] = is_padding ? 0 : X_data[img_index]; } else if (!is_padding) { gpu_atomic_add(Y_data + img_index, X_data[col_index]); } #endif } } } template <typename T, int N> CAFFE2_CUDA_EXPORT void Im2ColNdNCHWCUDAImpl( const int img_size, const int col_size, const int* img_shape, const int* col_shape, const int* kernel_shape, const int* stride, const int* dilation, const int* pad, const float* img_data, float* col_data, CUDAContext* context) { const int outer_size = col_shape[0]; const int inner_size = col_size / outer_size; const int kernel_size = std::accumulate( kernel_shape, kernel_shape + N, 1, std::multiplies<int>()); SimpleArray<int, N + 1> img_shape_array; SimpleArray<int, N + 1> col_shape_array; SimpleArray<int, N> kernel_shape_array; SimpleArray<int, N> stride_array; SimpleArray<int, N> dilation_array; SimpleArray<int, N> pad_array; std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int)); std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int)); std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int)); std::memcpy(stride_array.data, stride, N * sizeof(int)); std::memcpy(dilation_array.data, dilation, N * sizeof(int)); std::memcpy(pad_array.data, pad, N * sizeof(int)); hipLaunchKernelGGL(( Im2ColNdNCHWCUDAKernel<T, N, false>) , dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), outer_size, inner_size, kernel_size, img_shape_array, col_shape_array, kernel_shape_array, stride_array, dilation_array, pad_array, img_data, col_data); C10_HIP_KERNEL_LAUNCH_CHECK(); } template <typename T, int N> CAFFE2_CUDA_EXPORT void Col2ImNdNCHWCUDAImpl( const int img_size, const int col_size, const int* img_shape, const int* col_shape, const int* kernel_shape, const int* stride, const int* dilation, const int* pad, const float* col_data, float* img_data, CUDAContext* context) { const int outer_size = col_shape[0]; const int inner_size = col_size / outer_size; const int kernel_size = std::accumulate( kernel_shape, kernel_shape + N, 1, std::multiplies<int>()); SimpleArray<int, N + 1> img_shape_array; SimpleArray<int, N + 1> col_shape_array; SimpleArray<int, N> kernel_shape_array; SimpleArray<int, N> stride_array; SimpleArray<int, N> dilation_array; SimpleArray<int, N> pad_array; std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int)); std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int)); std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int)); std::memcpy(stride_array.data, stride, N * sizeof(int)); std::memcpy(dilation_array.data, dilation, N * sizeof(int)); std::memcpy(pad_array.data, pad, N * sizeof(int)); Set<T, CUDAContext>(img_size, 0, img_data, context); hipLaunchKernelGGL(( Im2ColNdNCHWCUDAKernel<T, N, true>) , dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), outer_size, inner_size, kernel_size, img_shape_array, col_shape_array, kernel_shape_array, stride_array, dilation_array, pad_array, col_data, img_data); C10_HIP_KERNEL_LAUNCH_CHECK(); } } // namespace template <> CAFFE2_CUDA_EXPORT void Im2Col<float, CUDAContext, StorageOrder::NCHW>( const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int 
stride_h, const int stride_w, const float* img_data, float* col_data, CUDAContext* context, const int /* groups */) { const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1; const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1; const int num_kernels = channels * output_h * output_w; hipLaunchKernelGGL(( Im2ColNCHWCUDAKernel<float>) , dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), num_kernels, height, width, kernel_h, kernel_w, dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w, output_h, output_w, img_data, col_data); C10_HIP_KERNEL_LAUNCH_CHECK(); } template <> CAFFE2_CUDA_EXPORT void Im2Col<float, CUDAContext, StorageOrder::NHWC>( const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int stride_h, const int stride_w, const float* img_data, float* col_data, CUDAContext* context, const int groups) { CAFFE_ENFORCE_EQ(groups, 1, "groups must be 1 for GPU NHWC Im2Col"); const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1; const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1; const int num_kernels = output_h * output_w * channels; hipLaunchKernelGGL(( Im2ColNHWCCUDAKernel<float>) , dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), num_kernels, height, width, kernel_h, kernel_w, dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w, output_w, channels, img_data, col_data); C10_HIP_KERNEL_LAUNCH_CHECK(); } template <> CAFFE2_CUDA_EXPORT void Col2Im<float, CUDAContext, StorageOrder::NCHW>( const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int stride_h, const int stride_w, const float* col_data, float* img_data, CUDAContext* context, const int /* groups */) { // In NCHW, the number of groups doesn't affect Col2Im. 
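// Editorial worked example (illustrative values only): with kernel_h = 3 and
// dilation_h = 2 the effective kernel extent computed below is
// dkernel_h = 2 * (3 - 1) + 1 = 5; for height = 7, pad_t = pad_b = 1 and
// stride_h = 1 that yields output_h = (7 + 1 + 1 - 5) / 1 + 1 = 5, which is
// the same output extent the Im2Col kernels above produce, so each image
// element accumulates contributions from at most kernel_h * kernel_w columns.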
const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1; const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1; const int num_kernels = channels * height * width; hipLaunchKernelGGL(( Col2ImNCHWCUDAKernel<float>) , dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), num_kernels, height, width, kernel_h, kernel_w, dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w, output_h, output_w, col_data, img_data); C10_HIP_KERNEL_LAUNCH_CHECK(); } template <> CAFFE2_CUDA_EXPORT void Col2Im<float, CUDAContext, StorageOrder::NHWC>( const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int stride_h, const int stride_w, const float* col_data, float* img_data, CUDAContext* context, const int groups) { CAFFE_ENFORCE_EQ(groups, 1, "groups must be 1 for GPU NHWC Col2Im"); const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1; const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1; const int num_kernels = height * width * channels; hipLaunchKernelGGL(( Col2ImNHWCCUDAKernel<float>) , dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), num_kernels, width, channels, kernel_h, kernel_w, dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w, output_h, output_w, col_data, img_data); C10_HIP_KERNEL_LAUNCH_CHECK(); } template <> CAFFE2_CUDA_EXPORT void Im2ColNd<float, CUDAContext, StorageOrder::NCHW>( const int N, const int img_size, const int col_size, const int* img_shape, const int* col_shape, const int* kernel_shape, const int* stride, const int* dilation, const int* pad, const float* img_data, float* col_data, CUDAContext* context, const int /* groups */) { // In NCHW, the number of groups doesn't affect Im2Col. DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( N, Im2ColNdNCHWCUDAImpl, float, img_size, col_size, img_shape, col_shape, kernel_shape, stride, dilation, pad, img_data, col_data, context); } template <> CAFFE2_CUDA_EXPORT void Im2ColNd<float, CUDAContext, StorageOrder::NHWC>( const int N, const int img_size, const int col_size, const int* img_shape, const int* col_shape, const int* kernel_shape, const int* stride, const int* dilation, const int* pad, const float* img_data, float* col_data, CUDAContext* context, const int groups) { CAFFE_NOT_IMPLEMENTED; } template <> CAFFE2_CUDA_EXPORT void Col2ImNd<float, CUDAContext, StorageOrder::NCHW>( const int N, const int img_size, const int col_size, const int* img_shape, const int* col_shape, const int* kernel_shape, const int* stride, const int* dilation, const int* pad, const float* col_data, float* img_data, CUDAContext* context, int /* groups */) { // In NCHW, the number of groups doesn't affect Col2Im. 
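// Editorial comment (not in the original source): the dispatch below selects
// the Col2ImNdNCHWCUDAImpl<float, N> instantiation matching the runtime
// spatial rank N, forwarding the shape, stride, dilation and pad arrays to it.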
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( N, Col2ImNdNCHWCUDAImpl, float, img_size, col_size, img_shape, col_shape, kernel_shape, stride, dilation, pad, col_data, img_data, context); } template <> CAFFE2_CUDA_EXPORT void Col2ImNd<float, CUDAContext, StorageOrder::NHWC>( const int N, const int img_size, const int col_size, const int* img_shape, const int* col_shape, const int* kernel_shape, const int* stride, const int* dilation, const int* pad, const float* col_data, float* img_data, CUDAContext* context, int groups) { CAFFE_NOT_IMPLEMENTED; } template <> CAFFE2_CUDA_EXPORT void CopyMatrix<CUDAContext>( const size_t itemsize, const int M, const int N, const void* A, const int lda, void* B, const int ldb, CUDAContext* context, TypeMeta::Copy copy) { CAFFE_ENFORCE(!copy, "Copy constructor is not supported in CUDA context"); hipMemcpy2DAsync( B, ldb * itemsize, A, lda * itemsize, N * itemsize, M, hipMemcpyDeviceToDevice, context->cuda_stream()); } #define CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(T) \ template <> \ void CopyMatrix<T, CUDAContext>( \ const int M, \ const int N, \ const T* A, \ const int lda, \ T* B, \ const int ldb, \ CUDAContext* context) { \ if (M == 0 || N == 0) { \ return; \ } \ hipMemcpy2DAsync( \ B, \ sizeof(T) * ldb, \ A, \ sizeof(T) * lda, \ sizeof(T) * N, \ M, \ hipMemcpyDeviceToDevice, \ context->cuda_stream()); \ } CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(float) CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(double) CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(int) CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(int64_t) #undef CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX template <> CAFFE2_CUDA_EXPORT void CopyVector<float, CUDAContext>( const int N, const float* src, float* dst, CUDAContext* context) { if (src != dst && N > 0) { hipMemcpyAsync( dst, src, sizeof(float) * N, hipMemcpyDeviceToDevice, context->cuda_stream()); } } template <> CAFFE2_CUDA_EXPORT void CopyVector<int, CUDAContext>( const int N, const int* src, int* dst, CUDAContext* context) { if (src != dst && N > 0) { hipMemcpyAsync( dst, src, sizeof(int) * N, hipMemcpyDeviceToDevice, context->cuda_stream()); } } namespace { template <typename T> using BlockReduce = hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>; template <typename T, class Reducer> __global__ void RowwiseReduceKernel( const int rows, const int cols, const Reducer reducer, const T init, const T alpha, const T* X, T* Y) { __shared__ typename BlockReduce<T>::TempStorage temp_storage; for (int i = blockIdx.x; i < rows; i += gridDim.x) { T val = init; for (int j = threadIdx.x; j < cols; j += blockDim.x) { val = reducer(X[i * cols + j], val); } val = BlockReduce<T>(temp_storage).Reduce(val, reducer); if (threadIdx.x == 0) { Y[i] = val * alpha; } __syncthreads(); } } template <typename T, class Reducer> __global__ void ColwiseReduceKernel( const int rows, const int cols, const Reducer reducer, const T init, const T alpha, const T* X, T* Y) { __shared__ typename BlockReduce<T>::TempStorage temp_storage; for (int i = blockIdx.x; i < cols; i += gridDim.x) { T val = init; for (int j = threadIdx.x; j < rows; j += blockDim.x) { val = reducer(X[j * cols + i], val); } val = BlockReduce<T>(temp_storage).Reduce(val, reducer); if (threadIdx.x == 0) { Y[i] = val * alpha; } __syncthreads(); } } } // namespace #define CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(T) \ template <> \ CAFFE2_CUDA_EXPORT void RowwiseMax<T, CUDAContext>( \ const int N, const int D, const T* x, T* y, CUDAContext* context) { \ hipLaunchKernelGGL(( RowwiseReduceKernel), \ ::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ 
context->cuda_stream(), \ N, D, hipcub::Max(), std::numeric_limits<T>::lowest(), T(1), x, y); \ C10_HIP_KERNEL_LAUNCH_CHECK(); \ } CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(float) #undef CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX #define CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(T) \ template <> \ CAFFE2_CUDA_EXPORT void ColwiseMax<T, CUDAContext>( \ const int N, const int D, const T* x, T* y, CUDAContext* context) { \ hipLaunchKernelGGL(( ColwiseReduceKernel), \ ::min(D, CAFFE_MAXIMUM_NUM_BLOCKS), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream(), \ N, D, hipcub::Max(), std::numeric_limits<T>::lowest(), T(1), x, y); \ C10_HIP_KERNEL_LAUNCH_CHECK(); \ } CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(float) #undef CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX namespace { __global__ void maximum_kernel(const int N, const float alpha, const float* x, float* y) { CUDA_1D_KERNEL_LOOP(i, N) { y[i] = fmaxf(x[i], alpha); } } } // namespace template <> CAFFE2_CUDA_EXPORT void Maximum( const int N, const float alpha, const float* x, float* y, CUDAContext* context) { hipLaunchKernelGGL(( maximum_kernel), dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), N, alpha, x, y); C10_HIP_KERNEL_LAUNCH_CHECK(); } namespace { template <typename T, int D> __global__ void BroadcastCUDAKernel( const int Y_size, const SimpleArray<int, D> X_strides, const SimpleArray<FIXED_DIVISOR, D> Y_dims, const T alpha, const T* X, T* Y) { CUDA_1D_KERNEL_LOOP(Y_index, Y_size) { int X_index = 0; int Y_index_val = Y_index; #pragma unroll for (int i = D - 1; i >= 0; --i) { int d; FIXED_DIVISOR_DIV_MOD(Y_dims.data[i], Y_index_val, &Y_index_val, &d); X_index += d * X_strides.data[i]; } #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) Y[Y_index] = __ldg(X + X_index) * alpha; #else Y[Y_index] = X[X_index] * alpha; #endif } } template <typename T, int D> CAFFE2_CUDA_EXPORT void BroadcastCUDAImpl( const int X_ndim, const int* X_dims, const int* Y_dims, const T alpha, const T* X, T* Y, CUDAContext* context) { SimpleArray<int, D> X_strides_array; SimpleArray<FIXED_DIVISOR, D> Y_dims_array; const int d = D - X_ndim; std::fill(X_strides_array.data, X_strides_array.data + d, 0); int cur_stride = 1; for (int i = D - 1; i >= d; --i) { CAFFE_ENFORCE(X_dims[i - d] == 1 || X_dims[i - d] == Y_dims[i]); X_strides_array.data[i] = X_dims[i - d] == 1 ? 
0 : cur_stride; cur_stride *= X_dims[i - d]; } for (int i = 0; i < D; ++i) { if (Y_dims[i] == 0) { return; } Y_dims_array.data[i] = FIXED_DIVISOR(Y_dims[i]); } const int Y_size = std::accumulate(Y_dims, Y_dims + D, 1, std::multiplies<int>()); hipLaunchKernelGGL(( BroadcastCUDAKernel<T, D>) , dim3(CAFFE_GET_BLOCKS(Y_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), Y_size, X_strides_array, Y_dims_array, alpha, X, Y); C10_HIP_KERNEL_LAUNCH_CHECK(); } } // namespace #define CAFFE2_SPECIALIZED_CUDA_BROADCAST(T) \ template <> \ CAFFE2_CUDA_EXPORT void Broadcast<T, CUDAContext>( \ const int X_ndim, \ const int* X_dims, \ const int Y_ndim, \ const int* Y_dims, \ const T alpha, \ const T* X, \ T* Y, \ CUDAContext* context) { \ CAFFE_ENFORCE_LE(X_ndim, Y_ndim); \ DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( \ Y_ndim, \ BroadcastCUDAImpl, \ T, \ X_ndim, \ X_dims, \ Y_dims, \ alpha, \ X, \ Y, \ context); \ } CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int32_t) CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int64_t) CAFFE2_SPECIALIZED_CUDA_BROADCAST(float) CAFFE2_SPECIALIZED_CUDA_BROADCAST(double) #undef CAFFE2_SPECIALIZED_CUDA_BROADCAST namespace { template <typename T> __global__ void InvStdCUDAKernel(const int N, const T epsilon, const T* var, T* inv_std); #define DELEGATE_INV_STD_KERNEL_FUNCTION(T, Func) \ template <> \ __global__ void InvStdCUDAKernel<T>( \ const int N, const T epsilon, const T* var, T* inv_std) { \ CUDA_1D_KERNEL_LOOP(i, N) { \ inv_std[i] = Func(var[i] + epsilon); \ } \ } DELEGATE_INV_STD_KERNEL_FUNCTION(float, rsqrtf) #undef DELEGATE_INV_STD_KERNEL_FUNCTION } // namespace #define CAFFE2_SPECIALIZED_CUDA_INV_STD(T) \ template <> \ CAFFE2_CUDA_EXPORT void InvStd<T, CUDAContext>( \ const int N, \ const T epsilon, \ const T* var, \ T* inv_std, \ CUDAContext* context) { \ hipLaunchKernelGGL(( InvStdCUDAKernel<T>) \ , dim3(CAFFE_GET_BLOCKS(N)), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream(), N, epsilon, var, inv_std); \ C10_HIP_KERNEL_LAUNCH_CHECK(); \ } CAFFE2_SPECIALIZED_CUDA_INV_STD(float) #undef CAFFE2_SPECIALIZED_CUDA_INV_STD } // namespace math } // namespace caffe2
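// ---------------------------------------------------------------------------
// Editorial sketch (not part of either file above or below): a minimal,
// self-contained program showing the one-block-per-row reduction pattern used
// by RowwiseReduceKernel, i.e. a grid-stride loop over rows, a thread-stride
// loop over columns, and a cub::BlockReduce that combines the per-thread
// partial results. It is written against plain CUDA/cub; the hipified file
// above uses the hipcub equivalents. Block size, problem size and the max
// reducer are illustrative assumptions, not values taken from the library.
// ---------------------------------------------------------------------------
#include <cub/cub.cuh>
#include <cuda_runtime.h>
#include <cfloat>
#include <cstdio>
#include <vector>

constexpr int kThreadsPerBlock = 128;

__global__ void RowwiseMaxSketch(const int rows, const int cols,
                                 const float* X, float* Y) {
  using BlockReduce = cub::BlockReduce<float, kThreadsPerBlock>;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  for (int i = blockIdx.x; i < rows; i += gridDim.x) {
    float val = -FLT_MAX;
    for (int j = threadIdx.x; j < cols; j += blockDim.x) {
      val = fmaxf(val, X[i * cols + j]);
    }
    val = BlockReduce(temp_storage).Reduce(val, cub::Max());
    if (threadIdx.x == 0) {
      Y[i] = val;  // only thread 0 holds the valid block-wide aggregate
    }
    __syncthreads();  // make temp_storage safe to reuse for the next row
  }
}

int main() {
  const int rows = 4;
  const int cols = 1000;
  std::vector<float> h_x(rows * cols);
  for (int i = 0; i < rows * cols; ++i) {
    h_x[i] = static_cast<float>(i % 97);  // known maximum of 96 in every row
  }
  float* d_x = nullptr;
  float* d_y = nullptr;
  cudaMalloc(&d_x, rows * cols * sizeof(float));
  cudaMalloc(&d_y, rows * sizeof(float));
  cudaMemcpy(d_x, h_x.data(), rows * cols * sizeof(float),
             cudaMemcpyHostToDevice);
  RowwiseMaxSketch<<<rows, kThreadsPerBlock>>>(rows, cols, d_x, d_y);
  std::vector<float> h_y(rows);
  cudaMemcpy(h_y.data(), d_y, rows * sizeof(float), cudaMemcpyDeviceToHost);
  for (int i = 0; i < rows; ++i) {
    printf("row %d max = %f\n", i, h_y[i]);
  }
  cudaFree(d_x);
  cudaFree(d_y);
  return 0;
}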
60eded67fe5a9fec98f55e2fd90a189844311250.cu
// Implements the math functions for GPU. #include "caffe2/utils/math.h" #include <cstring> #include <limits> #include <numeric> #include <vector> #include <cub/block/block_reduce.cuh> #include <cub/cub.cuh> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/functional.h> #include "caffe2/core/context_gpu.h" #include "caffe2/utils/GpuAtomics.cuh" #include "caffe2/utils/conversions.h" #include "caffe2/utils/fixed_divisor.h" // TODO: Move this to fixed_divisor.h #ifdef __HIP_PLATFORM_HCC__ #define FIXED_DIVISOR int32_t #define FIXED_DIVISOR_DIV(d, n) (n / d) #define FIXED_DIVISOR_MOD(d, n) (n % d) #define FIXED_DIVISOR_DIV_MOD(d, n, q, r) \ do { \ const auto n_copy = n; \ *q = n_copy / d; \ *r = n_copy % d; \ } while (0) #else // __HIP_PLATFORM_HCC__ #define FIXED_DIVISOR FixedDivisor<int32_t> #define FIXED_DIVISOR_DIV(d, n) (d.Div(n)) #define FIXED_DIVISOR_MOD(d, n) (d.Mod(n)) #define FIXED_DIVISOR_DIV_MOD(d, n, q, r) (d.DivMod(n, q, r)) #endif // __HIP_PLATFORM_HCC__ #ifdef __HIP_PLATFORM_HCC__ #include <hip/hip_version.h> using CUBLAS_HALF_TYPE = rocblas_half; #else // __HIP_PLATFORM_HCC using CUBLAS_HALF_TYPE = __half; #endif // __HIP_PLATFORM_HCC #include "caffe2/utils/math/utils.h" #if THRUST_VERSION >= 100800 #define THRUST_SUPPORTS_PER_THREAD #endif // THRUST_VERSION >= 100800 namespace caffe2 { namespace math { namespace { #define DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Func, expr) \ template <typename T> \ struct Func##Functor { \ inline __host__ __device__ T \ operator()(const T& lhs, const T& rhs) const { \ return lhs expr rhs; \ } \ }; \ template <> \ struct Func##Functor<at::Half> { \ inline __host__ __device__ at::Half operator()( \ const at::Half& lhs, \ const at::Half& rhs) const { \ return convert::To<float, at::Half>(convert::To<at::Half, float>( \ lhs) expr convert::To<at::Half, float>(rhs)); \ } \ }; DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Add, +) DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Sub, -) DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Mul, *) DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Div, /) #undef DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR template <typename TIn, typename TOut, class BinaryOperator> __global__ void SimpleBinaryOpCUDAKernel( const int N, const BinaryOperator op, const TIn* A, const TIn* B, TOut* C) { CUDA_1D_KERNEL_LOOP(i, N) { C[i] = op(A[i], B[i]); } } template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st> __global__ void RowwiseBinaryOpCUDAKenel( const int size, const FIXED_DIVISOR cols, const BinaryOperator op, const TIn* A, const TIn* B, TOut* C) { CUDA_1D_KERNEL_LOOP(C_index, size) { const int j = FIXED_DIVISOR_MOD(cols, C_index); const int A_index = broadcast_1st ? j : C_index; const int B_index = broadcast_1st ? C_index : j; C[C_index] = op(A[A_index], B[B_index]); } } template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st> __global__ void ColwiseBinaryOpCUDAKenel( const int size, const FIXED_DIVISOR cols, const BinaryOperator op, const TIn* A, const TIn* B, TOut* C) { CUDA_1D_KERNEL_LOOP(C_index, size) { const int i = FIXED_DIVISOR_DIV(cols, C_index); const int A_index = broadcast_1st ? i : C_index; const int B_index = broadcast_1st ? 
C_index : i; C[C_index] = op(A[A_index], B[B_index]); } } template <typename TIn, typename TOut, class BinaryOperator, int D> __global__ void BroadcastBinaryOpCUDAKernel( const int size, const SimpleArray<int, D> A_strides, const SimpleArray<int, D> B_strides, const SimpleArray<FIXED_DIVISOR, D> C_dims, const BinaryOperator op, const TIn* A, const TIn* B, TOut* C) { CUDA_1D_KERNEL_LOOP(C_index, size) { int A_index = 0; int B_index = 0; int C_index_val = C_index; #pragma unroll for (int i = D - 1; i >= 0; --i) { int d; FIXED_DIVISOR_DIV_MOD(C_dims.data[i], C_index_val, &C_index_val, &d); A_index += d * A_strides.data[i]; B_index += d * B_strides.data[i]; } C[C_index] = op(A[A_index], B[B_index]); } } template <typename TIn, typename TOut, class BinaryOperator> CAFFE2_CUDA_EXPORT void BinaryOpWith2DBroadcasting( const int rows, const int cols, const bool rowwise_broadcast, const bool broadcast_1st, const BinaryOperator& op, const TIn* A, const TIn* B, TOut* C, CUDAContext* context) { if (rows == 0 || cols == 0) { return; } const int size = rows * cols; const FIXED_DIVISOR cols_div(cols); if (rowwise_broadcast) { if (broadcast_1st) { RowwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, true> <<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(size, cols_div, op, A, B, C); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { RowwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, false> <<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(size, cols_div, op, A, B, C); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } else { if (broadcast_1st) { ColwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, true> <<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(size, cols_div, op, A, B, C); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { ColwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, false> <<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(size, cols_div, op, A, B, C); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } } template <typename TIn, typename TOut, class BinaryOperator, int D> CAFFE2_CUDA_EXPORT void BroadcastBinaryOpImpl( const int* A_dims, const int* B_dims, const int* C_dims, const BinaryOperator& op, const TIn* A, const TIn* B, TOut* C, CUDAContext* context) { SimpleArray<int, D> A_strides_array; SimpleArray<int, D> B_strides_array; SimpleArray<FIXED_DIVISOR, D> C_dims_array; int A_stride = 1; int B_stride = 1; for (int i = D - 1; i >= 0; --i) { if (C_dims[i] == 0) { return; } A_strides_array.data[i] = A_dims[i] == 1 ? 0 : A_stride; B_strides_array.data[i] = B_dims[i] == 1 ? 
0 : B_stride; A_stride *= A_dims[i]; B_stride *= B_dims[i]; C_dims_array.data[i] = FIXED_DIVISOR(C_dims[i]); } const int size = std::accumulate(C_dims, C_dims + D, 1, std::multiplies<int>()); BroadcastBinaryOpCUDAKernel<TIn, TOut, BinaryOperator, D> <<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( size, A_strides_array, B_strides_array, C_dims_array, op, A, B, C); C10_CUDA_KERNEL_LAUNCH_CHECK(); } template <typename TIn, typename TOut, class BinaryOperator> CAFFE2_CUDA_EXPORT void BroadcastBinaryOp( const int A_ndim, const int* A_dims, const int B_ndim, const int* B_dims, const BinaryOperator& op, const TIn* A, const TIn* B, TOut* C, CUDAContext* context) { const int ndim = std::max(A_ndim, B_ndim); std::vector<int> A_dims_array(ndim); std::vector<int> B_dims_array(ndim); std::vector<int> C_dims_array(ndim); utils::ComputeBroadcastBinaryOpDims( A_ndim, A_dims, B_ndim, B_dims, A_dims_array.data(), B_dims_array.data(), C_dims_array.data()); if (A_dims_array == B_dims_array) { const int size = std::accumulate( C_dims_array.cbegin(), C_dims_array.cend(), 1, std::multiplies<int>()); SimpleBinaryOpCUDAKernel<TIn, TOut, BinaryOperator> <<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(size, op, A, B, C); C10_CUDA_KERNEL_LAUNCH_CHECK(); return; } int rows; int cols; bool broadcast_1st; if (utils::IsRowwiseBroadcastBinaryOp( ndim, A_dims_array.data(), B_dims_array.data(), &rows, &cols, &broadcast_1st)) { BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>( rows, cols, true, broadcast_1st, op, A, B, C, context); return; } if (utils::IsColwiseBroadcastBinaryOp( ndim, A_dims_array.data(), B_dims_array.data(), &rows, &cols, &broadcast_1st)) { BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>( rows, cols, false, broadcast_1st, op, A, B, C, context); return; } DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_3( ndim, BroadcastBinaryOpImpl, TIn, TOut, BinaryOperator, A_dims_array.data(), B_dims_array.data(), C_dims_array.data(), op, A, B, C, context); } } // namespace #define DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \ template <> \ CAFFE2_CUDA_EXPORT void Rowwise##Func<TIn, CUDAContext, true>( \ const int rows, \ const int cols, \ const TIn* A, \ const TIn* B, \ TOut* C, \ CUDAContext* context) { \ if (rows == 0 || cols == 0) { \ return; \ } \ const int size = rows * cols; \ const FIXED_DIVISOR cols_div(cols); \ RowwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, true> \ <<<CAFFE_GET_BLOCKS(size), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); \ } \ template <> \ CAFFE2_CUDA_EXPORT void Rowwise##Func<TIn, CUDAContext, false>( \ const int rows, \ const int cols, \ const TIn* A, \ const TIn* B, \ TOut* C, \ CUDAContext* context) { \ if (rows == 0 || cols == 0) { \ return; \ } \ const int size = rows * cols; \ const FIXED_DIVISOR cols_div(cols); \ RowwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, false> \ <<<CAFFE_GET_BLOCKS(size), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); \ } \ template <> \ CAFFE2_CUDA_EXPORT void Colwise##Func<TIn, CUDAContext, true>( \ const int rows, \ const int cols, \ const TIn* A, \ const TIn* B, \ TOut* C, \ CUDAContext* context) { \ if (rows == 0 || cols == 0) { \ return; \ } \ const int size = rows * cols; \ const FIXED_DIVISOR cols_div(cols); \ ColwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, true> \ <<<CAFFE_GET_BLOCKS(size), \ 
CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); \ } \ template <> \ CAFFE2_CUDA_EXPORT void Colwise##Func<TIn, CUDAContext, false>( \ const int rows, \ const int cols, \ const TIn* A, \ const TIn* B, \ TOut* C, \ CUDAContext* context) { \ if (rows == 0 || cols == 0) { \ return; \ } \ const int size = rows * cols; \ const FIXED_DIVISOR cols_div(cols); \ ColwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, false> \ <<<CAFFE_GET_BLOCKS(size), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); \ } #define DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to) DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to) DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less) DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal) DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater) DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal) #undef DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION #define DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \ std::int32_t, std::int32_t, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \ std::int64_t, std::int64_t, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(at::Half, at::Half, Func, Op) DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor) DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor) DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor) DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor) #undef DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and) DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or) DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor) #define DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \ std::int32_t, std::int32_t, Func, Op) \ DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \ std::int64_t, std::int64_t, Func, Op) DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and) DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or) DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor) #undef DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION #undef DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION #define DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \ template <> \ CAFFE2_CUDA_EXPORT void Func<TIn, CUDAContext>( \ const int A_ndim, \ const int* A_dims, \ const int B_ndim, \ const int* B_dims, \ const TIn* A, \ const TIn* B, \ TOut* C, \ CUDAContext* context) { \ BroadcastBinaryOp<TIn, TOut, Op<TIn>>( \ A_ndim, A_dims, B_ndim, B_dims, Op<TIn>(), A, B, C, context); \ } #define 
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to) DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to) DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less) DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal) DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater) DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal) #undef DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION #define DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \ std::int32_t, std::int32_t, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \ std::int64_t, std::int64_t, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(at::Half, at::Half, Func, Op) DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor) DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor) DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor) DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor) #undef DEFINE_BROADCAST_CUDA_BINARY_FUNCTION DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and) DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or) DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor) #define DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \ std::int32_t, std::int32_t, Func, Op) \ DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op) DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and) DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or) DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor) #undef DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION #undef DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION #define DELEGATE_REDUCTION_FUNCTION(T, Funcname, func) \ template <> \ CAFFE2_CUDA_EXPORT void Funcname<T, CUDAContext>( \ const int N, \ const T* src, \ T* dst, \ Tensor* scratch_ptr, \ CUDAContext* context) { \ size_t memRequired = 0; \ cub::DeviceReduce::func( \ nullptr, memRequired, src, dst, N, context->cuda_stream()); \ auto buffer_size = \ static_cast<int64_t>((memRequired + sizeof(T) - 1) / sizeof(T)); \ scratch_ptr->Resize(std::vector<int64_t>{buffer_size}); \ cub::DeviceReduce::func( \ static_cast<void*>(scratch_ptr->mutable_data<T>()), \ memRequired, \ src, \ dst, \ N, \ context->cuda_stream()); \ } DELEGATE_REDUCTION_FUNCTION(float, ReduceMin, Min) DELEGATE_REDUCTION_FUNCTION(float, ReduceMax, Max) DELEGATE_REDUCTION_FUNCTION(int32_t, ReduceMax, Max) DELEGATE_REDUCTION_FUNCTION(int64_t, ReduceMax, Max) #undef DELEGATE_REDUCTION_FUNCTION // Caffe2 gemm provides a simpler interface to the gemm functions, with the // limitation that the data has to be contiguous in memory. 
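// Editorial note (not in the original source): the specializations below
// evaluate the row-major product C (M x N) = alpha * op(A) * op(B) + beta * C
// with a column-major BLAS by using the identity C^T = op(B)^T * op(A)^T.
// Reinterpreting each row-major buffer as the column-major transpose, the
// calls pass B before A and swap M with N, so no explicit transpose or copy
// is needed; lda and ldb are simply the row widths of A and B in memory.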
template <> CAFFE2_CUDA_EXPORT void Gemm<float, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C, CUDAContext* context, TensorProto::DataType math_type) { // Note that cublas follows fortran order, so the order is different from // the cblas convention. const int lda = (trans_A == CblasNoTrans) ? K : M; const int ldb = (trans_B == CblasNoTrans) ? N : K; const cublasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; const cublasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_ENFORCE( cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(cublasSgemm( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> CAFFE2_CUDA_EXPORT void Gemm<at::Half, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int M, const int N, const int K, const float alpha, const at::Half* A, const at::Half* B, const float beta, at::Half* C, CUDAContext* context, TensorProto::DataType math_type) { // Note that cublas follows fortran order, so the order is different from // the cblas convention. const int lda = (trans_A == CblasNoTrans) ? K : M; const int ldb = (trans_B == CblasNoTrans) ? N : K; const cublasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; const cublasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; if (math_type == TensorProto_DataType_FLOAT) { CUBLAS_ENFORCE(cublasSetPointerMode( context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); #ifdef __HIP_PLATFORM_HCC__ // rocblas doesn't support cublasSgemmEx type API yet. 
// It has more general rocblas_gemm_ex API which is more close to // cublasGemmEx rocblas_gemm_ex does D = alpha*op( A )*op( B ) + beta*C, // whereas cublasgemmEx does C = alpha*op( A )*op( B ) + beta*C ROCBLAS_ENFORCE(rocblas_gemm_ex( context->rocblashandle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, rocblas_datatype_f16_r, ldb, A, rocblas_datatype_f16_r, lda, &beta, C, rocblas_datatype_f16_r, N, C, // D rocblas_datatype_f16_r, // D type N, // ldd rocblas_datatype_f32_r, // compute type rocblas_gemm_algo_standard, // rocblas_gemm_algo 0, // solution index, reserved for future use 0)); // flags, reserved for future use #else CUBLAS_ENFORCE(cublasSgemmEx( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, CUDA_R_16F, ldb, A, CUDA_R_16F, lda, &beta, C, CUDA_R_16F, N)); #endif // __HIP_PLATFORM_HCC__ } else if (math_type == TensorProto_DataType_FLOAT16) { // convert alpha, beta from float -> __half const __half alpha_fp16 = at::Half(alpha); const __half beta_fp16 = at::Half(beta); // call cublasHgemm CUBLAS_ENFORCE(cublasSetPointerMode( context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(cublasHgemm( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, reinterpret_cast<const CUBLAS_HALF_TYPE*>(&alpha_fp16), reinterpret_cast<const CUBLAS_HALF_TYPE*>(B), ldb, reinterpret_cast<const CUBLAS_HALF_TYPE*>(A), lda, reinterpret_cast<const CUBLAS_HALF_TYPE*>(&beta_fp16), reinterpret_cast<CUBLAS_HALF_TYPE*>(C), N)); } else { // fail CAFFE_THROW("Unsupported math type"); } } template <> CAFFE2_CUDA_EXPORT void BiasCHW<float, CUDAContext>( const float* bias, const float* bias_multiplier, const int bias_channels, const int image_size, float* image, CUDAContext* context) { Gemm<float, CUDAContext>( CblasNoTrans, CblasNoTrans, bias_channels, image_size, 1, 1, bias, bias_multiplier, 1, image, context); } template <> CAFFE2_CUDA_EXPORT void GemmBatched<float, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int batch_size, const int M, const int N, const int K, const float alpha, const float** A, const float** B, const float beta, float** C, CUDAContext* context, TensorProto::DataType math_type) { #if __CUDACC_VER_MAJOR__ < 8 || defined(__HIP_PLATFORM_HCC__) // loop over matrices in the batch for (int i = 0; i < batch_size; ++i) { Gemm<float, CUDAContext>( trans_A, trans_B, M, N, K, alpha, A[i], B[i], beta, C[i], context, math_type); } #else // Note that cublas follows fortran order, so the order is different from // the cblas convention. const int lda = (trans_A == CblasNoTrans) ? K : M; const int ldb = (trans_B == CblasNoTrans) ? N : K; const int ldc = N; const cublasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; const cublasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? 
CUBLAS_OP_N : CUBLAS_OP_T; thrust::device_vector<const float*> A_device(A, A + batch_size); thrust::device_vector<const float*> B_device(B, B + batch_size); thrust::device_vector<float*> C_device(C, C + batch_size); CUBLAS_ENFORCE( cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(cublasSgemmBatched( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B_device.data().get(), ldb, A_device.data().get(), lda, &beta, C_device.data().get(), ldc, batch_size)); #endif } template <> CAFFE2_CUDA_EXPORT void GemmStridedBatched<float, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int batch_size, const int M, const int N, const int K, const float alpha, const float* A, const int A_stride, const float* B, const int B_stride, const float beta, float* C, const int C_stride, CUDAContext* context, TensorProto::DataType math_type) { #if __CUDACC_VER_MAJOR__ < 8 && !defined(__HIP_PLATFORM_HCC__) // loop over matrices in the batch for (int i = 0; i < batch_size; ++i) { Gemm<float, CUDAContext>( trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type); A += A_stride; B += B_stride; C += C_stride; } #else // Note that cublas follows fortran order, so the order is different from // the cblas convention. const int lda = (trans_A == CblasNoTrans) ? K : M; const int ldb = (trans_B == CblasNoTrans) ? N : K; const int ldc = N; const cublasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; const cublasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_ENFORCE( cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(cublasSgemmStridedBatched( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, ldb, B_stride, A, lda, A_stride, &beta, C, ldc, C_stride, batch_size)); #endif } template <> CAFFE2_CUDA_EXPORT void GemmBatched<at::Half, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int batch_size, const int M, const int N, const int K, const float alpha, const at::Half** A, const at::Half** B, const float beta, at::Half** C, CUDAContext* context, TensorProto::DataType math_type) { #if __CUDACC_VER_MAJOR__ < 9 // loop over matrices in the batch for (int i = 0; i < batch_size; ++i) { Gemm<at::Half, CUDAContext>( trans_A, trans_B, M, N, K, alpha, A[i], B[i], beta, C[i], context, math_type); } #else // Note that cublas follows fortran order, so the order is different from // the cblas convention. const int lda = (trans_A == CblasNoTrans) ? K : M; const int ldb = (trans_B == CblasNoTrans) ? N : K; const int ldc = N; const cublasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; const cublasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? 
CUBLAS_OP_N : CUBLAS_OP_T; if (math_type == TensorProto_DataType_FLOAT) { thrust::device_vector<const void*> A_device(A, A + batch_size); thrust::device_vector<const void*> B_device(B, B + batch_size); thrust::device_vector<void*> C_device(C, C + batch_size); CUBLAS_ENFORCE(cublasSetPointerMode( context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(cublasGemmBatchedEx( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B_device.data().get(), CUDA_R_16F, ldb, A_device.data().get(), CUDA_R_16F, lda, &beta, C_device.data().get(), CUDA_R_16F, ldc, batch_size, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); } else if (math_type == TensorProto_DataType_FLOAT16) { // Convert alpha, beta from float -> __half const __half alpha_fp16 = at::Half(alpha); const __half beta_fp16 = at::Half(beta); thrust::host_vector<const __half*> A_array(batch_size); thrust::host_vector<const __half*> B_array(batch_size); thrust::host_vector<__half*> C_array(batch_size); for (int i = 0; i < batch_size; ++i) { A_array[i] = reinterpret_cast<const __half*>(A[i]); B_array[i] = reinterpret_cast<const __half*>(B[i]); C_array[i] = reinterpret_cast<__half*>(C[i]); } thrust::device_vector<const __half*> A_device( A_array.cbegin(), A_array.cend()); thrust::device_vector<const __half*> B_device( B_array.cbegin(), B_array.cend()); thrust::device_vector<__half*> C_device(C_array.cbegin(), C_array.cend()); CUBLAS_ENFORCE(cublasSetPointerMode( context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(cublasHgemmBatched( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha_fp16, B_device.data().get(), ldb, A_device.data().get(), lda, &beta_fp16, C_device.data().get(), ldc, batch_size)); } else { CAFFE_THROW("Unsupported math type"); } #endif } template <> CAFFE2_CUDA_EXPORT void GemmStridedBatched<at::Half, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int batch_size, const int M, const int N, const int K, const float alpha, const at::Half* A, const int A_stride, const at::Half* B, const int B_stride, const float beta, at::Half* C, const int C_stride, CUDAContext* context, TensorProto::DataType math_type) { #if __CUDACC_VER_MAJOR__ < 8 && !defined(__HIP_PLATFORM_HCC__) // loop over matrices in the batch for (int i = 0; i < batch_size; ++i) { Gemm<at::Half, CUDAContext>( trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type); A += A_stride; B += B_stride; C += C_stride; } #else // Note that cublas follows fortran order, so the order is different from // the cblas convention. const int lda = (trans_A == CblasNoTrans) ? K : M; const int ldb = (trans_B == CblasNoTrans) ? N : K; const int ldc = N; const cublasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; const cublasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? 
CUBLAS_OP_N : CUBLAS_OP_T; if (math_type == TensorProto_DataType_FLOAT) { CUBLAS_ENFORCE(cublasSetPointerMode( context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); #ifdef __HIP_PLATFORM_HCC__ // D[i*stride_d] = alpha*op(A[i*stride_a])*op(B[i*stride_b]) + // beta*C[i*stride_c], for i in [0,batch_count-1] ROCBLAS_ENFORCE(rocblas_gemm_strided_batched_ex( context->rocblashandle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, rocblas_datatype_f16_r, ldb, B_stride, A, rocblas_datatype_f16_r, lda, A_stride, &beta, C, rocblas_datatype_f16_r, ldc, C_stride, C, // D rocblas_datatype_f16_r, // D type ldc, // ldd C_stride, // D stride batch_size, rocblas_datatype_f32_r, // compute type rocblas_gemm_algo_standard, // rocblas_gemm_algo 0, // solution index, reserved for future use 0)); // flags, reserved for future use #else CUBLAS_ENFORCE(cublasGemmStridedBatchedEx( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, CUDA_R_16F, ldb, B_stride, A, CUDA_R_16F, lda, A_stride, &beta, C, CUDA_R_16F, ldc, C_stride, batch_size, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #endif // __HIP_PLATFORM_HCC__ } else if (math_type == TensorProto_DataType_FLOAT16) { // Convert alpha, beta from float -> __half const __half alpha_fp16 = at::Half(alpha); const __half beta_fp16 = at::Half(beta); CUBLAS_ENFORCE(cublasSetPointerMode( context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(cublasHgemmStridedBatched( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, reinterpret_cast<const CUBLAS_HALF_TYPE*>(&alpha_fp16), reinterpret_cast<const CUBLAS_HALF_TYPE*>(B), ldb, B_stride, reinterpret_cast<const CUBLAS_HALF_TYPE*>(A), lda, A_stride, reinterpret_cast<const CUBLAS_HALF_TYPE*>(&beta_fp16), reinterpret_cast<CUBLAS_HALF_TYPE*>(C), ldc, C_stride, batch_size)); } else { CAFFE_THROW("Unsupported math type"); } #endif } template <> CAFFE2_CUDA_EXPORT void Gemv<float, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y, CUDAContext* context, TensorProto::DataType math_type) { const cublasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_ENFORCE( cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(cublasSgemv( context->cublas_handle(), cu_trans_A, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> CAFFE2_CUDA_EXPORT void Gemv<at::Half, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const int M, const int N, const float alpha, const at::Half* A, const at::Half* x, const float beta, at::Half* y, CUDAContext* context, TensorProto::DataType math_type) { const cublasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; // sort out what we need to call cublasSgemmEx / cublasHgemm const int m = (cu_trans_A == CUBLAS_OP_N) ? N : M; const int k = (cu_trans_A == CUBLAS_OP_N) ? M : N; const int lda = (cu_trans_A == CUBLAS_OP_N) ? m : k; const int ldc = m; if (math_type == TensorProto_DataType_FLOAT) { CUBLAS_ENFORCE(cublasSetPointerMode( context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); #ifdef __HIP_PLATFORM_HCC__ // rocblas doesn't support cublasSgemmEx type API yet. 
// It has more general rocblas_gemm_ex API which is more close to // cublasGemmEx rocblas_gemm_ex does D = alpha*op( A )*op( B ) + beta*C, // whereas cublasgemmEx does C = alpha*op( A )*op( B ) + beta*C ROCBLAS_ENFORCE(rocblas_gemm_ex( context->rocblashandle(), cu_trans_A, rocblas_operation_none, m, 1, k, &alpha, A, rocblas_datatype_f16_r, lda, x, rocblas_datatype_f16_r, k, &beta, y, rocblas_datatype_f16_r, ldc, y, // D rocblas_datatype_f16_r, // D type ldc, // ldd rocblas_datatype_f32_r, // compute type rocblas_gemm_algo_standard, // rocblas_gemm_algo 0, // solution index, reserved for future use 0)); // flags, reserved for future use #else CUBLAS_ENFORCE(cublasSgemmEx( context->cublas_handle(), cu_trans_A, CUBLAS_OP_N, m, 1, k, &alpha, A, CUDA_R_16F, lda, x, CUDA_R_16F, k, &beta, y, CUDA_R_16F, ldc)); #endif // __HIP_PLATFORM_HCC__ } else if (math_type == TensorProto_DataType_FLOAT16) { const __half alpha_fp16 = at::Half(alpha); const __half beta_fp16 = at::Half(beta); CUBLAS_ENFORCE(cublasSetPointerMode( context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(cublasHgemm( context->cublas_handle(), cu_trans_A, CUBLAS_OP_N, m, 1, k, reinterpret_cast<const CUBLAS_HALF_TYPE*>(&alpha_fp16), reinterpret_cast<const CUBLAS_HALF_TYPE*>(A), lda, reinterpret_cast<const CUBLAS_HALF_TYPE*>(x), k, reinterpret_cast<const CUBLAS_HALF_TYPE*>(&beta_fp16), reinterpret_cast<CUBLAS_HALF_TYPE*>(y), ldc)); } else { // fail CAFFE_THROW("Unsupported math type"); } } #ifndef __HIP_PLATFORM_HCC__ // No change, but required. Defer to default CUDA engine template <> CAFFE2_CUDA_EXPORT void Gemm<float, CUDAContext, TensorCoreEngine>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C, CUDAContext* context, TensorProto::DataType math_type) { return Gemm<float, CUDAContext>( trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type); } template <> CAFFE2_CUDA_EXPORT void Gemm<at::Half, CUDAContext, TensorCoreEngine>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int M, const int N, const int K, const float alpha, const at::Half* A, const at::Half* B, const float beta, at::Half* C, CUDAContext* context, TensorProto::DataType math_type) { // Note that cublas follows fortran order, so the order is different from // the cblas convention. const int lda = (trans_A == CblasNoTrans) ? K : M; const int ldb = (trans_B == CblasNoTrans) ? N : K; const cublasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; const cublasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? 
CUBLAS_OP_N : CUBLAS_OP_T; // enable TensorCore for this call on this handle if (TensorCoreAvailable()) { CUBLAS_ENFORCE( cublasSetMathMode(context->cublas_handle(), CUBLAS_TENSOR_OP_MATH)); } CUBLAS_ENFORCE( cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(cublasGemmEx( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, CUDA_R_16F, ldb, A, CUDA_R_16F, lda, &beta, C, CUDA_R_16F, N, CUDA_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP)); // Now disable TensorCore math for subsequent calls to this handle if (TensorCoreAvailable()) { CUBLAS_ENFORCE( cublasSetMathMode(context->cublas_handle(), CUBLAS_DEFAULT_MATH)); } } template <> CAFFE2_CUDA_EXPORT void GemmBatched<float, CUDAContext, TensorCoreEngine>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int batch_size, const int M, const int N, const int K, const float alpha, const float** A, const float** B, const float beta, float** C, CUDAContext* context, TensorProto::DataType math_type) { GemmBatched<float, CUDAContext, DefaultEngine>( trans_A, trans_B, batch_size, M, N, K, alpha, A, B, beta, C, context, math_type); } template <> CAFFE2_CUDA_EXPORT void GemmBatched<at::Half, CUDAContext, TensorCoreEngine>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int batch_size, const int M, const int N, const int K, const float alpha, const at::Half** A, const at::Half** B, const float beta, at::Half** C, CUDAContext* context, TensorProto::DataType math_type) { GemmBatched<at::Half, CUDAContext, DefaultEngine>( trans_A, trans_B, batch_size, M, N, K, alpha, A, B, beta, C, context, math_type); } template <> CAFFE2_CUDA_EXPORT void GemmStridedBatched<float, CUDAContext, TensorCoreEngine>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int batch_size, const int M, const int N, const int K, const float alpha, const float* A, const int A_stride, const float* B, const int B_stride, const float beta, float* C, const int C_stride, CUDAContext* context, TensorProto::DataType math_type) { GemmStridedBatched<float, CUDAContext, DefaultEngine>( trans_A, trans_B, batch_size, M, N, K, alpha, A, A_stride, B, B_stride, beta, C, C_stride, context, math_type); } template <> CAFFE2_CUDA_EXPORT void GemmStridedBatched<at::Half, CUDAContext, TensorCoreEngine>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int batch_size, const int M, const int N, const int K, const float alpha, const at::Half* A, const int A_stride, const at::Half* B, const int B_stride, const float beta, at::Half* C, const int C_stride, CUDAContext* context, TensorProto::DataType math_type) { GemmStridedBatched<at::Half, CUDAContext, DefaultEngine>( trans_A, trans_B, batch_size, M, N, K, alpha, A, A_stride, B, B_stride, beta, C, C_stride, context, math_type); } template <> CAFFE2_CUDA_EXPORT void Gemv<float, CUDAContext, TensorCoreEngine>( const CBLAS_TRANSPOSE trans_A, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y, CUDAContext* context, TensorProto::DataType math_type) { Gemv<float, CUDAContext, DefaultEngine>( trans_A, M, N, alpha, A, x, beta, y, context, math_type); } template <> CAFFE2_CUDA_EXPORT void Gemv<at::Half, CUDAContext, TensorCoreEngine>( const CBLAS_TRANSPOSE trans_A, const int M, const int N, const float alpha, const at::Half* A, const at::Half* x, const float beta, at::Half* y, CUDAContext* context, TensorProto::DataType math_type) { Gemv<at::Half, CUDAContext, DefaultEngine>( trans_A, M, N, alpha, A, 
x, beta, y, context, math_type); } #endif template <> CAFFE2_CUDA_EXPORT void GemmEx<float, CUDAContext>( const CBLAS_TRANSPOSE trans_A, const CBLAS_TRANSPOSE trans_B, const int M, const int N, const int K, const float alpha, const float* A, const int lda, const float* B, const int ldb, const float beta, float* C, const int ldc, CUDAContext* context) { // Note that cublas follows fortran order, so the order is different from // the cblas convention. const cublasOperation_t cu_trans_A = (trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; const cublasOperation_t cu_trans_B = (trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_ENFORCE( cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); CUBLAS_ENFORCE(cublasSgemm( context->cublas_handle(), cu_trans_B, cu_trans_A, N, M, K, &alpha, B, ldb, A, lda, &beta, C, ldc)); } // Batched Add variants namespace { template <typename T> __global__ void AddStripedBatchKernel( const int N, const T* first, T* Y, const int stripe, const int batch) { for (int j = 0; j < batch; j++) { const T* x = first + j * stripe; CUDA_1D_KERNEL_LOOP(i, N) { float tmpY = convert::To<T, float>(Y[i]); tmpY += convert::To<T, float>(x[i]); Y[i] = convert::To<float, T>(tmpY); } } } } // namespace #define CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(T) \ template <> \ CAFFE2_CUDA_EXPORT void AddStripedBatch<T, CUDAContext>( \ const int N, \ const T* first, \ T* Y, \ const int stripe, \ const int batch, \ CUDAContext* context) { \ AddStripedBatchKernel<T> \ <<<CAFFE_GET_BLOCKS(N), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>(N, first, Y, stripe, batch); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); \ } CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float); CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(at::Half); #undef CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH namespace { template <typename T> __global__ void UniformShift(const size_t N, const float min, const float max, T* x) { float scale = max - min; CUDA_1D_KERNEL_LOOP(i, N) { x[i] = convert::To<float, T>(convert::To<T, float>(x[i]) * scale + min); } } __global__ void UniformIntFit(const size_t N, const int min, const int max, unsigned int* x) { int* x_int = reinterpret_cast<int*>(x); int range = (max - min + 1); CUDA_1D_KERNEL_LOOP(i, N) { x_int[i] = min + static_cast<int>(x[i] % range); } } } // namespace template <> CAFFE2_CUDA_EXPORT void RandUniform<float, CUDAContext>( const size_t n, const float min, const float max, float* r, CUDAContext* context) { CURAND_ENFORCE(curandGenerateUniform(context->curand_generator(), r, n)); UniformShift<float> <<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(n, min, max, r); C10_CUDA_KERNEL_LAUNCH_CHECK(); } template <> CAFFE2_CUDA_EXPORT void RandUniform<double, CUDAContext>( const size_t n, const double min, const double max, double* r, CUDAContext* context) { CURAND_ENFORCE( curandGenerateUniformDouble(context->curand_generator(), r, n)); UniformShift<double> <<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(n, min, max, r); C10_CUDA_KERNEL_LAUNCH_CHECK(); } template <> CAFFE2_CUDA_EXPORT void RandUniform<int, CUDAContext>( const size_t n, const int min, const int max, int* r, CUDAContext* context) { CURAND_ENFORCE(curandGenerate( context->curand_generator(), reinterpret_cast<unsigned int*>(r), n)); UniformIntFit<<< CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( n, min, max, reinterpret_cast<unsigned int*>(r)); C10_CUDA_KERNEL_LAUNCH_CHECK(); } template <typename T> size_t 
HandleOddLengthRandGaussian( const size_t n, const T mean, const T std, T* r, CUDAContext* context) { if (n % 2 == 1) { std::default_random_engine generator; std::normal_distribution<T> distribution(mean, std); const T random_value = distribution(generator); Set<T, CUDAContext>(1, random_value, r + (n - 1), context); return n - 1; } return n; } template <> CAFFE2_CUDA_EXPORT void RandGaussian<float, CUDAContext>( const size_t n, const float mean, const float std, float* r, CUDAContext* context) { // If n is odd, we add a random Gaussian value at the end manually // and generate n-1 random values using curandGenerateNormal. // curandGenerateNormal requires n to be even. const size_t even_n = HandleOddLengthRandGaussian<float>(n, mean, std, r, context); CURAND_ENFORCE( curandGenerateNormal(context->curand_generator(), r, even_n, mean, std)); } template <> CAFFE2_CUDA_EXPORT void RandGaussian<double, CUDAContext>( const size_t n, const double mean, const double std, double* r, CUDAContext* context) { const size_t even_n = HandleOddLengthRandGaussian<double>(n, mean, std, r, context); CURAND_ENFORCE(curandGenerateNormalDouble( context->curand_generator(), r, even_n, mean, std)); } template <> CAFFE2_CUDA_EXPORT void Dot<float, CUDAContext>( const int n, const float* a, const float* b, float* y, CUDAContext* context) { CUBLAS_ENFORCE(cublasSetPointerMode( context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE)); CUBLAS_ENFORCE(cublasSdot(context->cublas_handle(), n, a, 1, b, 1, y)); } template <> CAFFE2_CUDA_EXPORT void Dot<at::Half, CUDAContext>( const int n, const at::Half* a, const at::Half* b, at::Half* y, CUDAContext* context) { #if defined __HIP_PLATFORM_HCC__ && HIP_VERSION < 210 CAFFE_THROW("HIP currently does not support FP16 completely yet."); #elif defined __HIP_PLATFORM_HCC__ && HIP_VERSION >= 210 CUBLAS_ENFORCE(cublasSetPointerMode( context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE)); CUBLAS_ENFORCE(rocblas_hdot( context->cublas_handle(), n, reinterpret_cast<const rocblas_half*>(a), 1, reinterpret_cast<const rocblas_half*>(b), 1, reinterpret_cast<rocblas_half*>(y))); #else // execute with 32-bit math CUBLAS_ENFORCE(cublasSetPointerMode( context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE)); CUBLAS_ENFORCE(cublasDotEx( context->cublas_handle(), n, a, CUDA_R_16F, 1, b, CUDA_R_16F, 1, y, CUDA_R_16F, CUDA_R_32F)); #endif } // A previous version of caffe2 used Thrust but it turns out that thrust // reduction has an implicit scratch space allocation and deallocation, which // may interfere with NCCL and create a deadlock. Hence we are using a custom // reduction here. #define SUM_KERNEL_NTHREADS 128 template <typename T> __global__ void SumKernel(const int N, const T* X, T* Y, bool square) { const int idx = threadIdx.x; __shared__ float reduction_buffer[SUM_KERNEL_NTHREADS]; reduction_buffer[idx] = 0; // A multilevel reduction. 
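// Each of the SUM_KERNEL_NTHREADS (128) threads first accumulates a strided
// partial sum of X into shared memory; the partials are then folded 128 -> 32,
// and thread 0 adds the remaining 32 values serially to produce the final sum.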
// N -> 128 if (!square) { for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) { reduction_buffer[idx] += convert::To<T, float>(X[i]); } } else { for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) { float Xi = convert::To<T, float>(X[i]); reduction_buffer[idx] += Xi * Xi; } } __syncthreads(); // 128 -> 32 if (idx < 32) { reduction_buffer[idx] += reduction_buffer[idx + 32] + reduction_buffer[idx + 64] + reduction_buffer[idx + 96]; } __syncthreads(); // 32 -> 1 if (idx == 0) { float tmp = 0; for (int i = 0; i < 32; ++i) { tmp += reduction_buffer[i]; } *Y = convert::To<float, T>(tmp); } } // According to the benchmarks script // caffe2/caffe2/experiments/python/device_reduce_sum_bench.py, // device reduce is slower for N <= 10000. #define DEVICE_REDUCE_SIZE_THRESHOLD 10000 namespace { template <typename T> __global__ void SumConvertKernel(float* sum, T* dest) { *dest = convert::To<float, T>(*sum); } template <typename T, typename IterT> CAFFE2_CUDA_EXPORT void SumGenericIter( const int N, IterT it, T*& dest, CUDAContext* context, Tensor* scratch_ptr) { size_t memRequired = 0; cub::DeviceReduce::Sum( nullptr, memRequired, it, dest, N, context->cuda_stream()); auto buffer_size = static_cast<int64_t>((memRequired + sizeof(T) - 1) / sizeof(T)); if (!dest) { // allocate one more T at the end of scratch for dest scratch_ptr->Resize(std::vector<int64_t>{buffer_size + 1}); dest = scratch_ptr->template mutable_data<T>() + buffer_size; } else { scratch_ptr->Resize(std::vector<int64_t>{buffer_size}); } cub::DeviceReduce::Sum( static_cast<void*>(scratch_ptr->template mutable_data<T>()), memRequired, it, dest, N, context->cuda_stream()); } } // namespace template <> CAFFE2_CUDA_EXPORT void Sum<float, CUDAContext>( const int N, const float* x, float* y, CUDAContext* context, Tensor* scratch_ptr) { if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { SumGenericIter<float>(N, x, y, context, scratch_ptr); } else { SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>( N, x, y, false); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } template <> CAFFE2_CUDA_EXPORT void Sum<int32_t, CUDAContext>( const int N, const int32_t* x, int32_t* y, CUDAContext* context, Tensor* scratch_ptr) { if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { SumGenericIter<int32_t>(N, x, y, context, scratch_ptr); } else { SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>( N, x, y, false); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } namespace { template <typename T> struct FloatTransform { inline __host__ __device__ float operator()(const T v) const { return convert::To<T, float>(v); } }; } // namespace #define CAFFE2_MATH_SUM_FUNC(T) \ template <> \ CAFFE2_CUDA_EXPORT void Sum<T, CUDAContext>( \ const int N, \ const T* x, \ T* y, \ CUDAContext* context, \ Tensor* scratch_ptr) { \ if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \ FloatTransform<T> transform; \ cub::TransformInputIterator<float, FloatTransform<T>, const T*> it( \ x, transform); \ float* sum = nullptr; \ SumGenericIter<float>(N, it, sum, context, scratch_ptr); \ SumConvertKernel<<<1, 1, 0, context->cuda_stream()>>>(sum, y); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); \ } else { \ SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>( \ N, x, y, false); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); \ } \ } CAFFE2_MATH_SUM_FUNC(at::Half) #undef CAFFE2_MATH_SUM_FUNC namespace { template <typename T> struct SqrTransform { inline __host__ __device__ T operator()(const T v) const { return v * v; } }; } // namespace template <> CAFFE2_CUDA_EXPORT void SumSqr<float, 
CUDAContext>( const int N, const float* x, float* y, CUDAContext* context, Tensor* scratch_ptr) { if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { SqrTransform<float> transform; cub::TransformInputIterator<float, SqrTransform<float>, const float*> it( x, transform); SumGenericIter<float>(N, it, y, context, scratch_ptr); } else { SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>( N, x, y, true); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } #define CAFFE2_MATH_SUMSQR_FUNC(T) \ template <> \ CAFFE2_CUDA_EXPORT void SumSqr<T, CUDAContext>( \ const int N, \ const T* x, \ T* y, \ CUDAContext* context, \ Tensor* scratch_ptr) { \ if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \ FloatTransform<T> float_transform; \ cub::TransformInputIterator<float, FloatTransform<T>, const T*> \ float_it(x, float_transform); \ SqrTransform<float> sqr_transform; \ cub::TransformInputIterator< \ float, \ SqrTransform<float>, \ decltype(float_it)> \ it(float_it, sqr_transform); \ float* sum = nullptr; \ SumGenericIter<float>(N, it, sum, context, scratch_ptr); \ SumConvertKernel<<<1, 1, 0, context->cuda_stream()>>>(sum, y); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); \ } else { \ SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>( \ N, x, y, true); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); \ } \ } CAFFE2_MATH_SUMSQR_FUNC(at::Half) #undef CAFFE2_MATH_SUMSQR_FUNC #undef DEVICE_REDUCE_SIZE_THRESHOLD namespace { template <typename T> __global__ void SelectKernel(const int N, const int D, const T* x, const int* idx, T* y) { CUDA_1D_KERNEL_LOOP(i, N) { y[i] = x[i * D + idx[i]]; } } } // namespace template <> CAFFE2_CUDA_EXPORT void Select<float, CUDAContext>( const int N, const int D, const float* x, const int* idx, float* y, CUDAContext* context) { SelectKernel<float> <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(N, D, x, idx, y); C10_CUDA_KERNEL_LAUNCH_CHECK(); } template <> CAFFE2_CUDA_EXPORT void Select<at::Half, CUDAContext>( const int N, const int D, const at::Half* x, const int* idx, at::Half* y, CUDAContext* context) { SelectKernel<at::Half> <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(N, D, x, idx, y); C10_CUDA_KERNEL_LAUNCH_CHECK(); } namespace { template <typename T> __global__ void Im2ColNCHWCUDAKernel( const int n, const int input_h, const int input_w, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int stride_h, const int stride_w, const int output_h, const int output_w, const T* img_data, T* col_data) { CUDA_1D_KERNEL_LOOP(index, n) { const int w_out = index % output_w; const int h_index = index / output_w; const int h_out = h_index % output_h; const int channel_in = h_index / output_h; const int channel_out = channel_in * kernel_h * kernel_w; const int h_in = h_out * stride_h - pad_t; const int w_in = w_out * stride_w - pad_l; const int output_size = output_h * output_w; T* col_data_ptr = col_data + (channel_out * output_h + h_out) * output_w + w_out; const T* img_data_ptr = img_data + (channel_in * input_h + h_in) * input_w + w_in; int dh = 0; for (int i = 0; i < kernel_h; ++i) { int dw = 0; for (int j = 0; j < kernel_w; ++j) { const int h = h_in + dh; const int w = w_in + dw; #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) && utils::IsAGeZeroAndALtB(w, input_w) ? 
__ldg(img_data_ptr + dh * input_w + dw) : 0; #else *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) && utils::IsAGeZeroAndALtB(w, input_w) ? img_data_ptr[dh * input_w + dw] : 0; #endif col_data_ptr += output_size; dw += dilation_w; } dh += dilation_h; } } } template <typename T> __global__ void Im2ColNHWCCUDAKernel( const int n, const int input_h, const int input_w, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int stride_h, const int stride_w, const int output_w, const int channels, const T* img_data, T* col_data) { CUDA_1D_KERNEL_LOOP(index, n) { const int channel_in = index % channels; const int w_out = index / channels % output_w; const int h_out = index / channels / output_w; const int h_in = h_out * stride_h - pad_t; const int w_in = w_out * stride_w - pad_l; T* col_data_ptr = col_data + (h_out * output_w + w_out) * channels * kernel_h * kernel_w + channel_in; int dh = 0; for (int i = 0; i < kernel_h; ++i) { int dw = 0; for (int j = 0; j < kernel_w; ++j) { const int h = h_in + dh; const int w = w_in + dw; #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) && utils::IsAGeZeroAndALtB(w, input_w) ? __ldg(img_data + (h * input_w + w) * channels + channel_in) : 0; #else *col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) && utils::IsAGeZeroAndALtB(w, input_w) ? img_data[(h * input_w + w) * channels + channel_in] : 0; #endif col_data_ptr += channels; dw += dilation_w; } dh += dilation_h; } } } template <typename T> __global__ void Col2ImNCHWCUDAKernel( const int n, const int input_h, const int input_w, const int patch_h, const int patch_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int stride_h, const int stride_w, const int output_h, const int output_w, const T* col_data, T* img_data) { const int dpatch_h = dilation_h * (patch_h - 1) + 1; const int dpatch_w = dilation_w * (patch_w - 1) + 1; CUDA_1D_KERNEL_LOOP(index, n) { T val = 0; const int w = index % input_w + pad_l; const int h = index / input_w % input_h + pad_t; const int c = index / (input_h * input_w); // compute the start and end of the output const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1; const int w_col_end = min(w / stride_w + 1, output_w); const int h_col_start = (h < dpatch_h) ? 
0 : (h - dpatch_h) / stride_h + 1; const int h_col_end = min(h / stride_h + 1, output_h); for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { int h_k = (h - h_col * stride_h); int w_k = (w - w_col * stride_w); if (h_k % dilation_h == 0 && w_k % dilation_w == 0) { h_k /= dilation_h; w_k /= dilation_w; const int col_data_index = (((c * patch_h + h_k) * patch_w + w_k) * output_h + h_col) * output_w + w_col; #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) val += __ldg(col_data + col_data_index); #else val += col_data[col_data_index]; #endif } } } img_data[index] = val; } } template <typename T> __global__ void Col2ImNHWCCUDAKernel( const int n, const int input_w, const int channels, const int patch_h, const int patch_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int stride_h, const int stride_w, const int output_h, const int output_w, const T* col_data, T* img_data) { const int dpatch_h = dilation_h * (patch_h - 1) + 1; const int dpatch_w = dilation_w * (patch_w - 1) + 1; CUDA_1D_KERNEL_LOOP(index, n) { T val = 0; const int c = index % channels; const int w = index / channels % input_w + pad_l; const int h = index / channels / input_w + pad_t; // compute the start and end of the output const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1; const int w_col_end = min(w / stride_w + 1, output_w); const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1; const int h_col_end = min(h / stride_h + 1, output_h); const int channels_col = patch_h * patch_w * channels; for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { int h_k = h - h_col * stride_h; int w_k = w - w_col * stride_w; if (h_k % dilation_h == 0 && w_k % dilation_w == 0) { h_k /= dilation_h; w_k /= dilation_w; const int c_col = (h_k * patch_w + w_k) * channels + c; #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) val += __ldg( col_data + (h_col * output_w + w_col) * channels_col + c_col); #else val += col_data[(h_col * output_w + w_col) * channels_col + c_col]; #endif } } } img_data[index] = val; } } template <typename T, int N, bool kCol2Im> __global__ void Im2ColNdNCHWCUDAKernel( const int outer_size, const int inner_size, const int kernel_size, SimpleArray<int, N + 1> img_shape, SimpleArray<int, N + 1> col_shape, SimpleArray<int, N> kernel_shape, SimpleArray<int, N> stride, SimpleArray<int, N> dilation, SimpleArray<int, N> pad, const T* X_data, T* Y_data) { int d_offset[N]; int d_iter[N]; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { int offset_i = i; #pragma unroll for (int d_i = N - 1; d_i >= 0; --d_i) { d_offset[d_i] = offset_i % kernel_shape.data[d_i]; offset_i /= kernel_shape.data[d_i]; } for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { int offset_j = j; #pragma unroll for (int d_i = N - 1; d_i >= 0; --d_i) { d_iter[d_i] = offset_j % col_shape.data[d_i + 1]; offset_j /= col_shape.data[d_i + 1]; } const int col_index = i * inner_size + j; int img_index = i / kernel_size; bool is_padding = false; #pragma unroll for (int d_i = 0; d_i < N; ++d_i) { const int d_img = d_iter[d_i] * stride.data[d_i] - pad.data[d_i] + d_offset[d_i] * dilation.data[d_i]; is_padding |= !utils::IsAGeZeroAndALtB(d_img, img_shape.data[d_i + 1]); img_index = img_index * img_shape.data[d_i + 1] + d_img; } #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) if (!kCol2Im) { Y_data[col_index] = is_padding ? 
0 : __ldg(X_data + img_index); } else if (!is_padding) { gpu_atomic_add(Y_data + img_index, __ldg(X_data + col_index)); } #else if (!kCol2Im) { Y_data[col_index] = is_padding ? 0 : X_data[img_index]; } else if (!is_padding) { gpu_atomic_add(Y_data + img_index, X_data[col_index]); } #endif } } } template <typename T, int N> CAFFE2_CUDA_EXPORT void Im2ColNdNCHWCUDAImpl( const int img_size, const int col_size, const int* img_shape, const int* col_shape, const int* kernel_shape, const int* stride, const int* dilation, const int* pad, const float* img_data, float* col_data, CUDAContext* context) { const int outer_size = col_shape[0]; const int inner_size = col_size / outer_size; const int kernel_size = std::accumulate( kernel_shape, kernel_shape + N, 1, std::multiplies<int>()); SimpleArray<int, N + 1> img_shape_array; SimpleArray<int, N + 1> col_shape_array; SimpleArray<int, N> kernel_shape_array; SimpleArray<int, N> stride_array; SimpleArray<int, N> dilation_array; SimpleArray<int, N> pad_array; std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int)); std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int)); std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int)); std::memcpy(stride_array.data, stride, N * sizeof(int)); std::memcpy(dilation_array.data, dilation, N * sizeof(int)); std::memcpy(pad_array.data, pad, N * sizeof(int)); Im2ColNdNCHWCUDAKernel<T, N, false> <<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( outer_size, inner_size, kernel_size, img_shape_array, col_shape_array, kernel_shape_array, stride_array, dilation_array, pad_array, img_data, col_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); } template <typename T, int N> CAFFE2_CUDA_EXPORT void Col2ImNdNCHWCUDAImpl( const int img_size, const int col_size, const int* img_shape, const int* col_shape, const int* kernel_shape, const int* stride, const int* dilation, const int* pad, const float* col_data, float* img_data, CUDAContext* context) { const int outer_size = col_shape[0]; const int inner_size = col_size / outer_size; const int kernel_size = std::accumulate( kernel_shape, kernel_shape + N, 1, std::multiplies<int>()); SimpleArray<int, N + 1> img_shape_array; SimpleArray<int, N + 1> col_shape_array; SimpleArray<int, N> kernel_shape_array; SimpleArray<int, N> stride_array; SimpleArray<int, N> dilation_array; SimpleArray<int, N> pad_array; std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int)); std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int)); std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int)); std::memcpy(stride_array.data, stride, N * sizeof(int)); std::memcpy(dilation_array.data, dilation, N * sizeof(int)); std::memcpy(pad_array.data, pad, N * sizeof(int)); Set<T, CUDAContext>(img_size, 0, img_data, context); Im2ColNdNCHWCUDAKernel<T, N, true> <<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( outer_size, inner_size, kernel_size, img_shape_array, col_shape_array, kernel_shape_array, stride_array, dilation_array, pad_array, col_data, img_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } // namespace template <> CAFFE2_CUDA_EXPORT void Im2Col<float, CUDAContext, StorageOrder::NCHW>( const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int stride_h, const int stride_w, const float* img_data, float* 
col_data, CUDAContext* context, const int /* groups */) { const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1; const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1; const int num_kernels = channels * output_h * output_w; Im2ColNCHWCUDAKernel<float> <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( num_kernels, height, width, kernel_h, kernel_w, dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w, output_h, output_w, img_data, col_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); } template <> CAFFE2_CUDA_EXPORT void Im2Col<float, CUDAContext, StorageOrder::NHWC>( const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int stride_h, const int stride_w, const float* img_data, float* col_data, CUDAContext* context, const int groups) { CAFFE_ENFORCE_EQ(groups, 1, "groups must be 1 for GPU NHWC Im2Col"); const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1; const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1; const int num_kernels = output_h * output_w * channels; Im2ColNHWCCUDAKernel<float> <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( num_kernels, height, width, kernel_h, kernel_w, dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w, output_w, channels, img_data, col_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); } template <> CAFFE2_CUDA_EXPORT void Col2Im<float, CUDAContext, StorageOrder::NCHW>( const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int stride_h, const int stride_w, const float* col_data, float* img_data, CUDAContext* context, const int /* groups */) { // In NCHW, the number of groups doesn't affect Col2Im. 
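// Gather formulation: one thread per input image element accumulates the
// contributions of every kernel window in the column buffer that covers it,
// so no atomic additions are needed.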
const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1; const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1; const int num_kernels = channels * height * width; Col2ImNCHWCUDAKernel<float> <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( num_kernels, height, width, kernel_h, kernel_w, dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w, output_h, output_w, col_data, img_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); } template <> CAFFE2_CUDA_EXPORT void Col2Im<float, CUDAContext, StorageOrder::NHWC>( const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int stride_h, const int stride_w, const float* col_data, float* img_data, CUDAContext* context, const int groups) { CAFFE_ENFORCE_EQ(groups, 1, "groups must be 1 for GPU NHWC Col2Im"); const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1; const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1; const int num_kernels = height * width * channels; Col2ImNHWCCUDAKernel<float> <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( num_kernels, width, channels, kernel_h, kernel_w, dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w, output_h, output_w, col_data, img_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); } template <> CAFFE2_CUDA_EXPORT void Im2ColNd<float, CUDAContext, StorageOrder::NCHW>( const int N, const int img_size, const int col_size, const int* img_shape, const int* col_shape, const int* kernel_shape, const int* stride, const int* dilation, const int* pad, const float* img_data, float* col_data, CUDAContext* context, const int /* groups */) { // In NCHW, the number of groups doesn't affect Im2Col. DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( N, Im2ColNdNCHWCUDAImpl, float, img_size, col_size, img_shape, col_shape, kernel_shape, stride, dilation, pad, img_data, col_data, context); } template <> CAFFE2_CUDA_EXPORT void Im2ColNd<float, CUDAContext, StorageOrder::NHWC>( const int N, const int img_size, const int col_size, const int* img_shape, const int* col_shape, const int* kernel_shape, const int* stride, const int* dilation, const int* pad, const float* img_data, float* col_data, CUDAContext* context, const int groups) { CAFFE_NOT_IMPLEMENTED; } template <> CAFFE2_CUDA_EXPORT void Col2ImNd<float, CUDAContext, StorageOrder::NCHW>( const int N, const int img_size, const int col_size, const int* img_shape, const int* col_shape, const int* kernel_shape, const int* stride, const int* dilation, const int* pad, const float* col_data, float* img_data, CUDAContext* context, int /* groups */) { // In NCHW, the number of groups doesn't affect Col2Im. 
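// Dispatch on the runtime spatial rank N to a Col2ImNdNCHWCUDAImpl instance
// with a compile-time dimension count, so the ND kernel can unroll its loops.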
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( N, Col2ImNdNCHWCUDAImpl, float, img_size, col_size, img_shape, col_shape, kernel_shape, stride, dilation, pad, col_data, img_data, context); } template <> CAFFE2_CUDA_EXPORT void Col2ImNd<float, CUDAContext, StorageOrder::NHWC>( const int N, const int img_size, const int col_size, const int* img_shape, const int* col_shape, const int* kernel_shape, const int* stride, const int* dilation, const int* pad, const float* col_data, float* img_data, CUDAContext* context, int groups) { CAFFE_NOT_IMPLEMENTED; } template <> CAFFE2_CUDA_EXPORT void CopyMatrix<CUDAContext>( const size_t itemsize, const int M, const int N, const void* A, const int lda, void* B, const int ldb, CUDAContext* context, TypeMeta::Copy copy) { CAFFE_ENFORCE(!copy, "Copy constructor is not supported in CUDA context"); cudaMemcpy2DAsync( B, ldb * itemsize, A, lda * itemsize, N * itemsize, M, cudaMemcpyDeviceToDevice, context->cuda_stream()); } #define CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(T) \ template <> \ void CopyMatrix<T, CUDAContext>( \ const int M, \ const int N, \ const T* A, \ const int lda, \ T* B, \ const int ldb, \ CUDAContext* context) { \ if (M == 0 || N == 0) { \ return; \ } \ cudaMemcpy2DAsync( \ B, \ sizeof(T) * ldb, \ A, \ sizeof(T) * lda, \ sizeof(T) * N, \ M, \ cudaMemcpyDeviceToDevice, \ context->cuda_stream()); \ } CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(float) CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(double) CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(int) CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(int64_t) #undef CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX template <> CAFFE2_CUDA_EXPORT void CopyVector<float, CUDAContext>( const int N, const float* src, float* dst, CUDAContext* context) { if (src != dst && N > 0) { cudaMemcpyAsync( dst, src, sizeof(float) * N, cudaMemcpyDeviceToDevice, context->cuda_stream()); } } template <> CAFFE2_CUDA_EXPORT void CopyVector<int, CUDAContext>( const int N, const int* src, int* dst, CUDAContext* context) { if (src != dst && N > 0) { cudaMemcpyAsync( dst, src, sizeof(int) * N, cudaMemcpyDeviceToDevice, context->cuda_stream()); } } namespace { template <typename T> using BlockReduce = cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>; template <typename T, class Reducer> __global__ void RowwiseReduceKernel( const int rows, const int cols, const Reducer reducer, const T init, const T alpha, const T* X, T* Y) { __shared__ typename BlockReduce<T>::TempStorage temp_storage; for (int i = blockIdx.x; i < rows; i += gridDim.x) { T val = init; for (int j = threadIdx.x; j < cols; j += blockDim.x) { val = reducer(X[i * cols + j], val); } val = BlockReduce<T>(temp_storage).Reduce(val, reducer); if (threadIdx.x == 0) { Y[i] = val * alpha; } __syncthreads(); } } template <typename T, class Reducer> __global__ void ColwiseReduceKernel( const int rows, const int cols, const Reducer reducer, const T init, const T alpha, const T* X, T* Y) { __shared__ typename BlockReduce<T>::TempStorage temp_storage; for (int i = blockIdx.x; i < cols; i += gridDim.x) { T val = init; for (int j = threadIdx.x; j < rows; j += blockDim.x) { val = reducer(X[j * cols + i], val); } val = BlockReduce<T>(temp_storage).Reduce(val, reducer); if (threadIdx.x == 0) { Y[i] = val * alpha; } __syncthreads(); } } } // namespace #define CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(T) \ template <> \ CAFFE2_CUDA_EXPORT void RowwiseMax<T, CUDAContext>( \ const int N, const int D, const T* x, T* y, CUDAContext* context) { \ RowwiseReduceKernel<<< \ std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ 
context->cuda_stream()>>>( \ N, D, cub::Max(), std::numeric_limits<T>::lowest(), T(1), x, y); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); \ } CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(float) #undef CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX #define CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(T) \ template <> \ CAFFE2_CUDA_EXPORT void ColwiseMax<T, CUDAContext>( \ const int N, const int D, const T* x, T* y, CUDAContext* context) { \ ColwiseReduceKernel<<< \ std::min(D, CAFFE_MAXIMUM_NUM_BLOCKS), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>( \ N, D, cub::Max(), std::numeric_limits<T>::lowest(), T(1), x, y); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); \ } CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(float) #undef CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX namespace { __global__ void maximum_kernel(const int N, const float alpha, const float* x, float* y) { CUDA_1D_KERNEL_LOOP(i, N) { y[i] = fmaxf(x[i], alpha); } } } // namespace template <> CAFFE2_CUDA_EXPORT void Maximum( const int N, const float alpha, const float* x, float* y, CUDAContext* context) { maximum_kernel<<< std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(N, alpha, x, y); C10_CUDA_KERNEL_LAUNCH_CHECK(); } namespace { template <typename T, int D> __global__ void BroadcastCUDAKernel( const int Y_size, const SimpleArray<int, D> X_strides, const SimpleArray<FIXED_DIVISOR, D> Y_dims, const T alpha, const T* X, T* Y) { CUDA_1D_KERNEL_LOOP(Y_index, Y_size) { int X_index = 0; int Y_index_val = Y_index; #pragma unroll for (int i = D - 1; i >= 0; --i) { int d; FIXED_DIVISOR_DIV_MOD(Y_dims.data[i], Y_index_val, &Y_index_val, &d); X_index += d * X_strides.data[i]; } #if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__) Y[Y_index] = __ldg(X + X_index) * alpha; #else Y[Y_index] = X[X_index] * alpha; #endif } } template <typename T, int D> CAFFE2_CUDA_EXPORT void BroadcastCUDAImpl( const int X_ndim, const int* X_dims, const int* Y_dims, const T alpha, const T* X, T* Y, CUDAContext* context) { SimpleArray<int, D> X_strides_array; SimpleArray<FIXED_DIVISOR, D> Y_dims_array; const int d = D - X_ndim; std::fill(X_strides_array.data, X_strides_array.data + d, 0); int cur_stride = 1; for (int i = D - 1; i >= d; --i) { CAFFE_ENFORCE(X_dims[i - d] == 1 || X_dims[i - d] == Y_dims[i]); X_strides_array.data[i] = X_dims[i - d] == 1 ? 
0 : cur_stride; cur_stride *= X_dims[i - d]; } for (int i = 0; i < D; ++i) { if (Y_dims[i] == 0) { return; } Y_dims_array.data[i] = FIXED_DIVISOR(Y_dims[i]); } const int Y_size = std::accumulate(Y_dims, Y_dims + D, 1, std::multiplies<int>()); BroadcastCUDAKernel<T, D> <<<CAFFE_GET_BLOCKS(Y_size), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( Y_size, X_strides_array, Y_dims_array, alpha, X, Y); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } // namespace #define CAFFE2_SPECIALIZED_CUDA_BROADCAST(T) \ template <> \ CAFFE2_CUDA_EXPORT void Broadcast<T, CUDAContext>( \ const int X_ndim, \ const int* X_dims, \ const int Y_ndim, \ const int* Y_dims, \ const T alpha, \ const T* X, \ T* Y, \ CUDAContext* context) { \ CAFFE_ENFORCE_LE(X_ndim, Y_ndim); \ DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( \ Y_ndim, \ BroadcastCUDAImpl, \ T, \ X_ndim, \ X_dims, \ Y_dims, \ alpha, \ X, \ Y, \ context); \ } CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int32_t) CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int64_t) CAFFE2_SPECIALIZED_CUDA_BROADCAST(float) CAFFE2_SPECIALIZED_CUDA_BROADCAST(double) #undef CAFFE2_SPECIALIZED_CUDA_BROADCAST namespace { template <typename T> __global__ void InvStdCUDAKernel(const int N, const T epsilon, const T* var, T* inv_std); #define DELEGATE_INV_STD_KERNEL_FUNCTION(T, Func) \ template <> \ __global__ void InvStdCUDAKernel<T>( \ const int N, const T epsilon, const T* var, T* inv_std) { \ CUDA_1D_KERNEL_LOOP(i, N) { \ inv_std[i] = Func(var[i] + epsilon); \ } \ } DELEGATE_INV_STD_KERNEL_FUNCTION(float, rsqrtf) #undef DELEGATE_INV_STD_KERNEL_FUNCTION } // namespace #define CAFFE2_SPECIALIZED_CUDA_INV_STD(T) \ template <> \ CAFFE2_CUDA_EXPORT void InvStd<T, CUDAContext>( \ const int N, \ const T epsilon, \ const T* var, \ T* inv_std, \ CUDAContext* context) { \ InvStdCUDAKernel<T> \ <<<CAFFE_GET_BLOCKS(N), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>(N, epsilon, var, inv_std); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); \ } CAFFE2_SPECIALIZED_CUDA_INV_STD(float) #undef CAFFE2_SPECIALIZED_CUDA_INV_STD } // namespace math } // namespace caffe2
fd616bad39d5197a84de4f6776e633f01e3f29c4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <hip/hip_runtime.h> #include <hip/device_functions.h> #include <hip/hip_runtime_api.h> #include <stdio.h> /* printf, scanf, puts, NULL */ #include <stdlib.h> /* srand, rand */ #include <time.h> #include <hip/device_functions.h> #define SIZE 32 #define TSIZE 1024 using namespace std; __global__ void addKernel(int *A, int *B) { __shared__ int auxMatrix[SIZE][SIZE]; int col = threadIdx.x + blockIdx.x * blockDim.x; int row = threadIdx.y + blockIdx.y * blockDim.y; int pos = col + row * SIZE; //if 0,0 if (row == 0 && col == 0) auxMatrix[row][col] = A[pos] + A[pos + 1] + A[pos + SIZE]; //if 0,31 else if (row == 0 && col == 31) auxMatrix[row][col] = A[pos] + A[pos - 1] + A[pos + SIZE]; //if 31,0 else if (row == 31 && col == 0) auxMatrix[row][col] = A[pos] + A[pos + 1] + A[pos - SIZE]; //if 31,31 else if (row == 31 && col == 31) auxMatrix[row][col] = A[pos] + A[pos - 1] + A[pos - SIZE]; //if row 0 only else if (row == 0) auxMatrix[row][col] = A[pos] + A[pos + 1] + A[pos - 1] + A[pos + SIZE]; //if row 31 only else if (row == 31) auxMatrix[row][col] = A[pos] + A[pos + 1] + A[pos - 1] + A[pos - SIZE]; //if col 0 only else if (col == 0) auxMatrix[row][col] = A[pos] + A[pos + 1] + A[pos - SIZE] + A[pos + SIZE]; //if col 1 only else if (col == 31) auxMatrix[row][col] = A[pos] + A[pos - 1] + A[pos - SIZE] + A[pos + SIZE]; //if central position in the matrix else if (row != 0 && col != 0 && row != 31 && col != 31) auxMatrix[row][col] = A[pos] + A[pos + 1] + A[pos - 1] + A[pos + SIZE] + A[pos - SIZE]; __syncthreads(); //printf("\nA[%d]=%d", pos, A[pos]); //printf("\nA[%d]=%d\nauxMatrix[%d][%d]=%d", pos, A[pos], row, col, auxMatrix[row][col]); //printf("\nCol: %d , threadIdx.x: %d, blockIdx.x: %d, blockDim.x: %d", col, threadIdx.x, blockIdx.x, blockDim.x); //printf("\nRow: %d , threadIdx.y: %d, blockIdx.y: %d, blockDim.y: %d", row, threadIdx.y, blockIdx.y, blockDim.y); B[pos] = auxMatrix[row][col]; //printf("\nB[%d]=%d", pos, B[pos]); } int main() { int A[SIZE][SIZE], B[SIZE][SIZE]; srand(time(NULL)); for (int i = 0; i < SIZE; i++) { for (int k = 0; k < SIZE; k++) { A[i][k] = rand() % 99; } } // Add vectors in parallel. 
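// Launch configuration: a 2x2 grid of 16x16 blocks gives exactly 32x32 threads,
// one per element of the SIZE x SIZE matrix. Each thread sums its element with
// its in-bounds horizontal/vertical neighbours into shared memory and writes the
// result to B. The 64 bytes of dynamic shared memory requested at launch are not
// used; the kernel only touches its statically declared auxMatrix[SIZE][SIZE].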
int *dev_A, *dev_B; size_t sharedMem = 64; dim3 dimGrid(2, 2); dim3 dimBlock(16, 16); hipMalloc((void**)&dev_A, SIZE * SIZE *sizeof(int)); hipMalloc((void**)&dev_B, SIZE * SIZE *sizeof(int)); hipMemcpy(dev_A, A, SIZE *SIZE *sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( addKernel) , dim3(dimGrid), dim3(dimBlock), sharedMem, 0, dev_A, dev_B); hipDeviceSynchronize(); hipMemcpy(B, dev_B, 32 * 32 * sizeof(int), hipMemcpyDeviceToHost); printf("\n\n--A--"); for (int i = 0; i < SIZE; i++) { printf("\n["); for (int j = 0; j < SIZE; j++) { if (j != 31) if (A[i][j] / 10 == 0) printf("00%d-", A[i][j]); else if (A[i][j] / 100 == 0) printf("0%d-", A[i][j]); else printf("%d-", A[i][j]); else if (A[i][j] / 10 == 0) printf("00%d", A[i][j]); else if (A[i][j] / 100 == 0) printf("0%d", A[i][j]); else printf("%d", A[i][j]); } printf("]"); } printf("\n\n--B--"); for (int i = 0; i < SIZE; i++) { printf("\n["); for (int j = 0; j < SIZE; j++) { if(j!=31) if (B[i][j] / 10 == 0) printf("00%d-", B[i][j]); else if(B[i][j]/100 == 0) printf("0%d-", B[i][j]); else printf("%d-", B[i][j]); else if (B[i][j] / 10 == 0) printf("00%d", B[i][j]); else if (B[i][j] / 100 == 0) printf("0%d", B[i][j]); else printf("%d", B[i][j]); } printf("]"); } // hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost); hipFree(dev_A); hipFree(dev_B); return 0; }
fd616bad39d5197a84de4f6776e633f01e3f29c4.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cuda.h> #include <device_functions.h> #include <cuda_runtime_api.h> #include <stdio.h> /* printf, scanf, puts, NULL */ #include <stdlib.h> /* srand, rand */ #include <time.h> #include <device_functions.h> #define SIZE 32 #define TSIZE 1024 using namespace std; __global__ void addKernel(int *A, int *B) { __shared__ int auxMatrix[SIZE][SIZE]; int col = threadIdx.x + blockIdx.x * blockDim.x; int row = threadIdx.y + blockIdx.y * blockDim.y; int pos = col + row * SIZE; //if 0,0 if (row == 0 && col == 0) auxMatrix[row][col] = A[pos] + A[pos + 1] + A[pos + SIZE]; //if 0,31 else if (row == 0 && col == 31) auxMatrix[row][col] = A[pos] + A[pos - 1] + A[pos + SIZE]; //if 31,0 else if (row == 31 && col == 0) auxMatrix[row][col] = A[pos] + A[pos + 1] + A[pos - SIZE]; //if 31,31 else if (row == 31 && col == 31) auxMatrix[row][col] = A[pos] + A[pos - 1] + A[pos - SIZE]; //if row 0 only else if (row == 0) auxMatrix[row][col] = A[pos] + A[pos + 1] + A[pos - 1] + A[pos + SIZE]; //if row 31 only else if (row == 31) auxMatrix[row][col] = A[pos] + A[pos + 1] + A[pos - 1] + A[pos - SIZE]; //if col 0 only else if (col == 0) auxMatrix[row][col] = A[pos] + A[pos + 1] + A[pos - SIZE] + A[pos + SIZE]; //if col 1 only else if (col == 31) auxMatrix[row][col] = A[pos] + A[pos - 1] + A[pos - SIZE] + A[pos + SIZE]; //if central position in the matrix else if (row != 0 && col != 0 && row != 31 && col != 31) auxMatrix[row][col] = A[pos] + A[pos + 1] + A[pos - 1] + A[pos + SIZE] + A[pos - SIZE]; __syncthreads(); //printf("\nA[%d]=%d", pos, A[pos]); //printf("\nA[%d]=%d\nauxMatrix[%d][%d]=%d", pos, A[pos], row, col, auxMatrix[row][col]); //printf("\nCol: %d , threadIdx.x: %d, blockIdx.x: %d, blockDim.x: %d", col, threadIdx.x, blockIdx.x, blockDim.x); //printf("\nRow: %d , threadIdx.y: %d, blockIdx.y: %d, blockDim.y: %d", row, threadIdx.y, blockIdx.y, blockDim.y); B[pos] = auxMatrix[row][col]; //printf("\nB[%d]=%d", pos, B[pos]); } int main() { int A[SIZE][SIZE], B[SIZE][SIZE]; srand(time(NULL)); for (int i = 0; i < SIZE; i++) { for (int k = 0; k < SIZE; k++) { A[i][k] = rand() % 99; } } // Add vectors in parallel. int *dev_A, *dev_B; size_t sharedMem = 64; dim3 dimGrid(2, 2); dim3 dimBlock(16, 16); cudaMalloc((void**)&dev_A, SIZE * SIZE *sizeof(int)); cudaMalloc((void**)&dev_B, SIZE * SIZE *sizeof(int)); cudaMemcpy(dev_A, A, SIZE *SIZE *sizeof(int), cudaMemcpyHostToDevice); addKernel <<<dimGrid, dimBlock, sharedMem>>> (dev_A, dev_B); cudaDeviceSynchronize(); cudaMemcpy(B, dev_B, 32 * 32 * sizeof(int), cudaMemcpyDeviceToHost); printf("\n\n--A--"); for (int i = 0; i < SIZE; i++) { printf("\n["); for (int j = 0; j < SIZE; j++) { if (j != 31) if (A[i][j] / 10 == 0) printf("00%d-", A[i][j]); else if (A[i][j] / 100 == 0) printf("0%d-", A[i][j]); else printf("%d-", A[i][j]); else if (A[i][j] / 10 == 0) printf("00%d", A[i][j]); else if (A[i][j] / 100 == 0) printf("0%d", A[i][j]); else printf("%d", A[i][j]); } printf("]"); } printf("\n\n--B--"); for (int i = 0; i < SIZE; i++) { printf("\n["); for (int j = 0; j < SIZE; j++) { if(j!=31) if (B[i][j] / 10 == 0) printf("00%d-", B[i][j]); else if(B[i][j]/100 == 0) printf("0%d-", B[i][j]); else printf("%d-", B[i][j]); else if (B[i][j] / 10 == 0) printf("00%d", B[i][j]); else if (B[i][j] / 100 == 0) printf("0%d", B[i][j]); else printf("%d", B[i][j]); } printf("]"); } // cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(dev_A); cudaFree(dev_B); return 0; }
66f6141762face7959560f854549d3e8e23e760c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define USE_LDG #include "diffusion/diffusion_cuda_shared.h" #include "common/cuda_util.h" namespace diffusion { namespace cuda_shared3_prefetch { #define GET(x) (x) /* Hoists boundary conditions out of the z-direction loop. Three top-level conditional blocks, one corresponding to the horizontal row at y == 0, another to the horizontal row at y == dimy-1, and the other to the rest. The first section takes care of loading halos at the x-direction. The y-direction halos are not cached for simplicity, and it is expected not to have much performance difference. */ __global__ void kernel3d(F1_DECL f1, F2_DECL f2, int nx, int ny, int nz, REAL ce, REAL cw, REAL cn, REAL cs, REAL ct, REAL cb, REAL cc) { // shared memory shape is (dimx+2) * dimy. Halo for y dir is not // cached. extern __shared__ REAL sb[]; const int sbx = blockDim.x+2; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int xy = nx * ny; const int block_z = nz / gridDim.z; int k = block_z * blockIdx.z; const int k_end = k + block_z; int p = OFFSET3D(i, j, k, nx, ny); int ps = threadIdx.x+1 + threadIdx.y * sbx; float t1, t2, t3, t4; int s = (j == 0) ? 0 : -nx; int n = (j == ny-1) ? 0 : nx; t3 = GET(f1[p]); t4 = GET(f1[p+xy]); t2 = (k == 0) ? t3 : GET(f1[p-xy]); // Move out the boundary conditions from the loop body if (threadIdx.y == 0) { // the threads at row y == 0 also take care of loading vertical // halows at x == 0 and x == blockDIm.x - 1 int w = (blockIdx.x == 0) ? 0 : -1; int e = (blockIdx.x == gridDim.x-1) ? 0 : 1; int h = (threadIdx.x < blockDim.y) ? w : (blockDim.x - 1 + e); h = - threadIdx.x + h + (threadIdx.x & (blockDim.y-1)) * nx; int sbt = (threadIdx.x & (blockDim.y-1)) * sbx; // the latter half takes care of the east boundary if (threadIdx.x >= blockDim.y) sbt += sbx-1; for (; k < k_end-2; ++k) { SHIFT4(t1, t2, t3, t4); t4 = GET(f1[p+xy*2]); sb[ps] = t2; if (threadIdx.x < blockDim.y*2) { sb[sbt] = LDG(f1 + p+h); } __syncthreads(); f2[p] = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs*GET(f1[p+s]) + cn*sb[ps+sbx] + cb*t1 + ct*t3; p += xy; __syncthreads(); } SHIFT4(t1, t2, t3, t4); t4 = (k < nz-2) ? GET(f1[p+xy*2]) : t4; sb[ps] = t2; if (threadIdx.x < blockDim.y*2) { sb[sbt] = LDG(f1 + p+h); } __syncthreads(); f2[p] = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * GET(f1[p+s]) + cn * sb[ps+sbx] + cb * t1 + ct * t3; p += xy; __syncthreads(); SHIFT4(t1, t2, t3, t4); sb[ps] = t2; if (threadIdx.x < blockDim.y*2) { sb[sbt] = LDG(f1 + p+h); } __syncthreads(); f2[p] = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * GET(f1[p+s]) + cn * sb[ps+sbx] + cb * t1 + ct * t3; } else if (threadIdx.y == blockDim.y - 1) { for (; k < k_end-2; ++k) { SHIFT4(t1, t2, t3, t4); t4 = GET(f1[p+xy*2]); sb[ps] = t2; __syncthreads(); f2[p] = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * sb[ps-sbx] + cn * GET(f1[p+n]) + cb * t1 + ct * t3; p += xy; __syncthreads(); } SHIFT4(t1, t2, t3, t4); t4 = (k < nz-2) ? 
GET(f1[p+xy*2]) : t4; sb[ps] = t2; __syncthreads(); f2[p] = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * sb[ps-sbx]+ cn * GET(f1[p+n]) + cb * t1 + ct * t3; p += xy; __syncthreads(); SHIFT4(t1, t2, t3, t4); sb[ps] = t2; __syncthreads(); f2[p] = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * sb[ps-sbx]+ cn * GET(f1[p+n]) + cb * t1 + ct * t3; } else { for (; k < k_end-2; ++k) { SHIFT4(t1, t2, t3, t4); t4 = GET(f1[p+xy*2]); sb[ps] = t2; __syncthreads(); f2[p] = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * sb[ps-sbx]+ cn * sb[ps+sbx] + cb * t1 + ct * t3; p += xy; __syncthreads(); } SHIFT4(t1, t2, t3, t4); t4 = (k < nz-2) ? GET(f1[p+xy*2]) : t4; sb[ps] = t2; __syncthreads(); f2[p] = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * sb[ps-sbx]+ cn * sb[ps+sbx] + cb * t1 + ct * t3; p += xy; __syncthreads(); SHIFT4(t1, t2, t3, t4); sb[ps] = t2; __syncthreads(); f2[p] = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * sb[ps-sbx]+ cn * sb[ps+sbx] + cb * t1 + ct * t3; } return; } } // namespace cuda_shared3_prefetch void DiffusionCUDAShared3Prefetch::RunKernel(int count) { size_t s = sizeof(REAL) * nx_ * ny_ * nz_; FORCE_CHECK_CUDA(hipMemcpy(f1_d_, f1_, s, hipMemcpyHostToDevice)); dim3 block_dim(block_x_, block_y_); dim3 grid_dim(nx_ / block_x_, ny_ / block_y_); if (ndim_ == 3) grid_dim.z = grid_z_; CHECK_CUDA(hipEventRecord(ev1_)); for (int i = 0; i < count; ++i) { hipLaunchKernelGGL(( cuda_shared3_prefetch::kernel3d), dim3(grid_dim), dim3(block_dim), (block_x_+2)*(block_y_)*sizeof(float), 0, f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_); REAL *t = f1_d_; f1_d_ = f2_d_; f2_d_ = t; } CHECK_CUDA(hipEventRecord(ev2_)); FORCE_CHECK_CUDA(hipMemcpy(f1_, f1_d_, s, hipMemcpyDeviceToHost)); return; } void DiffusionCUDAShared3Prefetch::Setup() { DiffusionCUDA::Setup(); FORCE_CHECK_CUDA(hipFuncSetCacheConfig(cuda_shared3_prefetch::kernel3d, hipFuncCachePreferShared)); } } // namespace diffusion
66f6141762face7959560f854549d3e8e23e760c.cu
#define USE_LDG #include "diffusion/diffusion_cuda_shared.h" #include "common/cuda_util.h" namespace diffusion { namespace cuda_shared3_prefetch { #define GET(x) (x) /* Hoists boundary conditions out of the z-direction loop. Three top-level conditional blocks, one corresponding to the horizontal row at y == 0, another to the horizontal row at y == dimy-1, and the other to the rest. The first section takes care of loading halos at the x-direction. The y-direction halos are not cached for simplicity, and it is expected not to have much performance difference. */ __global__ void kernel3d(F1_DECL f1, F2_DECL f2, int nx, int ny, int nz, REAL ce, REAL cw, REAL cn, REAL cs, REAL ct, REAL cb, REAL cc) { // shared memory shape is (dimx+2) * dimy. Halo for y dir is not // cached. extern __shared__ REAL sb[]; const int sbx = blockDim.x+2; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int xy = nx * ny; const int block_z = nz / gridDim.z; int k = block_z * blockIdx.z; const int k_end = k + block_z; int p = OFFSET3D(i, j, k, nx, ny); int ps = threadIdx.x+1 + threadIdx.y * sbx; float t1, t2, t3, t4; int s = (j == 0) ? 0 : -nx; int n = (j == ny-1) ? 0 : nx; t3 = GET(f1[p]); t4 = GET(f1[p+xy]); t2 = (k == 0) ? t3 : GET(f1[p-xy]); // Move out the boundary conditions from the loop body if (threadIdx.y == 0) { // the threads at row y == 0 also take care of loading vertical // halows at x == 0 and x == blockDIm.x - 1 int w = (blockIdx.x == 0) ? 0 : -1; int e = (blockIdx.x == gridDim.x-1) ? 0 : 1; int h = (threadIdx.x < blockDim.y) ? w : (blockDim.x - 1 + e); h = - threadIdx.x + h + (threadIdx.x & (blockDim.y-1)) * nx; int sbt = (threadIdx.x & (blockDim.y-1)) * sbx; // the latter half takes care of the east boundary if (threadIdx.x >= blockDim.y) sbt += sbx-1; for (; k < k_end-2; ++k) { SHIFT4(t1, t2, t3, t4); t4 = GET(f1[p+xy*2]); sb[ps] = t2; if (threadIdx.x < blockDim.y*2) { sb[sbt] = LDG(f1 + p+h); } __syncthreads(); f2[p] = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs*GET(f1[p+s]) + cn*sb[ps+sbx] + cb*t1 + ct*t3; p += xy; __syncthreads(); } SHIFT4(t1, t2, t3, t4); t4 = (k < nz-2) ? GET(f1[p+xy*2]) : t4; sb[ps] = t2; if (threadIdx.x < blockDim.y*2) { sb[sbt] = LDG(f1 + p+h); } __syncthreads(); f2[p] = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * GET(f1[p+s]) + cn * sb[ps+sbx] + cb * t1 + ct * t3; p += xy; __syncthreads(); SHIFT4(t1, t2, t3, t4); sb[ps] = t2; if (threadIdx.x < blockDim.y*2) { sb[sbt] = LDG(f1 + p+h); } __syncthreads(); f2[p] = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * GET(f1[p+s]) + cn * sb[ps+sbx] + cb * t1 + ct * t3; } else if (threadIdx.y == blockDim.y - 1) { for (; k < k_end-2; ++k) { SHIFT4(t1, t2, t3, t4); t4 = GET(f1[p+xy*2]); sb[ps] = t2; __syncthreads(); f2[p] = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * sb[ps-sbx] + cn * GET(f1[p+n]) + cb * t1 + ct * t3; p += xy; __syncthreads(); } SHIFT4(t1, t2, t3, t4); t4 = (k < nz-2) ? 
GET(f1[p+xy*2]) : t4; sb[ps] = t2; __syncthreads(); f2[p] = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * sb[ps-sbx]+ cn * GET(f1[p+n]) + cb * t1 + ct * t3; p += xy; __syncthreads(); SHIFT4(t1, t2, t3, t4); sb[ps] = t2; __syncthreads(); f2[p] = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * sb[ps-sbx]+ cn * GET(f1[p+n]) + cb * t1 + ct * t3; } else { for (; k < k_end-2; ++k) { SHIFT4(t1, t2, t3, t4); t4 = GET(f1[p+xy*2]); sb[ps] = t2; __syncthreads(); f2[p] = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * sb[ps-sbx]+ cn * sb[ps+sbx] + cb * t1 + ct * t3; p += xy; __syncthreads(); } SHIFT4(t1, t2, t3, t4); t4 = (k < nz-2) ? GET(f1[p+xy*2]) : t4; sb[ps] = t2; __syncthreads(); f2[p] = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * sb[ps-sbx]+ cn * sb[ps+sbx] + cb * t1 + ct * t3; p += xy; __syncthreads(); SHIFT4(t1, t2, t3, t4); sb[ps] = t2; __syncthreads(); f2[p] = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * sb[ps-sbx]+ cn * sb[ps+sbx] + cb * t1 + ct * t3; } return; } } // namespace cuda_shared3_prefetch void DiffusionCUDAShared3Prefetch::RunKernel(int count) { size_t s = sizeof(REAL) * nx_ * ny_ * nz_; FORCE_CHECK_CUDA(cudaMemcpy(f1_d_, f1_, s, cudaMemcpyHostToDevice)); dim3 block_dim(block_x_, block_y_); dim3 grid_dim(nx_ / block_x_, ny_ / block_y_); if (ndim_ == 3) grid_dim.z = grid_z_; CHECK_CUDA(cudaEventRecord(ev1_)); for (int i = 0; i < count; ++i) { cuda_shared3_prefetch::kernel3d<<<grid_dim, block_dim, (block_x_+2)*(block_y_)*sizeof(float)>>> (f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_); REAL *t = f1_d_; f1_d_ = f2_d_; f2_d_ = t; } CHECK_CUDA(cudaEventRecord(ev2_)); FORCE_CHECK_CUDA(cudaMemcpy(f1_, f1_d_, s, cudaMemcpyDeviceToHost)); return; } void DiffusionCUDAShared3Prefetch::Setup() { DiffusionCUDA::Setup(); FORCE_CHECK_CUDA(cudaFuncSetCacheConfig(cuda_shared3_prefetch::kernel3d, cudaFuncCachePreferShared)); } } // namespace diffusion
5cafcb756d709d60aab83f5472af1c685d32a622.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>

void checkResult(float *A, float *B, const int nx, const int ny)
{
    int i = 0;
    int j = 0;
    int cnt = 0;
    double err = 1.0E-6;
    for (j = 0; j < ny; j++) {
        for (i = 0; i < nx; i++) {
            if (fabs(A[cnt] - B[cnt]) > err) {
                printf("Do not match...\n");
                return;
            }
            cnt++;
        }
    }
    printf("matched!\n");
}

void initialData(float *a, int nx, int ny)
{
    int i = nx;
    int j = ny;
    int cnt = 0;
    for (j = 0; j < ny; j++) {
        for (i = 0; i < nx; i++) {
            a[cnt] = cnt;
            cnt++;
        }
    }
}

// sum matrices on the CPU
void sumMatrixOnHost(float *A, float *B, float *C, const int nx, const int ny)
{
    int i = 0;
    int j = 0;
    int cnt = 0;
    for (j = 0; j < ny; j++) {
        for (i = 0; i < nx; i++) {
            C[cnt] = A[cnt] + B[cnt];
            cnt++;
        }
    }
}

void PrintMatrix(float *a)
{
    int i;
    for (i = 0; i < 10; i++) {
        printf("%f ", a[i]);
    }
    printf("\n");
}

// sum matrices on the GPU
__global__ void sumMatrixOnGPU(float *A, float *B, float *C, int nx, int ny)
{
    int x = threadIdx.x + blockDim.x * blockIdx.x;
    int y = threadIdx.y + blockDim.y * blockIdx.y;
    int idx = y*nx + x;
    if (x < nx && y < ny) {
        C[idx] = A[idx] + B[idx];
    }
}

__global__ void test()
{
    printf("hello\n");
}

int main(int argc, char *argv[])
{
    int dev = 0;
    hipDeviceProp_t deviceProp;
    hipGetDeviceProperties(&deviceProp, dev);
    hipSetDevice(dev);

    int nx = 1 << 10; // The GPU memory limit is obvious here: the book uses nx = ny = 1 << 14 (so nx*ny = 1 << 28), but my card cannot handle that.
    int ny = 1 << 10;
    int nxy = nx * ny;
    int nBytes = sizeof(float)*nxy;
    printf("Matrix size: nx:%d, ny:%d\n", nx, ny);

    float *h_A, *h_B, *h_C, *gpuRef;
    float *d_A, *d_B, *d_C;
    h_A = (float *)malloc(nBytes);
    h_B = (float *)malloc(nBytes);
    h_C = (float *)malloc(nBytes);
    gpuRef = (float *)malloc(nBytes);
    memset(gpuRef, 0, nBytes);
    hipMalloc((void **)&d_A, nBytes);
    hipMalloc((void **)&d_B, nBytes);
    hipMalloc((void **)&d_C, nBytes);

    // initialize the data
    initialData(h_A, nx, ny);
    initialData(h_B, nx, ny);

    // copy the data from CPU to GPU
    hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice);
    hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice);

    // compute the reference sum on the host
    sumMatrixOnHost(h_A, h_B, h_C, nx, ny);

    dim3 block(32, 32);
    dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
    sumMatrixOnGPU << <grid, block >> >(d_A, d_B, d_C, nx, ny);
    hipDeviceSynchronize();

    // copy the data from GPU to CPU
    hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost);

    // check the result
    checkResult(h_C, gpuRef, nx, ny);
    PrintMatrix(h_C);
    PrintMatrix(gpuRef);

    // free the memory
    free(h_A);
    free(h_B);
    free(h_C);
    free(gpuRef);
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
    hipDeviceReset();
    return 0;
}
5cafcb756d709d60aab83f5472af1c685d32a622.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> void checkResult(float *A, float *B, const int nx, const int ny) { int i = 0; int j = 0; int cnt = 0; double err = 1.0E-6; for (j = 0; j < ny; j++) { for (i = 0; i < nx; i++) { if (fabs(A[cnt] - B[cnt]) > err) { printf("Do not match...\n"); return; } cnt++; } } printf("matched!\n"); } void initialData(float *a, int nx, int ny) { int i = nx; int j = ny; int cnt = 0; for (j = 0; j < ny; j++) { for (i = 0; i < nx; i++) { a[cnt] = cnt; cnt++; } } } // summary matrix on CPU void sumMatrixOnHost(float *A, float *B, float *C, const int nx, const int ny) { int i = 0; int j = 0; int cnt = 0; for (j = 0; j < ny; j++) { for (i = 0; i < nx; i++) { C[cnt] = A[cnt] + B[cnt]; cnt++; } } } void PrintMatrix(float *a) { int i; for (i = 0; i < 10; i++) { printf("%f ", a[i]); } printf("\n"); } // summary matrix on GPU __global__ void sumMatrixOnGPU(float *A, float *B, float *C, int nx, int ny) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; int idx = y*nx + x; if (x < nx && y < ny) { C[idx] = A[idx] + B[idx]; } } __global__ void test() { printf("hello\n"); } int main(int argc, char *argv[]) { int dev = 0; cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); cudaSetDevice(dev); int nx = 1 << 10; // 此处对显卡的限制比较明显,书中可以让nx和ny分别为1<<14,所以nx*ny = 1<<28,但是我的显卡不行。 int ny = 1 << 10; int nxy = nx * ny; int nBytes = sizeof(float)*nxy; printf("Matrix size: nx:%d, ny:%d\n", nx, ny); float *h_A, *h_B, *h_C, *gpuRef; float *d_A, *d_B, *d_C; h_A = (float *)malloc(nBytes); h_B = (float *)malloc(nBytes); h_C = (float *)malloc(nBytes); gpuRef = (float *)malloc(nBytes); memset(gpuRef, 0, nBytes); cudaMalloc((void **)&d_A, nBytes); cudaMalloc((void **)&d_B, nBytes); cudaMalloc((void **)&d_C, nBytes); // initialize the data initialData(h_A, nx, ny); initialData(h_B, nx, ny); // copy the data from CPU to GPU cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice); // call the summary function sumMatrixOnHost(h_A, h_B, h_C, nx, ny); dim3 block(32, 32); dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y); sumMatrixOnGPU << <grid, block >> >(d_A, d_B, d_C, nx, ny); cudaDeviceSynchronize(); // copy the data from GPU to CPU cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost); // check the result checkResult(h_C, gpuRef, nx, ny); PrintMatrix(h_C); PrintMatrix(gpuRef); // free the memory free(h_A); free(h_B); free(h_C); free(gpuRef); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); cudaDeviceReset(); return 0; }
d6b3cc72640f8d4bbbd189f8c388fa4a88de6ab4.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "gpu_sobel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int width = XSIZE; int height = YSIZE; float *image = NULL; hipMalloc(&image, XSIZE*YSIZE); float *image_out = NULL; hipMalloc(&image_out, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( gpu_sobel), dim3(gridBlock),dim3(threadBlock), 0, 0, width,height,image,image_out); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( gpu_sobel), dim3(gridBlock),dim3(threadBlock), 0, 0, width,height,image,image_out); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( gpu_sobel), dim3(gridBlock),dim3(threadBlock), 0, 0, width,height,image,image_out); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
d6b3cc72640f8d4bbbd189f8c388fa4a88de6ab4.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "gpu_sobel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int width = XSIZE; int height = YSIZE; float *image = NULL; cudaMalloc(&image, XSIZE*YSIZE*sizeof(float)); float *image_out = NULL; cudaMalloc(&image_out, XSIZE*YSIZE*sizeof(float)); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); gpu_sobel<<<gridBlock,threadBlock>>>(width,height,image,image_out); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { gpu_sobel<<<gridBlock,threadBlock>>>(width,height,image,image_out); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { gpu_sobel<<<gridBlock,threadBlock>>>(width,height,image,image_out); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
84512f1664d45cf04d814bd8e42c52544d62ed4d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <vector> #include <iostream> using namespace std; __global__ void GPU_vector_add(int* left, int* right, int* result) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; result[idx] = left[idx] + right[idx]; } void simple_add() { const int DIM{ 1000 }; int* left; hipMalloc(&left, DIM * sizeof(int)); int* right; hipMalloc(&right, DIM * sizeof(int)); int* result; hipMalloc(&result, DIM * sizeof(int)); int* main_result = new int[DIM]; vector<int> mainv_left; for (int i = 0; i < DIM; ++i) mainv_left.push_back(i);; vector<int> mainv_right; for (int i = 0; i < DIM; ++i) mainv_right.push_back(i * 2);; hipMemcpy(left, mainv_left.data(), DIM * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(right, mainv_right.data(), DIM * sizeof(int), hipMemcpyHostToDevice); mainv_left.clear(); mainv_right.clear(); const int thrds = 2; GPU_vector_add << <DIM / thrds, thrds >> > (left, right, result); hipMemcpy(main_result, result, DIM * sizeof(int), hipMemcpyDeviceToHost); hipDeviceSynchronize(); cout << "simple add result:\n\t"; for (int i = 0; i < DIM; ++i) cout << *(main_result + i) << " "; hipFree(left); hipFree(right); hipFree(result); delete[] main_result; }
84512f1664d45cf04d814bd8e42c52544d62ed4d.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <vector> #include <iostream> using namespace std; __global__ void GPU_vector_add(int* left, int* right, int* result) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; result[idx] = left[idx] + right[idx]; } void simple_add() { const int DIM{ 1000 }; int* left; cudaMalloc(&left, DIM * sizeof(int)); int* right; cudaMalloc(&right, DIM * sizeof(int)); int* result; cudaMalloc(&result, DIM * sizeof(int)); int* main_result = new int[DIM]; vector<int> mainv_left; for (int i = 0; i < DIM; ++i) mainv_left.push_back(i);; vector<int> mainv_right; for (int i = 0; i < DIM; ++i) mainv_right.push_back(i * 2);; cudaMemcpy(left, mainv_left.data(), DIM * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(right, mainv_right.data(), DIM * sizeof(int), cudaMemcpyHostToDevice); cudaFree(left); cudaFree(right); mainv_left.clear(); mainv_right.clear(); const int thrds = 2; GPU_vector_add << <DIM / thrds, thrds >> > (left, right, result); cudaMemcpy(main_result, result, DIM * sizeof(int), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); cout << "simple add result:\n\t"; for (int i = 0; i < DIM; ++i) cout << *(main_result + i) << " "; cudaFree(result); delete[] main_result; }
7bdc1328595aff0b4121ada2024e3cb344953fe8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 1993-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #include <hiprand/hiprand.h> #include <rocblas.h> #include <ctime> #include <assert.h> // Define some error checking macros. #define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); } void cudaErrCheck_(hipError_t stat, const char *file, int line) { if (stat != hipSuccess) { fprintf(stderr, "CUDA Error: %s %s %d\n", hipGetErrorString(stat), file, line); } } #define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); } void cublasErrCheck_(hipblasStatus_t stat, const char *file, int line) { if (stat != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line); } } #define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); } void curandErrCheck_(hiprandStatus_t stat, const char *file, int line) { if (stat != HIPRAND_STATUS_SUCCESS) { fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line); } } #include <mma.h> using namespace nvcuda; // Must be multiples of 16 for wmma code to work #define SQUARE 384 #define MATRIX_M SQUARE #define MATRIX_N SQUARE #define MATRIX_K SQUARE // The only dimensions currently supported by WMMA const int WMMA_M = 16; const int WMMA_N = 16; const int WMMA_K = 16; // Performs an MxNxK GEMM (C=alpha*A*B + beta*C) assuming: // 1) Matrices are packed in memory. // 2) M, N and K are multiples of 16. // 3) Neither A nor B are transposed. // Note: This is NOT a high performance example but is for demonstration purposes only // For a high performance code please use the GEMM provided in cuBLAS. __global__ void wmma_example(unsigned char *a, unsigned char *b, int *c, int M, int N, int K) { // Leading dimensions. Packed with no transpositions. 
int lda = K; int ldb = N; int ldc = N; // Tile using a 2D grid int warpM = (blockIdx.x * blockDim.x + threadIdx.x) / warpSize; int warpN = (blockIdx.y * blockDim.y + threadIdx.y); // Declare the fragments wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, unsigned char, wmma::row_major> a_frag; wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, unsigned char, wmma::row_major> b_frag; wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, int> acc_frag; wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, int> c_frag; wmma::fill_fragment(acc_frag, 0); int t; // Loop over k for (int i = 0; i < K; i += WMMA_K) { int aRow = warpM * WMMA_M; int aCol = i; int bRow = i; int bCol = warpN * WMMA_N; // Bounds checking if (aRow < M && aCol < K && bRow < K && bCol < N) { // Load the inputs wmma::load_matrix_sync(a_frag, a + aRow*lda + aCol , lda); wmma::load_matrix_sync(b_frag, b + bRow*ldb + bCol , ldb); // Perform the matrix multiplication wmma::mma_sync(acc_frag, a_frag, b_frag, acc_frag); } } // Load in the current value of c, scale it by beta, and add this our result scaled by alpha int cRow = warpM * WMMA_M; int cCol = warpN * WMMA_N; if (cRow < M && cCol < N) { wmma::load_matrix_sync(c_frag, c + cRow*ldc + cCol, ldc, wmma::mem_row_major); for(int i=0; i < c_frag.num_elements; i++) { c_frag.x[i] = acc_frag.x[i] + c_frag.x[i]; } // Store the output wmma::store_matrix_sync(c + cRow*ldc + cCol, c_frag, ldc, wmma::mem_row_major); /* for(int i = 0 ; i < c_frag.num_elements;i++){ t = static_cast<int>(c_frag.x[i]); printf("thread C : %d\n", t); }*/ } } __global__ void set_value (unsigned char *in, int n) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < n) { unsigned char a=1; in[idx]=a; } } __global__ void set_value (int *in, int n) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < n) { int a=1; in[idx]=a; } } int main(int argc, char* argv[]) { unsigned char *a_u8; unsigned char *b_u8; int *c_int; int *c_host_wmma; cudaErrCheck(hipMalloc((void**)&a_u8, MATRIX_M * MATRIX_K * sizeof(unsigned char))); cudaErrCheck(hipMalloc((void**)&b_u8, MATRIX_K * MATRIX_N * sizeof(unsigned char))); cudaErrCheck(hipMalloc((void**)&c_int, MATRIX_M * MATRIX_N * sizeof(int))); c_host_wmma = (int*)malloc(MATRIX_M * MATRIX_N * sizeof(int)); // hiprand doesn't currently support fp16 so we generate in fp32 and convert to fp16. 
hipLaunchKernelGGL(( set_value) , dim3((MATRIX_M * MATRIX_K + 255) / 256), dim3(256) , 0, 0, a_u8, MATRIX_M*MATRIX_K); hipLaunchKernelGGL(( set_value) , dim3((MATRIX_K * MATRIX_N + 255) / 256), dim3(256) , 0, 0, b_u8, MATRIX_K*MATRIX_N); hipLaunchKernelGGL(( set_value) , dim3((MATRIX_K * MATRIX_N + 255) / 256), dim3(256) , 0, 0, c_int, MATRIX_M*MATRIX_N); printf("\nM = %d, N = %d, K = %d.\n\n", MATRIX_M, MATRIX_N, MATRIX_K); // First: using WMMA dim3 gridDim; dim3 blockDim; // blockDim.x must be a multple of warpSize // 128x4 means we have 16 warps and a block computes a 64x64 output tile blockDim.x = 128; blockDim.y = 4; gridDim.x = (MATRIX_M + (WMMA_M * blockDim.x / 32 - 1)) / (WMMA_M * blockDim.x / 32); gridDim.y = (MATRIX_N + WMMA_N * blockDim.y - 1) / (WMMA_N * blockDim.y); hipLaunchKernelGGL(( wmma_example) , dim3(gridDim), dim3(blockDim) , 0, 0, a_u8, b_u8, c_int, MATRIX_M, MATRIX_N, MATRIX_K); cudaErrCheck(hipMemcpy(c_host_wmma, c_int, MATRIX_M * MATRIX_N * sizeof(int), hipMemcpyDeviceToHost)); int t; for(int i = 0 ; i < MATRIX_M; i++){ for(int j = 0 ; j < MATRIX_N ; j++){ t = (c_host_wmma[i*MATRIX_N+j]); printf("%d ",t); } printf("\n"); } cudaErrCheck(hipFree(a_u8)); cudaErrCheck(hipFree(b_u8)); cudaErrCheck(hipFree(c_int)); free(c_host_wmma); cudaErrCheck(hipDeviceReset()); return 0; }
7bdc1328595aff0b4121ada2024e3cb344953fe8.cu
/* Copyright (c) 1993-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #include <curand.h> #include <cublas_v2.h> #include <ctime> #include <assert.h> // Define some error checking macros. #define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); } void cudaErrCheck_(cudaError_t stat, const char *file, int line) { if (stat != cudaSuccess) { fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line); } } #define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); } void cublasErrCheck_(cublasStatus_t stat, const char *file, int line) { if (stat != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line); } } #define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); } void curandErrCheck_(curandStatus_t stat, const char *file, int line) { if (stat != CURAND_STATUS_SUCCESS) { fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line); } } #include <mma.h> using namespace nvcuda; // Must be multiples of 16 for wmma code to work #define SQUARE 384 #define MATRIX_M SQUARE #define MATRIX_N SQUARE #define MATRIX_K SQUARE // The only dimensions currently supported by WMMA const int WMMA_M = 16; const int WMMA_N = 16; const int WMMA_K = 16; // Performs an MxNxK GEMM (C=alpha*A*B + beta*C) assuming: // 1) Matrices are packed in memory. // 2) M, N and K are multiples of 16. // 3) Neither A nor B are transposed. // Note: This is NOT a high performance example but is for demonstration purposes only // For a high performance code please use the GEMM provided in cuBLAS. __global__ void wmma_example(unsigned char *a, unsigned char *b, int *c, int M, int N, int K) { // Leading dimensions. Packed with no transpositions. 
int lda = K; int ldb = N; int ldc = N; // Tile using a 2D grid int warpM = (blockIdx.x * blockDim.x + threadIdx.x) / warpSize; int warpN = (blockIdx.y * blockDim.y + threadIdx.y); // Declare the fragments wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, unsigned char, wmma::row_major> a_frag; wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, unsigned char, wmma::row_major> b_frag; wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, int> acc_frag; wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, int> c_frag; wmma::fill_fragment(acc_frag, 0); int t; // Loop over k for (int i = 0; i < K; i += WMMA_K) { int aRow = warpM * WMMA_M; int aCol = i; int bRow = i; int bCol = warpN * WMMA_N; // Bounds checking if (aRow < M && aCol < K && bRow < K && bCol < N) { // Load the inputs wmma::load_matrix_sync(a_frag, a + aRow*lda + aCol , lda); wmma::load_matrix_sync(b_frag, b + bRow*ldb + bCol , ldb); // Perform the matrix multiplication wmma::mma_sync(acc_frag, a_frag, b_frag, acc_frag); } } // Load in the current value of c, scale it by beta, and add this our result scaled by alpha int cRow = warpM * WMMA_M; int cCol = warpN * WMMA_N; if (cRow < M && cCol < N) { wmma::load_matrix_sync(c_frag, c + cRow*ldc + cCol, ldc, wmma::mem_row_major); for(int i=0; i < c_frag.num_elements; i++) { c_frag.x[i] = acc_frag.x[i] + c_frag.x[i]; } // Store the output wmma::store_matrix_sync(c + cRow*ldc + cCol, c_frag, ldc, wmma::mem_row_major); /* for(int i = 0 ; i < c_frag.num_elements;i++){ t = static_cast<int>(c_frag.x[i]); printf("thread C : %d\n", t); }*/ } } __global__ void set_value (unsigned char *in, int n) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < n) { unsigned char a=1; in[idx]=a; } } __global__ void set_value (int *in, int n) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < n) { int a=1; in[idx]=a; } } int main(int argc, char* argv[]) { unsigned char *a_u8; unsigned char *b_u8; int *c_int; int *c_host_wmma; cudaErrCheck(cudaMalloc((void**)&a_u8, MATRIX_M * MATRIX_K * sizeof(unsigned char))); cudaErrCheck(cudaMalloc((void**)&b_u8, MATRIX_K * MATRIX_N * sizeof(unsigned char))); cudaErrCheck(cudaMalloc((void**)&c_int, MATRIX_M * MATRIX_N * sizeof(int))); c_host_wmma = (int*)malloc(MATRIX_M * MATRIX_N * sizeof(int)); // curand doesn't currently support fp16 so we generate in fp32 and convert to fp16. set_value <<< (MATRIX_M * MATRIX_K + 255) / 256, 256 >>> (a_u8, MATRIX_M*MATRIX_K); set_value <<< (MATRIX_K * MATRIX_N + 255) / 256, 256 >>> (b_u8, MATRIX_K*MATRIX_N); set_value <<< (MATRIX_K * MATRIX_N + 255) / 256, 256 >>> (c_int, MATRIX_M*MATRIX_N); printf("\nM = %d, N = %d, K = %d.\n\n", MATRIX_M, MATRIX_N, MATRIX_K); // First: using WMMA dim3 gridDim; dim3 blockDim; // blockDim.x must be a multple of warpSize // 128x4 means we have 16 warps and a block computes a 64x64 output tile blockDim.x = 128; blockDim.y = 4; gridDim.x = (MATRIX_M + (WMMA_M * blockDim.x / 32 - 1)) / (WMMA_M * blockDim.x / 32); gridDim.y = (MATRIX_N + WMMA_N * blockDim.y - 1) / (WMMA_N * blockDim.y); wmma_example <<< gridDim, blockDim >>> (a_u8, b_u8, c_int, MATRIX_M, MATRIX_N, MATRIX_K); cudaErrCheck(cudaMemcpy(c_host_wmma, c_int, MATRIX_M * MATRIX_N * sizeof(int), cudaMemcpyDeviceToHost)); int t; for(int i = 0 ; i < MATRIX_M; i++){ for(int j = 0 ; j < MATRIX_N ; j++){ t = (c_host_wmma[i*MATRIX_N+j]); printf("%d ",t); } printf("\n"); } cudaErrCheck(cudaFree(a_u8)); cudaErrCheck(cudaFree(b_u8)); cudaErrCheck(cudaFree(c_int)); free(c_host_wmma); cudaErrCheck(cudaDeviceReset()); return 0; }
f9848ea2c9a7d4c4a7eeb1b444e39d26a868f46d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // This is the REAL "hello world" for CUDA! // It takes the string "Hello ", prints it, then passes it to CUDA with an array // of offsets. Then the offsets are added in parallel to produce the string "World!" // By Ingemar Ragnemalm 2010 #include <stdio.h> const int N = 7; const int blocksize = 7; __global__ void hello(char *a, int *b) { a[threadIdx.x] += b[threadIdx.x]; } int main() { char a[N] = "Hello "; int b[N] = {15, 10, 6, 0, -11, 1, 0}; char *ad; int *bd; const int csize = N*sizeof(char); const int isize = N*sizeof(int); printf("%s", a); hipMalloc( (void**)&ad, csize ); hipMalloc( (void**)&bd, isize ); hipMemcpy( ad, a, csize, hipMemcpyHostToDevice ); hipMemcpy( bd, b, isize, hipMemcpyHostToDevice ); dim3 dimBlock( blocksize, 1 ); dim3 dimGrid( 1, 1 ); hipLaunchKernelGGL(( hello), dim3(dimGrid), dim3(dimBlock), 0, 0, ad, bd); hipMemcpy( a, ad, csize, hipMemcpyDeviceToHost ); hipFree( ad ); printf("%s\n", a); return EXIT_SUCCESS; }
f9848ea2c9a7d4c4a7eeb1b444e39d26a868f46d.cu
// This is the REAL "hello world" for CUDA! // It takes the string "Hello ", prints it, then passes it to CUDA with an array // of offsets. Then the offsets are added in parallel to produce the string "World!" // By Ingemar Ragnemalm 2010 #include <stdio.h> const int N = 7; const int blocksize = 7; __global__ void hello(char *a, int *b) { a[threadIdx.x] += b[threadIdx.x]; } int main() { char a[N] = "Hello "; int b[N] = {15, 10, 6, 0, -11, 1, 0}; char *ad; int *bd; const int csize = N*sizeof(char); const int isize = N*sizeof(int); printf("%s", a); cudaMalloc( (void**)&ad, csize ); cudaMalloc( (void**)&bd, isize ); cudaMemcpy( ad, a, csize, cudaMemcpyHostToDevice ); cudaMemcpy( bd, b, isize, cudaMemcpyHostToDevice ); dim3 dimBlock( blocksize, 1 ); dim3 dimGrid( 1, 1 ); hello<<<dimGrid, dimBlock>>>(ad, bd); cudaMemcpy( a, ad, csize, cudaMemcpyDeviceToHost ); cudaFree( ad ); printf("%s\n", a); return EXIT_SUCCESS; }
1c223e3a2e5bda9ac2068d522324f3e3300f43be.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gtest/gtest.h" #include "ATen/ATen.h" #include "ATen/core/TensorAccessor.h" #include "ATen/hip/HIPContext.h" #include <assert.h> using namespace at; __global__ void test_tensor_packed_accessor_kernel( PackedTensorAccessor<float, 1, RestrictPtrTraits> resa, PackedTensorAccessor<float, 2, RestrictPtrTraits> t1a, PackedTensorAccessor<float, 1, RestrictPtrTraits> t2a) { for (int64_t i = 0; i < resa.size(0); i++) { float val = 0.0f; for (int64_t j = 0; j < t1a.size(1); j++) { val += t1a[i][j] * t2a[j]; } resa[i] = val; } } // test PackedTensorAccessor and Tensor.packed_accessor TEST(PackedtensoraccessorTest, PackedtensoraccessorTestCUDA) { if (!at::cuda::is_available()) return; manual_seed(123); Tensor t1 = rand({4, 4}, CUDA(kFloat)); Tensor t2 = rand({4}, CUDA(kFloat)); Tensor res = empty({4}, CUDA(kFloat)); auto t1a = t1.packed_accessor<float, 2, RestrictPtrTraits>(); auto t2a = t2.packed_accessor<float, 1, RestrictPtrTraits>(); auto resa = res.packed_accessor<float, 1, RestrictPtrTraits>(); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( test_tensor_packed_accessor_kernel), dim3(1), dim3(1), 0, stream, resa, t1a, t2a); hipError_t err = hipDeviceSynchronize(); bool isEQ = err == hipSuccess; ASSERT_TRUE(isEQ); auto expected = mv(t1, t2); ASSERT_TRUE(res.allclose(expected)); }
1c223e3a2e5bda9ac2068d522324f3e3300f43be.cu
#include "gtest/gtest.h" #include "ATen/ATen.h" #include "ATen/core/TensorAccessor.h" #include "ATen/cuda/CUDAContext.h" #include <assert.h> using namespace at; __global__ void test_tensor_packed_accessor_kernel( PackedTensorAccessor<float, 1, RestrictPtrTraits> resa, PackedTensorAccessor<float, 2, RestrictPtrTraits> t1a, PackedTensorAccessor<float, 1, RestrictPtrTraits> t2a) { for (int64_t i = 0; i < resa.size(0); i++) { float val = 0.0f; for (int64_t j = 0; j < t1a.size(1); j++) { val += t1a[i][j] * t2a[j]; } resa[i] = val; } } // test PackedTensorAccessor and Tensor.packed_accessor TEST(PackedtensoraccessorTest, PackedtensoraccessorTestCUDA) { if (!at::cuda::is_available()) return; manual_seed(123); Tensor t1 = rand({4, 4}, CUDA(kFloat)); Tensor t2 = rand({4}, CUDA(kFloat)); Tensor res = empty({4}, CUDA(kFloat)); auto t1a = t1.packed_accessor<float, 2, RestrictPtrTraits>(); auto t2a = t2.packed_accessor<float, 1, RestrictPtrTraits>(); auto resa = res.packed_accessor<float, 1, RestrictPtrTraits>(); auto stream = at::cuda::getCurrentCUDAStream(); test_tensor_packed_accessor_kernel<<<1, 1, 0, stream>>>(resa, t1a, t2a); cudaError_t err = cudaDeviceSynchronize(); bool isEQ = err == cudaSuccess; ASSERT_TRUE(isEQ); auto expected = mv(t1, t2); ASSERT_TRUE(res.allclose(expected)); }
bee9b9414b3590f77303ff64595e44bc32e6dc82.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Created by chengjin on 2020-06-02. // #include "cu_utils.h" #include "cu_device.cuh" #include "detect_kernel.h" namespace quake { namespace framework { namespace ops_lib { template<typename T> __global__ static void _delta2bbox(int row, const T* rois,const T* deltas,const T* scores,T* bboxes, int max_height,int max_width,T max_ratio){ int r_id = blockIdx.x * blockDim.x + threadIdx.x;//row id int type_id = blockIdx.y * blockDim.y + threadIdx.y;//type if (type_id < 5 && r_id < row){ if(type_id==4){ bboxes[r_id*5+type_id]=scores[r_id]; }else{ T roi_ctr_x = rois[r_id*4]; T roi_ctr_y = rois[r_id*4+1]; T roi_width = rois[r_id*4+2]; T roi_height = rois[r_id*4+3]; T dx = deltas[r_id*4]; T dy = deltas[r_id*4+1]; T dw = deltas[r_id*4+2]; T dh = deltas[r_id*4+3]; T gx,gy,gw,gh,res; if(type_id%2==0){ if(max_ratio>T(0)){ _clip(dw,-max_ratio,max_ratio); } gw = roi_width * T(expf(dw)); gx = roi_ctr_x+roi_width*dx; }else{ if(max_ratio>T(0)){ _clip(dh,-max_ratio,max_ratio); } gh = roi_height * T(expf(dh)); gy = roi_ctr_y+roi_height*dy; } //assign the bbox if(type_id==0){ res=gx - gw * T(0.5) + T(0.5); _clip(res,T(0),T(max_width)); bboxes[r_id*5+type_id]=res; }else if(type_id==1){ res=gy - gh * T(0.5) + T(0.5); _clip(res,T(0),T(max_height)); bboxes[r_id*5+type_id]=res; }else if(type_id==2){ res=gx + gw * T(0.5) - T(0.5); _clip(res,T(0),T(max_width)); bboxes[r_id*5+type_id]=res; }else if(type_id==3){ res=gy + gh * T(0.5) - T(0.5); _clip(res,T(0),T(max_height)); bboxes[r_id*5+type_id]=res; } } } } template<typename T> void delta2bbox_forward_gpu(hipStream_t stream,const T* rois,const T* deltas, const T* scores,T* bboxes,int row,int max_height,int max_width,T max_ratio) { dim3 Bl(CU2DBLOCK,CU2DBLOCK); dim3 Gr(n_blocks(row,CU2DBLOCK),n_blocks(5,CU2DBLOCK)); hipLaunchKernelGGL(( _delta2bbox), dim3(Gr),dim3(Bl),0,stream, row,rois,deltas,scores,bboxes,max_height,max_width,max_ratio); } template void delta2bbox_forward_gpu<float>(hipStream_t stream,const float* rois,const float* deltas, const float* scores,float* bboxes,int row,int max_height,int max_width,float max_ratio); template void delta2bbox_forward_gpu<__half>(hipStream_t stream,const __half* rois,const __half* deltas, const __half* scores,__half* bboxes,int row,int max_height,int max_width,__half max_ratio); } // namespace ops_lib } // namespace framework } // namespace quake
bee9b9414b3590f77303ff64595e44bc32e6dc82.cu
// // Created by chengjin on 2020-06-02. // #include "cu_utils.h" #include "cu_device.cuh" #include "detect_kernel.h" namespace quake { namespace framework { namespace ops_lib { template<typename T> __global__ static void _delta2bbox(int row, const T* rois,const T* deltas,const T* scores,T* bboxes, int max_height,int max_width,T max_ratio){ int r_id = blockIdx.x * blockDim.x + threadIdx.x;//row id int type_id = blockIdx.y * blockDim.y + threadIdx.y;//type if (type_id < 5 && r_id < row){ if(type_id==4){ bboxes[r_id*5+type_id]=scores[r_id]; }else{ T roi_ctr_x = rois[r_id*4]; T roi_ctr_y = rois[r_id*4+1]; T roi_width = rois[r_id*4+2]; T roi_height = rois[r_id*4+3]; T dx = deltas[r_id*4]; T dy = deltas[r_id*4+1]; T dw = deltas[r_id*4+2]; T dh = deltas[r_id*4+3]; T gx,gy,gw,gh,res; if(type_id%2==0){ if(max_ratio>T(0)){ _clip(dw,-max_ratio,max_ratio); } gw = roi_width * T(expf(dw)); gx = roi_ctr_x+roi_width*dx; }else{ if(max_ratio>T(0)){ _clip(dh,-max_ratio,max_ratio); } gh = roi_height * T(expf(dh)); gy = roi_ctr_y+roi_height*dy; } //assign the bbox if(type_id==0){ res=gx - gw * T(0.5) + T(0.5); _clip(res,T(0),T(max_width)); bboxes[r_id*5+type_id]=res; }else if(type_id==1){ res=gy - gh * T(0.5) + T(0.5); _clip(res,T(0),T(max_height)); bboxes[r_id*5+type_id]=res; }else if(type_id==2){ res=gx + gw * T(0.5) - T(0.5); _clip(res,T(0),T(max_width)); bboxes[r_id*5+type_id]=res; }else if(type_id==3){ res=gy + gh * T(0.5) - T(0.5); _clip(res,T(0),T(max_height)); bboxes[r_id*5+type_id]=res; } } } } template<typename T> void delta2bbox_forward_gpu(cudaStream_t stream,const T* rois,const T* deltas, const T* scores,T* bboxes,int row,int max_height,int max_width,T max_ratio) { dim3 Bl(CU2DBLOCK,CU2DBLOCK); dim3 Gr(n_blocks(row,CU2DBLOCK),n_blocks(5,CU2DBLOCK)); _delta2bbox<<<Gr,Bl,0,stream>>>(row,rois,deltas,scores,bboxes,max_height,max_width,max_ratio); } template void delta2bbox_forward_gpu<float>(cudaStream_t stream,const float* rois,const float* deltas, const float* scores,float* bboxes,int row,int max_height,int max_width,float max_ratio); template void delta2bbox_forward_gpu<__half>(cudaStream_t stream,const __half* rois,const __half* deltas, const __half* scores,__half* bboxes,int row,int max_height,int max_width,__half max_ratio); } // namespace ops_lib } // namespace framework } // namespace quake
dfa921cfb0c02fd47f12494305e8a88202c15412.hip
// !!! This is a file automatically generated by hipify!!! //THIS IS THE PYTHON CODE FOR VECTOR VECTOR ADDITION FOR MULTI GPU //SAURAV RAI //17558 #include <stdio.h> #include <hip/hip_runtime.h> #include <error.h> #define BLOCKSIZE 16 #define SIZE 1024 int vlen=SIZE; int size=SIZE; float *hVectA,*hVectB,hRes; float elapsedTime,elapsedTime1; double Tsec,Tsec1,gflops,gflops1; hipEvent_t start,stop,start1,stop1; int blocksPerGrid; int gridsPerBlock; void routine(void * givendata); void init(int s); struct Data { int deviceId; int size; float* a; float* b; float retVal; }; Data vector[2]; /*sequential function*/ extern "C" float compare() { init(vlen); float sum=0; for(int i=0;i < vlen;i++) { sum+=hVectA[i] + hVectB[i]; } printf("cpu_sum=%f\n",sum); return sum; } /* kernel to execute vector vector addition */ __global__ void vvaddition(int len,float* A,float* B,float *C) { int tid= blockIdx.x*blockDim.x*blockDim.y + threadIdx.x +threadIdx.y * blockDim.x; while(tid < len) { C[tid] = A[tid] + B[tid]; tid += blockDim.x * gridDim.x; } } /* function display() */ void display(float* arr,int size) { int i; for(i=0;i < size;i++) printf("%f ",arr[i]); printf("\t%d\n",i); } /* extern "C" void result() { printf("Kernel execution done\n"); hRes=vector[0].retVal + vector[1].retVal; printf("The addition of two vectors is :%f\n",hRes); free(hVectA); free(hVectB); } */ /*mem error*/ void mem_error(char *arrayname, char *benchmark, int len, char *type) { printf("\nMemory not sufficient to allocate for array %s\n\tBenchmark : %s \n\tMemory requested = %d number of %s elements\n",arrayname, benchmark, len, type); exit(-1); } /*cuda safe call*/ void CUDA_SAFE_CALL(hipError_t call) { hipError_t ret = call; //printf("RETURN FROM THE CUDA CALL:%d\t:",ret); switch(ret) { case hipSuccess: // printf("Success\n"); break; /* case hipErrorInvalidValue: { printf("ERROR: InvalidValue:%i.\n",__LINE__); exit(-1); break; } case hipErrorInvalidDevicePointer: { printf("ERROR:Invalid Device pointeri:%i.\n",__LINE__); exit(-1); break; } case hipErrorInvalidMemcpyDirection: { printf("ERROR:Invalid memcpy direction:%i.\n",__LINE__); exit(-1); break; } */ default: { printf(" ERROR at line :%i.%d' ' %s\n",__LINE__,ret,hipGetErrorString(ret)); exit(-1); break; } } } /* void SetUp_CUDA_Exe_Config() */ void check_block_grid_dim(hipDeviceProp_t devProp,dim3 blockDim,dim3 gridDim) { if( blockDim.x >= devProp.maxThreadsDim[0] || blockDim.y >= devProp.maxThreadsDim[1] || blockDim.z >= devProp.maxThreadsDim[2] ) { printf("\nBlock Dimensions exceed the maximum limits:%d * %d * %d \n",devProp.maxThreadsDim[0],devProp.maxThreadsDim[1],devProp.maxThreadsDim[2]); exit(-1); } if( gridDim.x >= devProp.maxGridSize[0] || gridDim.y >= devProp.maxGridSize[1] || gridDim.z >= devProp.maxGridSize[2] ) { printf("\nGrid Dimensions exceed the maximum limits:%d * %d * %d \n",devProp.maxGridSize[0],devProp.maxGridSize[1],devProp.maxGridSize[2]); exit(-1); } } /*function to free memory*/ void dfree(double * arr[],int len) { for(int i=0;i < len;i++) CUDA_SAFE_CALL(hipFree(arr[i])); printf("mem freed\n"); } /*calculate Gflops*/ double calculate_gflops(double &Tsec) { //printf("time taken is %.8lf\n",Tsec); double gflops=(1.0e-9 * (( 2.0 * vlen )/Tsec)); //printf("Gflops is \t%f\n",gflops); return gflops; } /*function to print on the screen*/ void print_on_screen(char * program_name,float tsec,double gflops,int size,int flag)//flag=1 if gflops has been calculated else flag =0 { printf("\n---------------%s----------------\n",program_name); printf("\tSIZE\t 
TIME_SEC\t Gflops\n"); if(flag==1) printf("\t%d\t%f\t%lf\t",size,tsec,gflops); else printf("\t%d\t%lf\t%lf\t",size,"---","---"); } /*get device count*/ int get_DeviceCount() { int count; hipGetDeviceCount(&count); return count; } extern "C" float gpu_partial_sum_calculation(int i) { init(vlen); // start=(hipEvent_t)malloc(sizeof(hipEvent_t)); //stop=(hipEvent_t)malloc(sizeof(hipEvent_t)); hipEventCreate(&start); hipEventCreate(&stop); routine(&vector[i-1]); printf("val calculated by device %d=%f\n",vector[i-1].deviceId,vector[i-1].retVal); return vector[i-1].retVal; } void routine(void* givendata) { Data *data = (Data*)givendata; int len = data->size; float *a,*b,*part_c; float *d_a,*d_b,*d_part_c; a=data->a; b=data->b; part_c = (float*)malloc(len*sizeof(float)); float c = 0; CUDA_SAFE_CALL(hipSetDevice(data->deviceId)); CUDA_SAFE_CALL(hipMalloc((void**)&d_a,len*sizeof(float))); CUDA_SAFE_CALL(hipMalloc((void**)&d_b,len*sizeof(float))); CUDA_SAFE_CALL(hipMalloc((void**)&d_part_c,len*sizeof(float))); CUDA_SAFE_CALL(hipMemcpy(d_a,a,len*sizeof(float),hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_b,b,len*sizeof(float),hipMemcpyHostToDevice)); dim3 threadsPerBlock(16,16); int numBlocks; if( len /256 == 0) numBlocks=1; else numBlocks = len/100; dim3 blocksPerGrid(numBlocks ,1); printf("Calling kernel on device: %d\n",data->deviceId); if(data->deviceId==1) { // start=(hipEvent_t)malloc(sizeof(hipEvent_t)); //stop=(hipEvent_t)malloc(sizeof(hipEvent_t)); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); hipLaunchKernelGGL(( vvaddition), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, len,d_a,d_b,d_part_c); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime,start,stop); Tsec=elapsedTime*1.0e-3; printf("\n\ntime taken by device 0 is: %.8lf\n\n",Tsec); print_on_screen("VECTOR VECTOR MULTIPLICATION (MULTIGPU+MPI)",Tsec,0,vlen/2,1); } else { // start1=(hipEvent_t)malloc(sizeof(hipEvent_t)); //stop1=(hipEvent_t)malloc(sizeof(hipEvent_t)); hipEventCreate(&start1); hipEventCreate(&stop1); hipEventRecord(start1,0); hipLaunchKernelGGL(( vvaddition), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, len,d_a,d_b,d_part_c); hipEventRecord(stop1,0); hipEventSynchronize(stop1); hipEventElapsedTime(&elapsedTime1,start1,stop1); Tsec1=elapsedTime1*1.0e-3; printf("\n\ntime taken by device 1: %.8lf\n\n",Tsec1); print_on_screen("VECTOR VECTOR MULTIPLICATION (MULTIGPU+MPI)",Tsec1,0,vlen/2,0); } if(hipPeekAtLastError()) printf("KERNEL ERROR: %s\t on device:%d\n",hipGetErrorString(hipPeekAtLastError()),data->deviceId); CUDA_SAFE_CALL(hipMemcpy(part_c,d_part_c,len*sizeof(float),hipMemcpyDeviceToHost)); // this line has problem because the part_c array size / allocation . 
int ind; for(ind=0;ind < len;ind++) c += part_c[ind]; CUDA_SAFE_CALL(hipFree(d_a)); CUDA_SAFE_CALL(hipFree(d_b)); CUDA_SAFE_CALL(hipFree(d_part_c)); free(part_c); data->retVal=c; printf("Exiting from device :%d \n",data->deviceId); } void init(int size) { int devCount; CUDA_SAFE_CALL(hipGetDeviceCount(&devCount)); if(devCount < 2) { printf("Atleast 2 GPU's are needed :%d\n",devCount); exit(0); } printf("devices available\n"); int vlength=size; int ind; hVectA=(float*)malloc(vlen*sizeof(float)); hVectB=(float*)malloc(vlen*sizeof(float)); for(ind=0;ind < vlen;ind++) { hVectA[ind]=2; hVectB[ind]=2; } vector[0].deviceId = 0; vector[0].size =vlength/2; vector[0].a =hVectA; vector[0].b =hVectB; vector[1].deviceId = 1; vector[1].size =vlength/2; vector[1].a =hVectA + vlength/2 ; vector[1].b =hVectB + vlength/2 ; } extern "C" void hfree() { free(hVectA); free(hVectB); printf("host mem freed successfully\n"); }
dfa921cfb0c02fd47f12494305e8a88202c15412.cu
//THIS IS THE PYTHON CODE FOR VECTOR VECTOR ADDITION FOR MULTI GPU //SAURAV RAI //17558 #include <stdio.h> #include <cuda.h> #include <error.h> #define BLOCKSIZE 16 #define SIZE 1024 int vlen=SIZE; int size=SIZE; float *hVectA,*hVectB,hRes; float elapsedTime,elapsedTime1; double Tsec,Tsec1,gflops,gflops1; cudaEvent_t start,stop,start1,stop1; int blocksPerGrid; int gridsPerBlock; void routine(void * givendata); void init(int s); struct Data { int deviceId; int size; float* a; float* b; float retVal; }; Data vector[2]; /*sequential function*/ extern "C" float compare() { init(vlen); float sum=0; for(int i=0;i < vlen;i++) { sum+=hVectA[i] + hVectB[i]; } printf("cpu_sum=%f\n",sum); return sum; } /* kernel to execute vector vector addition */ __global__ void vvaddition(int len,float* A,float* B,float *C) { int tid= blockIdx.x*blockDim.x*blockDim.y + threadIdx.x +threadIdx.y * blockDim.x; while(tid < len) { C[tid] = A[tid] + B[tid]; tid += blockDim.x * gridDim.x; } } /* function display() */ void display(float* arr,int size) { int i; for(i=0;i < size;i++) printf("%f ",arr[i]); printf("\t%d\n",i); } /* extern "C" void result() { printf("Kernel execution done\n"); hRes=vector[0].retVal + vector[1].retVal; printf("The addition of two vectors is :%f\n",hRes); free(hVectA); free(hVectB); } */ /*mem error*/ void mem_error(char *arrayname, char *benchmark, int len, char *type) { printf("\nMemory not sufficient to allocate for array %s\n\tBenchmark : %s \n\tMemory requested = %d number of %s elements\n",arrayname, benchmark, len, type); exit(-1); } /*cuda safe call*/ void CUDA_SAFE_CALL(cudaError_t call) { cudaError_t ret = call; //printf("RETURN FROM THE CUDA CALL:%d\t:",ret); switch(ret) { case cudaSuccess: // printf("Success\n"); break; /* case cudaErrorInvalidValue: { printf("ERROR: InvalidValue:%i.\n",__LINE__); exit(-1); break; } case cudaErrorInvalidDevicePointer: { printf("ERROR:Invalid Device pointeri:%i.\n",__LINE__); exit(-1); break; } case cudaErrorInvalidMemcpyDirection: { printf("ERROR:Invalid memcpy direction:%i.\n",__LINE__); exit(-1); break; } */ default: { printf(" ERROR at line :%i.%d' ' %s\n",__LINE__,ret,cudaGetErrorString(ret)); exit(-1); break; } } } /* void SetUp_CUDA_Exe_Config() */ void check_block_grid_dim(cudaDeviceProp devProp,dim3 blockDim,dim3 gridDim) { if( blockDim.x >= devProp.maxThreadsDim[0] || blockDim.y >= devProp.maxThreadsDim[1] || blockDim.z >= devProp.maxThreadsDim[2] ) { printf("\nBlock Dimensions exceed the maximum limits:%d * %d * %d \n",devProp.maxThreadsDim[0],devProp.maxThreadsDim[1],devProp.maxThreadsDim[2]); exit(-1); } if( gridDim.x >= devProp.maxGridSize[0] || gridDim.y >= devProp.maxGridSize[1] || gridDim.z >= devProp.maxGridSize[2] ) { printf("\nGrid Dimensions exceed the maximum limits:%d * %d * %d \n",devProp.maxGridSize[0],devProp.maxGridSize[1],devProp.maxGridSize[2]); exit(-1); } } /*function to free memory*/ void dfree(double * arr[],int len) { for(int i=0;i < len;i++) CUDA_SAFE_CALL(cudaFree(arr[i])); printf("mem freed\n"); } /*calculate Gflops*/ double calculate_gflops(double &Tsec) { //printf("time taken is %.8lf\n",Tsec); double gflops=(1.0e-9 * (( 2.0 * vlen )/Tsec)); //printf("Gflops is \t%f\n",gflops); return gflops; } /*function to print on the screen*/ void print_on_screen(char * program_name,float tsec,double gflops,int size,int flag)//flag=1 if gflops has been calculated else flag =0 { printf("\n---------------%s----------------\n",program_name); printf("\tSIZE\t TIME_SEC\t Gflops\n"); if(flag==1) 
printf("\t%d\t%f\t%lf\t",size,tsec,gflops); else printf("\t%d\t%lf\t%lf\t",size,"---","---"); } /*get device count*/ int get_DeviceCount() { int count; cudaGetDeviceCount(&count); return count; } extern "C" float gpu_partial_sum_calculation(int i) { init(vlen); // start=(cudaEvent_t)malloc(sizeof(cudaEvent_t)); //stop=(cudaEvent_t)malloc(sizeof(cudaEvent_t)); cudaEventCreate(&start); cudaEventCreate(&stop); routine(&vector[i-1]); printf("val calculated by device %d=%f\n",vector[i-1].deviceId,vector[i-1].retVal); return vector[i-1].retVal; } void routine(void* givendata) { Data *data = (Data*)givendata; int len = data->size; float *a,*b,*part_c; float *d_a,*d_b,*d_part_c; a=data->a; b=data->b; part_c = (float*)malloc(len*sizeof(float)); float c; CUDA_SAFE_CALL(cudaSetDevice(data->deviceId)); CUDA_SAFE_CALL(cudaMalloc((void**)&d_a,len*sizeof(float))); CUDA_SAFE_CALL(cudaMalloc((void**)&d_b,len*sizeof(float))); CUDA_SAFE_CALL(cudaMalloc((void**)&d_part_c,len*sizeof(float))); CUDA_SAFE_CALL(cudaMemcpy(d_a,a,len*sizeof(float),cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_b,b,len*sizeof(float),cudaMemcpyHostToDevice)); dim3 threadsPerBlock(16,16); int numBlocks; if( len /256 == 0) numBlocks=1; else numBlocks = len/100; dim3 blocksPerGrid(numBlocks ,1); printf("Calling kernel on device: %d\n",data->deviceId); if(data->deviceId==1) { // start=(cudaEvent_t)malloc(sizeof(cudaEvent_t)); //stop=(cudaEvent_t)malloc(sizeof(cudaEvent_t)); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); vvaddition<<<blocksPerGrid,threadsPerBlock>>>(len,d_a,d_b,d_part_c); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime,start,stop); Tsec=elapsedTime*1.0e-3; printf("\n\ntime taken by device 0 is: %.8lf\n\n",Tsec); print_on_screen("VECTOR VECTOR MULTIPLICATION (MULTIGPU+MPI)",Tsec,0,vlen/2,1); } else { // start1=(cudaEvent_t)malloc(sizeof(cudaEvent_t)); //stop1=(cudaEvent_t)malloc(sizeof(cudaEvent_t)); cudaEventCreate(&start1); cudaEventCreate(&stop1); cudaEventRecord(start1,0); vvmul<<<blocksPerGrid,threadsPerBlock>>>(len,d_a,d_b,d_part_c); cudaEventRecord(stop1,0); cudaEventSynchronize(stop1); cudaEventElapsedTime(&elapsedTime1,start1,stop1); Tsec1=elapsedTime1*1.0e-3; printf("\n\ntime taken by device 1: %.8lf\n\n",Tsec1); print_on_screen("VECTOR VECTOR MULTIPLICATION (MULTIGPU+MPI)",Tsec1,0,vlen/2,0); } if(cudaPeekAtLastError()) printf("KERNEL ERROR: %s\t on device:%d\n",cudaGetErrorString(cudaPeekAtLastError()),data->deviceId); CUDA_SAFE_CALL(cudaMemcpy(part_c,d_part_c,len*sizeof(float),cudaMemcpyDeviceToHost)); // this line has problem because the part_c array size / allocation . 
int ind; for(ind=0;ind < len;ind++) c += part_c[ind]; CUDA_SAFE_CALL(cudaFree(d_a)); CUDA_SAFE_CALL(cudaFree(d_b)); CUDA_SAFE_CALL(cudaFree(d_part_c)); free(part_c); data->retVal=c; printf("Exiting from device :%d \n",data->deviceId); } void init(int size) { int devCount; CUDA_SAFE_CALL(cudaGetDeviceCount(&devCount)); if(devCount < 2) { printf("Atleast 2 GPU's are needed :%d\n",devCount); exit(0); } printf("devices available\n"); int vlength=size; int ind; hVectA=(float*)malloc(vlen*sizeof(float)); hVectB=(float*)malloc(vlen*sizeof(float)); for(ind=0;ind < vlen;ind++) { hVectA[ind]=2; hVectB[ind]=2; } vector[0].deviceId = 0; vector[0].size =vlength/2; vector[0].a =hVectA; vector[0].b =hVectB; vector[1].deviceId = 1; vector[1].size =vlength/2; vector[1].a =hVectA + vlength/2 ; vector[1].b =hVectB + vlength/2 ; } extern "C" void hfree() { free(hVectA); free(hVectB); printf("host mem freed successfully\n"); }
6f8a2a0288436199e6fbc28c1da3002349e7eb47.hip
// !!! This is a file automatically generated by hipify!!! #include <fstream> #include "hip/hip_runtime.h" #include "helper_cuda.h" #include "tools.h" // Write in a file the information about GPUs compatible with CUDA void _get_gpu_info(char *url) { string s_url = url; ofstream data; data.open(s_url + "gpu_info.txt"); int device_count = 0; hipError_t error = hipGetDeviceCount(&device_count); if (error != hipSuccess) { data << "hipGetDeviceCount devolvió: " << error << "\n-> " << hipGetErrorString(error) << "\n"; data << "Resultado = FALLIDO\n"; return; } if (device_count == 0) data << "No hay dispositivos disponibles que soporten CUDA\n"; else data << "Detectado(s) " << device_count << " dispositivo(s) compatible(s) con CUDA\n"; int dev, driver_version = 0, runtime_version = 0; for (dev = 0; dev < device_count; ++dev) { hipSetDevice(dev); hipDeviceProp_t device_prop; hipGetDeviceProperties(&device_prop, dev); data << "\nDispositivo " << dev << ": \"" << device_prop.name << "\"\n"; hipDriverGetVersion(&driver_version); hipRuntimeGetVersion(&runtime_version); data << " - CUDA Driver Version / Runtime Version " << driver_version / 1000 << "." << (driver_version % 100) / 10 << " / " << runtime_version / 1000 << "." << (runtime_version % 100) / 10 << "\n"; data << " - CUDA Capability Major/Minor version number: " << device_prop.major << "." << device_prop.minor << "\n"; data << " - Total amount of global memory: " << device_prop.totalGlobalMem / 1048576.0f << " MBytes (" << device_prop.totalGlobalMem << " bytes)\n"; data << " - (" << device_prop.multiProcessorCount << ") Multiprocessors, (" << _ConvertSMVer2Cores(device_prop.major, device_prop.minor) << ") CUDA Cores/MP: " << _ConvertSMVer2Cores(device_prop.major, device_prop.minor) * device_prop.multiProcessorCount << " CUDA Cores\n"; data << " - Total amount of constant memory: " << device_prop.totalConstMem << " bytes\n"; data << " - Total amount of shared memory per block: " << device_prop.sharedMemPerBlock << " bytes\n"; data << " - Total number of registers available per block: " << device_prop.regsPerBlock << "\n"; data << " - Warp size: " << device_prop.warpSize << "\n"; data << " - Maximum number of threads per multiprocessor: " << device_prop.maxThreadsPerMultiProcessor << "\n"; data << " - Maximum number of threads per block: " << device_prop.maxThreadsPerBlock << "\n"; data << " - Max dimension size of a thread block (x,y,z): (" << device_prop.maxThreadsDim[0] << ", " << device_prop.maxThreadsDim[1] << ", " << device_prop.maxThreadsDim[2] << ")\n"; data << " - Max dimension size of a grid size (x,y,z): (" << device_prop.maxGridSize[0] << ", " << device_prop.maxGridSize[1] << ", " << device_prop.maxGridSize[2] << ")\n"; data << " - memory pitch: " << device_prop.memPitch << " bytes\n"; } data << "\nResultado = EXITOSO\n"; data.close(); } // Write information about custom error codes in a file void _get_error_codes(char *url) { string s_url = url; ofstream data; //data.open("error_codes.txt"); data.open(s_url + "error_codes.txt"); data << "CÓDIGOS ERRORES GENERALES\n"; data << "======================================\n"; data << "0 Todo correcto\n"; data << "-1 Error no controlado\n"; data << "\nCÓDIGOS ERRORES UTILIDADES\n"; data << "======================================\n"; data << "100 No existe el dispositivo (GPU) indicado\n"; data << "101 La GPU indicada existe, pero ha habido un problema al asignarla\n"; data << "102 Error al obtener el número de GPUs disponibles\n"; data << "103 La GPU indicada existe, pero ha habido un problema 
al reiniciarla\n"; data << "104 Error al asignar la GPU indicada para posteriormente reiniciarla\n"; data << "105 No hay dispositivos compatibles con CUDA para reiniciar\n"; data << "\nCDIGOS ERRORES CUDA\n"; data << "======================================\n"; data << "200 Error al reservar memoria para la imagen src del device\n"; data << "201 Error al reservar memoria para la imagen dst del device\n"; data << "202 Error al copiar la imagen src del host al device\n"; data << "203 Error al copiar la imagen src del device al dst del host\n"; data << "204 Error al liberar memoria de la imagen src del device\n"; data << "205 Error al liberar memoria de la imagen dst del device\n"; data.close(); } // Returns the number of GPU devices supported by CUDA void _get_cuda_available_devices(int *num_cuda_devices, int *error_code) { hipError_t error = hipGetDeviceCount(num_cuda_devices); if (error != hipSuccess) { *error_code = CODE_ERROR_102; exit(EXIT_FAILURE); } else *error_code = CODE_OK; } // Assign the indicated GPU device to use in the CUDA program void _set_device(int device_id, int *error_code) { int num_devices; _get_cuda_available_devices(&num_devices, error_code); if (device_id < 0 || device_id >= num_devices) *error_code = CODE_ERROR_100; else { hipError_t error = hipSetDevice(device_id); if (error != hipSuccess) { *error_code = CODE_ERROR_101; exit(EXIT_FAILURE); } else *error_code = CODE_OK; } } // Restart the indicated GPU device, first assign it and then restart it void _reset_device(int device_id, int* error_code) { int num_devices; _get_cuda_available_devices(&num_devices, error_code); if (device_id < 0 || device_id >= num_devices) *error_code = CODE_ERROR_100; else { hipError_t error = hipSetDevice(device_id); // Check that the device assignment is correct if (error != hipSuccess) { *error_code = CODE_ERROR_104; exit(EXIT_FAILURE); } else { error = hipDeviceReset(); // Check that the device restart is correct if (error != hipSuccess) { *error_code = CODE_ERROR_103; exit(EXIT_FAILURE); } else *error_code = CODE_OK; } } } // Restart all available GPU devices and assign the first void _reset_all_devices(int* error_code) { int num_devices; _get_cuda_available_devices(&num_devices, error_code); if (num_devices < 1) *error_code = CODE_ERROR_105; else { for (int dev = 0; dev < num_devices; dev++) _reset_device(dev, error_code); _set_device(0, error_code); *error_code = CODE_OK; } }
6f8a2a0288436199e6fbc28c1da3002349e7eb47.cu
#include <fstream> #include "cuda_runtime.h" #include "helper_cuda.h" #include "tools.h" // Write in a file the information about GPUs compatible with CUDA void _get_gpu_info(char *url) { string s_url = url; ofstream data; data.open(s_url + "gpu_info.txt"); int device_count = 0; cudaError_t error = cudaGetDeviceCount(&device_count); if (error != cudaSuccess) { data << "cudaGetDeviceCount devolvió: " << error << "\n-> " << cudaGetErrorString(error) << "\n"; data << "Resultado = FALLIDO\n"; return; } if (device_count == 0) data << "No hay dispositivos disponibles que soporten CUDA\n"; else data << "Detectado(s) " << device_count << " dispositivo(s) compatible(s) con CUDA\n"; int dev, driver_version = 0, runtime_version = 0; for (dev = 0; dev < device_count; ++dev) { cudaSetDevice(dev); cudaDeviceProp device_prop; cudaGetDeviceProperties(&device_prop, dev); data << "\nDispositivo " << dev << ": \"" << device_prop.name << "\"\n"; cudaDriverGetVersion(&driver_version); cudaRuntimeGetVersion(&runtime_version); data << " - CUDA Driver Version / Runtime Version " << driver_version / 1000 << "." << (driver_version % 100) / 10 << " / " << runtime_version / 1000 << "." << (runtime_version % 100) / 10 << "\n"; data << " - CUDA Capability Major/Minor version number: " << device_prop.major << "." << device_prop.minor << "\n"; data << " - Total amount of global memory: " << device_prop.totalGlobalMem / 1048576.0f << " MBytes (" << device_prop.totalGlobalMem << " bytes)\n"; data << " - (" << device_prop.multiProcessorCount << ") Multiprocessors, (" << _ConvertSMVer2Cores(device_prop.major, device_prop.minor) << ") CUDA Cores/MP: " << _ConvertSMVer2Cores(device_prop.major, device_prop.minor) * device_prop.multiProcessorCount << " CUDA Cores\n"; data << " - Total amount of constant memory: " << device_prop.totalConstMem << " bytes\n"; data << " - Total amount of shared memory per block: " << device_prop.sharedMemPerBlock << " bytes\n"; data << " - Total number of registers available per block: " << device_prop.regsPerBlock << "\n"; data << " - Warp size: " << device_prop.warpSize << "\n"; data << " - Maximum number of threads per multiprocessor: " << device_prop.maxThreadsPerMultiProcessor << "\n"; data << " - Maximum number of threads per block: " << device_prop.maxThreadsPerBlock << "\n"; data << " - Max dimension size of a thread block (x,y,z): (" << device_prop.maxThreadsDim[0] << ", " << device_prop.maxThreadsDim[1] << ", " << device_prop.maxThreadsDim[2] << ")\n"; data << " - Max dimension size of a grid size (x,y,z): (" << device_prop.maxGridSize[0] << ", " << device_prop.maxGridSize[1] << ", " << device_prop.maxGridSize[2] << ")\n"; data << " - memory pitch: " << device_prop.memPitch << " bytes\n"; } data << "\nResultado = EXITOSO\n"; data.close(); } // Write information about custom error codes in a file void _get_error_codes(char *url) { string s_url = url; ofstream data; //data.open("error_codes.txt"); data.open(s_url + "error_codes.txt"); data << "CÓDIGOS ERRORES GENERALES\n"; data << "======================================\n"; data << "0 Todo correcto\n"; data << "-1 Error no controlado\n"; data << "\nCÓDIGOS ERRORES UTILIDADES\n"; data << "======================================\n"; data << "100 No existe el dispositivo (GPU) indicado\n"; data << "101 La GPU indicada existe, pero ha habido un problema al asignarla\n"; data << "102 Error al obtener el número de GPUs disponibles\n"; data << "103 La GPU indicada existe, pero ha habido un problema al reiniciarla\n"; data << "104 Error al asignar la 
GPU indicada para posteriormente reiniciarla\n"; data << "105 No hay dispositivos compatibles con CUDA para reiniciar\n"; data << "\nCÓDIGOS ERRORES CUDA\n"; data << "======================================\n"; data << "200 Error al reservar memoria para la imagen src del device\n"; data << "201 Error al reservar memoria para la imagen dst del device\n"; data << "202 Error al copiar la imagen src del host al device\n"; data << "203 Error al copiar la imagen src del device al dst del host\n"; data << "204 Error al liberar memoria de la imagen src del device\n"; data << "205 Error al liberar memoria de la imagen dst del device\n"; data.close(); } // Returns the number of GPU devices supported by CUDA void _get_cuda_available_devices(int *num_cuda_devices, int *error_code) { cudaError_t error = cudaGetDeviceCount(num_cuda_devices); if (error != cudaSuccess) { *error_code = CODE_ERROR_102; exit(EXIT_FAILURE); } else *error_code = CODE_OK; } // Assign the indicated GPU device to use in the CUDA program void _set_device(int device_id, int *error_code) { int num_devices; _get_cuda_available_devices(&num_devices, error_code); if (device_id < 0 || device_id >= num_devices) *error_code = CODE_ERROR_100; else { cudaError_t error = cudaSetDevice(device_id); if (error != cudaSuccess) { *error_code = CODE_ERROR_101; exit(EXIT_FAILURE); } else *error_code = CODE_OK; } } // Restart the indicated GPU device, first assign it and then restart it void _reset_device(int device_id, int* error_code) { int num_devices; _get_cuda_available_devices(&num_devices, error_code); if (device_id < 0 || device_id >= num_devices) *error_code = CODE_ERROR_100; else { cudaError_t error = cudaSetDevice(device_id); // Check that the device assignment is correct if (error != cudaSuccess) { *error_code = CODE_ERROR_104; exit(EXIT_FAILURE); } else { error = cudaDeviceReset(); // Check that the device restart is correct if (error != cudaSuccess) { *error_code = CODE_ERROR_103; exit(EXIT_FAILURE); } else *error_code = CODE_OK; } } } // Restart all available GPU devices and assign the first void _reset_all_devices(int* error_code) { int num_devices; _get_cuda_available_devices(&num_devices, error_code); if (num_devices < 1) *error_code = CODE_ERROR_105; else { for (int dev = 0; dev < num_devices; dev++) _reset_device(dev, error_code); _set_device(0, error_code); *error_code = CODE_OK; } }
919b74aa929977e26bef1334dcfccedcc6c044e6.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <hip/hip_runtime.h> #include "paddle/fluid/operators/amp/amp_check_finite_and_scale_op.h" #include "paddle/fluid/platform/float16.h" namespace paddle { namespace operators { template <typename T> __global__ void AmpCheckFiniteAndScale(const T* in, const T* scale, int num, bool* found_inf, T* out) { const int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < num) { if (!isfinite(in[idx])) { *found_inf = 1; } out[idx] = *found_inf ? in[idx] : in[idx] * scale[0]; } } template <typename T> class AmpCheckFiniteAndScaleKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const { auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); const auto xs = ctx.MultiInput<framework::Tensor>("X"); const auto* scale = ctx.Input<framework::Tensor>("Scale"); auto outs = ctx.MultiOutput<framework::Tensor>("Out"); auto* found_inf = ctx.Output<framework::Tensor>("FoundInfinite"); const T* scale_data = scale->data<T>(); bool* found_inf_data = found_inf->mutable_data<bool>(dev_ctx.GetPlace()); hipMemset(found_inf_data, false, found_inf->numel() * sizeof(bool)); for (size_t i = 0; i < xs.size(); ++i) { const auto* x = xs[i]; auto* out = outs[i]; const T* x_data = x->data<T>(); T* out_data = out->mutable_data<T>(dev_ctx.GetPlace()); int num = x->numel(); int block = 512; int grid = (num + block - 1) / block; VLOG(3) << "launch kernel"; hipLaunchKernelGGL(( AmpCheckFiniteAndScale<T>), dim3(grid), dim3(block), 0, dev_ctx.stream(), x_data, scale_data, num, found_inf_data, out_data); VLOG(3) << "finish kernel"; } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( amp_check_finite_and_scale, ops::AmpCheckFiniteAndScaleKernel<paddle::platform::CUDADeviceContext, float>, ops::AmpCheckFiniteAndScaleKernel<paddle::platform::CUDADeviceContext, double>);
919b74aa929977e26bef1334dcfccedcc6c044e6.cu
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <cuda.h>
#include "paddle/fluid/operators/amp/amp_check_finite_and_scale_op.h"
#include "paddle/fluid/platform/float16.h"

namespace paddle {
namespace operators {

template <typename T>
__global__ void AmpCheckFiniteAndScale(const T* in, const T* scale, int num,
                                       bool* found_inf, T* out) {
  const int idx = threadIdx.x + blockIdx.x * blockDim.x;
  if (idx < num) {
    if (!isfinite(in[idx])) {
      *found_inf = 1;
    }
    out[idx] = *found_inf ? in[idx] : in[idx] * scale[0];
  }
}

template <typename T>
class AmpCheckFiniteAndScaleKernel<platform::CUDADeviceContext, T>
    : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const {
    auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
    const auto xs = ctx.MultiInput<framework::Tensor>("X");
    const auto* scale = ctx.Input<framework::Tensor>("Scale");
    auto outs = ctx.MultiOutput<framework::Tensor>("Out");
    auto* found_inf = ctx.Output<framework::Tensor>("FoundInfinite");

    const T* scale_data = scale->data<T>();
    bool* found_inf_data = found_inf->mutable_data<bool>(dev_ctx.GetPlace());
    cudaMemset(found_inf_data, false, found_inf->numel() * sizeof(bool));

    for (size_t i = 0; i < xs.size(); ++i) {
      const auto* x = xs[i];
      auto* out = outs[i];
      const T* x_data = x->data<T>();
      T* out_data = out->mutable_data<T>(dev_ctx.GetPlace());

      int num = x->numel();
      int block = 512;
      int grid = (num + block - 1) / block;
      VLOG(3) << "launch kernel";
      AmpCheckFiniteAndScale<T><<<grid, block, 0, dev_ctx.stream()>>>(
          x_data, scale_data, num, found_inf_data, out_data);
      VLOG(3) << "finish kernel";
    }
  }
};
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
    amp_check_finite_and_scale,
    ops::AmpCheckFiniteAndScaleKernel<paddle::platform::CUDADeviceContext,
                                      float>,
    ops::AmpCheckFiniteAndScaleKernel<paddle::platform::CUDADeviceContext,
                                      double>);
30601a744985eb19f8816aa5fa68819addff12b0.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>

#define N 10

__global__ void add(int *a, int *b, int *c){
    int bid = blockIdx.x;
    if(bid < N)
        c[bid] = a[bid] + b[bid];
}

int main(void) {
    int i, a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;

    hipMalloc((void**) &dev_a, N*sizeof(int));
    hipMalloc((void**) &dev_b, N*sizeof(int));
    hipMalloc((void**) &dev_c, N*sizeof(int));

    for(i=0; i<N; i++){
        a[i] = -i;
        b[i] = i*i;
    }

    hipMemcpy(dev_a, a, N*sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(dev_b, b, N*sizeof(int), hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( add), dim3(N),dim3(1), 0, 0, dev_a, dev_b, dev_c);

    hipMemcpy(c, dev_c, N*sizeof(int), hipMemcpyDeviceToHost);

    printf("\na + b = c\n");
    for(i = 0; i<N; i++){
        printf("%5d + %5d = %5d\n", a[i], b[i], c[i]);
    }

    hipFree(dev_a);
    hipFree(dev_b);
    hipFree(dev_c);
}
30601a744985eb19f8816aa5fa68819addff12b0.cu
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>

#define N 10

__global__ void add(int *a, int *b, int *c){
    int bid = blockIdx.x;
    if(bid < N)
        c[bid] = a[bid] + b[bid];
}

int main(void) {
    int i, a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;

    cudaMalloc((void**) &dev_a, N*sizeof(int));
    cudaMalloc((void**) &dev_b, N*sizeof(int));
    cudaMalloc((void**) &dev_c, N*sizeof(int));

    for(i=0; i<N; i++){
        a[i] = -i;
        b[i] = i*i;
    }

    cudaMemcpy(dev_a, a, N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N*sizeof(int), cudaMemcpyHostToDevice);

    add<<<N,1>>>(dev_a, dev_b, dev_c);

    cudaMemcpy(c, dev_c, N*sizeof(int), cudaMemcpyDeviceToHost);

    printf("\na + b = c\n");
    for(i = 0; i<N; i++){
        printf("%5d + %5d = %5d\n", a[i], b[i], c[i]);
    }

    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
}
a8cd80dc0fbbecc38f82744ab20c0bd948037f5e.hip
// !!! This is a file automatically generated by hipify!!!
#include <THHUNN/THHUNN.h>
#include <TH/THHalf.h>
#include <THH/THHNumerics.cuh>
#include <THH/THHApply.cuh>

template <typename T>
struct hardtanhupdateOutput_functor
{
  const T max_val_;
  const T min_val_;

  hardtanhupdateOutput_functor(T min_val, T max_val)
    : max_val_(max_val), min_val_(min_val) {}

  __device__ void operator()(T *output, const T *input) const
  {
    if (*input < min_val_)
      *output = min_val_;
    else if (*input > max_val_)
      *output = max_val_;
    else
      *output = *input;
  }

  __device__ void operator()(T *input) const
  {
    if (*input < min_val_)
      *input = min_val_;
    else if (*input > max_val_)
      *input = max_val_;
  }
};

template <typename T>
struct hardtanhupdateGradInput_functor
{
  const T max_val_;
  const T min_val_;

  hardtanhupdateGradInput_functor(T min_val, T max_val)
    : max_val_(max_val), min_val_(min_val) {}

  __device__ void operator()(T *gradInput, const T *input, const T *gradOutput) const
  {
    if (*input <= min_val_ || *input >= max_val_)
      *gradInput = ScalarConvert<int, T>::to(0);
    else
      *gradInput = *gradOutput;
  }

  __device__ void operator()(T *gradInput, const T *input) const
  {
    if (*input <= min_val_ || *input >= max_val_)
      *gradInput = ScalarConvert<int, T>::to(0);
  }
};

#include <THHUNN/generic/HardTanh.hip>
#include <THH/THHGenerateFloatTypes.h>
a8cd80dc0fbbecc38f82744ab20c0bd948037f5e.cu
#include <THCUNN/THCUNN.h>
#include <TH/THHalf.h>
#include <THC/THCNumerics.cuh>
#include <THC/THCApply.cuh>

template <typename T>
struct hardtanhupdateOutput_functor
{
  const T max_val_;
  const T min_val_;

  hardtanhupdateOutput_functor(T min_val, T max_val)
    : max_val_(max_val), min_val_(min_val) {}

  __device__ void operator()(T *output, const T *input) const
  {
    if (*input < min_val_)
      *output = min_val_;
    else if (*input > max_val_)
      *output = max_val_;
    else
      *output = *input;
  }

  __device__ void operator()(T *input) const
  {
    if (*input < min_val_)
      *input = min_val_;
    else if (*input > max_val_)
      *input = max_val_;
  }
};

template <typename T>
struct hardtanhupdateGradInput_functor
{
  const T max_val_;
  const T min_val_;

  hardtanhupdateGradInput_functor(T min_val, T max_val)
    : max_val_(max_val), min_val_(min_val) {}

  __device__ void operator()(T *gradInput, const T *input, const T *gradOutput) const
  {
    if (*input <= min_val_ || *input >= max_val_)
      *gradInput = ScalarConvert<int, T>::to(0);
    else
      *gradInput = *gradOutput;
  }

  __device__ void operator()(T *gradInput, const T *input) const
  {
    if (*input <= min_val_ || *input >= max_val_)
      *gradInput = ScalarConvert<int, T>::to(0);
  }
};

#include <THCUNN/generic/HardTanh.cu>
#include <THC/THCGenerateFloatTypes.h>
75eed49c31a320f3b4776a874551c16467bdc723.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <vector> #include <iostream> #include <string> #include <set> #include <map> #include "../../nvmatrix/include/nvmatrix.cuh" #include "../../nvmatrix/include/nvmatrix_operators.cuh" #include "../../util/include/matrix.h" #include "../include/convnet.cuh" #include "../include/util.cuh" using namespace std; /* * ======================= * ConvNet * ======================= */ ConvNet::ConvNet(PyObject* layerParams, intv& deviceIDs, int minibatchSize, bool conserveMem) : Thread(true) { _deviceIDs = deviceIDs; _data = NULL; _bufferData = NULL; _bufferMinibatchIdx = -1; _bufferPassIdx = -1; _trainingProgress = 0; _totalPassesDone = 0; _conserveMem = conserveMem; _sync = new ThreadSynchronizer(deviceIDs.size() + 1); PyObjectV* layerList = pyDictGetValues(layerParams); std::sort(layerList->begin(), layerList->end(), LayerIDComparator()); _dataCopyPD = new PipeDispenserBlocking(DIVUP(_deviceIDs.size(),2)); // hard-coded for now initDataLayers(layerList); initGPUThreads(layerList); connectReplicas(); // Connect replicas to one another connectChildren(layerParams); // Connect forward/backward links in graph _numFwdTerminal = 0; // Execute post-initialization stuff for (NameReplicaLayerMap::iterator it = _layerMap.begin(); it != _layerMap.end(); ++it) { for (int r = 0; r < it->second.size(); r++) { _numFwdTerminal += it->second[r]->getNext().size() == 0; if (it->second[r]->getNext().size() == 0) { printf("Fwd terminal: %s\n", it->second[r]->getName().c_str()); } it->second[r]->postInit(); } } // Find and count the terminal nodes in the backward pass for (int p = 0; p < getNumPasses(); p++) { set<Layer*> visited; _numBwdTerminal[p] = 0; for (int t = 0; t < _convNetThreads.size(); t++) { vector<CostLayer*>& cl = _convNetThreads[t]->getCostLayers(); for (int c = 0; c < cl.size(); c++) { findBwdTerminal(*cl[c], visited, _numBwdTerminal[p], p); } } } _dp = new DataProvider(minibatchSize); // Py_DECREF(layerList); delete layerList; } ConvNet::~ConvNet() { for (vector<ConvNetThread*>::const_iterator it = _convNetThreads.begin(); it != _convNetThreads.end(); ++it) { (*it)->getMessageQueue().enqueue(new Message(EXIT_CONVNET)); (*it)->join(); delete *it; } for (DataLayerVector::const_iterator it = _dataLayers.begin(); it != _dataLayers.end(); ++it) { delete *it; } for (intv::const_iterator it = _deviceIDs.begin(); it != _deviceIDs.end(); ++it) { DEVICE_MEMORY_MANAGER::destroyInstance(*it); } HOST_MEMORY_MANAGER::destroyInstance(); delete _sync; delete _dataCopyPD; delete _dp; } void ConvNet::stop() { getWorkerQueue().enqueue(new ExitWorker(*this)); join(); } PipeDispenser& ConvNet::getDataCopyPD() { return *_dataCopyPD; } void ConvNet::initDataLayers(PyObjectV* layerList) { for (int i = 0; i < layerList->size(); i++) { PyObject* paramsDict = layerList->at(i); std::string layerType = pyDictGetString(paramsDict, "type"); if (layerType == "data") { int 
numReplicas = pyDictGetInt(paramsDict, "numReplicas"); for (int r = 0; r < numReplicas; ++r) { DataLayer* dataLayer = new DataLayer(this, paramsDict, r); _dataLayers.push_back(dataLayer); _layerMap[dataLayer->getName()][r] = dataLayer; } } } } void ConvNet::initGPUThreads(PyObjectV* layerList) { // Initialize GPU worker threads for (int i = 0; i < _deviceIDs.size(); ++i) { ConvNetThread* cng = new ConvNetThread(layerList, _deviceIDs[i], i, this); _convNetThreads.push_back(cng); for (NameLayerMap::iterator it = cng->getLayerMap().begin(); it != cng->getLayerMap().end(); ++it) { const std::string& name = it->first; Layer* layer = it->second; _layerMap[name][layer->getReplicaID()] = layer; } } } void ConvNet::connectReplicas() { _numReplicasMax = 0; _numReplicasMin = 1 << 16; for (NameReplicaLayerMap::iterator it = _layerMap.begin(); it != _layerMap.end(); ++it) { _numReplicasMax = max(_numReplicasMax, int(it->second.size())); _numReplicasMin = min(_numReplicasMin, int(it->second.size())); for (map<int,Layer*>::iterator it2 = it->second.begin(); it2 != it->second.end(); ++it2) { Layer& l1 = *it2->second; for (map<int,Layer*>::iterator it3 = it->second.begin(); it3 != it->second.end(); ++it3) { Layer& l2 = *it3->second; l1.addReplica(l2); } } } } void ConvNet::connectChildren(PyObject* layerParams) { for (NameReplicaLayerMap::iterator it = _layerMap.begin(); it != _layerMap.end(); ++it) { PyObject* paramsDict = PyDict_GetItemString(layerParams, it->first.c_str()); PyObject* inputList = PyDict_GetItemString(paramsDict, "inputs"); if (inputList != NULL) { // Iterate over "replicas" of this layer int numReplicas = _layerMap[it->first].size(); for (int i = 0; i < PyList_GET_SIZE(inputList); i++) { std::string inputName = PyString_AsString(PyList_GetItem(inputList, i)); int numReplicasPrev = _layerMap[inputName].size(); // How many replicas from the previous layer must this layer be connected to? int numInputReplicas = numReplicasPrev / numReplicas; for (int r = 0; r < numReplicas; r++) { for (int rp = r, ridx = 0; ridx < numInputReplicas; rp += numReplicas, ridx++) { it->second[r]->addPrev(*_layerMap[inputName][rp], ridx); _layerMap[inputName][rp]->addNext(*it->second[r]); } } } } } } void ConvNet::findBwdTerminal(Layer& l, set<Layer*>& visited, int& terminal, int passIdx) { if (visited.count(&l) == 0) { visited.insert(&l); if (l.isGradConsumer()) { bool hasPrevConsumer = false; if (l.getPrev().size() > 0) { for (int i = 0; i < l.getPrev()[0].size(); i++) { // Looking only at 0th replica is fine to see if you have // grad consumers below you. hasPrevConsumer |= l.getPrev()[0][i]->isGradConsumer(); } } if (!hasPrevConsumer || !l.isGradProducer() || (passIdx + 1 < l.getNumReplicasPrev() && l.getNumReplicasPrev() > l.getNumReplicas())) { terminal++; l.setBwdTerminal(passIdx); printf("found bwd terminal %s[%d] in passIdx=%d\n", l.getName().c_str(), l.getReplicaID(), passIdx); } else if (l.isGradProducer()) { for (int r = 0; r < l.getPrev().size(); r++) { for (int i = 0; i < l.getPrev()[r].size(); i++) { findBwdTerminal(*l.getPrev()[r][i], visited, terminal, passIdx); } } } } } } void* ConvNet::run() { for (vector<ConvNetThread*>::const_iterator it = _convNetThreads.begin(); it != _convNetThreads.end(); ++it) { (*it)->start(); } // The manager thread defaults to using the GPU of the first worker. // Put more logic here if this is inappropriate. 
NVMatrix::setDeviceID(_convNetThreads[0]->getDeviceID()); copyToGPU(); bool exit = false; while (!exit) { Worker* worker = _workerQueue.dequeue(); exit = worker->run(); delete worker; } return NULL; } Queue<Worker*>& ConvNet::getWorkerQueue() { return _workerQueue; } Queue<WorkResult*>& ConvNet::getResultQueue() { return _resultQueue; } DataProvider& ConvNet::getDataProvider() { return *_dp; } Layer& ConvNet::getLayer(std::string& name, int replicaID) { return *_layerMap[name][replicaID]; } void ConvNet::sendMessage(MESSAGES msg, bool sync) { sendMessage(new Message(msg), sync); } void ConvNet::sendMessage(Message* msg, bool sync) { for (int i = 0; i < _convNetThreads.size(); i++) { _convNetThreads[i]->getMessageQueue().enqueue(msg->clone()); } delete msg; if (sync) { syncWithChildren(); } } void ConvNet::copyToCPU() { sendMessage(COPY_TO_CPU, true); } void ConvNet::copyToGPU() { sendMessage(COPY_TO_GPU, false); } void ConvNet::updateWeights(int passIdx) { sendMessage(UPDATE_WEIGHTS, true); sendMessage(CONSTRAIN_WEIGHTS, true); } void ConvNet::reset(int passIdx) { sendMessage((passIdx % getNumPasses()) == 0 ? RESET : RESET_PASS_IDX, false); } void ConvNet::reset() { reset(0); } // Fprop given data void ConvNet::fprop(CPUData& data, int passIdx, PASS_TYPE passType) { reset(passIdx); // This is necessary because setData below could delete data. If there's // an outstanding copy request, this'll cause a segfault. for (int i = 0; i < _dataLayers.size(); i++) { _dataLayers[i]->waitForCopyFinish(); } setData(data, passIdx); for (int i = 0; i < _dataLayers.size(); i++) { _dataLayers[i]->fprop(passType, passIdx, false); } waitForTerminals(_numFwdTerminal, FPROP_TERMINAL); } // Fprop given minibatch idx void ConvNet::fprop(int miniIdx, int passIdx, PASS_TYPE passType) { reset(passIdx); bool fromBuffer = miniIdx == _bufferMinibatchIdx && passIdx == _bufferPassIdx; if (!fromBuffer) { // This is necessary because setData below could delete data. If there's // an outstanding copy request, this'll cause a segfault. for (int i = 0; i < _dataLayers.size(); i++) { _dataLayers[i]->waitForCopyFinish(); } setData(_dp->getMinibatch(miniIdx), passIdx); } else { setDataFromBuffer(); } for (int i = 0; i < _dataLayers.size(); i++) { _dataLayers[i]->fprop(passType, passIdx, fromBuffer); } if (passIdx == getNumPasses() - 1) { // Do double-buffering from next minibatch from the DataProvider setBuffer(miniIdx == _dp->getNumMinibatches() - 1 ? 
NULL : &_dp->getMinibatch(miniIdx + 1), miniIdx + 1, 0); } else { // Do double-buffering from next microbatch within current minibatch setBuffer(_data, miniIdx, passIdx + 1); } waitForTerminals(_numFwdTerminal, FPROP_TERMINAL); } void ConvNet::setDataFromBuffer() { if (_bufferData != _data) { delete _data; } _data = _bufferData; _bufferData = NULL; _bufferMinibatchIdx = -1; _bufferPassIdx = -1; } void ConvNet::setData(CPUData& data, int passIdx) { bool same = _data == _bufferData; if (&data != _data) { delete _data; } if (&data != _bufferData && !same) { delete _bufferData; _bufferData = NULL; _bufferMinibatchIdx = -1; _bufferPassIdx = -1; } _data = &data; for (int i = 0; i < _dataLayers.size(); i++) { _dataLayers[i]->copyData(*_data, false, passIdx); } } void ConvNet::setBuffer(CPUData* bufferData, int bufferMinibatchIdx, int bufferPassIdx) { _bufferData = bufferData; _bufferMinibatchIdx = bufferMinibatchIdx; _bufferPassIdx = bufferPassIdx; if (bufferData != NULL) { for (int i = 0; i < _dataLayers.size(); i++) { _dataLayers[i]->copyData(*_bufferData, true, bufferPassIdx); } } } CPUData& ConvNet::getData() { assert(_data != NULL); return *_data; } void ConvNet::bprop(int passIdx, PASS_TYPE passType) { _totalPassesDone++; sendMessage(new BpropStartMessage(passType, passIdx), false); waitForTerminals(_numBwdTerminal[passIdx], BPROP_TERMINAL); reset(passIdx + 1); } void ConvNet::waitForTerminals(int numMsgs, MESSAGES msgType) { for (int rcvd = 0; rcvd < numMsgs; rcvd++) { Message* m = _msgQueue.dequeue(); assert(m->getType() == msgType); delete m; } } // Same as getCost() but adds results to given cost and returns it Cost& ConvNet::getCost(Cost& cost) { Cost &tmp = getCost(); cost += tmp; delete &tmp; return cost; } Cost& ConvNet::getCost() { Cost& cost = *new Cost(); for (int t = 0; t < _convNetThreads.size(); t++) { Cost& tcost = _convNetThreads[t]->getCost(); cost += tcost; delete &tcost; } return cost; } double ConvNet::getCostValue() { Cost& cost = getCost(); double val = cost.getValue(); delete &cost; return val; } Queue<Message*>& ConvNet::getMessageQueue() { return _msgQueue; } intv& ConvNet::getDeviceIDs() { return _deviceIDs; } ThreadSynchronizer& ConvNet::getSync() { return *_sync; } void ConvNet::syncWithChildren() { sendMessage(SYNC, false); _sync->sync(); } int ConvNet::getTotalPassesDone() { return _totalPassesDone; } int ConvNet::getMinibatchSize() { return _dp->getMinibatchSize(); } int ConvNet::getNumReplicasMax() { return _numReplicasMax; } int ConvNet::getNumReplicasMin() { return _numReplicasMin; } int ConvNet::getNumPasses() { return _numReplicasMax / _numReplicasMin; } void ConvNet::setTrainingProgress(double progress) { _trainingProgress = progress; } double ConvNet::getTrainingProgress() const { return _trainingProgress; } bool ConvNet::isConserveMemory() { return _conserveMem; } /* * Gradient checking stuff */ void ConvNet::checkGradients() { _numFailures = 0; _numTests = 0; _baseErr = 0; for (int p = 0; p < getNumPasses(); ++p) { fprop(0, p, PASS_GC); _baseErr += getCostValue(); bprop(p, PASS_GC); } // We call grad check only on the first replica, // but because weights are aware of their fellow replicas, // we can simultaneously perturb the weights of all // replicas. 
for (NameReplicaLayerMap::iterator it = _layerMap.begin(); it != _layerMap.end(); ++it) { map<int, Layer*>& layers = it->second; if (layers[0]->getDeviceID() >= 0 /*&& (layers[0]->getName() == "fc10")*/) { // If layer on GPU (data layers aren't) layers[0]->checkGradient(); } } cout << "------------------------" << endl; if (_numFailures > 0) { cout << _numFailures << "/" << _numTests << " TESTS FAILED" << endl; } else { cout << "ALL " << _numTests << " TESTS PASSED" << endl; } } // Copies to all replicas void ConvNet::checkGradient_copyWeightsToGPU(Matrix& weightsCPU, Weights& weights) { int d = NVMatrix::getDeviceID(); for (map<int, Weights*>::const_iterator it = weights.getReplicas().begin(); it != weights.getReplicas().end(); ++it) { NVMatrix::setDeviceID(it->second->getDeviceID()); it->second->getW().copyFromHost(weightsCPU); } NVMatrix::setDeviceID(d); } /* * name: weight matrix name * eps: finite difference step */ bool ConvNet::checkGradient(const std::string& name, float eps, Weights& weights) { Matrix numGrad(weights.getNumRows(), weights.getNumCols()); Matrix diff(numGrad); numGrad.apply(Matrix::ZERO); Matrix weightsCPU; weights.getW().copyToHost(weightsCPU, true); for(int i = 0; i < weights.getNumRows(); i++) { for (int j = 0; j < weights.getNumCols(); j++) { float v = weightsCPU(i,j); weightsCPU(i,j) += eps; checkGradient_copyWeightsToGPU(weightsCPU, weights); weightsCPU(i,j) = v; double err = 0; for (int p = 0; p < getNumPasses(); ++p) { // printf("trying fprop %d\n", p); fprop(0, p, PASS_GC); // printf(" success\n"); err += getCostValue(); } numGrad(i,j) = (err - _baseErr) / (_data->getNumCases() * eps); if (isnan((double)numGrad(i,j)) || isinf((double)numGrad(i,j))) { cout << "Numerical computation produced nan or inf when checking '" << name << "': " << numGrad(i,j) << endl; cout << "Consider reducing the sizes of the weights or finite difference steps." << endl; cout << "Exiting." << endl; exit(1); } checkGradient_copyWeightsToGPU(weightsCPU, weights); } } Matrix gradCPU; NVMatrix::setDeviceID(weights.getDeviceID()); map<int,NVMatrix*> mats; for (map<int, Weights*>::const_iterator it = weights.getReplicas().begin(); it != weights.getReplicas().end(); ++it) { mats[it->first] = &it->second->getGrad(); } weights.getReducer().reduce(mats, 1, false); weights.getGrad().copyToHost(gradCPU, true); gradCPU.scale(-1.0 / _data->getNumCases()); float analNorm = gradCPU.norm(); float numNorm = numGrad.norm(); numGrad.subtract(gradCPU, diff); float relErr = diff.norm() / analNorm; bool fail = relErr >= GC_REL_ERR_THRESH; if (fail || !GC_SUPPRESS_PASSES) { cout << "========================" << endl; printf("(%s) %s GRADIENT CHECK\n", fail ? 
"****FAIL****" : "PASS", name.c_str()); cout << "========================" << endl; cout << "Analytic:" << endl; gradCPU.print(0, 6, 0, 4); cout << "Numeric:" << endl; numGrad.print(0, 6, 0, 4); printf("Analytic norm: %e\n", analNorm); printf("Numeric norm: %e\n", numNorm); printf("Relative error: %e\n", relErr); } _numTests++; _numFailures += fail; return fail; } /* * ======================================================================================================= * ConvNetThread * ======================================================================================================= */ ConvNetThread::ConvNetThread(PyObjectV* layerList, int deviceID, int deviceIdx, ConvNet* convNet) // : Thread(true, getDeviceCPUs(deviceID)), _deviceID(deviceID), _convNet(convNet) { : Thread(true), _deviceID(deviceID), _convNet(convNet) { try { int numLayers = layerList->size(); for (int i = 0; i < numLayers; i++) { PyObject* paramsDict = layerList->at(i); std::string layerType = pyDictGetString(paramsDict, "type"); if (layerType != "data") { intv& gpus = *pyDictGetIntV(paramsDict, "gpu"); int rid = indexOf(gpus, deviceIdx); if (rid >= 0) { initLayer(paramsDict, rid); } delete &gpus; } } } catch (std::string& s) { cout << "Error creating ConvNet: " << s << endl; exit(1); } } ConvNetThread::~ConvNetThread() { NVMatrix::setDeviceID(_deviceID); NVMatrix::destroyCublas(); NVMatrix::destroyRandom(); for (NameLayerMap::const_iterator it = _nameLayerMap.begin(); it != _nameLayerMap.end(); ++it) { delete it->second; } _nameLayerMap.clear(); } void ConvNetThread::startTimer() { NVMatrix::syncStream(); _timer.start(); } double ConvNetThread::stopTimer() { NVMatrix::syncStream(); return _timer.stop(); } void ConvNetThread::initLayer(PyObject* paramsDict, int replicaID) { std::string type = pyDictGetString(paramsDict, "type"); std::string name = pyDictGetString(paramsDict, "name"); if (type == "fc") { _nameLayerMap[name] = new FCLayer(this, paramsDict, replicaID, false); } else if (type == "sfc") { _nameLayerMap[name] = new SplitFCLayer(this, paramsDict, replicaID, false); } else if (type == "conv") { _nameLayerMap[name] = new ConvLayer(this, paramsDict, replicaID); } else if (type == "local") { _nameLayerMap[name] = new LocalUnsharedLayer(this, paramsDict, replicaID); } else if (type == "pool") { _nameLayerMap[name] = &PoolLayer::make(this, paramsDict, replicaID); } else if (type == "cmpool") { _nameLayerMap[name] = &CrossMapPoolLayer::make(this, paramsDict, replicaID); } else if (type == "rnorm") { _nameLayerMap[name] = new ResponseNormLayer(this, paramsDict, replicaID); } else if (type == "cmrnorm") { _nameLayerMap[name] = new CrossMapResponseNormLayer(this, paramsDict, replicaID); } else if (type == "cnorm") { _nameLayerMap[name] = new ContrastNormLayer(this, paramsDict, replicaID); } else if (type == "softmax") { _nameLayerMap[name] = new SoftmaxLayer(this, paramsDict, replicaID); } else if (type == "chnavg") { _nameLayerMap[name] = new ChannelwiseAvgLayer(this, paramsDict, replicaID); } else if (type == "eltsum") { _nameLayerMap[name] = new EltwiseSumLayer(this, paramsDict, replicaID); } else if (type == "eltmax") { _nameLayerMap[name] = new EltwiseMaxLayer(this, paramsDict, replicaID); } else if (type == "neuron") { _nameLayerMap[name] = new NeuronLayer(this, paramsDict, replicaID); } else if (type == "nailbed") { _nameLayerMap[name] = new NailbedLayer(this, paramsDict, replicaID); } else if (type == "blur") { _nameLayerMap[name] = new GaussianBlurLayer(this, paramsDict, replicaID); } else if (type == 
"href") { _nameLayerMap[name] = new HorizontalReflectionLayer(this, paramsDict, replicaID); } else if (type == "resize") { _nameLayerMap[name] = new ResizeLayer(this, paramsDict, replicaID); } else if (type == "rgb2yuv") { _nameLayerMap[name] = new RGBToYUVLayer(this, paramsDict, replicaID); } else if (type == "rgb2lab") { _nameLayerMap[name] = new RGBToLABLayer(this, paramsDict, replicaID); } else if (type == "rscale") { _nameLayerMap[name] = new RandomScaleLayer(this, paramsDict, replicaID); } else if (type == "crop") { _nameLayerMap[name] = new CropLayer(this, paramsDict, replicaID); } else if (type == "concat") { _nameLayerMap[name] = new ConcatenationLayer(this, paramsDict, replicaID); } else if (type == "pass") { _nameLayerMap[name] = new PassThroughLayer(this, paramsDict, replicaID); } else if (type == "dropout") { _nameLayerMap[name] = new DropoutLayer(this, paramsDict, replicaID); } else if (type == "dropout2") { _nameLayerMap[name] = new Dropout2Layer(this, paramsDict, replicaID); } else if (type == "deconv.double") { _nameLayerMap[name] = new DeconvLocalDoubleLayer(this, paramsDict, replicaID); } else if (type == "prelu") { _nameLayerMap[name] = new PReluLayer(this, paramsDict, replicaID, true); } else if (strncmp(type.c_str(), "cost.", 5) == 0) { CostLayer *c = &CostLayer::make(this, paramsDict, type, replicaID); _nameLayerMap[name] = c; _costs.push_back(c); } else { throw std::string("Unknown layer type ") + type; } } /* * This executes in a new CPU thread so it's OK to initialize CUDA stuff here. */ void ConvNetThread::initCuda() { NVMatrix::setDeviceID(_deviceID); checkCudaErrors(hipDeviceSetCacheConfig(hipFuncCachePreferShared)); for (int i = 0; i < _convNet->getDeviceIDs().size(); i++) { int d = _convNet->getDeviceIDs()[i]; if (d != _deviceID) { if (NVMatrix::canAccessPeer(_deviceID, d)) { printf("Enabling peer access GPU %d --> GPU %d\n", NVMatrix::getDeviceID(), d); checkCudaErrors(hipDeviceEnablePeerAccess(d, 0)); } else { printf("No peer access GPU %d --> GPU %d\n", _deviceID, d); } } } // NVMatrix::syncStream(); NVMatrix::initCublas(); NVMatrix::initRandom(/*7*/); srand(time(0)); } void* ConvNetThread::run() { initCuda(); bool exit = false; while (!exit) { Message* m = _msgQueue.dequeue(); if (m->getType() == FPROP_READY) { FpropMessage* msg = static_cast<FpropMessage*>(m); msg->getToLayer().fprop(msg->getPassType(), msg->getPassIdx()); } else if (m->getType() == BPROP_READY) { BpropMessage* msg = static_cast<BpropMessage*>(m); msg->getToLayer().incRcvdBInputMsgs(); msg->getToLayer().bprop(msg->getPassType(), msg->getPassIdx()); } else if (m->getType() == BPROP_START) { BpropStartMessage* msg = static_cast<BpropStartMessage*>(m); for (int i = 0; i < _costs.size(); i++) { dynamic_cast<Layer*>(_costs[i])->bprop(msg->getPassType(), msg->getPassIdx()); } } else if (m->getType() == SYNC) { NVMatrix::syncStream(); _convNet->getSync().sync(); } else if (m->getType() == COPY_TO_CPU) { for (NameLayerMap::iterator it = _nameLayerMap.begin(); it != _nameLayerMap.end(); ++it) { it->second->copyToCPU(); } } else if (m->getType() == COPY_TO_GPU) { for (NameLayerMap::iterator it = _nameLayerMap.begin(); it != _nameLayerMap.end(); ++it) { it->second->copyToGPU(); } } else if (m->getType() == RESET) { for (NameLayerMap::iterator it = _nameLayerMap.begin(); it != _nameLayerMap.end(); ++it) { it->second->reset(); } } else if (m->getType() == RESET_PASS_IDX) { for (NameLayerMap::iterator it = _nameLayerMap.begin(); it != _nameLayerMap.end(); ++it) { it->second->resetPassIdx(); } } else 
if (m->getType() == UPDATE_WEIGHTS) { for (NameLayerMap::iterator it = _nameLayerMap.begin(); it != _nameLayerMap.end(); ++it) { it->second->updateWeights(); } } else if (m->getType() == CONSTRAIN_WEIGHTS) { for (NameLayerMap::iterator it = _nameLayerMap.begin(); it != _nameLayerMap.end(); ++it) { it->second->constrainWeights(); } } else if (m->getType() == EXIT_CONVNET) { exit = true; } delete m; } return NULL; } Cost& ConvNetThread::getCost() { // In a single ConvNetThread, all costs are guaranteed to be different // (i.e. not replicas of one another) return *new Cost(_costs); } Layer& ConvNetThread::getLayer(std::string& name) { return *_nameLayerMap[name]; } int ConvNetThread::getDeviceID() { return _deviceID; } Queue<Message*>& ConvNetThread::getMessageQueue() { return _msgQueue; } vector<CostLayer*>& ConvNetThread::getCostLayers() { return _costs; } NameLayerMap& ConvNetThread::getLayerMap() { return _nameLayerMap; } ConvNet& ConvNetThread::getConvNet() { return *_convNet; }
75eed49c31a320f3b4776a874551c16467bdc723.cu
/* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <vector> #include <iostream> #include <string> #include <set> #include <map> #include "../../nvmatrix/include/nvmatrix.cuh" #include "../../nvmatrix/include/nvmatrix_operators.cuh" #include "../../util/include/matrix.h" #include "../include/convnet.cuh" #include "../include/util.cuh" using namespace std; /* * ======================= * ConvNet * ======================= */ ConvNet::ConvNet(PyObject* layerParams, intv& deviceIDs, int minibatchSize, bool conserveMem) : Thread(true) { _deviceIDs = deviceIDs; _data = NULL; _bufferData = NULL; _bufferMinibatchIdx = -1; _bufferPassIdx = -1; _trainingProgress = 0; _totalPassesDone = 0; _conserveMem = conserveMem; _sync = new ThreadSynchronizer(deviceIDs.size() + 1); PyObjectV* layerList = pyDictGetValues(layerParams); std::sort(layerList->begin(), layerList->end(), LayerIDComparator()); _dataCopyPD = new PipeDispenserBlocking(DIVUP(_deviceIDs.size(),2)); // hard-coded for now initDataLayers(layerList); initGPUThreads(layerList); connectReplicas(); // Connect replicas to one another connectChildren(layerParams); // Connect forward/backward links in graph _numFwdTerminal = 0; // Execute post-initialization stuff for (NameReplicaLayerMap::iterator it = _layerMap.begin(); it != _layerMap.end(); ++it) { for (int r = 0; r < it->second.size(); r++) { _numFwdTerminal += it->second[r]->getNext().size() == 0; if (it->second[r]->getNext().size() == 0) { printf("Fwd terminal: %s\n", it->second[r]->getName().c_str()); } it->second[r]->postInit(); } } // Find and count the terminal nodes in the backward pass for (int p = 0; p < getNumPasses(); p++) { set<Layer*> visited; _numBwdTerminal[p] = 0; for (int t = 0; t < _convNetThreads.size(); t++) { vector<CostLayer*>& cl = _convNetThreads[t]->getCostLayers(); for (int c = 0; c < cl.size(); c++) { findBwdTerminal(*cl[c], visited, _numBwdTerminal[p], p); } } } _dp = new DataProvider(minibatchSize); // Py_DECREF(layerList); delete layerList; } ConvNet::~ConvNet() { for (vector<ConvNetThread*>::const_iterator it = _convNetThreads.begin(); it != _convNetThreads.end(); ++it) { (*it)->getMessageQueue().enqueue(new Message(EXIT_CONVNET)); (*it)->join(); delete *it; } for (DataLayerVector::const_iterator it = _dataLayers.begin(); it != _dataLayers.end(); ++it) { delete *it; } for (intv::const_iterator it = _deviceIDs.begin(); it != _deviceIDs.end(); ++it) { DEVICE_MEMORY_MANAGER::destroyInstance(*it); } HOST_MEMORY_MANAGER::destroyInstance(); delete _sync; delete _dataCopyPD; delete _dp; } void ConvNet::stop() { getWorkerQueue().enqueue(new ExitWorker(*this)); join(); } PipeDispenser& ConvNet::getDataCopyPD() { return *_dataCopyPD; } void ConvNet::initDataLayers(PyObjectV* layerList) { for (int i = 0; i < layerList->size(); i++) { PyObject* paramsDict = layerList->at(i); std::string layerType = pyDictGetString(paramsDict, "type"); if (layerType == "data") { int numReplicas = pyDictGetInt(paramsDict, "numReplicas"); for (int r = 
0; r < numReplicas; ++r) { DataLayer* dataLayer = new DataLayer(this, paramsDict, r); _dataLayers.push_back(dataLayer); _layerMap[dataLayer->getName()][r] = dataLayer; } } } } void ConvNet::initGPUThreads(PyObjectV* layerList) { // Initialize GPU worker threads for (int i = 0; i < _deviceIDs.size(); ++i) { ConvNetThread* cng = new ConvNetThread(layerList, _deviceIDs[i], i, this); _convNetThreads.push_back(cng); for (NameLayerMap::iterator it = cng->getLayerMap().begin(); it != cng->getLayerMap().end(); ++it) { const std::string& name = it->first; Layer* layer = it->second; _layerMap[name][layer->getReplicaID()] = layer; } } } void ConvNet::connectReplicas() { _numReplicasMax = 0; _numReplicasMin = 1 << 16; for (NameReplicaLayerMap::iterator it = _layerMap.begin(); it != _layerMap.end(); ++it) { _numReplicasMax = max(_numReplicasMax, int(it->second.size())); _numReplicasMin = min(_numReplicasMin, int(it->second.size())); for (map<int,Layer*>::iterator it2 = it->second.begin(); it2 != it->second.end(); ++it2) { Layer& l1 = *it2->second; for (map<int,Layer*>::iterator it3 = it->second.begin(); it3 != it->second.end(); ++it3) { Layer& l2 = *it3->second; l1.addReplica(l2); } } } } void ConvNet::connectChildren(PyObject* layerParams) { for (NameReplicaLayerMap::iterator it = _layerMap.begin(); it != _layerMap.end(); ++it) { PyObject* paramsDict = PyDict_GetItemString(layerParams, it->first.c_str()); PyObject* inputList = PyDict_GetItemString(paramsDict, "inputs"); if (inputList != NULL) { // Iterate over "replicas" of this layer int numReplicas = _layerMap[it->first].size(); for (int i = 0; i < PyList_GET_SIZE(inputList); i++) { std::string inputName = PyString_AsString(PyList_GetItem(inputList, i)); int numReplicasPrev = _layerMap[inputName].size(); // How many replicas from the previous layer must this layer be connected to? int numInputReplicas = numReplicasPrev / numReplicas; for (int r = 0; r < numReplicas; r++) { for (int rp = r, ridx = 0; ridx < numInputReplicas; rp += numReplicas, ridx++) { it->second[r]->addPrev(*_layerMap[inputName][rp], ridx); _layerMap[inputName][rp]->addNext(*it->second[r]); } } } } } } void ConvNet::findBwdTerminal(Layer& l, set<Layer*>& visited, int& terminal, int passIdx) { if (visited.count(&l) == 0) { visited.insert(&l); if (l.isGradConsumer()) { bool hasPrevConsumer = false; if (l.getPrev().size() > 0) { for (int i = 0; i < l.getPrev()[0].size(); i++) { // Looking only at 0th replica is fine to see if you have // grad consumers below you. hasPrevConsumer |= l.getPrev()[0][i]->isGradConsumer(); } } if (!hasPrevConsumer || !l.isGradProducer() || (passIdx + 1 < l.getNumReplicasPrev() && l.getNumReplicasPrev() > l.getNumReplicas())) { terminal++; l.setBwdTerminal(passIdx); printf("found bwd terminal %s[%d] in passIdx=%d\n", l.getName().c_str(), l.getReplicaID(), passIdx); } else if (l.isGradProducer()) { for (int r = 0; r < l.getPrev().size(); r++) { for (int i = 0; i < l.getPrev()[r].size(); i++) { findBwdTerminal(*l.getPrev()[r][i], visited, terminal, passIdx); } } } } } } void* ConvNet::run() { for (vector<ConvNetThread*>::const_iterator it = _convNetThreads.begin(); it != _convNetThreads.end(); ++it) { (*it)->start(); } // The manager thread defaults to using the GPU of the first worker. // Put more logic here if this is inappropriate. 
NVMatrix::setDeviceID(_convNetThreads[0]->getDeviceID()); copyToGPU(); bool exit = false; while (!exit) { Worker* worker = _workerQueue.dequeue(); exit = worker->run(); delete worker; } return NULL; } Queue<Worker*>& ConvNet::getWorkerQueue() { return _workerQueue; } Queue<WorkResult*>& ConvNet::getResultQueue() { return _resultQueue; } DataProvider& ConvNet::getDataProvider() { return *_dp; } Layer& ConvNet::getLayer(std::string& name, int replicaID) { return *_layerMap[name][replicaID]; } void ConvNet::sendMessage(MESSAGES msg, bool sync) { sendMessage(new Message(msg), sync); } void ConvNet::sendMessage(Message* msg, bool sync) { for (int i = 0; i < _convNetThreads.size(); i++) { _convNetThreads[i]->getMessageQueue().enqueue(msg->clone()); } delete msg; if (sync) { syncWithChildren(); } } void ConvNet::copyToCPU() { sendMessage(COPY_TO_CPU, true); } void ConvNet::copyToGPU() { sendMessage(COPY_TO_GPU, false); } void ConvNet::updateWeights(int passIdx) { sendMessage(UPDATE_WEIGHTS, true); sendMessage(CONSTRAIN_WEIGHTS, true); } void ConvNet::reset(int passIdx) { sendMessage((passIdx % getNumPasses()) == 0 ? RESET : RESET_PASS_IDX, false); } void ConvNet::reset() { reset(0); } // Fprop given data void ConvNet::fprop(CPUData& data, int passIdx, PASS_TYPE passType) { reset(passIdx); // This is necessary because setData below could delete data. If there's // an outstanding copy request, this'll cause a segfault. for (int i = 0; i < _dataLayers.size(); i++) { _dataLayers[i]->waitForCopyFinish(); } setData(data, passIdx); for (int i = 0; i < _dataLayers.size(); i++) { _dataLayers[i]->fprop(passType, passIdx, false); } waitForTerminals(_numFwdTerminal, FPROP_TERMINAL); } // Fprop given minibatch idx void ConvNet::fprop(int miniIdx, int passIdx, PASS_TYPE passType) { reset(passIdx); bool fromBuffer = miniIdx == _bufferMinibatchIdx && passIdx == _bufferPassIdx; if (!fromBuffer) { // This is necessary because setData below could delete data. If there's // an outstanding copy request, this'll cause a segfault. for (int i = 0; i < _dataLayers.size(); i++) { _dataLayers[i]->waitForCopyFinish(); } setData(_dp->getMinibatch(miniIdx), passIdx); } else { setDataFromBuffer(); } for (int i = 0; i < _dataLayers.size(); i++) { _dataLayers[i]->fprop(passType, passIdx, fromBuffer); } if (passIdx == getNumPasses() - 1) { // Do double-buffering from next minibatch from the DataProvider setBuffer(miniIdx == _dp->getNumMinibatches() - 1 ? 
NULL : &_dp->getMinibatch(miniIdx + 1), miniIdx + 1, 0); } else { // Do double-buffering from next microbatch within current minibatch setBuffer(_data, miniIdx, passIdx + 1); } waitForTerminals(_numFwdTerminal, FPROP_TERMINAL); } void ConvNet::setDataFromBuffer() { if (_bufferData != _data) { delete _data; } _data = _bufferData; _bufferData = NULL; _bufferMinibatchIdx = -1; _bufferPassIdx = -1; } void ConvNet::setData(CPUData& data, int passIdx) { bool same = _data == _bufferData; if (&data != _data) { delete _data; } if (&data != _bufferData && !same) { delete _bufferData; _bufferData = NULL; _bufferMinibatchIdx = -1; _bufferPassIdx = -1; } _data = &data; for (int i = 0; i < _dataLayers.size(); i++) { _dataLayers[i]->copyData(*_data, false, passIdx); } } void ConvNet::setBuffer(CPUData* bufferData, int bufferMinibatchIdx, int bufferPassIdx) { _bufferData = bufferData; _bufferMinibatchIdx = bufferMinibatchIdx; _bufferPassIdx = bufferPassIdx; if (bufferData != NULL) { for (int i = 0; i < _dataLayers.size(); i++) { _dataLayers[i]->copyData(*_bufferData, true, bufferPassIdx); } } } CPUData& ConvNet::getData() { assert(_data != NULL); return *_data; } void ConvNet::bprop(int passIdx, PASS_TYPE passType) { _totalPassesDone++; sendMessage(new BpropStartMessage(passType, passIdx), false); waitForTerminals(_numBwdTerminal[passIdx], BPROP_TERMINAL); reset(passIdx + 1); } void ConvNet::waitForTerminals(int numMsgs, MESSAGES msgType) { for (int rcvd = 0; rcvd < numMsgs; rcvd++) { Message* m = _msgQueue.dequeue(); assert(m->getType() == msgType); delete m; } } // Same as getCost() but adds results to given cost and returns it Cost& ConvNet::getCost(Cost& cost) { Cost &tmp = getCost(); cost += tmp; delete &tmp; return cost; } Cost& ConvNet::getCost() { Cost& cost = *new Cost(); for (int t = 0; t < _convNetThreads.size(); t++) { Cost& tcost = _convNetThreads[t]->getCost(); cost += tcost; delete &tcost; } return cost; } double ConvNet::getCostValue() { Cost& cost = getCost(); double val = cost.getValue(); delete &cost; return val; } Queue<Message*>& ConvNet::getMessageQueue() { return _msgQueue; } intv& ConvNet::getDeviceIDs() { return _deviceIDs; } ThreadSynchronizer& ConvNet::getSync() { return *_sync; } void ConvNet::syncWithChildren() { sendMessage(SYNC, false); _sync->sync(); } int ConvNet::getTotalPassesDone() { return _totalPassesDone; } int ConvNet::getMinibatchSize() { return _dp->getMinibatchSize(); } int ConvNet::getNumReplicasMax() { return _numReplicasMax; } int ConvNet::getNumReplicasMin() { return _numReplicasMin; } int ConvNet::getNumPasses() { return _numReplicasMax / _numReplicasMin; } void ConvNet::setTrainingProgress(double progress) { _trainingProgress = progress; } double ConvNet::getTrainingProgress() const { return _trainingProgress; } bool ConvNet::isConserveMemory() { return _conserveMem; } /* * Gradient checking stuff */ void ConvNet::checkGradients() { _numFailures = 0; _numTests = 0; _baseErr = 0; for (int p = 0; p < getNumPasses(); ++p) { fprop(0, p, PASS_GC); _baseErr += getCostValue(); bprop(p, PASS_GC); } // We call grad check only on the first replica, // but because weights are aware of their fellow replicas, // we can simultaneously perturb the weights of all // replicas. 
for (NameReplicaLayerMap::iterator it = _layerMap.begin(); it != _layerMap.end(); ++it) { map<int, Layer*>& layers = it->second; if (layers[0]->getDeviceID() >= 0 /*&& (layers[0]->getName() == "fc10")*/) { // If layer on GPU (data layers aren't) layers[0]->checkGradient(); } } cout << "------------------------" << endl; if (_numFailures > 0) { cout << _numFailures << "/" << _numTests << " TESTS FAILED" << endl; } else { cout << "ALL " << _numTests << " TESTS PASSED" << endl; } } // Copies to all replicas void ConvNet::checkGradient_copyWeightsToGPU(Matrix& weightsCPU, Weights& weights) { int d = NVMatrix::getDeviceID(); for (map<int, Weights*>::const_iterator it = weights.getReplicas().begin(); it != weights.getReplicas().end(); ++it) { NVMatrix::setDeviceID(it->second->getDeviceID()); it->second->getW().copyFromHost(weightsCPU); } NVMatrix::setDeviceID(d); } /* * name: weight matrix name * eps: finite difference step */ bool ConvNet::checkGradient(const std::string& name, float eps, Weights& weights) { Matrix numGrad(weights.getNumRows(), weights.getNumCols()); Matrix diff(numGrad); numGrad.apply(Matrix::ZERO); Matrix weightsCPU; weights.getW().copyToHost(weightsCPU, true); for(int i = 0; i < weights.getNumRows(); i++) { for (int j = 0; j < weights.getNumCols(); j++) { float v = weightsCPU(i,j); weightsCPU(i,j) += eps; checkGradient_copyWeightsToGPU(weightsCPU, weights); weightsCPU(i,j) = v; double err = 0; for (int p = 0; p < getNumPasses(); ++p) { // printf("trying fprop %d\n", p); fprop(0, p, PASS_GC); // printf(" success\n"); err += getCostValue(); } numGrad(i,j) = (err - _baseErr) / (_data->getNumCases() * eps); if (isnan((double)numGrad(i,j)) || isinf((double)numGrad(i,j))) { cout << "Numerical computation produced nan or inf when checking '" << name << "': " << numGrad(i,j) << endl; cout << "Consider reducing the sizes of the weights or finite difference steps." << endl; cout << "Exiting." << endl; exit(1); } checkGradient_copyWeightsToGPU(weightsCPU, weights); } } Matrix gradCPU; NVMatrix::setDeviceID(weights.getDeviceID()); map<int,NVMatrix*> mats; for (map<int, Weights*>::const_iterator it = weights.getReplicas().begin(); it != weights.getReplicas().end(); ++it) { mats[it->first] = &it->second->getGrad(); } weights.getReducer().reduce(mats, 1, false); weights.getGrad().copyToHost(gradCPU, true); gradCPU.scale(-1.0 / _data->getNumCases()); float analNorm = gradCPU.norm(); float numNorm = numGrad.norm(); numGrad.subtract(gradCPU, diff); float relErr = diff.norm() / analNorm; bool fail = relErr >= GC_REL_ERR_THRESH; if (fail || !GC_SUPPRESS_PASSES) { cout << "========================" << endl; printf("(%s) %s GRADIENT CHECK\n", fail ? 
"****FAIL****" : "PASS", name.c_str()); cout << "========================" << endl; cout << "Analytic:" << endl; gradCPU.print(0, 6, 0, 4); cout << "Numeric:" << endl; numGrad.print(0, 6, 0, 4); printf("Analytic norm: %e\n", analNorm); printf("Numeric norm: %e\n", numNorm); printf("Relative error: %e\n", relErr); } _numTests++; _numFailures += fail; return fail; } /* * ======================================================================================================= * ConvNetThread * ======================================================================================================= */ ConvNetThread::ConvNetThread(PyObjectV* layerList, int deviceID, int deviceIdx, ConvNet* convNet) // : Thread(true, getDeviceCPUs(deviceID)), _deviceID(deviceID), _convNet(convNet) { : Thread(true), _deviceID(deviceID), _convNet(convNet) { try { int numLayers = layerList->size(); for (int i = 0; i < numLayers; i++) { PyObject* paramsDict = layerList->at(i); std::string layerType = pyDictGetString(paramsDict, "type"); if (layerType != "data") { intv& gpus = *pyDictGetIntV(paramsDict, "gpu"); int rid = indexOf(gpus, deviceIdx); if (rid >= 0) { initLayer(paramsDict, rid); } delete &gpus; } } } catch (std::string& s) { cout << "Error creating ConvNet: " << s << endl; exit(1); } } ConvNetThread::~ConvNetThread() { NVMatrix::setDeviceID(_deviceID); NVMatrix::destroyCublas(); NVMatrix::destroyRandom(); for (NameLayerMap::const_iterator it = _nameLayerMap.begin(); it != _nameLayerMap.end(); ++it) { delete it->second; } _nameLayerMap.clear(); } void ConvNetThread::startTimer() { NVMatrix::syncStream(); _timer.start(); } double ConvNetThread::stopTimer() { NVMatrix::syncStream(); return _timer.stop(); } void ConvNetThread::initLayer(PyObject* paramsDict, int replicaID) { std::string type = pyDictGetString(paramsDict, "type"); std::string name = pyDictGetString(paramsDict, "name"); if (type == "fc") { _nameLayerMap[name] = new FCLayer(this, paramsDict, replicaID, false); } else if (type == "sfc") { _nameLayerMap[name] = new SplitFCLayer(this, paramsDict, replicaID, false); } else if (type == "conv") { _nameLayerMap[name] = new ConvLayer(this, paramsDict, replicaID); } else if (type == "local") { _nameLayerMap[name] = new LocalUnsharedLayer(this, paramsDict, replicaID); } else if (type == "pool") { _nameLayerMap[name] = &PoolLayer::make(this, paramsDict, replicaID); } else if (type == "cmpool") { _nameLayerMap[name] = &CrossMapPoolLayer::make(this, paramsDict, replicaID); } else if (type == "rnorm") { _nameLayerMap[name] = new ResponseNormLayer(this, paramsDict, replicaID); } else if (type == "cmrnorm") { _nameLayerMap[name] = new CrossMapResponseNormLayer(this, paramsDict, replicaID); } else if (type == "cnorm") { _nameLayerMap[name] = new ContrastNormLayer(this, paramsDict, replicaID); } else if (type == "softmax") { _nameLayerMap[name] = new SoftmaxLayer(this, paramsDict, replicaID); } else if (type == "chnavg") { _nameLayerMap[name] = new ChannelwiseAvgLayer(this, paramsDict, replicaID); } else if (type == "eltsum") { _nameLayerMap[name] = new EltwiseSumLayer(this, paramsDict, replicaID); } else if (type == "eltmax") { _nameLayerMap[name] = new EltwiseMaxLayer(this, paramsDict, replicaID); } else if (type == "neuron") { _nameLayerMap[name] = new NeuronLayer(this, paramsDict, replicaID); } else if (type == "nailbed") { _nameLayerMap[name] = new NailbedLayer(this, paramsDict, replicaID); } else if (type == "blur") { _nameLayerMap[name] = new GaussianBlurLayer(this, paramsDict, replicaID); } else if (type == 
"href") { _nameLayerMap[name] = new HorizontalReflectionLayer(this, paramsDict, replicaID); } else if (type == "resize") { _nameLayerMap[name] = new ResizeLayer(this, paramsDict, replicaID); } else if (type == "rgb2yuv") { _nameLayerMap[name] = new RGBToYUVLayer(this, paramsDict, replicaID); } else if (type == "rgb2lab") { _nameLayerMap[name] = new RGBToLABLayer(this, paramsDict, replicaID); } else if (type == "rscale") { _nameLayerMap[name] = new RandomScaleLayer(this, paramsDict, replicaID); } else if (type == "crop") { _nameLayerMap[name] = new CropLayer(this, paramsDict, replicaID); } else if (type == "concat") { _nameLayerMap[name] = new ConcatenationLayer(this, paramsDict, replicaID); } else if (type == "pass") { _nameLayerMap[name] = new PassThroughLayer(this, paramsDict, replicaID); } else if (type == "dropout") { _nameLayerMap[name] = new DropoutLayer(this, paramsDict, replicaID); } else if (type == "dropout2") { _nameLayerMap[name] = new Dropout2Layer(this, paramsDict, replicaID); } else if (type == "deconv.double") { _nameLayerMap[name] = new DeconvLocalDoubleLayer(this, paramsDict, replicaID); } else if (type == "prelu") { _nameLayerMap[name] = new PReluLayer(this, paramsDict, replicaID, true); } else if (strncmp(type.c_str(), "cost.", 5) == 0) { CostLayer *c = &CostLayer::make(this, paramsDict, type, replicaID); _nameLayerMap[name] = c; _costs.push_back(c); } else { throw std::string("Unknown layer type ") + type; } } /* * This executes in a new CPU thread so it's OK to initialize CUDA stuff here. */ void ConvNetThread::initCuda() { NVMatrix::setDeviceID(_deviceID); checkCudaErrors(cudaDeviceSetCacheConfig(cudaFuncCachePreferShared)); for (int i = 0; i < _convNet->getDeviceIDs().size(); i++) { int d = _convNet->getDeviceIDs()[i]; if (d != _deviceID) { if (NVMatrix::canAccessPeer(_deviceID, d)) { printf("Enabling peer access GPU %d --> GPU %d\n", NVMatrix::getDeviceID(), d); checkCudaErrors(cudaDeviceEnablePeerAccess(d, 0)); } else { printf("No peer access GPU %d --> GPU %d\n", _deviceID, d); } } } // NVMatrix::syncStream(); NVMatrix::initCublas(); NVMatrix::initRandom(/*7*/); srand(time(0)); } void* ConvNetThread::run() { initCuda(); bool exit = false; while (!exit) { Message* m = _msgQueue.dequeue(); if (m->getType() == FPROP_READY) { FpropMessage* msg = static_cast<FpropMessage*>(m); msg->getToLayer().fprop(msg->getPassType(), msg->getPassIdx()); } else if (m->getType() == BPROP_READY) { BpropMessage* msg = static_cast<BpropMessage*>(m); msg->getToLayer().incRcvdBInputMsgs(); msg->getToLayer().bprop(msg->getPassType(), msg->getPassIdx()); } else if (m->getType() == BPROP_START) { BpropStartMessage* msg = static_cast<BpropStartMessage*>(m); for (int i = 0; i < _costs.size(); i++) { dynamic_cast<Layer*>(_costs[i])->bprop(msg->getPassType(), msg->getPassIdx()); } } else if (m->getType() == SYNC) { NVMatrix::syncStream(); _convNet->getSync().sync(); } else if (m->getType() == COPY_TO_CPU) { for (NameLayerMap::iterator it = _nameLayerMap.begin(); it != _nameLayerMap.end(); ++it) { it->second->copyToCPU(); } } else if (m->getType() == COPY_TO_GPU) { for (NameLayerMap::iterator it = _nameLayerMap.begin(); it != _nameLayerMap.end(); ++it) { it->second->copyToGPU(); } } else if (m->getType() == RESET) { for (NameLayerMap::iterator it = _nameLayerMap.begin(); it != _nameLayerMap.end(); ++it) { it->second->reset(); } } else if (m->getType() == RESET_PASS_IDX) { for (NameLayerMap::iterator it = _nameLayerMap.begin(); it != _nameLayerMap.end(); ++it) { it->second->resetPassIdx(); } } 
else if (m->getType() == UPDATE_WEIGHTS) { for (NameLayerMap::iterator it = _nameLayerMap.begin(); it != _nameLayerMap.end(); ++it) { it->second->updateWeights(); } } else if (m->getType() == CONSTRAIN_WEIGHTS) { for (NameLayerMap::iterator it = _nameLayerMap.begin(); it != _nameLayerMap.end(); ++it) { it->second->constrainWeights(); } } else if (m->getType() == EXIT_CONVNET) { exit = true; } delete m; } return NULL; } Cost& ConvNetThread::getCost() { // In a single ConvNetThread, all costs are guaranteed to be different // (i.e. not replicas of one another) return *new Cost(_costs); } Layer& ConvNetThread::getLayer(std::string& name) { return *_nameLayerMap[name]; } int ConvNetThread::getDeviceID() { return _deviceID; } Queue<Message*>& ConvNetThread::getMessageQueue() { return _msgQueue; } vector<CostLayer*>& ConvNetThread::getCostLayers() { return _costs; } NameLayerMap& ConvNetThread::getLayerMap() { return _nameLayerMap; } ConvNet& ConvNetThread::getConvNet() { return *_convNet; }
03583aa7457b92b3b03ac67709ef4440053a4370.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> int log2(int i) { int r = 0; while (i >>= 1) r++; return r; } int bit_reverse(int w, int bits) { int r = 0; for (int i = 0; i < bits; i++) { int bit = (w & (1 << i)) >> i; r |= bit << (bits - i - 1); } return r; } /* Using device 0: NVIDIA Tegra X1; global mem: 2076037120B; compute v5.3; clock: 921600 kHz Running naive histo bin 0: count 7 bin 1: count 7 bin 2: count 6 bin 3: count 6 bin 4: count 7 bin 5: count 6 bin 6: count 7 bin 7: count 6 bin 8: count 7 bin 9: count 7 bin 10: count 7 bin 11: count 7 bin 12: count 7 bin 13: count 6 bin 14: count 6 bin 15: count 8 incorrect due to race condition in d_bins[myBin]++ this does not happen in serial code as each thread runs separately e.g. BIN with value 5, and thread 1 and 2 wants to increase it - thread 1 reads 5, increase to 6, write 6 back to bin - thread 2 reads 5, increase to 6, write 6 back to bin - but actual answer is 7 */ __global__ void naive_histo(int *d_bins, const int *d_in, const int BIN_COUNT) { int myId = threadIdx.x + blockDim.x * blockIdx.x; int myItem = d_in[myId]; int myBin = myItem % BIN_COUNT; d_bins[myBin]++; } /* Using device 0: NVIDIA Tegra X1; global mem: 2076037120B; compute v5.3; clock: 921600 kHz Running simple histo bin 0: count 4096 bin 1: count 4096 bin 2: count 4096 bin 3: count 4096 bin 4: count 4096 bin 5: count 4096 bin 6: count 4096 bin 7: count 4096 bin 8: count 4096 bin 9: count 4096 bin 10: count 4096 bin 11: count 4096 bin 12: count 4096 bin 13: count 4096 bin 14: count 4096 bin 15: count 4096 */ __global__ void simple_histo(int *d_bins, const int *d_in, const int BIN_COUNT) { int myId = threadIdx.x + blockDim.x * blockIdx.x; int myItem = d_in[myId]; int myBin = myItem % BIN_COUNT; atomicAdd(&(d_bins[myBin]), 1); } int main(int argc, char **argv) { int deviceCount; hipGetDeviceCount(&deviceCount); if (deviceCount == 0) { fprintf(stderr, "error: no devices supporting CUDA.\n"); exit(EXIT_FAILURE); } int dev = 0; hipSetDevice(dev); hipDeviceProp_t devProps; if (hipGetDeviceProperties(&devProps, dev) == 0) { printf("Using device %d:\n", dev); printf("%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n", devProps.name, (int)devProps.totalGlobalMem, (int)devProps.major, (int)devProps.minor, (int)devProps.clockRate); } const int ARRAY_SIZE = 65536; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int); const int BIN_COUNT = 16; const int BIN_BYTES = BIN_COUNT * sizeof(int); // generate the input array on the host int h_in[ARRAY_SIZE]; for(int i = 0; i < ARRAY_SIZE; i++) { h_in[i] = bit_reverse(i, log2(ARRAY_SIZE)); } int h_bins[BIN_COUNT]; for(int i = 0; i < BIN_COUNT; i++) { h_bins[i] = 0; } // declare GPU memory pointers int * d_in; int * d_bins; // allocate GPU memory hipMalloc((void **) &d_in, ARRAY_BYTES); hipMalloc((void **) &d_bins, BIN_BYTES); // transfer the arrays to the GPU hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice); hipMemcpy(d_bins, h_bins, BIN_BYTES, hipMemcpyHostToDevice); int whichKernel = 0; if (argc == 2) { whichKernel = atoi(argv[1]); } // launch the kernel switch(whichKernel) { case 0: printf("Running naive histo\n"); hipLaunchKernelGGL(( naive_histo), dim3(ARRAY_SIZE / 64), dim3(64), 0, 0, d_bins, d_in, BIN_COUNT); break; case 1: printf("Running simple histo\n"); hipLaunchKernelGGL(( simple_histo), dim3(ARRAY_SIZE / 64), dim3(64), 0, 0, d_bins, d_in, BIN_COUNT); break; default: fprintf(stderr, "error: ran no kernel\n"); exit(EXIT_FAILURE); } // copy back the sum from GPU 
hipMemcpy(h_bins, d_bins, BIN_BYTES, hipMemcpyDeviceToHost); for(int i = 0; i < BIN_COUNT; i++) { printf("bin %d: count %d\n", i, h_bins[i]); } // free GPU memory allocation hipFree(d_in); hipFree(d_bins); return 0; }
03583aa7457b92b3b03ac67709ef4440053a4370.cu
#include <stdio.h> #include <cuda_runtime.h> int log2(int i) { int r = 0; while (i >>= 1) r++; return r; } int bit_reverse(int w, int bits) { int r = 0; for (int i = 0; i < bits; i++) { int bit = (w & (1 << i)) >> i; r |= bit << (bits - i - 1); } return r; } /* Using device 0: NVIDIA Tegra X1; global mem: 2076037120B; compute v5.3; clock: 921600 kHz Running naive histo bin 0: count 7 bin 1: count 7 bin 2: count 6 bin 3: count 6 bin 4: count 7 bin 5: count 6 bin 6: count 7 bin 7: count 6 bin 8: count 7 bin 9: count 7 bin 10: count 7 bin 11: count 7 bin 12: count 7 bin 13: count 6 bin 14: count 6 bin 15: count 8 incorrect due to race condition in d_bins[myBin]++ this does not happen in serial code as each thread runs separately e.g. BIN with value 5, and thread 1 and 2 wants to increase it - thread 1 reads 5, increase to 6, write 6 back to bin - thread 2 reads 5, increase to 6, write 6 back to bin - but actual answer is 7 */ __global__ void naive_histo(int *d_bins, const int *d_in, const int BIN_COUNT) { int myId = threadIdx.x + blockDim.x * blockIdx.x; int myItem = d_in[myId]; int myBin = myItem % BIN_COUNT; d_bins[myBin]++; } /* Using device 0: NVIDIA Tegra X1; global mem: 2076037120B; compute v5.3; clock: 921600 kHz Running simple histo bin 0: count 4096 bin 1: count 4096 bin 2: count 4096 bin 3: count 4096 bin 4: count 4096 bin 5: count 4096 bin 6: count 4096 bin 7: count 4096 bin 8: count 4096 bin 9: count 4096 bin 10: count 4096 bin 11: count 4096 bin 12: count 4096 bin 13: count 4096 bin 14: count 4096 bin 15: count 4096 */ __global__ void simple_histo(int *d_bins, const int *d_in, const int BIN_COUNT) { int myId = threadIdx.x + blockDim.x * blockIdx.x; int myItem = d_in[myId]; int myBin = myItem % BIN_COUNT; atomicAdd(&(d_bins[myBin]), 1); } int main(int argc, char **argv) { int deviceCount; cudaGetDeviceCount(&deviceCount); if (deviceCount == 0) { fprintf(stderr, "error: no devices supporting CUDA.\n"); exit(EXIT_FAILURE); } int dev = 0; cudaSetDevice(dev); cudaDeviceProp devProps; if (cudaGetDeviceProperties(&devProps, dev) == 0) { printf("Using device %d:\n", dev); printf("%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n", devProps.name, (int)devProps.totalGlobalMem, (int)devProps.major, (int)devProps.minor, (int)devProps.clockRate); } const int ARRAY_SIZE = 65536; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int); const int BIN_COUNT = 16; const int BIN_BYTES = BIN_COUNT * sizeof(int); // generate the input array on the host int h_in[ARRAY_SIZE]; for(int i = 0; i < ARRAY_SIZE; i++) { h_in[i] = bit_reverse(i, log2(ARRAY_SIZE)); } int h_bins[BIN_COUNT]; for(int i = 0; i < BIN_COUNT; i++) { h_bins[i] = 0; } // declare GPU memory pointers int * d_in; int * d_bins; // allocate GPU memory cudaMalloc((void **) &d_in, ARRAY_BYTES); cudaMalloc((void **) &d_bins, BIN_BYTES); // transfer the arrays to the GPU cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice); cudaMemcpy(d_bins, h_bins, BIN_BYTES, cudaMemcpyHostToDevice); int whichKernel = 0; if (argc == 2) { whichKernel = atoi(argv[1]); } // launch the kernel switch(whichKernel) { case 0: printf("Running naive histo\n"); naive_histo<<<ARRAY_SIZE / 64, 64>>>(d_bins, d_in, BIN_COUNT); break; case 1: printf("Running simple histo\n"); simple_histo<<<ARRAY_SIZE / 64, 64>>>(d_bins, d_in, BIN_COUNT); break; default: fprintf(stderr, "error: ran no kernel\n"); exit(EXIT_FAILURE); } // copy back the sum from GPU cudaMemcpy(h_bins, d_bins, BIN_BYTES, cudaMemcpyDeviceToHost); for(int i = 0; i < BIN_COUNT; i++) { printf("bin %d: count 
%d\n", i, h_bins[i]); } // free GPU memory allocation cudaFree(d_in); cudaFree(d_bins); return 0; }
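The histogram pair above explains in its own comments why the naive per-thread increment races and how atomicAdd on the global bins fixes it. A further refinement that the mined file does not contain is to privatize the histogram in shared memory so most atomics stay on-chip; the sketch below is only an illustration, assuming the same launch geometry and input layout as the file (every thread maps to exactly one input element), and the kernel name shared_histo is not from the original.

__global__ void shared_histo(int *d_bins, const int *d_in, const int BIN_COUNT)
{
    extern __shared__ int s_bins[];                     // BIN_COUNT ints, sized at launch
    int myId = threadIdx.x + blockDim.x * blockIdx.x;

    for (int b = threadIdx.x; b < BIN_COUNT; b += blockDim.x)
        s_bins[b] = 0;                                  // zero the block-private bins
    __syncthreads();

    atomicAdd(&s_bins[d_in[myId] % BIN_COUNT], 1);      // contention stays within the block
    __syncthreads();

    for (int b = threadIdx.x; b < BIN_COUNT; b += blockDim.x)
        atomicAdd(&d_bins[b], s_bins[b]);               // one global atomic per bin per block
}
// launched from the program above as:
// shared_histo<<<ARRAY_SIZE / 64, 64, BIN_COUNT * sizeof(int)>>>(d_bins, d_in, BIN_COUNT);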
af1d2320dadf17311759506d061b3599ea18d00c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" const int BLOCKDIM = 16; /** * @brief Calculates the Euclidean distance between two points (x0, y0) and * (x1, y1) * * @param[in] x0 The x0 coordinate * @param[in] y0 The y0 coordinate * @param[in] x1 The x1 coordinate * @param[in] y1 The y1 coordinate * * @return The distance between the two points */ __device__ inline float gaussian(float x, float mu, float sigma) { return static_cast<float>(expf(-((x - mu) * (x - mu))/(2 * sigma * sigma)) / (2 * M_PI * sigma * sigma)); } __device__ inline float distance(int x0, int y0, int x1, int y1) { return static_cast<float>(sqrtf( (x0 - x1) * (x0 - x1) + (y0 - y1) * (y0 - y1) )); } __global__ void bilateralNaiveGpuKernel( float* inputImage, float* outputImage, int rows, int cols, uint32_t window, float sigmaD, float sigmaR) { float filteredPixel, neighbourPixel, currentPixel; float wP, gR, gD; int neighbourCol; int neighbourRow; const int col = blockIdx.x * blockDim.x + threadIdx.x; const int row = blockIdx.y * blockDim.y + threadIdx.y; if (col >= cols || row >= rows) { return; } filteredPixel = 0; wP = 0; for (int windowCol = 0; windowCol < window; windowCol++) { for (int windowRow = 0; windowRow < window; windowRow++) { neighbourCol = col - (window / 2) - windowCol; neighbourRow = row - (window / 2) - windowRow; // Prevent us indexing into regions that don't exist if (neighbourCol < 0) { neighbourCol = 0; } if (neighbourRow < 0) { neighbourRow = 0; } neighbourPixel = inputImage[neighbourCol + neighbourRow * cols]; currentPixel = inputImage[col + row * cols]; // Intensity factor gR = gaussian(neighbourPixel - currentPixel, 0.0, sigmaR); // Distance factor gD = gaussian(distance(col, row, neighbourCol, neighbourRow), 0.0, sigmaD); filteredPixel += neighbourPixel * (gR * gD); wP += (gR * gD); } } outputImage[col + row * cols] = filteredPixel / wP; }
af1d2320dadf17311759506d061b3599ea18d00c.cu
#include "includes.h" const int BLOCKDIM = 16; /** * @brief Calculates the Euclidean distance between two points (x0, y0) and * (x1, y1) * * @param[in] x0 The x0 coordinate * @param[in] y0 The y0 coordinate * @param[in] x1 The x1 coordinate * @param[in] y1 The y1 coordinate * * @return The distance between the two points */ __device__ inline float gaussian(float x, float mu, float sigma) { return static_cast<float>(expf(-((x - mu) * (x - mu))/(2 * sigma * sigma)) / (2 * M_PI * sigma * sigma)); } __device__ inline float distance(int x0, int y0, int x1, int y1) { return static_cast<float>(sqrtf( (x0 - x1) * (x0 - x1) + (y0 - y1) * (y0 - y1) )); } __global__ void bilateralNaiveGpuKernel( float* inputImage, float* outputImage, int rows, int cols, uint32_t window, float sigmaD, float sigmaR) { float filteredPixel, neighbourPixel, currentPixel; float wP, gR, gD; int neighbourCol; int neighbourRow; const int col = blockIdx.x * blockDim.x + threadIdx.x; const int row = blockIdx.y * blockDim.y + threadIdx.y; if (col >= cols || row >= rows) { return; } filteredPixel = 0; wP = 0; for (int windowCol = 0; windowCol < window; windowCol++) { for (int windowRow = 0; windowRow < window; windowRow++) { neighbourCol = col - (window / 2) - windowCol; neighbourRow = row - (window / 2) - windowRow; // Prevent us indexing into regions that don't exist if (neighbourCol < 0) { neighbourCol = 0; } if (neighbourRow < 0) { neighbourRow = 0; } neighbourPixel = inputImage[neighbourCol + neighbourRow * cols]; currentPixel = inputImage[col + row * cols]; // Intensity factor gR = gaussian(neighbourPixel - currentPixel, 0.0, sigmaR); // Distance factor gD = gaussian(distance(col, row, neighbourCol, neighbourRow), 0.0, sigmaD); filteredPixel += neighbourPixel * (gR * gD); wP += (gR * gD); } } outputImage[col + row * cols] = filteredPixel / wP; }
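The bilateral pair above ships only device code; BLOCKDIM = 16 is defined but never used and no host launch is included. One plausible wrapper is sketched below, assuming d_in and d_out are device buffers that already hold the image and that it sits in the same translation unit as the kernel; the wrapper name and pointer names are not from the original file.

void bilateralNaiveGpu(float *d_in, float *d_out, int rows, int cols,
                       uint32_t window, float sigmaD, float sigmaR)
{
    dim3 block(BLOCKDIM, BLOCKDIM);                 // 16x16 threads per block
    dim3 grid((cols + block.x - 1) / block.x,       // ceil-divide so every pixel is covered;
              (rows + block.y - 1) / block.y);      // the kernel already guards out-of-range threads
    bilateralNaiveGpuKernel<<<grid, block>>>(d_in, d_out, rows, cols, window, sigmaD, sigmaR);
    cudaDeviceSynchronize();                        // hipDeviceSynchronize() on the HIP side
}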
07d856ab5d8dab1219d7c32c414628262cb316ee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define _NTHREAD 512 #define _NBLOCK 65535 #include<cuda.h> __global__ void _AFFINE_KERNEL(int* ,int ,int* ,int ,int ,int ,int ,int ,int ,int ,int ,int ,int ); #include<stdio.h> #include<stdlib.h> int main() { int XP1[20][20],XS3[20],i,j,k; for(i=0;i<20;i++) for(j=0;j<20;j++) { XP1[i][j]=i+j; XS3[i]=2*i; } int _SZ_XS3_1 = 20; int _SZ_XP1_2 = 20; int _SZ_XP1_1 = 20; int *_DEV_XS3; hipMalloc((void**) &_DEV_XS3, sizeof(int)*_SZ_XS3_1); hipMemcpy(_DEV_XS3, XS3, sizeof(int)*_SZ_XS3_1, hipMemcpyHostToDevice); int *_DEV_XP1; hipMalloc((void**) &_DEV_XP1, sizeof(int)*_SZ_XP1_2*_SZ_XP1_1); hipMemcpy(_DEV_XP1, XP1, sizeof(int)*_SZ_XP1_2*_SZ_XP1_1, hipMemcpyHostToDevice); float _NUM_THREADS = 400,_NUM_BLOCKS=1; int _NUM_TILE=1; dim3 _THREADS(512); dim3 _BLOCKS(1); if(_NUM_THREADS < _NTHREAD) { _THREADS.x=20; _THREADS.y=20; } else { _NUM_BLOCKS=_NUM_THREADS/256; _BLOCKS.x=_BLOCKS.y=ceil(sqrt(_NUM_BLOCKS)); _THREADS.x=_THREADS.y=ceil(sqrt(400.0/(_BLOCKS.x*_BLOCKS.y))); int temp=_NUM_BLOCKS; if(_NUM_BLOCKS>_NBLOCK) _NUM_TILE=(temp % _NBLOCK == 0)?(_NUM_BLOCKS/_NBLOCK):((_NUM_BLOCKS/_NBLOCK)+1); } int _CUDA_TILE; for(i=0;i<20;i+=3) for(_CUDA_TILE=0;_CUDA_TILE<_NUM_TILE;_CUDA_TILE++) hipLaunchKernelGGL(( _AFFINE_KERNEL), dim3(_BLOCKS),dim3(_THREADS), 0, 0, _DEV_XS3, _SZ_XS3_1, _DEV_XP1, _SZ_XP1_2, _SZ_XP1_1, 2, i, j, 0, 20, 0, 20, _CUDA_TILE); hipDeviceSynchronize(); hipMemcpy(XP1, _DEV_XP1, sizeof(int)*_SZ_XP1_2*_SZ_XP1_1, hipMemcpyDeviceToHost); return 0; } __global__ void _AFFINE_KERNEL(int* XS3,int _SZ_XS3_1,int* XP1,int _SZ_XP1_2,int _SZ_XP1_1,int phi_count, int CUDA_i, int CUDA_j, int CUDA_L_i,int CUDA_U_i, int CUDA_L_j,int CUDA_U_j, int _CUDA_TILE) { int i = gridDim.x*blockDim.x*_CUDA_TILE + blockDim.x*blockIdx.x + threadIdx.x; int j = gridDim.y*blockDim.y*_CUDA_TILE + blockDim.y*blockIdx.y + threadIdx.y; if((CUDA_i<=i)&&(i<(CUDA_i+3))&&(i<CUDA_U_i)){ if((CUDA_L_j<=j)&&(j<CUDA_U_j)){ XP1[i*_SZ_XP1_1+j]=XP1[(i+3)*_SZ_XP1_1+j+4]+XS3[i]; }}}
07d856ab5d8dab1219d7c32c414628262cb316ee.cu
#define _NTHREAD 512 #define _NBLOCK 65535 #include<cuda.h> __global__ void _AFFINE_KERNEL(int* ,int ,int* ,int ,int ,int ,int ,int ,int ,int ,int ,int ,int ); #include<stdio.h> #include<stdlib.h> int main() { int XP1[20][20],XS3[20],i,j,k; for(i=0;i<20;i++) for(j=0;j<20;j++) { XP1[i][j]=i+j; XS3[i]=2*i; } int _SZ_XS3_1 = 20; int _SZ_XP1_2 = 20; int _SZ_XP1_1 = 20; int *_DEV_XS3; cudaMalloc((void**) &_DEV_XS3, sizeof(int)*_SZ_XS3_1); cudaMemcpy(_DEV_XS3, XS3, sizeof(int)*_SZ_XS3_1, cudaMemcpyHostToDevice); int *_DEV_XP1; cudaMalloc((void**) &_DEV_XP1, sizeof(int)*_SZ_XP1_2*_SZ_XP1_1); cudaMemcpy(_DEV_XP1, XP1, sizeof(int)*_SZ_XP1_2*_SZ_XP1_1, cudaMemcpyHostToDevice); float _NUM_THREADS = 400,_NUM_BLOCKS=1; int _NUM_TILE=1; dim3 _THREADS(512); dim3 _BLOCKS(1); if(_NUM_THREADS < _NTHREAD) { _THREADS.x=20; _THREADS.y=20; } else { _NUM_BLOCKS=_NUM_THREADS/256; _BLOCKS.x=_BLOCKS.y=ceil(sqrt(_NUM_BLOCKS)); _THREADS.x=_THREADS.y=ceil(sqrt(400.0/(_BLOCKS.x*_BLOCKS.y))); int temp=_NUM_BLOCKS; if(_NUM_BLOCKS>_NBLOCK) _NUM_TILE=(temp % _NBLOCK == 0)?(_NUM_BLOCKS/_NBLOCK):((_NUM_BLOCKS/_NBLOCK)+1); } int _CUDA_TILE; for(i=0;i<20;i+=3) for(_CUDA_TILE=0;_CUDA_TILE<_NUM_TILE;_CUDA_TILE++) _AFFINE_KERNEL<<<_BLOCKS,_THREADS>>>(_DEV_XS3, _SZ_XS3_1, _DEV_XP1, _SZ_XP1_2, _SZ_XP1_1, 2, i, j, 0, 20, 0, 20, _CUDA_TILE); cudaDeviceSynchronize(); cudaMemcpy(XP1, _DEV_XP1, sizeof(int)*_SZ_XP1_2*_SZ_XP1_1, cudaMemcpyDeviceToHost); return 0; } __global__ void _AFFINE_KERNEL(int* XS3,int _SZ_XS3_1,int* XP1,int _SZ_XP1_2,int _SZ_XP1_1,int phi_count, int CUDA_i, int CUDA_j, int CUDA_L_i,int CUDA_U_i, int CUDA_L_j,int CUDA_U_j, int _CUDA_TILE) { int i = gridDim.x*blockDim.x*_CUDA_TILE + blockDim.x*blockIdx.x + threadIdx.x; int j = gridDim.y*blockDim.y*_CUDA_TILE + blockDim.y*blockIdx.y + threadIdx.y; if((CUDA_i<=i)&&(i<(CUDA_i+3))&&(i<CUDA_U_i)){ if((CUDA_L_j<=j)&&(j<CUDA_U_j)){ XP1[i*_SZ_XP1_1+j]=XP1[(i+3)*_SZ_XP1_1+j+4]+XS3[i]; }}}
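This pair is a compact illustration of the launch-syntax rewrite hipify performs: a CUDA triple-chevron launch becomes hipLaunchKernelGGL with the grid, block, shared-memory bytes and stream made explicit, followed by the kernel arguments. A minimal sketch of the same mapping, with an illustrative kernel and wrapper not taken from the file:

__global__ void scale2x(int *v, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) v[i] *= 2;
}

void launch_scale2x(int *d_v, int n)
{
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    // CUDA spelling (as in the .cu files of this corpus):
    scale2x<<<grid, block, 0, 0>>>(d_v, n);
    // hipify spelling (as in the .hip files), same grid, block, shared-mem bytes and stream:
    //   hipLaunchKernelGGL(scale2x, grid, block, 0, 0, d_v, n);
}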
664d1325bc925190151e609386ab3544f0d5f8ea.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // 2018.11.10 // very slow average 200s // every thread check for a Queen. not a pair[] // 2018.12.29 //copy form CheckOne_Datafile_compare/gpu_1_Queen #include "Kernel_p.h" #define DEBUG #define BLOCK_SIZE 512 //----------------------------Kernel---------------------------------------- __global__ void Ker_Warm(){ // empty body, just warmup GPU; if(threadIdx.x == 0 ) printf("GPU is OK!\n"); } __global__ void Ker_Check_Combination3 ( unsigned int *d_combination, // unsigned int combination_size, // length of combinations =queen number =N unsigned int *d_result // return conflicts count. ) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; // use shared mem,so must be in a Block, need not global thread No. d_result[0] =0; if( tid == 0 ) { for(int i =0 ;i<combination_size ; i++){ printf("%4d ", d_combination[i]); d_result[0] += d_combination[i]; } printf("\nsum = %8d \n ", d_result[0]); } } __global__ void Ker_Check_Combination ( unsigned int *d_combination, // unsigned int combination_size, // length of combinations =queen number =N unsigned int *d_result // return conflicts count. ) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; // use shared mem,so must be in a Block, need not global thread No. //printf("block =%d, thread=%d, tid=%d\n",blockIdx.x, threadIdx.x ,tid); if(tid >= combination_size) return; if(tid==0) d_result[0]=0; int curX=tid; int curY=d_combination[tid]; //check every queen after cur; for(int iX=tid+1 ; iX <= combination_size-1 ; iX++){ int iY=d_combination[iX]; if(iY == curY || iX+iY == curX+ curY || iY -iX == curY - curX) { // not a Permutations, it is random numbers. //printf("-------->>> (%5d,%5d) (%5d,%5d) thread:%5d \n ", curX,curY,iX,iY,tid); atomicAdd ((unsigned int *)&d_result[0],1); // break; // get all conflicts } } }// end of Kernel //----------------------------CPU Interface---------------------------------------- void setDevice(int i) { checkCudaErrors( hipSetDevice( i ) ); } int getDevice() { int id=-1; checkCudaErrors( hipGetDevice( &id ) ); return id; } void warmGPU() // warm a single GPU { hipError_t cuda_err; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float cuda_time=0; hipEventRecord(start, 0); hipLaunchKernelGGL(( Ker_Warm) , dim3(1),dim3(1), 0, 0, ); cuda_err= hipSuccess; cuda_err = hipGetLastError(); if (cuda_err != hipSuccess) { fprintf(stderr, "Failed to launch (error code= %s)!\n", hipGetErrorString(cuda_err)); exit(EXIT_FAILURE); } else { #ifdef DEBUG fprintf(stderr, "Worm launch successed! ( code= %s)!\n", hipGetErrorString(cuda_err)); #endif } //checkCudaErrors( hipDeviceSynchronize() ); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&cuda_time, start, stop); #ifdef DEBUG printf("%-40s %f ms \n","warmup() run time=",cuda_time); #endif } void warmGPU0_1() // warm GPU 0 and 1 in diffirent streams// wrong?? 
{ hipError_t cuda_err; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float cuda_time=0; hipEventRecord(start, 0); hipStream_t stream0; hipStream_t stream1; checkCudaErrors(hipSetDevice(0)); checkCudaErrors(hipStreamCreate(&stream0)); hipLaunchKernelGGL(( Ker_Warm) , dim3(1),dim3(1),0 , stream0, ); checkCudaErrors(hipSetDevice(1)); checkCudaErrors(hipStreamCreate(&stream1)); hipLaunchKernelGGL(( Ker_Warm) , dim3(1),dim3(1),0 , stream1, ); cuda_err= hipSuccess; cuda_err = hipGetLastError(); if (cuda_err != hipSuccess) { fprintf(stderr, "Failed to launch (error code= %s)!\n", hipGetErrorString(cuda_err)); exit(EXIT_FAILURE); } else { #ifdef DEBUG fprintf(stderr, "Worm launch successed! ( code= %s)!\n", hipGetErrorString(cuda_err)); #endif } //checkCudaErrors( hipDeviceSynchronize() ); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&cuda_time, start, stop); #ifdef DEBUG printf("%-40s %f ms \n","warmup() run time=",cuda_time); #endif checkCudaErrors(hipSetDevice(0)); checkCudaErrors(hipStreamDestroy(stream0)); checkCudaErrors(hipSetDevice(1)); checkCudaErrors(hipStreamDestroy(stream1)); } void get_conflicts(unsigned int * combi0, unsigned int * combi1, unsigned int size, unsigned *res0, unsigned *res1) { /* printf("\n----get_conflicts() begin! \n"); */ unsigned *h_combi0= combi0 ; // unsigned *h_combi1= combi1 ; // unsigned *d_combi0= 0; unsigned *d_combi1= 0; unsigned * h_result0 = 0; unsigned * h_result1 = 0; unsigned * d_result0 = 0; unsigned * d_result1 = 0; hipError_t cuda_err; hipStream_t stream0; hipStream_t stream1; //--------------cpu---------------------------------- //int GPU_N; //checkCudaErrors(hipGetDeviceCount(&GPU_N)); cuda_err = hipSuccess; //---------------GPU0 memroy alloc----------------------------- checkCudaErrors(hipSetDevice(0)); checkCudaErrors(hipStreamCreate(&stream0)); checkCudaErrors(hipMalloc((void **)&d_combi0, size * sizeof( unsigned))); checkCudaErrors(hipMalloc((void **)&d_result0 , 1 * sizeof( unsigned))); checkCudaErrors(hipHostMalloc((void **)&h_result0, 1 * sizeof(unsigned))); // hipHostMalloc , not malloc() /* memcpy( h_combi0, combi , size * sizeof(unsigned)); */ cuda_err = hipGetLastError(); if (cuda_err != hipSuccess) { fprintf(stderr, "GPU 0 alloc d_combi error! (error code= %s)!\n", hipGetErrorString(cuda_err)); exit(EXIT_FAILURE); } //---------------GPU1 memory alloc----------------------------- checkCudaErrors(hipSetDevice(1)); checkCudaErrors(hipStreamCreate(&stream1)); checkCudaErrors(hipMalloc((void **)&d_combi1, size * sizeof( unsigned))); checkCudaErrors(hipMalloc((void **)&d_result1 , 1 * sizeof( unsigned))); checkCudaErrors(hipHostMalloc((void **)&h_result1, 1 * sizeof( unsigned))); // hipHostMalloc , not malloc() /* memcpy( h_combi1, combi , size * sizeof(unsigned)); */ cuda_err = hipGetLastError(); if (cuda_err != hipSuccess) { fprintf(stderr, "GPU1 alloc d_combi error! 
(error code= %s)!\n", hipGetErrorString(cuda_err)); exit(EXIT_FAILURE); } // ansync transfer, run Kernel , and transfer result back; // -----------------GPU0---------------------------------------------------------------------------- checkCudaErrors(hipSetDevice(0)); checkCudaErrors(hipMemcpyAsync(d_combi0, h_combi0, size * sizeof( unsigned), hipMemcpyHostToDevice, stream0)); int GRID_SIZE = ( size + BLOCK_SIZE-1)/BLOCK_SIZE ; /* printf("gridsize = %d, blocksize =%d, queens = %d\n", GRID_SIZE, BLOCK_SIZE, size ); */ checkCudaErrors(hipSetDevice(0)); /* printf( "Switch to device : %d\n", getDevice()); */ hipLaunchKernelGGL(( Ker_Check_Combination), dim3(GRID_SIZE), dim3(BLOCK_SIZE) , 0, stream0 , d_combi0 ,size , d_result0); getLastCudaError("Kernel() in divece 0 execution failed.\n"); checkCudaErrors(hipMemcpyAsync(h_result0, d_result0, 1 * sizeof(unsigned), hipMemcpyDeviceToHost,stream0)); // -----------------GPU1---------------------------------------------------------------------------- checkCudaErrors(hipSetDevice(1)); checkCudaErrors(hipMemcpyAsync(d_combi1, h_combi1, size * sizeof( unsigned), hipMemcpyHostToDevice, stream1)); /* printf( "Switch to device : %d\n", getDevice()); */ cuda_err= hipSuccess; hipLaunchKernelGGL(( Ker_Check_Combination), dim3(( size + BLOCK_SIZE-1)/BLOCK_SIZE) , dim3(BLOCK_SIZE) , 0, stream1 , d_combi1 ,size , d_result1); getLastCudaError("Kernel() in divece 1 execution failed.\n"); checkCudaErrors(hipMemcpyAsync(h_result1, d_result1, 1 * sizeof(unsigned), hipMemcpyDeviceToHost,stream1)); //------------------GPU0 ----------------------------------------------------------- checkCudaErrors(hipSetDevice(0)); hipStreamSynchronize(stream0); //------------------GPU1 ----------------------------------------------------------- checkCudaErrors(hipSetDevice(1)); hipStreamSynchronize(stream1); // ----------------------CPU --------------------------------- *res0= h_result0[0]; *res1= h_result1[0]; //************************************************************************************************************ //--------GPU0--------------------------- checkCudaErrors(hipSetDevice(0)); checkCudaErrors( hipFree(d_combi0) ); checkCudaErrors( hipFree(d_result0) ); /* checkCudaErrors(hipHostFree(h_combi0)); */ checkCudaErrors(hipHostFree(h_result0)); checkCudaErrors(hipStreamDestroy(stream0)); //--------GPU0--------------------------- checkCudaErrors(hipSetDevice(1)); checkCudaErrors( hipFree(d_combi1) ); checkCudaErrors( hipFree(d_result1) ); /* checkCudaErrors(hipHostFree(h_combi1)); */ checkCudaErrors(hipHostFree(h_result1)); checkCudaErrors(hipStreamDestroy(stream1)); /* printf(" get_conflicts() run OK !\n"); */ }
664d1325bc925190151e609386ab3544f0d5f8ea.cu
// 2018.11.10 // very slow average 200s // every thread check for a Queen. not a pair[] // 2018.12.29 //copy form CheckOne_Datafile_compare/gpu_1_Queen #include "Kernel_p.h" #define DEBUG #define BLOCK_SIZE 512 //----------------------------Kernel---------------------------------------- __global__ void Ker_Warm(){ // empty body, just warmup GPU; if(threadIdx.x == 0 ) printf("GPU is OK!\n"); } __global__ void Ker_Check_Combination3 ( unsigned int *d_combination, // unsigned int combination_size, // length of combinations =queen number =N unsigned int *d_result // return conflicts count. ) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; // use shared mem,so must be in a Block, need not global thread No. d_result[0] =0; if( tid == 0 ) { for(int i =0 ;i<combination_size ; i++){ printf("%4d ", d_combination[i]); d_result[0] += d_combination[i]; } printf("\nsum = %8d \n ", d_result[0]); } } __global__ void Ker_Check_Combination ( unsigned int *d_combination, // unsigned int combination_size, // length of combinations =queen number =N unsigned int *d_result // return conflicts count. ) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; // use shared mem,so must be in a Block, need not global thread No. //printf("block =%d, thread=%d, tid=%d\n",blockIdx.x, threadIdx.x ,tid); if(tid >= combination_size) return; if(tid==0) d_result[0]=0; int curX=tid; int curY=d_combination[tid]; //check every queen after cur; for(int iX=tid+1 ; iX <= combination_size-1 ; iX++){ int iY=d_combination[iX]; if(iY == curY || iX+iY == curX+ curY || iY -iX == curY - curX) { // not a Permutations, it is random numbers. //printf("-------->>> (%5d,%5d) (%5d,%5d) thread:%5d \n ", curX,curY,iX,iY,tid); atomicAdd ((unsigned int *)&d_result[0],1); // break; // get all conflicts } } }// end of Kernel //----------------------------CPU Interface---------------------------------------- void setDevice(int i) { checkCudaErrors( cudaSetDevice( i ) ); } int getDevice() { int id=-1; checkCudaErrors( cudaGetDevice( &id ) ); return id; } void warmGPU() // warm a single GPU { cudaError_t cuda_err; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float cuda_time=0; cudaEventRecord(start, 0); Ker_Warm <<<1,1>>> (); cuda_err= cudaSuccess; cuda_err = cudaGetLastError(); if (cuda_err != cudaSuccess) { fprintf(stderr, "Failed to launch (error code= %s)!\n", cudaGetErrorString(cuda_err)); exit(EXIT_FAILURE); } else { #ifdef DEBUG fprintf(stderr, "Worm launch successed! ( code= %s)!\n", cudaGetErrorString(cuda_err)); #endif } //checkCudaErrors( cudaDeviceSynchronize() ); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&cuda_time, start, stop); #ifdef DEBUG printf("%-40s %f ms \n","warmup() run time=",cuda_time); #endif } void warmGPU0_1() // warm GPU 0 and 1 in diffirent streams// wrong?? { cudaError_t cuda_err; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float cuda_time=0; cudaEventRecord(start, 0); cudaStream_t stream0; cudaStream_t stream1; checkCudaErrors(cudaSetDevice(0)); checkCudaErrors(cudaStreamCreate(&stream0)); Ker_Warm <<<1,1,0 , stream0>>> (); checkCudaErrors(cudaSetDevice(1)); checkCudaErrors(cudaStreamCreate(&stream1)); Ker_Warm <<<1,1,0 , stream1>>> (); cuda_err= cudaSuccess; cuda_err = cudaGetLastError(); if (cuda_err != cudaSuccess) { fprintf(stderr, "Failed to launch (error code= %s)!\n", cudaGetErrorString(cuda_err)); exit(EXIT_FAILURE); } else { #ifdef DEBUG fprintf(stderr, "Worm launch successed! 
( code= %s)!\n", cudaGetErrorString(cuda_err)); #endif } //checkCudaErrors( cudaDeviceSynchronize() ); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&cuda_time, start, stop); #ifdef DEBUG printf("%-40s %f ms \n","warmup() run time=",cuda_time); #endif checkCudaErrors(cudaSetDevice(0)); checkCudaErrors(cudaStreamDestroy(stream0)); checkCudaErrors(cudaSetDevice(1)); checkCudaErrors(cudaStreamDestroy(stream1)); } void get_conflicts(unsigned int * combi0, unsigned int * combi1, unsigned int size, unsigned *res0, unsigned *res1) { /* printf("\n----get_conflicts() begin! \n"); */ unsigned *h_combi0= combi0 ; // 不是页锁定内存,是主程序直接传入的数组 unsigned *h_combi1= combi1 ; // 不是页锁定内存,是主程序直接传入的数组 unsigned *d_combi0= 0; unsigned *d_combi1= 0; unsigned * h_result0 = 0; unsigned * h_result1 = 0; unsigned * d_result0 = 0; unsigned * d_result1 = 0; cudaError_t cuda_err; cudaStream_t stream0; cudaStream_t stream1; //--------------cpu---------------------------------- //int GPU_N; //checkCudaErrors(cudaGetDeviceCount(&GPU_N)); cuda_err = cudaSuccess; //---------------GPU0 memroy alloc----------------------------- checkCudaErrors(cudaSetDevice(0)); checkCudaErrors(cudaStreamCreate(&stream0)); checkCudaErrors(cudaMalloc((void **)&d_combi0, size * sizeof( unsigned))); checkCudaErrors(cudaMalloc((void **)&d_result0 , 1 * sizeof( unsigned))); checkCudaErrors(cudaMallocHost((void **)&h_result0, 1 * sizeof(unsigned))); // cudaMallocHost , not malloc() /* memcpy( h_combi0, combi , size * sizeof(unsigned)); */ cuda_err = cudaGetLastError(); if (cuda_err != cudaSuccess) { fprintf(stderr, "GPU 0 alloc d_combi error! (error code= %s)!\n", cudaGetErrorString(cuda_err)); exit(EXIT_FAILURE); } //---------------GPU1 memory alloc----------------------------- checkCudaErrors(cudaSetDevice(1)); checkCudaErrors(cudaStreamCreate(&stream1)); checkCudaErrors(cudaMalloc((void **)&d_combi1, size * sizeof( unsigned))); checkCudaErrors(cudaMalloc((void **)&d_result1 , 1 * sizeof( unsigned))); checkCudaErrors(cudaMallocHost((void **)&h_result1, 1 * sizeof( unsigned))); // cudaMallocHost , not malloc() /* memcpy( h_combi1, combi , size * sizeof(unsigned)); */ cuda_err = cudaGetLastError(); if (cuda_err != cudaSuccess) { fprintf(stderr, "GPU1 alloc d_combi error! 
(error code= %s)!\n", cudaGetErrorString(cuda_err)); exit(EXIT_FAILURE); } // ansync transfer, run Kernel , and transfer result back; // -----------------GPU0---------------------------------------------------------------------------- checkCudaErrors(cudaSetDevice(0)); checkCudaErrors(cudaMemcpyAsync(d_combi0, h_combi0, size * sizeof( unsigned), cudaMemcpyHostToDevice, stream0)); int GRID_SIZE = ( size + BLOCK_SIZE-1)/BLOCK_SIZE ; /* printf("gridsize = %d, blocksize =%d, queens = %d\n", GRID_SIZE, BLOCK_SIZE, size ); */ checkCudaErrors(cudaSetDevice(0)); /* printf( "Switch to device : %d\n", getDevice()); */ Ker_Check_Combination<<< GRID_SIZE, BLOCK_SIZE , 0, stream0 >>> (d_combi0 ,size , d_result0); getLastCudaError("Kernel() in divece 0 execution failed.\n"); checkCudaErrors(cudaMemcpyAsync(h_result0, d_result0, 1 * sizeof(unsigned), cudaMemcpyDeviceToHost,stream0)); // -----------------GPU1---------------------------------------------------------------------------- checkCudaErrors(cudaSetDevice(1)); checkCudaErrors(cudaMemcpyAsync(d_combi1, h_combi1, size * sizeof( unsigned), cudaMemcpyHostToDevice, stream1)); /* printf( "Switch to device : %d\n", getDevice()); */ cuda_err= cudaSuccess; Ker_Check_Combination<<< ( size + BLOCK_SIZE-1)/BLOCK_SIZE , BLOCK_SIZE , 0, stream1 >>> (d_combi1 ,size , d_result1); getLastCudaError("Kernel() in divece 1 execution failed.\n"); checkCudaErrors(cudaMemcpyAsync(h_result1, d_result1, 1 * sizeof(unsigned), cudaMemcpyDeviceToHost,stream1)); //------------------GPU0 同步----------------------------------------------------------- checkCudaErrors(cudaSetDevice(0)); cudaStreamSynchronize(stream0); //------------------GPU1 同步----------------------------------------------------------- checkCudaErrors(cudaSetDevice(1)); cudaStreamSynchronize(stream1); // ----------------------CPU --------------------------------- *res0= h_result0[0]; *res1= h_result1[0]; //************************************************************************************************************ //--------GPU0--------------------------- checkCudaErrors(cudaSetDevice(0)); checkCudaErrors( cudaFree(d_combi0) ); checkCudaErrors( cudaFree(d_result0) ); /* checkCudaErrors(cudaFreeHost(h_combi0)); */ checkCudaErrors(cudaFreeHost(h_result0)); checkCudaErrors(cudaStreamDestroy(stream0)); //--------GPU0--------------------------- checkCudaErrors(cudaSetDevice(1)); checkCudaErrors( cudaFree(d_combi1) ); checkCudaErrors( cudaFree(d_result1) ); /* checkCudaErrors(cudaFreeHost(h_combi1)); */ checkCudaErrors(cudaFreeHost(h_result1)); checkCudaErrors(cudaStreamDestroy(stream1)); /* printf(" get_conflicts() run OK !\n"); */ }
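The queens pair above is the corpus's clearest example of the host-API renames hipify applies (cudaMallocHost to hipHostMalloc, cudaFreeHost to hipHostFree, cudaStream_t to hipStream_t) wrapped around a two-GPU, one-stream-per-device pattern. A condensed sketch of that pattern follows, assuming <cuda_runtime.h>, two visible devices and a placeholder kernel, with error checks omitted; note the original enqueues async copies from pageable host arrays, which typically prevents the transfer from being fully asynchronous, so the sketch pins the small result buffer as the original does.

void run_on_two_gpus(const unsigned *h_in, size_t n)
{
    cudaStream_t stream[2];
    unsigned *d_in[2], *d_out[2], *h_out[2];
    for (int dev = 0; dev < 2; ++dev) {
        cudaSetDevice(dev);
        cudaStreamCreate(&stream[dev]);
        cudaMalloc(&d_in[dev],  n * sizeof(unsigned));
        cudaMalloc(&d_out[dev], sizeof(unsigned));
        cudaMallocHost(&h_out[dev], sizeof(unsigned));   // pinned result buffer (hipHostMalloc in HIP)
        cudaMemcpyAsync(d_in[dev], h_in, n * sizeof(unsigned),
                        cudaMemcpyHostToDevice, stream[dev]);
        // some_kernel<<<grid, block, 0, stream[dev]>>>(d_in[dev], n, d_out[dev]);
        cudaMemcpyAsync(h_out[dev], d_out[dev], sizeof(unsigned),
                        cudaMemcpyDeviceToHost, stream[dev]);
    }
    for (int dev = 0; dev < 2; ++dev) {                  // join both devices, then release resources
        cudaSetDevice(dev);
        cudaStreamSynchronize(stream[dev]);
        cudaFree(d_in[dev]); cudaFree(d_out[dev]); cudaFreeHost(h_out[dev]);
        cudaStreamDestroy(stream[dev]);
    }
}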
4c040b31d026bf740518f61ac89bc546b7dfbc2e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void cuda_kernel(double *A, double *B, double *C, int arraySize) { // Get thread ID. int tid = blockDim.x * blockIdx.x + threadIdx.x; // Check if thread is within array bounds. if (tid < arraySize) { // Add a and b. C[tid] = A[tid] + B[ tid]; } }
4c040b31d026bf740518f61ac89bc546b7dfbc2e.cu
#include "includes.h" __global__ void cuda_kernel(double *A, double *B, double *C, int arraySize) { // Get thread ID. int tid = blockDim.x * blockIdx.x + threadIdx.x; // Check if thread is within array bounds. if (tid < arraySize) { // Add a and b. C[tid] = A[tid] + B[ tid]; } }
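The pair above is kernel-only. For completeness, a minimal host driver that exercises it is sketched below, assuming the kernel definition above sits in the same translation unit; the array size, block size and printed index are arbitrary choices for illustration.

#include <cstdio>
#include <cuda_runtime.h>

int main()
{
    const int n = 1024;
    const size_t bytes = n * sizeof(double);
    double *h_a = new double[n], *h_b = new double[n], *h_c = new double[n];
    for (int i = 0; i < n; ++i) { h_a[i] = i; h_b[i] = 2.0 * i; }

    double *d_a, *d_b, *d_c;
    cudaMalloc(&d_a, bytes); cudaMalloc(&d_b, bytes); cudaMalloc(&d_c, bytes);
    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);

    const int block = 256;
    const int grid = (n + block - 1) / block;          // ceil-divide; the kernel guards tid < n
    cuda_kernel<<<grid, block>>>(d_a, d_b, d_c, n);
    cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);

    printf("c[10] = %f (expected 30)\n", h_c[10]);     // 10 + 2*10
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    delete[] h_a; delete[] h_b; delete[] h_c;
    return 0;
}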
efd92950d5f259d062aff22a77b7c7001d290e6d.hip
// !!! This is a file automatically generated by hipify!!! #include "cvt.cuh" #include <cstdio> #include <stdio.h> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> __global__ void convert_yv12_to_bgra_HD_kernel(uint8_t * yv12_input, uint8_t * rgba_output) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; uint8_t Y = yv12_input[y * 1920 + x]; uint8_t U = yv12_input[1920 * 1080 + (x / 2) + 960 * (y / 2)]; uint8_t V = yv12_input[1920 * 1080 * 5 / 4 + (x / 2) + 960 * (y / 2)]; int B = (int)(Y + (U - 128) + (((U - 128) * 198) >> 8)); int G = (int)(Y - (((U - 128) * 88) >> 8) - (((V - 128) * 183) >> 8)); int R = (int)(Y + (V - 128) + (((V - 128) * 103) >> 8)); int A = 255; rgba_output[y * 1920 * 4 + 4 * x] = (uint8_t)((B < 0) ? 0 : ((B > 255) ? 255 : B)); rgba_output[y * 1920 * 4 + 4 * x + 1] = (uint8_t)((G < 0) ? 0 : ((G > 255) ? 255 : G)); rgba_output[y * 1920 * 4 + 4 * x + 2] = (uint8_t)((R < 0) ? 0 : ((R > 255) ? 255 : R)); rgba_output[y * 1920 * 4 + 4 * x + 3] = (uint8_t)A; } void my_convert_yv12_to_bgra_HD(uint8_t * input, uint8_t * output, int dev_id) { hipSetDevice(dev_id); dim3 Block(32, 20); dim3 Grid(1920 / 32, 1080 / 20); hipLaunchKernelGGL(( convert_yv12_to_bgra_HD_kernel) , dim3(Grid), dim3(Block) , 0, 0, input, output); //printf("CVT__called.\n"); //hipDeviceSynchronize(); }
efd92950d5f259d062aff22a77b7c7001d290e6d.cu
#include "cvt.cuh" #include <cstdio> #include <stdio.h> #include <cuda_runtime_api.h> #include <cuda.h> __global__ void convert_yv12_to_bgra_HD_kernel(uint8_t * yv12_input, uint8_t * rgba_output) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; uint8_t Y = yv12_input[y * 1920 + x]; uint8_t U = yv12_input[1920 * 1080 + (x / 2) + 960 * (y / 2)]; uint8_t V = yv12_input[1920 * 1080 * 5 / 4 + (x / 2) + 960 * (y / 2)]; int B = (int)(Y + (U - 128) + (((U - 128) * 198) >> 8)); int G = (int)(Y - (((U - 128) * 88) >> 8) - (((V - 128) * 183) >> 8)); int R = (int)(Y + (V - 128) + (((V - 128) * 103) >> 8)); int A = 255; rgba_output[y * 1920 * 4 + 4 * x] = (uint8_t)((B < 0) ? 0 : ((B > 255) ? 255 : B)); rgba_output[y * 1920 * 4 + 4 * x + 1] = (uint8_t)((G < 0) ? 0 : ((G > 255) ? 255 : G)); rgba_output[y * 1920 * 4 + 4 * x + 2] = (uint8_t)((R < 0) ? 0 : ((R > 255) ? 255 : R)); rgba_output[y * 1920 * 4 + 4 * x + 3] = (uint8_t)A; } void my_convert_yv12_to_bgra_HD(uint8_t * input, uint8_t * output, int dev_id) { cudaSetDevice(dev_id); dim3 Block(32, 20); dim3 Grid(1920 / 32, 1080 / 20); convert_yv12_to_bgra_HD_kernel <<< Grid, Block >>> (input, output); //printf("CVT__called.\n"); //cudaDeviceSynchronize(); }
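The conversion kernel above encodes its colour coefficients as >>8 fixed point. Read back as fractions of 256 they appear to be the usual BT.601 YUV-to-RGB constants; the tiny host-only check below verifies that reading (the reference constants 1.402, 0.344, 0.714 and 1.772 are the standard values, not taken from the file).

#include <cstdio>
int main()
{
    // reference          fixed-point form used in the kernel
    printf("R: 1.402 vs 1 + 103/256 = %f\n", 1.0 + 103.0 / 256.0);  // 1.402344
    printf("G: 0.344 vs      88/256 = %f\n",        88.0 / 256.0);  // 0.343750
    printf("G: 0.714 vs     183/256 = %f\n",       183.0 / 256.0);  // 0.714844
    printf("B: 1.772 vs 1 + 198/256 = %f\n", 1.0 + 198.0 / 256.0);  // 1.773438
    return 0;
}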
9329c12b2f41ee0c21724143dd9bbd2ca98557a9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (C) by Argonne National Laboratory. * See COPYRIGHT in top-level directory. */ #include "mpl_gpu_cuda.h" #include <stdio.h> __global__ void MPL_gpu_kernel_trigger(MPL_gpu_event_t *var) { *var -= 1; __threadfence_system(); } __global__ void MPL_gpu_kernel_wait(MPL_gpu_event_t *var) { while(*var > 0); } extern "C" void MPL_gpu_enqueue_trigger(volatile int *var, hipStream_t stream) { hipError_t cerr; void *args[] = {&var}; cerr = cudaLaunchKernel((const void *) MPL_gpu_kernel_trigger, dim3(1,1,1), dim3(1,1,1), args, 0, stream); if (cerr != hipSuccess) { fprintf(stderr, "CUDA Error (%s): %s\n", __func__, hipGetErrorString(cerr)); } } extern "C" void MPL_gpu_enqueue_wait(volatile int *var, hipStream_t stream) { hipError_t cerr; void *args[] = {&var}; cerr = cudaLaunchKernel((const void *) MPL_gpu_kernel_wait, dim3(1,1,1), dim3(1,1,1), args, 0, stream); if (cerr != hipSuccess) { fprintf(stderr, "CUDA Error (%s): %s\n", __func__, hipGetErrorString(cerr)); } } extern "C" void MPL_gpu_event_init_count(MPL_gpu_event_t *var, int count) { *var = count; } extern "C" void MPL_gpu_event_complete(MPL_gpu_event_t *var) { *var -= 1; } extern "C" bool MPL_gpu_event_is_complete(MPL_gpu_event_t *var) { return (*var) <= 0; }
9329c12b2f41ee0c21724143dd9bbd2ca98557a9.cu
/* * Copyright (C) by Argonne National Laboratory. * See COPYRIGHT in top-level directory. */ #include "mpl_gpu_cuda.h" #include <stdio.h> __global__ void MPL_gpu_kernel_trigger(MPL_gpu_event_t *var) { *var -= 1; __threadfence_system(); } __global__ void MPL_gpu_kernel_wait(MPL_gpu_event_t *var) { while(*var > 0); } extern "C" void MPL_gpu_enqueue_trigger(volatile int *var, cudaStream_t stream) { cudaError_t cerr; void *args[] = {&var}; cerr = cudaLaunchKernel((const void *) MPL_gpu_kernel_trigger, dim3(1,1,1), dim3(1,1,1), args, 0, stream); if (cerr != cudaSuccess) { fprintf(stderr, "CUDA Error (%s): %s\n", __func__, cudaGetErrorString(cerr)); } } extern "C" void MPL_gpu_enqueue_wait(volatile int *var, cudaStream_t stream) { cudaError_t cerr; void *args[] = {&var}; cerr = cudaLaunchKernel((const void *) MPL_gpu_kernel_wait, dim3(1,1,1), dim3(1,1,1), args, 0, stream); if (cerr != cudaSuccess) { fprintf(stderr, "CUDA Error (%s): %s\n", __func__, cudaGetErrorString(cerr)); } } extern "C" void MPL_gpu_event_init_count(MPL_gpu_event_t *var, int count) { *var = count; } extern "C" void MPL_gpu_event_complete(MPL_gpu_event_t *var) { *var -= 1; } extern "C" bool MPL_gpu_event_is_complete(MPL_gpu_event_t *var) { return (*var) <= 0; }
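Unlike the rest of the corpus, this pair launches its kernels through the host API cudaLaunchKernel with an explicit args array rather than the triple-chevron syntax, and the hipified side keeps the cudaLaunchKernel call unchanged. A minimal self-contained example of that launch style is sketched below with illustrative kernel and variable names; HIP's own counterpart, hipLaunchKernel, takes the same argument layout.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void set_value(int *p, int v) { *p = v; }

int main()
{
    int *d_p, h_p = 0;
    cudaMalloc(&d_p, sizeof(int));
    int v = 42;
    void *args[] = { &d_p, &v };                         // one pointer per kernel parameter
    cudaError_t err = cudaLaunchKernel((const void *)set_value,
                                       dim3(1, 1, 1), dim3(1, 1, 1),
                                       args, 0, 0);      // no shared memory, default stream
    if (err != cudaSuccess) printf("launch failed: %s\n", cudaGetErrorString(err));
    cudaMemcpy(&h_p, d_p, sizeof(int), cudaMemcpyDeviceToHost);
    printf("value = %d\n", h_p);                          // expect 42
    cudaFree(d_p);
    return 0;
}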
c88cc9593f1dbf093bda2bbbae665e88bc76dc68.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <string.h> #include <stdio.h> #include <math.h> #include <time.h> #include "../inc/common.h" #include "cuda_app_def.h" #include "mat.h" #include "cuMat.h" /*180*/ __global__ void cuMat2dRotate_180_kernel(float *matIn, float *matOut, unsigned int outW, unsigned int outH) { int i = threadIdx.x; int j = threadIdx.y; if (!matIn || !matOut || !outW || !outH) { return; } int row = blockIdx.y*blockDim.y + threadIdx.y; // X row, Ycol int col = blockIdx.x*blockDim.x + threadIdx.x; matOut[j * outW + i] = matIn[i * outH+ j]; } __global__ void addKernel(float *c, const float *a, const float *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } //kernel __global__ void cuMat2DAdd_kernel(float* _C, const float* _A, const float *_B, int n) { // int row = (blockIdx.x*blockDim.x + threadIdx.x)/n; // X row, Ycol int col = (blockIdx.x*blockDim.x + threadIdx.x)%n; if (row < n) { //Thread(row,col)C(row,col) _C[col + row*n] = _A[col + row*n] + _B[col + row*n]; } } __global__ void cuMat2DRolate180_kernel(float* _B, const float* _A, int n) { // int row = (blockIdx.x*blockDim.x + threadIdx.x) / n; // X row, Ycol int col = (blockIdx.x*blockDim.x + threadIdx.x) % n; if (row < n) { //Thread(row,col)C(row,col) _B[col + row*n] = _A[(n-1-col) + (n-1-row)*n]; } } __global__ void cuMat2DSubSum_kernel(float* inData, nSize inSize, float* mapMat, nSize mapSize, float* outData, int outSizeW) { int i = threadIdx.x; int j = threadIdx.y; int r, c; // printf("x:%d, y:%d\n", blockDim.x, blockDim.y); outData[j * outSizeW + i] = (float)0.0; for (r = 0; r < mapSize.r; r++) { for (c = 0; c < mapSize.c; c++) { outData[j * outSizeW + i] += mapMat[r * mapSize.c + c] * inData[(j + r) * inSize.c + i + c]; } } } __global__ void cuMat2dEdgeExpand_kernel(float *matIn, nSize matSize, float *matOut, int addc, int addr) { int col, row; int out_c = matSize.c + 2 * addc; col = blockIdx.x*blockDim.x + threadIdx.x; row = blockIdx.y*blockDim.y + threadIdx.y; // printf("x:%d, y:%d\n", blockDim.x, blockDim.y); if (row < addr || row >= (matSize.r + addr) \ || col < addc || col >= (matSize.c + addc)) { matOut[row * out_c + col] = (float)0.0; } else { matOut[row * out_c + col] = matIn[(row - addr) * matSize.c + col - addc]; /* */ } } __global__ void cuMat2dEdgeShrink_kernel(float *matIn, nSize matSize, float *matOut, int shrinkc, int shrinkr) { int i, j; int w = matSize.c; int h = matSize.r; i = threadIdx.x; j = threadIdx.y; if ((j >= shrinkr) && (i >= shrinkc) && (j < (h - shrinkr)) && (i < (w - shrinkc))) { matOut[(j - shrinkr) * (w - 2 * shrinkc) + i - shrinkc] = matIn[j * w + i]; /* */ } } void cuMat2dCorrelation_Valid(float *srcMat, nSize srcSize, float *mapMat, nSize mapSize, float *dstMat, nSize dstSize) { int i, j, c, r; float *pTmpData = NULL; nSize exSize = { 0, 0 }; int halfmapsizew; int halfmapsizeh; hipError_t cuRet = hipSuccess; hipError_t cudaStatus = hipSuccess; if (!srcMat || !mapMat || !dstMat) { PRT_ERR("param error !\n"); return; } if (mapSize.r % 2 == 0 && mapSize.c % 2 == 0)/* */ { halfmapsizew = (mapSize.c) / 2; /* */ halfmapsizeh = (mapSize.r) / 2; } else { halfmapsizew = (mapSize.c - 1) / 2; /* */ halfmapsizeh = (mapSize.r - 1) / 2; } /* fullfullinSize+(mapSize-1) */ int outSizeW = srcSize.c + (mapSize.c - 1); /* */ int outSizeH = srcSize.r + (mapSize.r - 1); nSize outSize = { outSizeW, outSizeH }; float *pOutDataDev = NULL; cuRet = hipMalloc((void**)&pOutDataDev, outSizeW*outSizeH*sizeof(float)); RET_CHEAK_ZERO(cuRet); 
/* fullfullinSize+(mapSize-1) */ /* inputData */ exSize.c = srcSize.c + 2 * (mapSize.c - 1); exSize.r = srcSize.r + 2 * (mapSize.r - 1); float *pTmpDev = NULL; cuRet = hipMalloc((void**)&pTmpDev, exSize.c*exSize.r*sizeof(float)); RET_CHEAK_ZERO(cuRet); dim3 blk; dim3 gid; gid.x = 2; gid.y = 2; gid.z = 1; blk.x = exSize.c/2; blk.y = exSize.r/2; blk.z = 1; // printf("x,y,z: %d-%d-%d\n", blk.x, blk.y, blk.z); cuMat2dEdgeExpand_kernel << <gid, blk >> >(srcMat, srcSize, pTmpDev, mapSize.c - 1, mapSize.r - 1); CUDA_STS_CHECK(cudaStatus); hipDeviceSynchronize(); blk.x = outSizeW; blk.y = outSizeH; // printf("x,y,z: %d-%d-%d\n", blk.x, blk.y, blk.z); cuMat2DSubSum_kernel << <1, blk >> >(pTmpDev, exSize, mapMat, mapSize, pOutDataDev, outSizeW); CUDA_STS_CHECK(cudaStatus); hipDeviceSynchronize(); blk.x = outSizeW; blk.y = outSizeH; if (mapSize.r % 2 == 0 && mapSize.c % 2 == 0)/* */ { cuMat2dEdgeShrink_kernel << <1, blk >> >(pOutDataDev, outSize, dstMat, halfmapsizew * 2-1, halfmapsizeh * 2-1); } else { cuMat2dEdgeShrink_kernel << <1, blk >> >(pOutDataDev, outSize, dstMat, halfmapsizew * 2, halfmapsizeh * 2); } CUDA_STS_CHECK(cudaStatus); hipDeviceSynchronize(); hipFree(pTmpDev); hipFree(pOutDataDev); }
c88cc9593f1dbf093bda2bbbae665e88bc76dc68.cu
#include <stdlib.h> #include <string.h> #include <stdio.h> #include <math.h> #include <time.h> #include "../inc/common.h" #include "cuda_app_def.h" #include "mat.h" #include "cuMat.h" /* rotate the matrix by 180 degrees */ __global__ void cuMat2dRotate_180_kernel(float *matIn, float *matOut, unsigned int outW, unsigned int outH) { int i = threadIdx.x; int j = threadIdx.y; if (!matIn || !matOut || !outW || !outH) { return; } int row = blockIdx.y*blockDim.y + threadIdx.y; // X maps to the matrix row, Y to the matrix col int col = blockIdx.x*blockDim.x + threadIdx.x; matOut[j * outW + i] = matIn[i * outH+ j]; } __global__ void addKernel(float *c, const float *a, const float *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } // kernel for matrix addition __global__ void cuMat2DAdd_kernel(float* _C, const float* _A, const float *_B, int n) { // work out the row and column handled by this thread int row = (blockIdx.x*blockDim.x + threadIdx.x)/n; // X maps to the matrix row, Y to the matrix col int col = (blockIdx.x*blockDim.x + threadIdx.x)%n; if (row < n) { // thread (row,col) computes C(row,col) _C[col + row*n] = _A[col + row*n] + _B[col + row*n]; } } __global__ void cuMat2DRolate180_kernel(float* _B, const float* _A, int n) { // work out the row and column handled by this thread int row = (blockIdx.x*blockDim.x + threadIdx.x) / n; // X maps to the matrix row, Y to the matrix col int col = (blockIdx.x*blockDim.x + threadIdx.x) % n; if (row < n) { // thread (row,col) computes C(row,col) _B[col + row*n] = _A[(n-1-col) + (n-1-row)*n]; } } __global__ void cuMat2DSubSum_kernel(float* inData, nSize inSize, float* mapMat, nSize mapSize, float* outData, int outSizeW) { int i = threadIdx.x; int j = threadIdx.y; int r, c; // printf("x:%d, y:%d\n", blockDim.x, blockDim.y); outData[j * outSizeW + i] = (float)0.0; for (r = 0; r < mapSize.r; r++) { for (c = 0; c < mapSize.c; c++) { outData[j * outSizeW + i] += mapMat[r * mapSize.c + c] * inData[(j + r) * inSize.c + i + c]; } } } __global__ void cuMat2dEdgeExpand_kernel(float *matIn, nSize matSize, float *matOut, int addc, int addr) { int col, row; int out_c = matSize.c + 2 * addc; col = blockIdx.x*blockDim.x + threadIdx.x; row = blockIdx.y*blockDim.y + threadIdx.y; // printf("x:%d, y:%d\n", blockDim.x, blockDim.y); if (row < addr || row >= (matSize.r + addr) \ || col < addc || col >= (matSize.c + addc)) { matOut[row * out_c + col] = (float)0.0; } else { matOut[row * out_c + col] = matIn[(row - addr) * matSize.c + col - addc]; /* copy the original data */ } } __global__ void cuMat2dEdgeShrink_kernel(float *matIn, nSize matSize, float *matOut, int shrinkc, int shrinkr) { int i, j; int w = matSize.c; int h = matSize.r; i = threadIdx.x; j = threadIdx.y; if ((j >= shrinkr) && (i >= shrinkc) && (j < (h - shrinkr)) && (i < (w - shrinkc))) { matOut[(j - shrinkr) * (w - 2 * shrinkc) + i - shrinkc] = matIn[j * w + i]; /* copy the original data */ } } void cuMat2dCorrelation_Valid(float *srcMat, nSize srcSize, float *mapMat, nSize mapSize, float *dstMat, nSize dstSize) { int i, j, c, r; float *pTmpData = NULL; nSize exSize = { 0, 0 }; int halfmapsizew; int halfmapsizeh; cudaError cuRet = cudaSuccess; cudaError_t cudaStatus = cudaSuccess; if (!srcMat || !mapMat || !dstMat) { PRT_ERR("param error !\n"); return; } if (mapSize.r % 2 == 0 && mapSize.c % 2 == 0)/* the map (template) size is even */ { halfmapsizew = (mapSize.c) / 2; /* half-width of the convolution map */ halfmapsizeh = (mapSize.r) / 2; } else { halfmapsizew = (mapSize.c - 1) / 2; /* half-width of the convolution map */ halfmapsizeh = (mapSize.r - 1) / 2; } /* default to a full-mode operation first; the full-mode output size is inSize+(mapSize-1) */ int outSizeW = srcSize.c + (mapSize.c - 1); /* the output here is enlarged accordingly */ int outSizeH = srcSize.r + (mapSize.r - 1); nSize outSize = { outSizeW, outSizeH }; float *pOutDataDev = NULL; cuRet = cudaMalloc((void**)&pOutDataDev, outSizeW*outSizeH*sizeof(float)); RET_CHEAK_ZERO(cuRet); /* default to a full-mode operation first; the full-mode output size is inSize+(mapSize-1) */ /* to simplify the computation, pad inputData with an extra border all around */ exSize.c = srcSize.c + 2 * (mapSize.c - 1); exSize.r = srcSize.r + 2 * (mapSize.r - 1); float *pTmpDev = NULL; cuRet = cudaMalloc((void**)&pTmpDev, exSize.c*exSize.r*sizeof(float)); RET_CHEAK_ZERO(cuRet); dim3 blk; dim3 gid; gid.x = 2; gid.y = 2; gid.z = 1; blk.x = exSize.c/2; blk.y = exSize.r/2; blk.z = 1; // printf("x,y,z: %d-%d-%d\n", blk.x, blk.y, blk.z); cuMat2dEdgeExpand_kernel << <gid, blk >> >(srcMat, srcSize, pTmpDev, mapSize.c - 1, mapSize.r - 1); CUDA_STS_CHECK(cudaStatus); cudaThreadSynchronize(); blk.x = outSizeW; blk.y = outSizeH; // printf("x,y,z: %d-%d-%d\n", blk.x, blk.y, blk.z); cuMat2DSubSum_kernel << <1, blk >> >(pTmpDev, exSize, mapMat, mapSize, pOutDataDev, outSizeW); CUDA_STS_CHECK(cudaStatus); cudaThreadSynchronize(); blk.x = outSizeW; blk.y = outSizeH; if (mapSize.r % 2 == 0 && mapSize.c % 2 == 0)/* the map (template) size is even */ { cuMat2dEdgeShrink_kernel << <1, blk >> >(pOutDataDev, outSize, dstMat, halfmapsizew * 2-1, halfmapsizeh * 2-1); } else { cuMat2dEdgeShrink_kernel << <1, blk >> >(pOutDataDev, outSize, dstMat, halfmapsizew * 2, halfmapsizeh * 2); } CUDA_STS_CHECK(cudaStatus); cudaThreadSynchronize(); cudaFree(pTmpDev); cudaFree(pOutDataDev); }
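The size bookkeeping in cuMat2dCorrelation_Valid is easiest to follow with concrete numbers: the routine first produces a full-mode result of width srcSize + mapSize - 1 from an input padded by mapSize - 1 on every side, then trims 2*halfmapsize per side to land on the valid-mode width srcSize - mapSize + 1. A small host check with an illustrative 5x5 source and 3x3 map (values chosen only for the example):

#include <cstdio>
int main()
{
    int srcW = 5, mapW = 3;
    int half   = (mapW - 1) / 2;            // half-width of the map: 1
    int fullW  = srcW + (mapW - 1);         // full-mode width computed first: 7
    int exW    = srcW + 2 * (mapW - 1);     // padded input width: 9
    int validW = fullW - 2 * (half * 2);    // width after cuMat2dEdgeShrink: 3
    printf("full=%d expanded=%d valid=%d (expected %d)\n",
           fullW, exW, validW, srcW - mapW + 1);
    return 0;
}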
d77e0a605fe037a783cd287087f4c7262b6f0dae.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "vdivupdate.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int lengthA = 1; const double alpha = 1; const double *a = NULL; hipMalloc(&a, XSIZE*YSIZE); const double *b = NULL; hipMalloc(&b, XSIZE*YSIZE); double *c = NULL; hipMalloc(&c, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( vdivupdate), dim3(gridBlock),dim3(threadBlock), 0, 0, lengthA,alpha,a,b,c); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( vdivupdate), dim3(gridBlock),dim3(threadBlock), 0, 0, lengthA,alpha,a,b,c); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( vdivupdate), dim3(gridBlock),dim3(threadBlock), 0, 0, lengthA,alpha,a,b,c); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
d77e0a605fe037a783cd287087f4c7262b6f0dae.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "vdivupdate.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int lengthA = 1; const double alpha = 1; const double *a = NULL; cudaMalloc(&a, XSIZE*YSIZE); const double *b = NULL; cudaMalloc(&b, XSIZE*YSIZE); double *c = NULL; cudaMalloc(&c, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); vdivupdate<<<gridBlock,threadBlock>>>(lengthA,alpha,a,b,c); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { vdivupdate<<<gridBlock,threadBlock>>>(lengthA,alpha,a,b,c); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { vdivupdate<<<gridBlock,threadBlock>>>(lengthA,alpha,a,b,c); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
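One caveat about the benchmark pair above: kernel launches are asynchronous, and the steady_clock stop timestamp is taken without a device synchronize, so the reported microseconds largely measure enqueue overhead rather than kernel execution time. An event-based alternative is sketched below, reusing the benchmark's own kernel signature and assuming vdivupdate is visible in the translation unit; the helper name is illustrative.

float time_with_events(dim3 grid, dim3 block, int lengthA, double alpha,
                       const double *a, const double *b, double *c)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    for (int i = 0; i < 1000; ++i)
        vdivupdate<<<grid, block>>>(lengthA, alpha, a, b, c);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);               // wait until the last kernel has finished
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);   // GPU-side elapsed time in milliseconds
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}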
d9609146ed1065308d5d6fc7a45d4648de4992f6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" #include "paddle/fluid/operators/optimizers/adagrad_op.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { namespace { template <typename T, int block_size> __global__ void MergeGradKernel(const T* grad, const int64_t* grad_rows, T* grad_merge, const int64_t* grad_merge_rows, size_t grad_merge_rows_size, int64_t row_numel) { const int ty = blockIdx.y; int tid = threadIdx.x; __shared__ size_t grad_merge_idx; if (tid == 0) { for (size_t i = 0; i < grad_merge_rows_size; i++) { if (grad_rows[ty] == grad_merge_rows[i]) { grad_merge_idx = i; } } } __syncthreads(); grad += ty * row_numel; grad_merge += grad_merge_idx * row_numel; for (int index = tid; index < row_numel; index += block_size) { paddle::platform::CudaAtomicAdd(grad_merge + index, grad[index]); } } template <typename T, int block_size> __global__ void SparseAdagradFunctorKernel(const T* grad, const int64_t* rows, const T* learning_rate, T* param, T* moment, int64_t row_numel, T epsilon) { const int ty = blockIdx.y; int tid = threadIdx.x; grad += ty * row_numel; param += rows[ty] * row_numel; moment += rows[ty] * row_numel; for (int index = tid; index < row_numel; index += block_size) { // Since index in rows of SelectedRows can be duplicate, we have to use // Atomic Operation to avoid concurrent write error. paddle::platform::CudaAtomicAdd(param + index, -1.0 * learning_rate[0] * grad[index] / (sqrt(moment[index]) + epsilon)); } } } // namespace template <typename T> struct SparseAdagradFunctor<platform::CUDADeviceContext, T> { void operator()(const platform::CUDADeviceContext& context, const framework::SelectedRows& grad, const framework::Tensor& learning_rate, T epsilon, framework::Tensor* moment, framework::Tensor* param) { // 1. g_m.rows = set(g.rows) auto grad_width = grad.value().dims()[1]; math::scatter::MergeAdd<platform::CUDADeviceContext, T> merge_func; auto grad_merge = merge_func(context, grad); auto* grad_merge_data = grad_merge.mutable_value()->template data<T>(); framework::Vector<int64_t> merge_rows(grad_merge.rows()); // 2. m += g_m * g_m auto grad_square = SquareSelectedRows<platform::CUDADeviceContext, T>(context, grad_merge); math::SelectedRowsAddToTensor<platform::CUDADeviceContext, T> functor; functor(context, grad_square, moment); // 3. 
update parameter auto* lr = learning_rate.data<T>(); auto* param_data = param->data<T>(); auto* moment_data = moment->data<T>(); const int block_size = 256; dim3 threads(block_size, 1); dim3 grid2(1, merge_rows.size()); hipLaunchKernelGGL(( SparseAdagradFunctorKernel< T, 256>), dim3(grid2), dim3(threads), 0, reinterpret_cast<const platform::CUDADeviceContext&>(context) .stream(), grad_merge_data, merge_rows.CUDAMutableData(context.GetPlace()), lr, param_data, moment_data, grad_width, epsilon); } }; template struct SparseAdagradFunctor<platform::CUDADeviceContext, float>; template struct SparseAdagradFunctor<platform::CUDADeviceContext, double>; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( adagrad, ops::AdagradOpKernel<paddle::platform::CUDADeviceContext, float>, ops::AdagradOpKernel<paddle::platform::CUDADeviceContext, double>);
d9609146ed1065308d5d6fc7a45d4648de4992f6.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" #include "paddle/fluid/operators/optimizers/adagrad_op.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { namespace { template <typename T, int block_size> __global__ void MergeGradKernel(const T* grad, const int64_t* grad_rows, T* grad_merge, const int64_t* grad_merge_rows, size_t grad_merge_rows_size, int64_t row_numel) { const int ty = blockIdx.y; int tid = threadIdx.x; __shared__ size_t grad_merge_idx; if (tid == 0) { for (size_t i = 0; i < grad_merge_rows_size; i++) { if (grad_rows[ty] == grad_merge_rows[i]) { grad_merge_idx = i; } } } __syncthreads(); grad += ty * row_numel; grad_merge += grad_merge_idx * row_numel; for (int index = tid; index < row_numel; index += block_size) { paddle::platform::CudaAtomicAdd(grad_merge + index, grad[index]); } } template <typename T, int block_size> __global__ void SparseAdagradFunctorKernel(const T* grad, const int64_t* rows, const T* learning_rate, T* param, T* moment, int64_t row_numel, T epsilon) { const int ty = blockIdx.y; int tid = threadIdx.x; grad += ty * row_numel; param += rows[ty] * row_numel; moment += rows[ty] * row_numel; for (int index = tid; index < row_numel; index += block_size) { // Since index in rows of SelectedRows can be duplicate, we have to use // Atomic Operation to avoid concurrent write error. paddle::platform::CudaAtomicAdd(param + index, -1.0 * learning_rate[0] * grad[index] / (sqrt(moment[index]) + epsilon)); } } } // namespace template <typename T> struct SparseAdagradFunctor<platform::CUDADeviceContext, T> { void operator()(const platform::CUDADeviceContext& context, const framework::SelectedRows& grad, const framework::Tensor& learning_rate, T epsilon, framework::Tensor* moment, framework::Tensor* param) { // 1. g_m.rows = set(g.rows) auto grad_width = grad.value().dims()[1]; math::scatter::MergeAdd<platform::CUDADeviceContext, T> merge_func; auto grad_merge = merge_func(context, grad); auto* grad_merge_data = grad_merge.mutable_value()->template data<T>(); framework::Vector<int64_t> merge_rows(grad_merge.rows()); // 2. m += g_m * g_m auto grad_square = SquareSelectedRows<platform::CUDADeviceContext, T>(context, grad_merge); math::SelectedRowsAddToTensor<platform::CUDADeviceContext, T> functor; functor(context, grad_square, moment); // 3. 
update parameter auto* lr = learning_rate.data<T>(); auto* param_data = param->data<T>(); auto* moment_data = moment->data<T>(); const int block_size = 256; dim3 threads(block_size, 1); dim3 grid2(1, merge_rows.size()); SparseAdagradFunctorKernel< T, 256><<<grid2, threads, 0, reinterpret_cast<const platform::CUDADeviceContext&>(context) .stream()>>>( grad_merge_data, merge_rows.CUDAMutableData(context.GetPlace()), lr, param_data, moment_data, grad_width, epsilon); } }; template struct SparseAdagradFunctor<platform::CUDADeviceContext, float>; template struct SparseAdagradFunctor<platform::CUDADeviceContext, double>; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( adagrad, ops::AdagradOpKernel<paddle::platform::CUDADeviceContext, float>, ops::AdagradOpKernel<paddle::platform::CUDADeviceContext, double>);
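For reference, the per-element rule applied by SparseAdagradFunctorKernel above, after the moment has been accumulated with the squared gradient, is the standard Adagrad update. A host-side restatement for a single merged row, with illustrative names and float precision chosen only for the sketch:

#include <cmath>
#include <cstddef>
void sparse_adagrad_row(const float *grad_row, const float *moment_row,
                        float *param_row, float lr, float epsilon, std::size_t row_numel)
{
    for (std::size_t i = 0; i < row_numel; ++i)
        param_row[i] -= lr * grad_row[i] / (std::sqrt(moment_row[i]) + epsilon);
}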
da5c56485822c566a807178c38b456f18481edf3.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************* * Copyright (c) 2014, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #include <hip/hip_runtime_api.h> #include <Array.hpp> #include <copy.hpp> #include <kernel/memcopy.hpp> #include <err_cuda.hpp> #include <math.hpp> #include <common/complex.hpp> using common::is_complex; namespace cuda { template<typename T> void copyData(T *dst, const Array<T> &src) { // FIXME: Merge this with copyArray src.eval(); Array<T> out = src; const T *ptr = NULL; if (src.isLinear() || // No offsets, No strides src.ndims() == 1 // Simple offset, no strides. ) { //A.get() gets data with offsets ptr = src.get(); } else { //FIXME: Think about implementing eval out = copyArray(src); ptr = out.get(); } auto stream = cuda::getActiveStream(); CUDA_CHECK(hipMemcpyAsync(dst, ptr, src.elements() * sizeof(T), hipMemcpyDeviceToHost, stream)); CUDA_CHECK(hipStreamSynchronize(stream)); return; } template<typename T> Array<T> copyArray(const Array<T> &src) { Array<T> out = createEmptyArray<T>(src.dims()); if (src.isLinear()) { CUDA_CHECK(hipMemcpyAsync(out.get(), src.get(), src.elements() * sizeof(T), hipMemcpyDeviceToDevice, cuda::getActiveStream())); } else { // FIXME: Seems to fail when using Param<T> kernel::memcopy(out.get(), out.strides().get(), src.get(), src.dims().get(), src.strides().get(), (uint)src.ndims()); } return out; } template<typename inType, typename outType> Array<outType> padArray(Array<inType> const &in, dim4 const &dims, outType default_value, double factor) { ARG_ASSERT(1, (in.ndims() == (size_t)dims.ndims())); Array<outType> ret = createEmptyArray<outType>(dims); kernel::copy<inType, outType>(ret, in, in.ndims(), default_value, factor); return ret; } template<typename T> void multiply_inplace(Array<T> &in, double val) { kernel::copy<T, T>(in, in, in.ndims(), scalar<T>(0), val); } template<typename inType, typename outType> struct copyWrapper { void operator()(Array<outType> &out, Array<inType> const &in) { kernel::copy<inType, outType>(out, in, in.ndims(), scalar<outType>(0), 1); } }; template<typename T> struct copyWrapper<T, T> { void operator()(Array<T> &out, Array<T> const &in) { if (out.isLinear() && in.isLinear() && out.elements() == in.elements()) { CUDA_CHECK(hipMemcpyAsync(out.get(), in.get(), in.elements() * sizeof(T), hipMemcpyDeviceToDevice, cuda::getActiveStream())); } else { kernel::copy<T, T>(out, in, in.ndims(), scalar<T>(0), 1); } } }; template<typename inType, typename outType> void copyArray(Array<outType> &out, Array<inType> const &in) { static_assert(!(is_complex<inType>::value && !is_complex<outType>::value), "Cannot copy from complex value to a non complex value"); ARG_ASSERT(1, (in.ndims() == (size_t)out.dims().ndims())); copyWrapper<inType, outType> copyFn; copyFn(out, in); } #define INSTANTIATE(T) \ template void copyData<T> (T *dst, const Array<T> &src); \ template Array<T> copyArray<T>(const Array<T> &src); \ template void multiply_inplace<T> (Array<T> &in, double norm); \ INSTANTIATE(float ) INSTANTIATE(double ) INSTANTIATE(cfloat ) INSTANTIATE(cdouble) INSTANTIATE(int ) INSTANTIATE(uint ) INSTANTIATE(uchar ) INSTANTIATE(char ) INSTANTIATE(intl ) INSTANTIATE(uintl ) INSTANTIATE(short ) INSTANTIATE(ushort ) #define INSTANTIATE_PAD_ARRAY(SRC_T) \ template Array<float 
> padArray<SRC_T, float >(Array<SRC_T> const &src, dim4 const &dims, float default_value, double factor); \ template Array<double > padArray<SRC_T, double >(Array<SRC_T> const &src, dim4 const &dims, double default_value, double factor); \ template Array<cfloat > padArray<SRC_T, cfloat >(Array<SRC_T> const &src, dim4 const &dims, cfloat default_value, double factor); \ template Array<cdouble> padArray<SRC_T, cdouble>(Array<SRC_T> const &src, dim4 const &dims, cdouble default_value, double factor); \ template Array<int > padArray<SRC_T, int >(Array<SRC_T> const &src, dim4 const &dims, int default_value, double factor); \ template Array<uint > padArray<SRC_T, uint >(Array<SRC_T> const &src, dim4 const &dims, uint default_value, double factor); \ template Array<intl > padArray<SRC_T, intl >(Array<SRC_T> const &src, dim4 const &dims, intl default_value, double factor); \ template Array<uintl > padArray<SRC_T, uintl >(Array<SRC_T> const &src, dim4 const &dims, uintl default_value, double factor); \ template Array<short > padArray<SRC_T, short >(Array<SRC_T> const &src, dim4 const &dims, short default_value, double factor); \ template Array<ushort > padArray<SRC_T, ushort >(Array<SRC_T> const &src, dim4 const &dims, ushort default_value, double factor); \ template Array<uchar > padArray<SRC_T, uchar >(Array<SRC_T> const &src, dim4 const &dims, uchar default_value, double factor); \ template Array<char > padArray<SRC_T, char >(Array<SRC_T> const &src, dim4 const &dims, char default_value, double factor); \ template void copyArray<SRC_T, float >(Array<float > &dst, Array<SRC_T> const &src); \ template void copyArray<SRC_T, double >(Array<double > &dst, Array<SRC_T> const &src); \ template void copyArray<SRC_T, cfloat >(Array<cfloat > &dst, Array<SRC_T> const &src); \ template void copyArray<SRC_T, cdouble>(Array<cdouble> &dst, Array<SRC_T> const &src); \ template void copyArray<SRC_T, int >(Array<int > &dst, Array<SRC_T> const &src); \ template void copyArray<SRC_T, uint >(Array<uint > &dst, Array<SRC_T> const &src); \ template void copyArray<SRC_T, intl >(Array<intl > &dst, Array<SRC_T> const &src); \ template void copyArray<SRC_T, uintl >(Array<uintl > &dst, Array<SRC_T> const &src); \ template void copyArray<SRC_T, short >(Array<short > &dst, Array<SRC_T> const &src); \ template void copyArray<SRC_T, ushort >(Array<ushort > &dst, Array<SRC_T> const &src); \ template void copyArray<SRC_T, uchar >(Array<uchar > &dst, Array<SRC_T> const &src); \ template void copyArray<SRC_T, char >(Array<char > &dst, Array<SRC_T> const &src); INSTANTIATE_PAD_ARRAY(float ) INSTANTIATE_PAD_ARRAY(double) INSTANTIATE_PAD_ARRAY(int ) INSTANTIATE_PAD_ARRAY(uint ) INSTANTIATE_PAD_ARRAY(intl ) INSTANTIATE_PAD_ARRAY(uintl ) INSTANTIATE_PAD_ARRAY(short ) INSTANTIATE_PAD_ARRAY(ushort) INSTANTIATE_PAD_ARRAY(uchar ) INSTANTIATE_PAD_ARRAY(char ) #define INSTANTIATE_PAD_ARRAY_COMPLEX(SRC_T) \ template Array<cfloat > padArray<SRC_T, cfloat >(Array<SRC_T> const &src, dim4 const &dims, cfloat default_value, double factor); \ template Array<cdouble> padArray<SRC_T, cdouble>(Array<SRC_T> const &src, dim4 const &dims, cdouble default_value, double factor); \ template void copyArray<SRC_T, cfloat >(Array<cfloat > &dst, Array<SRC_T> const &src); \ template void copyArray<SRC_T, cdouble >(Array<cdouble > &dst, Array<SRC_T> const &src); INSTANTIATE_PAD_ARRAY_COMPLEX(cfloat ) INSTANTIATE_PAD_ARRAY_COMPLEX(cdouble) template<typename T> T getScalar(const Array<T> &in) { T retVal; CUDA_CHECK(hipMemcpyAsync(&retVal, in.get(), sizeof(T), 
hipMemcpyDeviceToHost, cuda::getActiveStream())); CUDA_CHECK(hipStreamSynchronize(cuda::getActiveStream())); return retVal; } #define INSTANTIATE_GETSCALAR(T) \ template T getScalar(const Array<T> &in); INSTANTIATE_GETSCALAR(float ) INSTANTIATE_GETSCALAR(double ) INSTANTIATE_GETSCALAR(cfloat ) INSTANTIATE_GETSCALAR(cdouble) INSTANTIATE_GETSCALAR(int ) INSTANTIATE_GETSCALAR(uint ) INSTANTIATE_GETSCALAR(uchar ) INSTANTIATE_GETSCALAR(char ) INSTANTIATE_GETSCALAR(intl ) INSTANTIATE_GETSCALAR(uintl ) INSTANTIATE_GETSCALAR(short ) INSTANTIATE_GETSCALAR(ushort ) }
da5c56485822c566a807178c38b456f18481edf3.cu
/******************************************************* * Copyright (c) 2014, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #include <cuda_runtime_api.h> #include <Array.hpp> #include <copy.hpp> #include <kernel/memcopy.hpp> #include <err_cuda.hpp> #include <math.hpp> #include <common/complex.hpp> using common::is_complex; namespace cuda { template<typename T> void copyData(T *dst, const Array<T> &src) { // FIXME: Merge this with copyArray src.eval(); Array<T> out = src; const T *ptr = NULL; if (src.isLinear() || // No offsets, No strides src.ndims() == 1 // Simple offset, no strides. ) { //A.get() gets data with offsets ptr = src.get(); } else { //FIXME: Think about implementing eval out = copyArray(src); ptr = out.get(); } auto stream = cuda::getActiveStream(); CUDA_CHECK(cudaMemcpyAsync(dst, ptr, src.elements() * sizeof(T), cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaStreamSynchronize(stream)); return; } template<typename T> Array<T> copyArray(const Array<T> &src) { Array<T> out = createEmptyArray<T>(src.dims()); if (src.isLinear()) { CUDA_CHECK(cudaMemcpyAsync(out.get(), src.get(), src.elements() * sizeof(T), cudaMemcpyDeviceToDevice, cuda::getActiveStream())); } else { // FIXME: Seems to fail when using Param<T> kernel::memcopy(out.get(), out.strides().get(), src.get(), src.dims().get(), src.strides().get(), (uint)src.ndims()); } return out; } template<typename inType, typename outType> Array<outType> padArray(Array<inType> const &in, dim4 const &dims, outType default_value, double factor) { ARG_ASSERT(1, (in.ndims() == (size_t)dims.ndims())); Array<outType> ret = createEmptyArray<outType>(dims); kernel::copy<inType, outType>(ret, in, in.ndims(), default_value, factor); return ret; } template<typename T> void multiply_inplace(Array<T> &in, double val) { kernel::copy<T, T>(in, in, in.ndims(), scalar<T>(0), val); } template<typename inType, typename outType> struct copyWrapper { void operator()(Array<outType> &out, Array<inType> const &in) { kernel::copy<inType, outType>(out, in, in.ndims(), scalar<outType>(0), 1); } }; template<typename T> struct copyWrapper<T, T> { void operator()(Array<T> &out, Array<T> const &in) { if (out.isLinear() && in.isLinear() && out.elements() == in.elements()) { CUDA_CHECK(cudaMemcpyAsync(out.get(), in.get(), in.elements() * sizeof(T), cudaMemcpyDeviceToDevice, cuda::getActiveStream())); } else { kernel::copy<T, T>(out, in, in.ndims(), scalar<T>(0), 1); } } }; template<typename inType, typename outType> void copyArray(Array<outType> &out, Array<inType> const &in) { static_assert(!(is_complex<inType>::value && !is_complex<outType>::value), "Cannot copy from complex value to a non complex value"); ARG_ASSERT(1, (in.ndims() == (size_t)out.dims().ndims())); copyWrapper<inType, outType> copyFn; copyFn(out, in); } #define INSTANTIATE(T) \ template void copyData<T> (T *dst, const Array<T> &src); \ template Array<T> copyArray<T>(const Array<T> &src); \ template void multiply_inplace<T> (Array<T> &in, double norm); \ INSTANTIATE(float ) INSTANTIATE(double ) INSTANTIATE(cfloat ) INSTANTIATE(cdouble) INSTANTIATE(int ) INSTANTIATE(uint ) INSTANTIATE(uchar ) INSTANTIATE(char ) INSTANTIATE(intl ) INSTANTIATE(uintl ) INSTANTIATE(short ) INSTANTIATE(ushort ) #define INSTANTIATE_PAD_ARRAY(SRC_T) \ template Array<float > padArray<SRC_T, float >(Array<SRC_T> const &src, 
dim4 const &dims, float default_value, double factor); \ template Array<double > padArray<SRC_T, double >(Array<SRC_T> const &src, dim4 const &dims, double default_value, double factor); \ template Array<cfloat > padArray<SRC_T, cfloat >(Array<SRC_T> const &src, dim4 const &dims, cfloat default_value, double factor); \ template Array<cdouble> padArray<SRC_T, cdouble>(Array<SRC_T> const &src, dim4 const &dims, cdouble default_value, double factor); \ template Array<int > padArray<SRC_T, int >(Array<SRC_T> const &src, dim4 const &dims, int default_value, double factor); \ template Array<uint > padArray<SRC_T, uint >(Array<SRC_T> const &src, dim4 const &dims, uint default_value, double factor); \ template Array<intl > padArray<SRC_T, intl >(Array<SRC_T> const &src, dim4 const &dims, intl default_value, double factor); \ template Array<uintl > padArray<SRC_T, uintl >(Array<SRC_T> const &src, dim4 const &dims, uintl default_value, double factor); \ template Array<short > padArray<SRC_T, short >(Array<SRC_T> const &src, dim4 const &dims, short default_value, double factor); \ template Array<ushort > padArray<SRC_T, ushort >(Array<SRC_T> const &src, dim4 const &dims, ushort default_value, double factor); \ template Array<uchar > padArray<SRC_T, uchar >(Array<SRC_T> const &src, dim4 const &dims, uchar default_value, double factor); \ template Array<char > padArray<SRC_T, char >(Array<SRC_T> const &src, dim4 const &dims, char default_value, double factor); \ template void copyArray<SRC_T, float >(Array<float > &dst, Array<SRC_T> const &src); \ template void copyArray<SRC_T, double >(Array<double > &dst, Array<SRC_T> const &src); \ template void copyArray<SRC_T, cfloat >(Array<cfloat > &dst, Array<SRC_T> const &src); \ template void copyArray<SRC_T, cdouble>(Array<cdouble> &dst, Array<SRC_T> const &src); \ template void copyArray<SRC_T, int >(Array<int > &dst, Array<SRC_T> const &src); \ template void copyArray<SRC_T, uint >(Array<uint > &dst, Array<SRC_T> const &src); \ template void copyArray<SRC_T, intl >(Array<intl > &dst, Array<SRC_T> const &src); \ template void copyArray<SRC_T, uintl >(Array<uintl > &dst, Array<SRC_T> const &src); \ template void copyArray<SRC_T, short >(Array<short > &dst, Array<SRC_T> const &src); \ template void copyArray<SRC_T, ushort >(Array<ushort > &dst, Array<SRC_T> const &src); \ template void copyArray<SRC_T, uchar >(Array<uchar > &dst, Array<SRC_T> const &src); \ template void copyArray<SRC_T, char >(Array<char > &dst, Array<SRC_T> const &src); INSTANTIATE_PAD_ARRAY(float ) INSTANTIATE_PAD_ARRAY(double) INSTANTIATE_PAD_ARRAY(int ) INSTANTIATE_PAD_ARRAY(uint ) INSTANTIATE_PAD_ARRAY(intl ) INSTANTIATE_PAD_ARRAY(uintl ) INSTANTIATE_PAD_ARRAY(short ) INSTANTIATE_PAD_ARRAY(ushort) INSTANTIATE_PAD_ARRAY(uchar ) INSTANTIATE_PAD_ARRAY(char ) #define INSTANTIATE_PAD_ARRAY_COMPLEX(SRC_T) \ template Array<cfloat > padArray<SRC_T, cfloat >(Array<SRC_T> const &src, dim4 const &dims, cfloat default_value, double factor); \ template Array<cdouble> padArray<SRC_T, cdouble>(Array<SRC_T> const &src, dim4 const &dims, cdouble default_value, double factor); \ template void copyArray<SRC_T, cfloat >(Array<cfloat > &dst, Array<SRC_T> const &src); \ template void copyArray<SRC_T, cdouble >(Array<cdouble > &dst, Array<SRC_T> const &src); INSTANTIATE_PAD_ARRAY_COMPLEX(cfloat ) INSTANTIATE_PAD_ARRAY_COMPLEX(cdouble) template<typename T> T getScalar(const Array<T> &in) { T retVal; CUDA_CHECK(cudaMemcpyAsync(&retVal, in.get(), sizeof(T), cudaMemcpyDeviceToHost, cuda::getActiveStream())); 
CUDA_CHECK(cudaStreamSynchronize(cuda::getActiveStream())); return retVal; } #define INSTANTIATE_GETSCALAR(T) \ template T getScalar(const Array<T> &in); INSTANTIATE_GETSCALAR(float ) INSTANTIATE_GETSCALAR(double ) INSTANTIATE_GETSCALAR(cfloat ) INSTANTIATE_GETSCALAR(cdouble) INSTANTIATE_GETSCALAR(int ) INSTANTIATE_GETSCALAR(uint ) INSTANTIATE_GETSCALAR(uchar ) INSTANTIATE_GETSCALAR(char ) INSTANTIATE_GETSCALAR(intl ) INSTANTIATE_GETSCALAR(uintl ) INSTANTIATE_GETSCALAR(short ) INSTANTIATE_GETSCALAR(ushort ) }
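A note on the copyData path in this pair: it copies device memory into (possibly pageable) host memory with cudaMemcpyAsync on the active stream and then blocks on cudaStreamSynchronize, so the host buffer is only safe to read after the sync. Below is a minimal stand-alone sketch of that async-copy-then-sync pattern; the buffer names and sizes are made up here and this is not the ArrayFire API itself.

#include <cstdio>
#include <cuda_runtime.h>

int main() {
  const int n = 16;
  float host[n];
  float* dev = nullptr;
  cudaStream_t stream;
  cudaStreamCreate(&stream);
  cudaMalloc(&dev, n * sizeof(float));
  cudaMemsetAsync(dev, 0, n * sizeof(float), stream);       // fill the device buffer
  // The device-to-host copy is queued on the stream and may still be in
  // flight when the call returns, so do not touch `host` yet.
  cudaMemcpyAsync(host, dev, n * sizeof(float), cudaMemcpyDeviceToHost, stream);
  cudaStreamSynchronize(stream);                             // now `host` is valid
  printf("host[0] = %f\n", host[0]);
  cudaFree(dev);
  cudaStreamDestroy(stream);
  return 0;
}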
11455317de0ecea357d8d729c23cade81d9f3b10.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

__host__ void cpuFunction(){}
__device__ void gpuFunction(){}

__global__ void fd(float *u0, float *u1, float *u2, int nx)
{
    int ix = threadIdx.x + blockIdx.x*blockDim.x;
    if (ix > 0 && ix < nx-1)
        u2[ix] = u1[ix+1]+u1[ix-1]-u0[ix];
}

__global__ void update(float *u0, float *u1, float *u2)
{
    int ix = threadIdx.x + blockIdx.x*blockDim.x;
    u0[ix] = u1[ix];
    u1[ix] = u2[ix];
}

int main()
{
    int ix, it, nx = 100, nt = 100;
    size_t size = nx*sizeof(float);
    float xmax = 1.0, dx = xmax/(nx+1);
    float *hU0, *hU1;
    float *u0, *u1, *u2, c = 0.1, dt = dx/c, x, a = 1000;

    hU0 = (float*) malloc(size);
    hU1 = (float*) malloc(size);
    hipMalloc((void **)&u0, size);
    hipMalloc((void **)&u1, size);
    hipMalloc((void **)&u2, size);

    for (ix=0; ix<nx; ix++) {
        x = ix*dx;
        hU0[ix] = exp(-a*pow(x-0.5*xmax,2.0));
        hU1[ix] = exp(-a*pow(x-0.5*xmax-c*dt,2.0));
    }

    hipMemcpy(u0, hU0, size, hipMemcpyHostToDevice);
    hipMemcpy(u1, hU1, size, hipMemcpyHostToDevice);

    for (it=0; it<nt; it++) {
        hipLaunchKernelGGL(( fd), dim3(1),dim3(nx), 0, 0, u0, u1, u2, nx);
        hipLaunchKernelGGL(( update), dim3(1),dim3(nx), 0, 0, u0, u1, u2);
    }

    hipMemcpy(hU0, u2, size, hipMemcpyDeviceToHost);

    FILE *file = fopen("u.dat","w");
    fwrite(hU0, sizeof(float), nx, file);
    fclose(file);

    return 0;
}
11455317de0ecea357d8d729c23cade81d9f3b10.cu
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

__host__ void cpuFunction(){}
__device__ void gpuFunction(){}

__global__ void fd(float *u0, float *u1, float *u2, int nx)
{
    int ix = threadIdx.x + blockIdx.x*blockDim.x;
    if (ix > 0 && ix < nx-1)
        u2[ix] = u1[ix+1]+u1[ix-1]-u0[ix];
}

__global__ void update(float *u0, float *u1, float *u2)
{
    int ix = threadIdx.x + blockIdx.x*blockDim.x;
    u0[ix] = u1[ix];
    u1[ix] = u2[ix];
}

int main()
{
    int ix, it, nx = 100, nt = 100;
    size_t size = nx*sizeof(float);
    float xmax = 1.0, dx = xmax/(nx+1);
    float *hU0, *hU1;
    float *u0, *u1, *u2, c = 0.1, dt = dx/c, x, a = 1000;

    hU0 = (float*) malloc(size);
    hU1 = (float*) malloc(size);
    cudaMalloc((void **)&u0, size);
    cudaMalloc((void **)&u1, size);
    cudaMalloc((void **)&u2, size);

    for (ix=0; ix<nx; ix++) {
        x = ix*dx;
        hU0[ix] = exp(-a*pow(x-0.5*xmax,2.0));
        hU1[ix] = exp(-a*pow(x-0.5*xmax-c*dt,2.0));
    }

    cudaMemcpy(u0, hU0, size, cudaMemcpyHostToDevice);
    cudaMemcpy(u1, hU1, size, cudaMemcpyHostToDevice);

    for (it=0; it<nt; it++) {
        fd<<<1,nx>>>(u0, u1, u2, nx);
        update<<<1,nx>>>(u0, u1, u2);
    }

    cudaMemcpy(hU0, u2, size, cudaMemcpyDeviceToHost);

    FILE *file = fopen("u.dat","w");
    fwrite(hU0, sizeof(float), nx, file);
    fclose(file);

    return 0;
}
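The only substantive difference between this .cu file and its .hip counterpart above is mechanical: hipify rewrites each triple-chevron launch such as fd<<<1,nx>>>(...) into hipLaunchKernelGGL(( fd), dim3(1), dim3(nx), 0, 0, ...), where the two extra 0 arguments are the dynamic shared-memory size and the stream that the chevron form leaves implicit. The small CUDA illustration below (a trivial made-up kernel named scale) shows the same launch written compactly and with those arguments spelled out.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale(float* v, float s, int n) {
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i < n) v[i] *= s;
}

int main() {
  const int n = 128;
  float* v;
  cudaMalloc(&v, n * sizeof(float));
  cudaMemset(v, 0, n * sizeof(float));
  scale<<<1, n>>>(v, 2.0f, n);                   // compact form: shared mem and stream implicit
  scale<<<dim3(1), dim3(n), 0, 0>>>(v, 2.0f, n); // explicit form hipify's output mirrors:
                                                 // 0 bytes dynamic shared memory, default stream
  cudaDeviceSynchronize();
  printf("done\n");
  cudaFree(v);
  return 0;
}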
2756938a369675764c99fecb6a44d5ba56ab5acd.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <cstdlib> #include <functional> #include <cmath> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <learning_cuda/sparse/norm/norm.cuh> template <typename T> csr_matrix_t<T>* _random_device_csr(int m, int n, float density) { csr_matrix_t<T> *csr_d; csr_d = (csr_matrix_t<T> *) malloc(sizeof(csr_matrix_t<T>)); csr_d->m = m; csr_d->n = n; thrust::host_vector<T> data_h, indices_h, indptr_h; indptr_h.push_back(0); int nnz = 0; for(int i = 0; i < m; i++) { for(int j = 0; j < n; j++) { if (( (float) std::rand() / RAND_MAX) < density) { T element = std::rand() % 25; data_h.push_back(element); indices_h.push_back(j); // std::cout << element << " "; nnz++; } else { // std::cout << 0 << " "; } } indptr_h.push_back(nnz); // std::cout << std::endl; } csr_d->nnz = nnz; csr_d->data.resize(nnz); csr_d->indices.resize(nnz); csr_d->indptr.resize(m + 1); thrust::copy(data_h.begin(), data_h.end(), csr_d->data.begin()); thrust::copy(indices_h.begin(), indices_h.end(), csr_d->indices.begin()); thrust::copy(indptr_h.begin(), indptr_h.end(), csr_d->indptr.begin()); return csr_d; } int main() { csr_matrix_t<int> *csr_d = _random_device_csr<int>(2000, 500000, 0.1); // std::cout << "CSR data: "; // thrust::copy(csr_d->data.begin(), csr_d->data.end(), std::ostream_iterator<int>(std::cout, " ")); // std::cout << std::endl; // std::cout << "CSR indices: "; // thrust::copy(csr_d-> indices.begin(), csr_d->indices.end(), std::ostream_iterator<int>(std::cout, " ")); // std::cout << std::endl; // std::cout << "CSR indptr: "; // thrust::copy(csr_d->indptr.begin(), csr_d->indptr.end(), std::ostream_iterator<int>(std::cout, " ")); // std::cout << std::endl; hipDeviceSynchronize(); naive::norm<int, 32>(*csr_d, L1Op<int>()); hipDeviceSynchronize(); warp::norm<int, 32>(*csr_d, L1Op<int>()); delete csr_d; return 0; }
2756938a369675764c99fecb6a44d5ba56ab5acd.cu
#include <iostream> #include <cstdlib> #include <functional> #include <cmath> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <learning_cuda/sparse/norm/norm.cuh> template <typename T> csr_matrix_t<T>* _random_device_csr(int m, int n, float density) { csr_matrix_t<T> *csr_d; csr_d = (csr_matrix_t<T> *) malloc(sizeof(csr_matrix_t<T>)); csr_d->m = m; csr_d->n = n; thrust::host_vector<T> data_h, indices_h, indptr_h; indptr_h.push_back(0); int nnz = 0; for(int i = 0; i < m; i++) { for(int j = 0; j < n; j++) { if (( (float) std::rand() / RAND_MAX) < density) { T element = std::rand() % 25; data_h.push_back(element); indices_h.push_back(j); // std::cout << element << " "; nnz++; } else { // std::cout << 0 << " "; } } indptr_h.push_back(nnz); // std::cout << std::endl; } csr_d->nnz = nnz; csr_d->data.resize(nnz); csr_d->indices.resize(nnz); csr_d->indptr.resize(m + 1); thrust::copy(data_h.begin(), data_h.end(), csr_d->data.begin()); thrust::copy(indices_h.begin(), indices_h.end(), csr_d->indices.begin()); thrust::copy(indptr_h.begin(), indptr_h.end(), csr_d->indptr.begin()); return csr_d; } int main() { csr_matrix_t<int> *csr_d = _random_device_csr<int>(2000, 500000, 0.1); // std::cout << "CSR data: "; // thrust::copy(csr_d->data.begin(), csr_d->data.end(), std::ostream_iterator<int>(std::cout, " ")); // std::cout << std::endl; // std::cout << "CSR indices: "; // thrust::copy(csr_d-> indices.begin(), csr_d->indices.end(), std::ostream_iterator<int>(std::cout, " ")); // std::cout << std::endl; // std::cout << "CSR indptr: "; // thrust::copy(csr_d->indptr.begin(), csr_d->indptr.end(), std::ostream_iterator<int>(std::cout, " ")); // std::cout << std::endl; cudaDeviceSynchronize(); naive::norm<int, 32>(*csr_d, L1Op<int>()); cudaDeviceSynchronize(); warp::norm<int, 32>(*csr_d, L1Op<int>()); delete csr_d; return 0; }
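For context on the norm calls benchmarked in this pair: a CSR matrix stores, for each row i, the half-open slice indptr[i]..indptr[i+1] of the data array, so a row norm is just a reduction over that slice. The naive and warp variants from learning_cuda presumably differ in how that reduction is parallelized; their internals are not shown here, so the following is only a minimal one-thread-per-row L1 row-norm kernel over a hard-coded CSR matrix, not the actual naive::norm or warp::norm implementation.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// One thread per row: sum |data[j]| for j in [indptr[row], indptr[row + 1]).
__global__ void csr_row_l1(const int* data, const int* indptr, int n_rows, int* out) {
  int row = threadIdx.x + blockIdx.x * blockDim.x;
  if (row >= n_rows) return;
  int acc = 0;
  for (int j = indptr[row]; j < indptr[row + 1]; ++j) acc += abs(data[j]);
  out[row] = acc;
}

int main() {
  // 2x3 matrix [[1, 0, -2], [0, 3, 0]] in CSR form (column indices omitted; not needed for norms).
  const int data_h[3]   = {1, -2, 3};
  const int indptr_h[3] = {0, 2, 3};
  int *data, *indptr, *out, out_h[2];
  cudaMalloc(&data, sizeof(data_h));     cudaMemcpy(data, data_h, sizeof(data_h), cudaMemcpyHostToDevice);
  cudaMalloc(&indptr, sizeof(indptr_h)); cudaMemcpy(indptr, indptr_h, sizeof(indptr_h), cudaMemcpyHostToDevice);
  cudaMalloc(&out, 2 * sizeof(int));
  csr_row_l1<<<1, 32>>>(data, indptr, 2, out);
  cudaMemcpy(out_h, out, 2 * sizeof(int), cudaMemcpyDeviceToHost);
  printf("row L1 norms: %d %d\n", out_h[0], out_h[1]);   // expect 3 and 3
  return 0;
}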
d020d951a86b2e0700fd9ca39a646e0452ef3aee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <math.h> #include <stdio.h> #define TYPE1 double #define TYPE2 float #define TYPE3 float #define TYPE4 double #define SPEED 0 double fun_ref( double x){ int k, n = 5; double t1; double d1 = 1.0; t1 = x; for ( k = 1; k <= n; k++ ) { d1 = 2.0 * d1; t1 = t1+ sin(d1 * x)/d1; } return t1; } __global__ void fun_gpu(double x[], double y[], int nthreads, int speed){ //y = fun(x) //speed = % int tid = blockDim.x * blockIdx.x + threadIdx.x; if(blockIdx.x % 10< speed){ int k, n = 5; if (tid < nthreads) { double t1; float d1 = 1.0; float x_temp = x[tid]; t1 = x[tid]; for ( k = 1; k <= n; k++ ) { d1 = 2.0 * d1; double sin_res = sin(d1 * x_temp); t1 = t1 + sin_res/d1; } y[tid] = t1; } } else{ int k, n = 5; if (tid < nthreads) { double t1; double d1 = 1.0; double x_temp = x[tid]; t1 = x[tid]; for ( k = 1; k <= n; k++ ) { d1 = 2.0 * d1; double sin_res = sin(d1 * x_temp); t1 = t1 + sin_res/d1; } y[tid] = t1; } } } int main( int argc, char **argv) { int i,n = 1000000; double h, t1, t2, dppi; double s1; //cuda def hipEvent_t start, stop; float elapsedTime; int speed = atoi(argv[1]); printf("running with speed %d \n", speed); double *d_x, *d_y, *h_x, *h_y ; size_t size = n*sizeof(double); h_x = (double*) malloc(size); h_y = (double*) malloc(size); hipMalloc(&d_x, size); hipMalloc(&d_y, size); t1 = -1.0; dppi = acos(t1); s1 = 0.0; t1 = 0.0; h = dppi / n; for ( i = 1; i <= n; i++){ h_x[i-1] = i * h; } /* Copy vectors from host memory to device memory */ hipMemcpy(d_x, h_x, size, hipMemcpyHostToDevice); int threads_per_block = 256; int block_count = (n + threads_per_block - 1)/threads_per_block; hipEventCreate(&start); hipEventRecord(start,0); for (int i =0;i < 10; i ++) hipLaunchKernelGGL(( fun_gpu), dim3(block_count), dim3(threads_per_block), 0, 0, d_x, d_y, n,speed); hipDeviceSynchronize(); hipEventCreate(&stop); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start,stop); printf("Elapsed time : %f ms\n" ,elapsedTime); hipMemcpy(h_y, d_y, size, hipMemcpyDeviceToHost); for ( i = 1; i <= n; i++) { t2 = h_y[i-1]; s1 = s1 + sqrt(h*h + (t2 - t1) * (t2 - t1)); t1 = t2; } double ref_value = 5.7957763224; printf("%.10f\n",s1); printf("abs err %.8f rel err %.8f\n", fabs(s1-ref_value), fabs((s1-ref_value)/ref_value) ); return 0; }
d020d951a86b2e0700fd9ca39a646e0452ef3aee.cu
#include <iostream> #include <math.h> #include <stdio.h> #define TYPE1 double #define TYPE2 float #define TYPE3 float #define TYPE4 double #define SPEED 0 double fun_ref( double x){ int k, n = 5; double t1; double d1 = 1.0; t1 = x; for ( k = 1; k <= n; k++ ) { d1 = 2.0 * d1; t1 = t1+ sin(d1 * x)/d1; } return t1; } __global__ void fun_gpu(double x[], double y[], int nthreads, int speed){ //y = fun(x) //speed = % int tid = blockDim.x * blockIdx.x + threadIdx.x; if(blockIdx.x % 10< speed){ int k, n = 5; if (tid < nthreads) { double t1; float d1 = 1.0; float x_temp = x[tid]; t1 = x[tid]; for ( k = 1; k <= n; k++ ) { d1 = 2.0 * d1; double sin_res = sin(d1 * x_temp); t1 = t1 + sin_res/d1; } y[tid] = t1; } } else{ int k, n = 5; if (tid < nthreads) { double t1; double d1 = 1.0; double x_temp = x[tid]; t1 = x[tid]; for ( k = 1; k <= n; k++ ) { d1 = 2.0 * d1; double sin_res = sin(d1 * x_temp); t1 = t1 + sin_res/d1; } y[tid] = t1; } } } int main( int argc, char **argv) { int i,n = 1000000; double h, t1, t2, dppi; double s1; //cuda def cudaEvent_t start, stop; float elapsedTime; int speed = atoi(argv[1]); printf("running with speed %d \n", speed); double *d_x, *d_y, *h_x, *h_y ; size_t size = n*sizeof(double); h_x = (double*) malloc(size); h_y = (double*) malloc(size); cudaMalloc(&d_x, size); cudaMalloc(&d_y, size); t1 = -1.0; dppi = acos(t1); s1 = 0.0; t1 = 0.0; h = dppi / n; for ( i = 1; i <= n; i++){ h_x[i-1] = i * h; } /* Copy vectors from host memory to device memory */ cudaMemcpy(d_x, h_x, size, cudaMemcpyHostToDevice); int threads_per_block = 256; int block_count = (n + threads_per_block - 1)/threads_per_block; cudaEventCreate(&start); cudaEventRecord(start,0); for (int i =0;i < 10; i ++) fun_gpu<<<block_count, threads_per_block>>>(d_x, d_y, n,speed); cudaDeviceSynchronize(); cudaEventCreate(&stop); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start,stop); printf("Elapsed time : %f ms\n" ,elapsedTime); cudaMemcpy(h_y, d_y, size, cudaMemcpyDeviceToHost); for ( i = 1; i <= n; i++) { t2 = h_y[i-1]; s1 = s1 + sqrt(h*h + (t2 - t1) * (t2 - t1)); t1 = t2; } double ref_value = 5.7957763224; printf("%.10f\n",s1); printf("abs err %.8f rel err %.8f\n", fabs(s1-ref_value), fabs((s1-ref_value)/ref_value) ); return 0; }
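The timing scaffolding in this pair (hipEvent_t in the .hip file, cudaEvent_t in the .cu file) follows the standard event pattern: create two events, record one before and one after the launches, synchronize on the stop event, then read the elapsed GPU time in milliseconds with cudaEventElapsedTime. A stripped-down sketch of just that pattern, timing a placeholder kernel named busy (not the fun_gpu kernel above):

#include <cstdio>
#include <cuda_runtime.h>

__global__ void busy(float* x, int n) {
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i < n) x[i] = x[i] * 2.0f + 1.0f;
}

int main() {
  const int n = 1 << 20;
  float* x;
  cudaMalloc(&x, n * sizeof(float));
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start, 0);                 // enqueue start marker on stream 0
  busy<<<(n + 255) / 256, 256>>>(x, n);
  cudaEventRecord(stop, 0);                  // enqueue stop marker after the kernel
  cudaEventSynchronize(stop);                // wait until the stop marker is reached
  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);    // elapsed GPU time in milliseconds
  printf("kernel took %f ms\n", ms);
  cudaFree(x);
  return 0;
}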
a8f1474051a797a373222ef34c69098b6d3a09a4.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "kernelGetOmega.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int N = 1; double *omega = NULL; hipMalloc(&omega, XSIZE*YSIZE); double *kSqr = NULL; hipMalloc(&kSqr, XSIZE*YSIZE); const double sigma2 = 1; const double sigma4 = 1; const double lambda = 1; const double g = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( kernelGetOmega), dim3(gridBlock),dim3(threadBlock), 0, 0, N,omega,kSqr,sigma2,sigma4,lambda,g); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( kernelGetOmega), dim3(gridBlock),dim3(threadBlock), 0, 0, N,omega,kSqr,sigma2,sigma4,lambda,g); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( kernelGetOmega), dim3(gridBlock),dim3(threadBlock), 0, 0, N,omega,kSqr,sigma2,sigma4,lambda,g); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
a8f1474051a797a373222ef34c69098b6d3a09a4.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "kernelGetOmega.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int N = 1; double *omega = NULL; cudaMalloc(&omega, XSIZE*YSIZE); double *kSqr = NULL; cudaMalloc(&kSqr, XSIZE*YSIZE); const double sigma2 = 1; const double sigma4 = 1; const double lambda = 1; const double g = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); kernelGetOmega<<<gridBlock,threadBlock>>>(N,omega,kSqr,sigma2,sigma4,lambda,g); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { kernelGetOmega<<<gridBlock,threadBlock>>>(N,omega,kSqr,sigma2,sigma4,lambda,g); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { kernelGetOmega<<<gridBlock,threadBlock>>>(N,omega,kSqr,sigma2,sigma4,lambda,g); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
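One detail of the benchmark harness above worth noting: the two while loops that grow iXSIZE and iYSIZE until they are divisible by BLOCKX and BLOCKY are a roundabout ceiling division, so the grid covers the whole matrix with the last block partially out of range. The usual compact form (host-side only, same resulting grid; this sketch is not taken from the file above) is:

#include <cuda_runtime.h>

static inline unsigned int ceil_div(unsigned int a, unsigned int b) {
  return (a + b - 1) / b;   // number of b-sized blocks needed to cover a
}

int main() {
  const unsigned int XSIZE = 1016, YSIZE = 1016, BLOCKX = 24, BLOCKY = 24;
  dim3 threadBlock(BLOCKX, BLOCKY);
  dim3 gridBlock(ceil_div(XSIZE, BLOCKX), ceil_div(YSIZE, BLOCKY));
  (void)threadBlock; (void)gridBlock;         // these would be passed to the kernel launch
  return 0;
}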
6e811920b4c61e4bba50083baa72449736c3b84c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdbool.h> #include <sys/time.h> #include <iostream> #include <stdio.h> #define LEVEL 100 #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> __device__ void printSolution(int *color , int *graph, int V) { printf("Solution Exists:" " Following are the assigned colors \n"); for (int i = 0; i < V; i++) printf(" %d ", color[i]); printf("\n"); } /* Function to check if the color can be safely assigned */ __device__ bool isSafe (int v, int *graph, int *color, int c, int V) { for (int i = 0; i < V; i++) if (graph[v*V + i] == 1 && c == color[i]) return false; return true; } // __device__ // void graphColoringUtil(int graph[][100], int m, int color[], int v) { // if(found==false) { // if (v == V) { // printSolution(color,graph); // return; // } // for (int c = 1; c <= m; c++) { // /* Check if assignment of color c to v is fine*/ // color[v] = c; // if (isSafe(v, graph, color, c)) { // graphColoringUtil (graph, m, color, v+1); // } // } // return; // } // } __global__ void graphColoringUtilParallel(int *graph, int *m, int *color, int v, bool *found, int *V, int *temp, bool *flag, hiprandState_t state, unsigned int seed) { if (*flag) { // hiprandState_t state; /* we have to initialize the state */ hiprand_init(seed, /* the seed controls the sequence of random values that are produced */ 0, /* the sequence number is only important with multiple cores */ 0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */ &state); *flag = false; } if(*found==false) { // for (int i = 1; i <= *m; ++i) { // while (1) { if (v == *V) { printSolution(color,graph, *V); *found = true; return; } color[v] = hiprand(&state)%(*m) + 1; // color[v] = (*temp)%(*m) + 1; if (isSafe(v, graph, color, color[v], *V)) { if (v < LEVEL) { int *tempColors = new int[100]; // hipMallocManaged(&color, (*V)*sizeof(int)); for (int j = 0; j <= v; ++j) { tempColors[j] = color[j]; } hipLaunchKernelGGL(( graphColoringUtilParallel), dim3(1),dim3(4), 0, 0, graph, m, tempColors, v+1, found, V, temp, flag, state, seed); // hipDeviceSynchronize(); delete [] tempColors; // #pragma omp task firstprivate(v) // { // int id = omp_get_thread_num(); // printf("Thread assigned %d\n",id ); // graphColoringUtilParallel(graph, m, tempColors, v+1); // generate task of serial function // graphColoringUtilParallel<<<1,8>>>(graph, m, tempColors, *v+1, found, V); // } } // else{ // #pragma omp taskwait // graphColoringUtil(graph, m, color, v+1); // } } // } for (int i = 0; i < *V; i++) printf("thread id %d %d ", threadIdx.x, color[i] ); printf("\n"); return; } } void graphColoring(int *graph, int *m, int *V, bool *found) { // Initialize all color values as 0. 
int *color, *temp;//, *start; // = new int[V]; hipMallocManaged(&color, (*V)*sizeof(int)); hipMallocManaged(&temp, sizeof(int)); // hipMallocManaged(&start, sizeof(int)); // *start = 0; for (int i = 0; i < *V; i++) color[i] = 0; // #pragma omp parallel shared(found) // { // #pragma omp single // { // graphColoringUtilParallel(graph, m, color, 0 ); // } // } bool *flag; hipMallocManaged(&flag, sizeof(bool)); *flag = true; hiprandState_t state; hipLaunchKernelGGL(( graphColoringUtilParallel), dim3(1),dim3(1), 0, 0, graph, m, color, 0, found, V, temp, flag, state, time(NULL)); hipDeviceSynchronize(); hipFree(color); hipFree(temp); } int main() { srand(time(NULL)); int _vertices, _colors; std::cout << "Enter number of vertices: "; std::cin >> _vertices; std::cout << "Enter number of colours: "; std::cin >> _colors; struct timeval TimeValue_Start; struct timezone TimeZone_Start; struct timeval TimeValue_Final; struct timezone TimeZone_Final; long time_start, time_end; double time_overhead; // Number of vertices, colors int *V, *m, *graph; bool *found; hipMallocManaged(&V, sizeof(int)); hipMallocManaged(&m, sizeof(int)); hipMallocManaged(&graph, ((_vertices*2) + 1)*sizeof(int)); hipMallocManaged(&found, sizeof(bool)); *V = _vertices; *m = _colors; *found = false; /* Example Graph (3)---(2) | / | | / | | / | (0)---(1) {{0, 1, 1, 1}, {1, 0, 1, 0}, {1, 1, 0, 1}, {1, 0, 1, 0}, }; */ for(int i=0;i<_vertices;i++) { for (int j=0;j<_vertices;j++) { if(i==j) graph[i*_vertices + j]=0; else { graph[i*_vertices + j] = rand()%2; graph[j*_vertices + i] = graph[i*_vertices + j]; } } } printf("Adjacency Matrix\n"); for(int i=0;i<_vertices;i++) { for (int j=0;j<_vertices;j++) printf("%d ", graph[i*_vertices + j]); printf("\n"); } gettimeofday(&TimeValue_Start, &TimeZone_Start); graphColoring (graph, m, V, found); if(*found==false) printf("No solution exists\n"); gettimeofday(&TimeValue_Final, &TimeZone_Final); time_start = TimeValue_Start.tv_sec * 1000000 + TimeValue_Start.tv_usec; time_end = TimeValue_Final.tv_sec * 1000000 + TimeValue_Final.tv_usec; time_overhead = (time_end - time_start)/1000000.0; printf("\n Time in Seconds (T) : %lf",time_overhead); hipFree(V); hipFree(m); hipFree(found); hipFree(graph); return 0; }
6e811920b4c61e4bba50083baa72449736c3b84c.cu
#include <stdlib.h> #include <stdbool.h> #include <sys/time.h> #include <iostream> #include <stdio.h> #define LEVEL 100 #include <curand.h> #include <curand_kernel.h> __device__ void printSolution(int *color , int *graph, int V) { printf("Solution Exists:" " Following are the assigned colors \n"); for (int i = 0; i < V; i++) printf(" %d ", color[i]); printf("\n"); } /* Function to check if the color can be safely assigned */ __device__ bool isSafe (int v, int *graph, int *color, int c, int V) { for (int i = 0; i < V; i++) if (graph[v*V + i] == 1 && c == color[i]) return false; return true; } // __device__ // void graphColoringUtil(int graph[][100], int m, int color[], int v) { // if(found==false) { // if (v == V) { // printSolution(color,graph); // return; // } // for (int c = 1; c <= m; c++) { // /* Check if assignment of color c to v is fine*/ // color[v] = c; // if (isSafe(v, graph, color, c)) { // graphColoringUtil (graph, m, color, v+1); // } // } // return; // } // } __global__ void graphColoringUtilParallel(int *graph, int *m, int *color, int v, bool *found, int *V, int *temp, bool *flag, curandState_t state, unsigned int seed) { if (*flag) { // curandState_t state; /* we have to initialize the state */ curand_init(seed, /* the seed controls the sequence of random values that are produced */ 0, /* the sequence number is only important with multiple cores */ 0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */ &state); *flag = false; } if(*found==false) { // for (int i = 1; i <= *m; ++i) { // while (1) { if (v == *V) { printSolution(color,graph, *V); *found = true; return; } color[v] = curand(&state)%(*m) + 1; // color[v] = (*temp)%(*m) + 1; if (isSafe(v, graph, color, color[v], *V)) { if (v < LEVEL) { int *tempColors = new int[100]; // cudaMallocManaged(&color, (*V)*sizeof(int)); for (int j = 0; j <= v; ++j) { tempColors[j] = color[j]; } graphColoringUtilParallel<<<1,4>>>(graph, m, tempColors, v+1, found, V, temp, flag, state, seed); // cudaDeviceSynchronize(); delete [] tempColors; // #pragma omp task firstprivate(v) // { // int id = omp_get_thread_num(); // printf("Thread assigned %d\n",id ); // graphColoringUtilParallel(graph, m, tempColors, v+1); // generate task of serial function // graphColoringUtilParallel<<<1,8>>>(graph, m, tempColors, *v+1, found, V); // } } // else{ // #pragma omp taskwait // graphColoringUtil(graph, m, color, v+1); // } } // } for (int i = 0; i < *V; i++) printf("thread id %d %d ", threadIdx.x, color[i] ); printf("\n"); return; } } void graphColoring(int *graph, int *m, int *V, bool *found) { // Initialize all color values as 0. 
int *color, *temp;//, *start; // = new int[V]; cudaMallocManaged(&color, (*V)*sizeof(int)); cudaMallocManaged(&temp, sizeof(int)); // cudaMallocManaged(&start, sizeof(int)); // *start = 0; for (int i = 0; i < *V; i++) color[i] = 0; // #pragma omp parallel shared(found) // { // #pragma omp single // { // graphColoringUtilParallel(graph, m, color, 0 ); // } // } bool *flag; cudaMallocManaged(&flag, sizeof(bool)); *flag = true; curandState_t state; graphColoringUtilParallel<<<1,1>>>(graph, m, color, 0, found, V, temp, flag, state, time(NULL)); cudaDeviceSynchronize(); cudaFree(color); cudaFree(temp); } int main() { srand(time(NULL)); int _vertices, _colors; std::cout << "Enter number of vertices: "; std::cin >> _vertices; std::cout << "Enter number of colours: "; std::cin >> _colors; struct timeval TimeValue_Start; struct timezone TimeZone_Start; struct timeval TimeValue_Final; struct timezone TimeZone_Final; long time_start, time_end; double time_overhead; // Number of vertices, colors int *V, *m, *graph; bool *found; cudaMallocManaged(&V, sizeof(int)); cudaMallocManaged(&m, sizeof(int)); cudaMallocManaged(&graph, ((_vertices*2) + 1)*sizeof(int)); cudaMallocManaged(&found, sizeof(bool)); *V = _vertices; *m = _colors; *found = false; /* Example Graph (3)---(2) | / | | / | | / | (0)---(1) {{0, 1, 1, 1}, {1, 0, 1, 0}, {1, 1, 0, 1}, {1, 0, 1, 0}, }; */ for(int i=0;i<_vertices;i++) { for (int j=0;j<_vertices;j++) { if(i==j) graph[i*_vertices + j]=0; else { graph[i*_vertices + j] = rand()%2; graph[j*_vertices + i] = graph[i*_vertices + j]; } } } printf("Adjacency Matrix\n"); for(int i=0;i<_vertices;i++) { for (int j=0;j<_vertices;j++) printf("%d ", graph[i*_vertices + j]); printf("\n"); } gettimeofday(&TimeValue_Start, &TimeZone_Start); graphColoring (graph, m, V, found); if(*found==false) printf("No solution exists\n"); gettimeofday(&TimeValue_Final, &TimeZone_Final); time_start = TimeValue_Start.tv_sec * 1000000 + TimeValue_Start.tv_usec; time_end = TimeValue_Final.tv_sec * 1000000 + TimeValue_Final.tv_usec; time_overhead = (time_end - time_start)/1000000.0; printf("\n Time in Seconds (T) : %lf",time_overhead); cudaFree(V); cudaFree(m); cudaFree(found); cudaFree(graph); return 0; }
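Two things in this pair are easy to miss. First, the hiprand-to-curand mapping is direct: hiprand_init and hiprand mirror curand_init and curand with the same argument order (seed, subsequence, offset, state). Second, graphColoringUtilParallel launches itself from device code, which is CUDA dynamic parallelism and therefore needs relocatable device code (for example nvcc -rdc=true -lcudadevrt) on a device of compute capability 3.5 or newer; that is an assumption about the build, since no build line is shown in the file. Below is a minimal stand-alone sketch of just the per-thread curand usage (draw a color in 1..m), separate from the backtracking logic; the kernel name and sizes are made up.

#include <cstdio>
#include <cuda_runtime.h>
#include <curand_kernel.h>

__global__ void draw_colors(unsigned long long seed, int m, int* colors, int n) {
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i >= n) return;
  curandState_t state;
  curand_init(seed, /*subsequence=*/i, /*offset=*/0, &state);  // one RNG stream per thread
  colors[i] = curand(&state) % m + 1;                          // color in [1, m]
}

int main() {
  const int n = 8, m = 3;
  int *colors, colors_h[n];
  cudaMalloc(&colors, n * sizeof(int));
  draw_colors<<<1, n>>>(1234ULL, m, colors, n);
  cudaMemcpy(colors_h, colors, n * sizeof(int), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i) printf("%d ", colors_h[i]);
  printf("\n");
  cudaFree(colors);
  return 0;
}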
66a9a5fa0ec1d66b09ff7b5c8d959215e9a58a47.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /// stuff happening // nvall -o share mdCudaShared.cu -g -G -lrt -lm #include <stdio.h> #include <math.h> #include <time.h> #include <stdlib.h> #include <errno.h> #include <string.h> #define LINUX 1 // is this on a linux machine?? #define NEAREST 0 // Are we going to use nearest algorithm #define OPT 0 // N^2 or optimized code?? #define EPS 1 #define SIG 1e-2 #define CUT 2.5 #define RCUT (CUT*SIG) #define CUT2 CUT*CUT #define PI 3.14159265 #define DT 0.001 // 0.001 second time increments definitely want to change this #define N_BODY_NUM 5000 #define XMAX (BOX_SIZE/2.0) #define XMIN -(BOX_SIZE/2.0) #define YMAX (BOX_SIZE/2.0) #define YMIN -(BOX_SIZE/2.0) #define T0 1 #define MAX_TRIALS 100 #define ITERS 100 #define BOX_SIZE 10.0 #define GRID_NUM ((BOX_SIZE)/(RCUT)) #define SPAN_WIDTH 256 #define BLOCK_LENGTH(GRID_NUM,BOX_SIZE) (BOX_SIZE/GRID_NUM) // size of block that contains GRID_BLOCK_NUM #define EST_NUM(GRID_NUM,N_BODY_NUM) (N_BODY_NUM/(GRID_NUM*GRID_NUM)) typedef struct sim_param_t { int npart; float dt; float eps_lj; float sig_lj; }params; typedef struct molecule_t { float* x; float* v; float* a; float* F; }mols; __device__ void compute_forces_naive(float x1, int k, float* x, float* F); __device__ void box_reflect(int k, float* x, float* v, float* a); __device__ void reflect(float wall, float* x, float* v, float* a); __device__ void verletInt2(int k, float dt, float* x, float* v, float* a); __device__ float compute_LJ_Scalar(float r2, float eps, float sig2); __device__ void verletInt1(int k, float dt, float* x, float* v, float* a); int init_particles(int n, float* x, float* v, params* param); void init_particles_va(int n, float* v,float* a, params* param); // Just a prototype and declaration struct timespec diff(struct timespec start, struct timespec end); void cudaErrorCheck(hipError_t err); // shared memory kernel __global__ void kernel_VanDerWaals(float* x, float* v, float* a, float* F, int particles) { __shared__ float s_x[SPAN_WIDTH]; __shared__ float s_v[SPAN_WIDTH]; __shared__ float s_a[SPAN_WIDTH]; __shared__ float s_F[SPAN_WIDTH]; int i,k,r,tile; const int gtid = blockIdx.x * blockDim.x + threadIdx.x; float myX = x[gtid]; float dt = 0.0001; for(r=0;r<ITERS;r++){ for(tile=0,i=0; i < 2*particles; i+=blockDim.x, tile++) { int idx = tile * blockDim.x + threadIdx.x; s_x[threadIdx.x] = x[idx]; s_v[threadIdx.x] = v[idx]; s_a[threadIdx.x] = a[idx]; s_F[threadIdx.x] = F[idx]; __syncthreads(); for(k = 0; k < blockDim.x; k++) { if(i!=k) { verletInt1(k, dt, s_x, s_v, s_a); box_reflect(k, x, v, a); compute_forces_naive(myX, k, s_x, s_F); verletInt2(k, dt, x, s_v, s_a); } } x[idx] = s_x[threadIdx.x]; v[idx] = s_v[threadIdx.x]; a[idx] = s_a[threadIdx.x]; __syncthreads(); } } } int main(int argc, char **argv){ int nsize = N_BODY_NUM; if(argc==2) { nsize = atoi(argv[1]); } struct timespec time1,time2; struct timespec time_stamp; hipError_t err = hipSuccess; // start timing of entire program clock_gettime(CLOCK_REALTIME, &time1); // Timing related variables float elapsed_gpu[2]; // global information params param; param.npart = nsize; param.dt = DT; param.eps_lj = EPS; param.sig_lj = SIG; // declare size in bytes size_t size = 2 * (param.npart) * sizeof(float); // Arrays on GPU global memory mols d_mol; // Arrays on the host memory mols h_mol; // Allocate arrays on host memory h_mol.x = (float *) malloc(size); h_mol.v = (float *) malloc(size); h_mol.a = (float *) malloc(size); h_mol.F = (float *) 
malloc(size); hipEvent_t start1,stop1; err=hipEventCreate(&start1); cudaErrorCheck(err); err = hipDeviceSynchronize(); cudaErrorCheck(err); err = hipEventRecord(start1,0); cudaErrorCheck(err); printf("About to hipMalloc\n"); err = hipMalloc((void**) &d_mol.x, size); cudaErrorCheck(err); err = hipMalloc((void**) &d_mol.v, size); cudaErrorCheck(err); err = hipMalloc((void**) &d_mol.a, size); cudaErrorCheck(err); err = hipMalloc((void**) &d_mol.F, size); cudaErrorCheck(err); printf("Finished the hipMalloc\n"); // Initialize the host arrays printf("\nInitializing the Particles ..."); param.npart = init_particles(param.npart, h_mol.x, h_mol.v, &param); init_particles_va(param.npart, h_mol.v, h_mol.a, &param); printf("\t... done\n\n"); // Transfer the arrays to the GPU memory printf("About to hipMemcpy\n"); err = hipMemcpy(d_mol.x, h_mol.x, size , hipMemcpyHostToDevice); cudaErrorCheck(err); err = hipMemcpy(d_mol.v, h_mol.v, size , hipMemcpyHostToDevice); cudaErrorCheck(err); err = hipMemcpy(d_mol.a, h_mol.a, size , hipMemcpyHostToDevice); cudaErrorCheck(err); err = hipMemcpy(d_mol.F, h_mol.F, size , hipMemcpyHostToDevice); cudaErrorCheck(err); printf("Finished hipMemcpy\n"); // create timer and start the timer hipEvent_t start,stop; err=hipEventCreate(&start); cudaErrorCheck(err); err = hipDeviceSynchronize(); cudaErrorCheck(err); err = hipEventRecord(start,0); cudaErrorCheck(err); /// gives the ceiling function for # of blocks --> Launch the kernel int blocksPerGrid = ((param.npart+255)/256); printf("\n%d\n",blocksPerGrid); dim3 dimGrid(blocksPerGrid); dim3 dimBlock(256,1); // Generate actual cuda call printf("Making call to kernel\n"); hipLaunchKernelGGL(( kernel_VanDerWaals), dim3(blocksPerGrid),dim3(dimBlock) , 0, 0, d_mol.x, d_mol.v, d_mol.a, d_mol.F, param.npart); // Transfer the results back to the host printf("Waiting for computation to complete...\n"); // just added this line for debugging purposes err = hipDeviceSynchronize(); cudaErrorCheck(err); err = hipEventCreate(&stop); cudaErrorCheck(err); err = hipEventRecord(stop,0); cudaErrorCheck(err); err = hipEventSynchronize(stop); cudaErrorCheck(err); // Check if kernel execution generated an error err = hipGetLastError(); cudaErrorCheck(err); err = hipMemcpy( h_mol.x , d_mol.x , size ,hipMemcpyDeviceToHost); cudaErrorCheck(err); printf("Memcpy #1 complete ...\n"); err = hipMemcpy( h_mol.v , d_mol.v , size ,hipMemcpyDeviceToHost); cudaErrorCheck(err); printf("Memcpy #2 complete ...\n"); err = hipMemcpy( h_mol.a , d_mol.a , size ,hipMemcpyDeviceToHost); cudaErrorCheck(err); printf("Memcpy #3 complete ...\n"); err = hipMemcpy( h_mol.F , d_mol.F , size ,hipMemcpyDeviceToHost); cudaErrorCheck(err); printf("Memcpy #4 complete ...\n"); printf("Complete!\n"); // Stop and destroy the timer err = hipDeviceSynchronize(); cudaErrorCheck(err); err = hipEventCreate(&stop1); cudaErrorCheck(err); err = hipEventRecord(stop1,0); cudaErrorCheck(err); err = hipEventSynchronize(stop1); cudaErrorCheck(err); // Get the time hipEventElapsedTime(&elapsed_gpu[0],start,stop); // inlcuding kernel call only hipEventElapsedTime(&elapsed_gpu[1],start1,stop1); // including memcopy // Clean up our mess err = hipEventDestroy(start); cudaErrorCheck(err); err = hipEventDestroy(stop); cudaErrorCheck(err); err = hipEventDestroy(start1); cudaErrorCheck(err); err = hipEventDestroy(stop1); cudaErrorCheck(err); clock_gettime(CLOCK_REALTIME, &time2); time_stamp = diff(time1,time2); printf("\nFinal times\n"); printf("ArraySize, GPU time (msec)\n"); //printf("GPU time: %f 
(msec)\t Array Size: %d\n", elapsed_gpu[i],BASE+DELTA*i); printf("Time to run kernel: %f\n",elapsed_gpu[0]); printf("Time to run kernel with memcopy: %f\n",elapsed_gpu[1]); printf("Time to run serial code: %lf\n",time_stamp.tv_sec + time_stamp.tv_nsec/1e9); err = hipFree(d_mol.x); cudaErrorCheck(err); err = hipFree(d_mol.v); cudaErrorCheck(err); err = hipFree(d_mol.a); cudaErrorCheck(err); err = hipFree(d_mol.F); cudaErrorCheck(err); free(h_mol.x); free(h_mol.v); free(h_mol.a); free(h_mol.F); printf("We actually did it! \n"); return EXIT_SUCCESS; } struct timespec diff(struct timespec start, struct timespec end) { struct timespec temp; if ((end.tv_nsec-start.tv_nsec)<0) { temp.tv_sec = end.tv_sec-start.tv_sec-1; temp.tv_nsec = 1000000000+end.tv_nsec-start.tv_nsec; } else { temp.tv_sec = end.tv_sec-start.tv_sec; temp.tv_nsec = end.tv_nsec-start.tv_nsec; } return temp; } void cudaErrorCheck(hipError_t err) { if(err!=hipSuccess) { fprintf(stderr, "hipError_t (error code %s) \n",hipGetErrorString(err)); exit(EXIT_FAILURE); } } int init_particles(int n, float* x, float* v, params* param) { float sig = param->sig_lj; float min_r2 = sig*sig; float r2,dx,dy; int i,j,trial; for(i = 0; i < n; i++) { r2 = 0; /* Choose new point via rejection sampling */ for(trial = 0; (trial < MAX_TRIALS) && (r2 < min_r2); trial++) { x[2*i] = (float) (BOX_SIZE*drand48()) - BOX_SIZE/2.0; x[2*i+1] = (float) (BOX_SIZE*drand48()) - BOX_SIZE/2.0; for(j=0; j < i; j++) { dx = x[2*i] - x[2*j]; dy = x[2*i+1] - x[2*j+1]; r2 = dx*dx + dy*dy; //printf("Sample%d:%d %f %f %f %f\n",i,j,min_r2,r2,dx,dy); if(r2 < min_r2) break; } } /* If it takes too many trials, bail and declare number set up */ if(i > 0 && r2 < min_r2) return i; } return n; } void init_particles_va(int n, float* v,float* a, params* param) { float R,T; int i; for(i=0; i < n; i++) { R = T0 * sqrt(-2.0 * log(drand48())); T = 2 * PI * drand48(); v[2*i] = (R * cos(T)); v[2*i+1] = (R * sin(T)); // printf("SampleVel%d %f %f\n",i,v[2*i],v[2*i+1]); a[2*i] = (R * cos(T))/param->dt; a[2*i+1] = (R * sin(T))/param->dt; } } __device__ inline float compute_LJ_Scalar(float r2, float eps, float sig2) { if(r2 < (CUT2 * sig2)) // { float frac2 = sig2/r2; float frac6 = frac2*frac2*frac2; return 24.0*eps/r2 * frac6 *(1.0-2.0*frac6); } return 0; } __device__ inline void verletInt1(int k, float dt, float* x, float* v, float* a) { // int two_i = 2*k; // assumes that we havbe 2D data v[k] = a[k] * (dt/2.0); // spltwo_it up for a 2D // v[two_i+1] = a[two_i+1] * (dt/2.0); x[k] = v[k] * dt; // x[two_i+1] = v[two_i+1] * dt; } __device__ inline void verletInt2(int k, float dt, float* x, float* v, float* a) { // int two_i = 2*k; int v0 = v[k]; // int v1 = v[two_i+1]; v[k] = a[k] * dt/2.0; // spltwo_it up for 2D // v[two_i+1] = a[two_i+1] * dt/2.0; a[k] += (v[k]-v0)/dt; // a[two_i+1] += (v[two_i+1]-v1)/dt; } // should check for reflection inbetween __device__ inline void reflect(float wall, float* x, float* v, float* a) { //printf("reflected!"); *x = (2*wall-(*x)); *v = -(*v); *a = -(*a); } __device__ inline void box_reflect(int k, float* x, float* v, float* a) { // int two_i = 2*k; if(x[k] < XMIN) reflect(XMIN,&x[k],&v[k],&a[k]); if(x[k] > XMAX) reflect(XMAX,&x[k],&v[k],&a[k]); // if(x[two_i+1] < YMIN) reflect(YMIN,&x[two_i+1],&v[two_i+1],&a[two_i+1]); // if(x[two_i+1] > YMAX) reflect(YMAX,&x[two_i+1],&v[two_i+1],&a[two_i+1]); } // now only executes over the bodies declared! 
// n is me and k is them __device__ inline void compute_forces_naive(float x1, int n, float* x, float* F) { float eps = EPS; float sig = SIG; // float sig2 = sig*sig; float dx,lj_scalar; dx = x1 - x[2*n]; // dy = y1 - x[2*n+1]; lj_scalar = compute_LJ_Scalar(dx*dx,eps,sig); F[2*n] += lj_scalar * dx; // pos account for the direction of the vector from base molecule // F[2*n+1] += lj_scalar * dy; }
66a9a5fa0ec1d66b09ff7b5c8d959215e9a58a47.cu
/// stuff happening // nvall -o share mdCudaShared.cu -g -G -lrt -lm #include <stdio.h> #include <math.h> #include <time.h> #include <stdlib.h> #include <errno.h> #include <string.h> #define LINUX 1 // is this on a linux machine?? #define NEAREST 0 // Are we going to use nearest algorithm #define OPT 0 // N^2 or optimized code?? #define EPS 1 #define SIG 1e-2 #define CUT 2.5 #define RCUT (CUT*SIG) #define CUT2 CUT*CUT #define PI 3.14159265 #define DT 0.001 // 0.001 second time increments definitely want to change this #define N_BODY_NUM 5000 #define XMAX (BOX_SIZE/2.0) #define XMIN -(BOX_SIZE/2.0) #define YMAX (BOX_SIZE/2.0) #define YMIN -(BOX_SIZE/2.0) #define T0 1 #define MAX_TRIALS 100 #define ITERS 100 #define BOX_SIZE 10.0 #define GRID_NUM ((BOX_SIZE)/(RCUT)) #define SPAN_WIDTH 256 #define BLOCK_LENGTH(GRID_NUM,BOX_SIZE) (BOX_SIZE/GRID_NUM) // size of block that contains GRID_BLOCK_NUM #define EST_NUM(GRID_NUM,N_BODY_NUM) (N_BODY_NUM/(GRID_NUM*GRID_NUM)) typedef struct sim_param_t { int npart; float dt; float eps_lj; float sig_lj; }params; typedef struct molecule_t { float* x; float* v; float* a; float* F; }mols; __device__ void compute_forces_naive(float x1, int k, float* x, float* F); __device__ void box_reflect(int k, float* x, float* v, float* a); __device__ void reflect(float wall, float* x, float* v, float* a); __device__ void verletInt2(int k, float dt, float* x, float* v, float* a); __device__ float compute_LJ_Scalar(float r2, float eps, float sig2); __device__ void verletInt1(int k, float dt, float* x, float* v, float* a); int init_particles(int n, float* x, float* v, params* param); void init_particles_va(int n, float* v,float* a, params* param); // Just a prototype and declaration struct timespec diff(struct timespec start, struct timespec end); void cudaErrorCheck(cudaError_t err); // shared memory kernel __global__ void kernel_VanDerWaals(float* x, float* v, float* a, float* F, int particles) { __shared__ float s_x[SPAN_WIDTH]; __shared__ float s_v[SPAN_WIDTH]; __shared__ float s_a[SPAN_WIDTH]; __shared__ float s_F[SPAN_WIDTH]; int i,k,r,tile; const int gtid = blockIdx.x * blockDim.x + threadIdx.x; float myX = x[gtid]; float dt = 0.0001; for(r=0;r<ITERS;r++){ for(tile=0,i=0; i < 2*particles; i+=blockDim.x, tile++) { int idx = tile * blockDim.x + threadIdx.x; s_x[threadIdx.x] = x[idx]; s_v[threadIdx.x] = v[idx]; s_a[threadIdx.x] = a[idx]; s_F[threadIdx.x] = F[idx]; __syncthreads(); for(k = 0; k < blockDim.x; k++) { if(i!=k) { verletInt1(k, dt, s_x, s_v, s_a); box_reflect(k, x, v, a); compute_forces_naive(myX, k, s_x, s_F); verletInt2(k, dt, x, s_v, s_a); } } x[idx] = s_x[threadIdx.x]; v[idx] = s_v[threadIdx.x]; a[idx] = s_a[threadIdx.x]; __syncthreads(); } } } int main(int argc, char **argv){ int nsize = N_BODY_NUM; if(argc==2) { nsize = atoi(argv[1]); } struct timespec time1,time2; struct timespec time_stamp; cudaError_t err = cudaSuccess; // start timing of entire program clock_gettime(CLOCK_REALTIME, &time1); // Timing related variables float elapsed_gpu[2]; // global information params param; param.npart = nsize; param.dt = DT; param.eps_lj = EPS; param.sig_lj = SIG; // declare size in bytes size_t size = 2 * (param.npart) * sizeof(float); // Arrays on GPU global memory mols d_mol; // Arrays on the host memory mols h_mol; // Allocate arrays on host memory h_mol.x = (float *) malloc(size); h_mol.v = (float *) malloc(size); h_mol.a = (float *) malloc(size); h_mol.F = (float *) malloc(size); cudaEvent_t start1,stop1; err=cudaEventCreate(&start1); cudaErrorCheck(err); err 
= cudaThreadSynchronize(); cudaErrorCheck(err); err = cudaEventRecord(start1,0); cudaErrorCheck(err); printf("About to cudaMalloc\n"); err = cudaMalloc((void**) &d_mol.x, size); cudaErrorCheck(err); err = cudaMalloc((void**) &d_mol.v, size); cudaErrorCheck(err); err = cudaMalloc((void**) &d_mol.a, size); cudaErrorCheck(err); err = cudaMalloc((void**) &d_mol.F, size); cudaErrorCheck(err); printf("Finished the cudaMalloc\n"); // Initialize the host arrays printf("\nInitializing the Particles ..."); param.npart = init_particles(param.npart, h_mol.x, h_mol.v, &param); init_particles_va(param.npart, h_mol.v, h_mol.a, &param); printf("\t... done\n\n"); // Transfer the arrays to the GPU memory printf("About to cudaMemcpy\n"); err = cudaMemcpy(d_mol.x, h_mol.x, size , cudaMemcpyHostToDevice); cudaErrorCheck(err); err = cudaMemcpy(d_mol.v, h_mol.v, size , cudaMemcpyHostToDevice); cudaErrorCheck(err); err = cudaMemcpy(d_mol.a, h_mol.a, size , cudaMemcpyHostToDevice); cudaErrorCheck(err); err = cudaMemcpy(d_mol.F, h_mol.F, size , cudaMemcpyHostToDevice); cudaErrorCheck(err); printf("Finished cudaMemcpy\n"); // create timer and start the timer cudaEvent_t start,stop; err=cudaEventCreate(&start); cudaErrorCheck(err); err = cudaThreadSynchronize(); cudaErrorCheck(err); err = cudaEventRecord(start,0); cudaErrorCheck(err); /// gives the ceiling function for # of blocks --> Launch the kernel int blocksPerGrid = ((param.npart+255)/256); printf("\n%d\n",blocksPerGrid); dim3 dimGrid(blocksPerGrid); dim3 dimBlock(256,1); // Generate actual cuda call printf("Making call to kernel\n"); kernel_VanDerWaals<<< blocksPerGrid,dimBlock >>>(d_mol.x, d_mol.v, d_mol.a, d_mol.F, param.npart); // Transfer the results back to the host printf("Waiting for computation to complete...\n"); // just added this line for debugging purposes err = cudaThreadSynchronize(); cudaErrorCheck(err); err = cudaEventCreate(&stop); cudaErrorCheck(err); err = cudaEventRecord(stop,0); cudaErrorCheck(err); err = cudaEventSynchronize(stop); cudaErrorCheck(err); // Check if kernel execution generated an error err = cudaGetLastError(); cudaErrorCheck(err); err = cudaMemcpy( h_mol.x , d_mol.x , size ,cudaMemcpyDeviceToHost); cudaErrorCheck(err); printf("Memcpy #1 complete ...\n"); err = cudaMemcpy( h_mol.v , d_mol.v , size ,cudaMemcpyDeviceToHost); cudaErrorCheck(err); printf("Memcpy #2 complete ...\n"); err = cudaMemcpy( h_mol.a , d_mol.a , size ,cudaMemcpyDeviceToHost); cudaErrorCheck(err); printf("Memcpy #3 complete ...\n"); err = cudaMemcpy( h_mol.F , d_mol.F , size ,cudaMemcpyDeviceToHost); cudaErrorCheck(err); printf("Memcpy #4 complete ...\n"); printf("Complete!\n"); // Stop and destroy the timer err = cudaThreadSynchronize(); cudaErrorCheck(err); err = cudaEventCreate(&stop1); cudaErrorCheck(err); err = cudaEventRecord(stop1,0); cudaErrorCheck(err); err = cudaEventSynchronize(stop1); cudaErrorCheck(err); // Get the time cudaEventElapsedTime(&elapsed_gpu[0],start,stop); // inlcuding kernel call only cudaEventElapsedTime(&elapsed_gpu[1],start1,stop1); // including memcopy // Clean up our mess err = cudaEventDestroy(start); cudaErrorCheck(err); err = cudaEventDestroy(stop); cudaErrorCheck(err); err = cudaEventDestroy(start1); cudaErrorCheck(err); err = cudaEventDestroy(stop1); cudaErrorCheck(err); clock_gettime(CLOCK_REALTIME, &time2); time_stamp = diff(time1,time2); printf("\nFinal times\n"); printf("ArraySize, GPU time (msec)\n"); //printf("GPU time: %f (msec)\t Array Size: %d\n", elapsed_gpu[i],BASE+DELTA*i); printf("Time to run kernel: 
%f\n",elapsed_gpu[0]); printf("Time to run kernel with memcopy: %f\n",elapsed_gpu[1]); printf("Time to run serial code: %lf\n",time_stamp.tv_sec + time_stamp.tv_nsec/1e9); err = cudaFree(d_mol.x); cudaErrorCheck(err); err = cudaFree(d_mol.v); cudaErrorCheck(err); err = cudaFree(d_mol.a); cudaErrorCheck(err); err = cudaFree(d_mol.F); cudaErrorCheck(err); free(h_mol.x); free(h_mol.v); free(h_mol.a); free(h_mol.F); printf("We actually did it! \n"); return EXIT_SUCCESS; } struct timespec diff(struct timespec start, struct timespec end) { struct timespec temp; if ((end.tv_nsec-start.tv_nsec)<0) { temp.tv_sec = end.tv_sec-start.tv_sec-1; temp.tv_nsec = 1000000000+end.tv_nsec-start.tv_nsec; } else { temp.tv_sec = end.tv_sec-start.tv_sec; temp.tv_nsec = end.tv_nsec-start.tv_nsec; } return temp; } void cudaErrorCheck(cudaError_t err) { if(err!=cudaSuccess) { fprintf(stderr, "cudaError (error code %s) \n",cudaGetErrorString(err)); exit(EXIT_FAILURE); } } int init_particles(int n, float* x, float* v, params* param) { float sig = param->sig_lj; float min_r2 = sig*sig; float r2,dx,dy; int i,j,trial; for(i = 0; i < n; i++) { r2 = 0; /* Choose new point via rejection sampling */ for(trial = 0; (trial < MAX_TRIALS) && (r2 < min_r2); trial++) { x[2*i] = (float) (BOX_SIZE*drand48()) - BOX_SIZE/2.0; x[2*i+1] = (float) (BOX_SIZE*drand48()) - BOX_SIZE/2.0; for(j=0; j < i; j++) { dx = x[2*i] - x[2*j]; dy = x[2*i+1] - x[2*j+1]; r2 = dx*dx + dy*dy; //printf("Sample%d:%d %f %f %f %f\n",i,j,min_r2,r2,dx,dy); if(r2 < min_r2) break; } } /* If it takes too many trials, bail and declare number set up */ if(i > 0 && r2 < min_r2) return i; } return n; } void init_particles_va(int n, float* v,float* a, params* param) { float R,T; int i; for(i=0; i < n; i++) { R = T0 * sqrt(-2.0 * log(drand48())); T = 2 * PI * drand48(); v[2*i] = (R * cos(T)); v[2*i+1] = (R * sin(T)); // printf("SampleVel%d %f %f\n",i,v[2*i],v[2*i+1]); a[2*i] = (R * cos(T))/param->dt; a[2*i+1] = (R * sin(T))/param->dt; } } __device__ inline float compute_LJ_Scalar(float r2, float eps, float sig2) { if(r2 < (CUT2 * sig2)) // { float frac2 = sig2/r2; float frac6 = frac2*frac2*frac2; return 24.0*eps/r2 * frac6 *(1.0-2.0*frac6); } return 0; } __device__ inline void verletInt1(int k, float dt, float* x, float* v, float* a) { // int two_i = 2*k; // assumes that we havbe 2D data v[k] = a[k] * (dt/2.0); // spltwo_it up for a 2D // v[two_i+1] = a[two_i+1] * (dt/2.0); x[k] = v[k] * dt; // x[two_i+1] = v[two_i+1] * dt; } __device__ inline void verletInt2(int k, float dt, float* x, float* v, float* a) { // int two_i = 2*k; int v0 = v[k]; // int v1 = v[two_i+1]; v[k] = a[k] * dt/2.0; // spltwo_it up for 2D // v[two_i+1] = a[two_i+1] * dt/2.0; a[k] += (v[k]-v0)/dt; // a[two_i+1] += (v[two_i+1]-v1)/dt; } // should check for reflection inbetween __device__ inline void reflect(float wall, float* x, float* v, float* a) { //printf("reflected!"); *x = (2*wall-(*x)); *v = -(*v); *a = -(*a); } __device__ inline void box_reflect(int k, float* x, float* v, float* a) { // int two_i = 2*k; if(x[k] < XMIN) reflect(XMIN,&x[k],&v[k],&a[k]); if(x[k] > XMAX) reflect(XMAX,&x[k],&v[k],&a[k]); // if(x[two_i+1] < YMIN) reflect(YMIN,&x[two_i+1],&v[two_i+1],&a[two_i+1]); // if(x[two_i+1] > YMAX) reflect(YMAX,&x[two_i+1],&v[two_i+1],&a[two_i+1]); } // now only executes over the bodies declared! 
// n indexes the other particle; x1 is this thread's own coordinate __device__ inline void compute_forces_naive(float x1, int n, float* x, float* F) { float eps = EPS; float sig = SIG; float sig2 = sig*sig; // compute_LJ_Scalar expects sigma squared float dx,lj_scalar; dx = x1 - x[2*n]; // dy = y1 - x[2*n+1]; lj_scalar = compute_LJ_Scalar(dx*dx,eps,sig2); F[2*n] += lj_scalar * dx; // sign accounts for the direction of the vector from the base molecule // F[2*n+1] += lj_scalar * dy; }
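The kernel above stages positions, velocities, accelerations and forces through shared memory in tiles of SPAN_WIDTH before iterating over each tile. A minimal sketch of that tiling pattern follows; the names (TILE, pair_force, tiled_forces) and the placeholder 1-D force are illustrative only and not taken from the file, and the kernel is assumed to be launched with blockDim.x == TILE.

#define TILE 256

// Placeholder 1-D pair interaction; softened to avoid division by zero.
__device__ float pair_force(float xi, float xj) {
    float dx = xi - xj;
    float r2 = dx * dx + 1e-6f;
    return dx / (r2 * sqrtf(r2));
}

// Classic tiled pairwise loop: each block stages one tile of positions into
// shared memory, every thread accumulates against that tile, then the next
// tile is loaded. Launch with blockDim.x == TILE.
__global__ void tiled_forces(const float* __restrict__ x, float* __restrict__ f, int n) {
    __shared__ float s_x[TILE];
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    float xi = (gid < n) ? x[gid] : 0.0f;
    float acc = 0.0f;
    for (int base = 0; base < n; base += TILE) {
        int j = base + threadIdx.x;
        s_x[threadIdx.x] = (j < n) ? x[j] : 0.0f;   // stage one tile
        __syncthreads();                            // tile fully loaded
        for (int k = 0; k < TILE && base + k < n; ++k)
            if (base + k != gid) acc += pair_force(xi, s_x[k]);
        __syncthreads();                            // tile fully consumed
    }
    if (gid < n) f[gid] = acc;
}

The sketch synchronizes both after staging and after consuming each tile so loading and reading never overlap. Separately, the cudaThreadSynchronize() calls in the host code above are the deprecated spelling of cudaDeviceSynchronize().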
294ae7d3411ad104fc61b35c237e805456d3fa1c.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include "Common.h" #include "ConvexGPU.h" namespace ConvexGPU { NeuralNetwork::NeuralNetwork() {} NeuralNetwork::NeuralNetwork(std::vector <int> _structure, hardwareMode m_hardwareMode) { m_networkStructure = _structure; m_networkLength = _structure.size(); m_learningRate = 0.1; m_activationFunction = SIGMOID; m_hardwareMode = CPU; generateNetwork(); initNetwork(); } NeuralNetwork::NeuralNetwork(const char* _path) { deserialise(_path); } void NeuralNetwork::generateNetwork() { m_weightMatrixes.clear(); m_biasMatrixes.clear(); m_networkErrors.clear(); for (int i = 1; i < m_networkLength; i++) { Matrix <double>* weightMatrix = new Matrix<double>(m_networkStructure.at(i - 1), m_networkStructure.at(i)); weightMatrix->m_hardwareMode = m_hardwareMode; for (int i = 0; i < weightMatrix->dimensions[1]; i++) { for (int j = 0; j < weightMatrix->dimensions[0]; j++) { weightMatrix->at2D(i, j) = randomNumber(-1.0f, 1.0f); } } m_weightMatrixes.push_back(weightMatrix); } for (int i = 0; i < m_networkLength; i++) { std::vector <double> biasMatrix; std::vector <double> errorMatrix; for (int j = 0; j < m_networkStructure.at(i); j++) { biasMatrix.push_back(randomNumber(-1.0f, 1.0f)); errorMatrix.push_back(0); } m_biasMatrixes.push_back(biasMatrix); m_networkErrors.push_back(errorMatrix); } } void NeuralNetwork::initNetwork() { for (int i = 1; i < m_networkStructure.size(); i++) { Matrix <double>* result = new Matrix<double>(1, m_networkStructure[i]); result->m_hardwareMode = m_hardwareMode; m_layerMatrixes.push_back(result); } } Matrix<double>* NeuralNetwork::feed(Matrix<double>* _input) { return feed(_input, 0, m_weightMatrixes.size()); } //TODO Write normalisation kernel and implement biases in previous layer Matrix<double>* NeuralNetwork::feed(Matrix<double>* _input, int _rangeStart, int _rangeEnd) { m_activations.clear(); m_activations.push_back(_input); for (int i = _rangeStart; i < _rangeEnd; i++) { Matrix<double>* result = m_layerMatrixes.at(i); result->m_hardwareMode = m_hardwareMode; result->fill(0); if (m_hardwareMode == CPU) { matrixMultiplyCPU(m_activations[m_activations.size() - 1], m_weightMatrixes[i], result); } else if (m_hardwareMode == CUDA) { m_activations[m_activations.size() - 1]->copyMemoryToDevice(); m_weightMatrixes[i]->copyMemoryToDevice(); matrixMultiplyGPU(m_activations[m_activations.size() - 1], m_weightMatrixes[i], result); //result->copyMemoryToHost(); } for (int j = 0; j < result->dimensions.at(1); j++) { result->at2D(0, j) = normalise(result->at2D(0, j) + m_biasMatrixes.at(i + 1).at(j)); } //if (m_hardwareMode == CUDA) result->copyMemoryToDevice(); m_activations.push_back(result); } return m_activations[m_activations.size() - 1]; } Matrix<double>* NeuralNetwork::train(Matrix<double>* _input, Matrix<double>* _targetOutput) { return train(_input, _targetOutput, 0, m_networkLength - 1); } //TODO Check if this still works (cost reduces) Matrix<double>* NeuralNetwork::train(Matrix<double>* _input, Matrix<double>* _targetOutput, int _rangeStart, int _rangeEnd) { Matrix<double>* feedResult = feed(_input, _rangeStart, _rangeEnd); m_cost = 0; m_globalError = 0; for (int i = 0; i < feedResult->size(); i++) { double gradient = -deriveNormalise(feedResult->at2D(0, i)); double error = feedResult->at2D(0, i) - _targetOutput->at2D(0, i); m_networkErrors.at(_rangeEnd).at(i) = error * gradient; m_cost += error; } if (m_learningRate != 0) { for (int i = _rangeEnd - 1; i 
>= _rangeStart; i--) { std::vector <double> nextLayerErrors = m_networkErrors.at(i + 1); //if(m_hardwareMode == CUDA) m_weightMatrixes.at(i)->copyMemoryToHost(); for (int j = 0; j < m_networkStructure.at(i); j++) { double sum = 0; for (int k = 0; k < nextLayerErrors.size(); k++) { //double* weight = &m_weightMatrixes.at(i).at2D(j, k); // OF INTEREST double* weight = &m_weightMatrixes.at(i)->at2D(k, j); // OF INTEREST *weight += m_learningRate * nextLayerErrors.at(k) * m_activations.at(i)->at2D(0, j); sum += *weight * nextLayerErrors.at(k); } double currentError = sum * deriveNormalise(m_activations.at(i)->at2D(0, j)); m_networkErrors.at(i).at(j) = currentError; m_globalError += abs(currentError); m_biasMatrixes.at(i).at(j) += m_learningRate * currentError; } if(m_hardwareMode == CUDA) m_weightMatrixes.at(i)->copyMemoryToDevice(); } } return feedResult; } double NeuralNetwork::train(ConvexGPU::ImageClassDataset* _dataset, bool _assessPerformance) { double cost = 0; double score = 0; if (_assessPerformance == true) { for (int n = 0; n < _dataset->m_size; n++) { Matrix <double> targetResult(1, 10); targetResult.m_hardwareMode = CPU; targetResult.fill(0); targetResult[_dataset->m_labels[n]] = 1; Matrix<double>* feedResult = train(&_dataset->m_imagesFlattened[n], &targetResult); targetResult.freeMemory(); cost += m_cost / _dataset->m_size; //TODO: Solve with iterators unsigned int maxElementIndex = feedResult->at(0); double maxElement = 0; for (int i = 0; i < feedResult->dimensions[1]; i++) { if (feedResult->at(i) > maxElement) { maxElement = feedResult->at(i); maxElementIndex = i; } } //std::vector<double>::iterator maxElement = std::max_element(feedResult.begin(), feedResult.end()); //int maxElementIndex = std::distance(feedResult.begin(), maxElement); if (maxElementIndex == _dataset->m_labels[n]) { score++; } } this->m_score = score / _dataset->m_size; return cost; } else { for (int n = 0; n < _dataset->m_size; n++) { Matrix<double> targetResult(1, 10); targetResult.fill(0); targetResult[_dataset->m_labels[n]] = 1; Matrix<double>* feedResult = train(&_dataset->m_imagesFlattened[n], &targetResult); if (m_hardwareMode == CUDA) targetResult.freeMemoryGPU(); else if (m_hardwareMode == CPU) targetResult.freeMemoryCPU(); cost += m_cost / _dataset->m_size; } return cost; } } void NeuralNetwork::trainSequence(ConvexGPU::ImageClassDataset* _dataset, int _epochs, const char* _path) { std::cout << "[CONVEX] Training start" << std::endl; double minCost = assess(_dataset); int nonImprovementCount = 0; for (int n = 0; n < _epochs; n++) { //if (n % 2 == 0) m_hardwareMode = ConvexGPU::CPU; //else m_hardwareMode = ConvexGPU::CUDA; double cost = train(_dataset, n % 1 == 0); std::cout << "[CONVEX] Train epoch " << n << ": " << cost << " (" << round((float)m_score * 10000) / 100 << "%)" << std::endl; if (cost < minCost) { //serialise(_path); minCost = cost; nonImprovementCount = 0; } else { nonImprovementCount++; if (nonImprovementCount == 16) { double newLearningRate = m_learningRate * 0.90; //deserialise(_path); m_learningRate = newLearningRate; nonImprovementCount = 0; std::cout << "[CONVEX] Adjusting learning rate to " << m_learningRate << std::endl; } } } } double NeuralNetwork::assess(ConvexGPU::ImageClassDataset* _dataset) { std::cout << "[CONVEX] Assessing network..."; double learningRateTemp = m_learningRate; m_learningRate = 0; double cost = train(_dataset); m_learningRate = learningRateTemp; std::cout << " done" << std::endl; std::cout << "[CONVEX] Cost: " << cost << " (" << round((float)m_score * 
10000) / 100 << "%)" << std::endl; return cost; } template <typename T> std::vector<T> flattenVector(const std::vector<std::vector<T>>& v) { std::size_t total_size = 0; for (const auto& sub : v) total_size += sub.size(); std::vector<T> result; result.reserve(total_size); for (const auto& sub : v) result.insert(result.end(), sub.begin(), sub.end()); return result; } void NeuralNetwork::freeMemory() { for (int i = 0; i < m_activations.size() - 2; i++) { if (m_hardwareMode == CUDA) m_activations[i]->freeMemoryGPU(); else if (m_hardwareMode == CPU) m_activations[i]->freeMemoryCPU(); } } std::ofstream* NeuralNetwork::serialise(std::ofstream* _stream, bool _swapEndianness) { writeVariable(&m_learningRate, _stream, _swapEndianness); writeVariable(&m_activationFunction, _stream, _swapEndianness); //writeVariable(&m_hardwareMode, _stream, _swapEndianness); writeVector(&m_networkStructure, _stream, _swapEndianness); writeVector(&m_biasMatrixes, _stream, _swapEndianness); writeMatrixVector(&m_weightMatrixes, _stream, _swapEndianness); return _stream; } void NeuralNetwork::serialise(const char * _path, bool _swapEndianness) { std::cout << "[CONVEX] Saving network to " << _path << "..."; std::ofstream file; file.open(_path, std::ios::binary); serialise(&file, _swapEndianness); std::cout << " done" << std::endl; file.close(); } std::ifstream* NeuralNetwork::deserialise(std::ifstream* _stream, bool _swapEndianness) { readVariable(&m_learningRate, _stream, _swapEndianness); readVariable(&m_activationFunction, _stream, _swapEndianness); //readVariable(&m_hardwareMode, _stream, _swapEndianness); m_networkStructure.clear(); readVector(&m_networkStructure, _stream, _swapEndianness); m_biasMatrixes.clear(); readVector(&m_biasMatrixes, _stream, _swapEndianness); for (int i = 0; i < m_weightMatrixes.size(); i++) { if (m_hardwareMode == CUDA) m_weightMatrixes[i]->freeMemoryGPU(); if (m_hardwareMode == CPU) m_weightMatrixes[i]->freeMemoryCPU(); } m_weightMatrixes.clear(); readMatrixVector(&m_weightMatrixes, _stream, _swapEndianness); m_networkErrors.clear(); for (int i = 0; i < m_networkLength; i++) { std::vector <double> errorMatrix; for (int j = 0; j < m_networkStructure.at(i); j++) { errorMatrix.push_back(0); } m_networkErrors.push_back(errorMatrix); } initNetwork(); return _stream; } void NeuralNetwork::deserialise(const char* _path, bool _swapEndianness) { std::cout << "[CONVEX] Loading network from " << _path << "..."; std::ifstream file; file.open(_path, std::ios::binary); if (file.is_open()) { deserialise(&file, _swapEndianness); std::cout << " done" << std::endl; file.close(); } else { std::cout << " failed" << std::endl; std::cerr << "[CONVEX] ERROR: Could not open " << _path << std::endl; } } double NeuralNetwork::normalise(double _input) { switch (m_activationFunction) { case SIGMOID: return 1 / (1 + exp(-_input)); break; case TANH: return 2 / (1 + exp(-2 * _input)); break; case RELU: return (_input < 0) ? 
0 : _input; break; case NONE: return _input; break; default: std::cerr << "[CONVEX ERROR] Invalid activation function" << std::endl; return 0; break; } } double NeuralNetwork::deriveNormalise(double _input) { switch (m_activationFunction) { case SIGMOID: return (1 / (1 + exp(-_input)))*(1 - (1 / (1 + exp(-_input)))); //return _input * (1 - _input); break; case TANH: return 1 - ::pow((2 / (1 + exp(-2 * _input))), 2); break; case RELU: return _input; break; case NONE: return _input; break; default: std::cerr << "[CONVEX ERROR] Invalid activation function" << std::endl; return 0; break; } } double* NeuralNetwork::getWeight(int _n1Layer, int _n1Neuron, int _n2Layer, int _n2Neuron) { return &m_weightMatrixes.at(_n2Layer)->at2D(_n2Neuron, _n1Neuron); } void NeuralNetwork::matrixMultiplyCPU(Matrix <double>* _matrixA, Matrix <double>* _matrixB, Matrix <double>* output) { unsigned int rows = (unsigned int)_matrixA->dimensions[0]; unsigned int cols = (unsigned int)_matrixB->dimensions[1]; #pragma omp parallel for { for (unsigned int i = 0; i < rows; i++) { for (unsigned int j = 0; j < cols; j++) { for (unsigned int k = 0; k < _matrixB->dimensions[0]; k++) { output->at2D(i, j) += _matrixA->at2D(i, k) * _matrixB->at2D(j, k); } } } } } }
294ae7d3411ad104fc61b35c237e805456d3fa1c.cu
#include <cuda_runtime.h> #include <device_launch_parameters.h> #include "Common.h" #include "ConvexGPU.h" namespace ConvexGPU { NeuralNetwork::NeuralNetwork() {} NeuralNetwork::NeuralNetwork(std::vector <int> _structure, hardwareMode m_hardwareMode) { m_networkStructure = _structure; m_networkLength = _structure.size(); m_learningRate = 0.1; m_activationFunction = SIGMOID; m_hardwareMode = CPU; generateNetwork(); initNetwork(); } NeuralNetwork::NeuralNetwork(const char* _path) { deserialise(_path); } void NeuralNetwork::generateNetwork() { m_weightMatrixes.clear(); m_biasMatrixes.clear(); m_networkErrors.clear(); for (int i = 1; i < m_networkLength; i++) { Matrix <double>* weightMatrix = new Matrix<double>(m_networkStructure.at(i - 1), m_networkStructure.at(i)); weightMatrix->m_hardwareMode = m_hardwareMode; for (int i = 0; i < weightMatrix->dimensions[1]; i++) { for (int j = 0; j < weightMatrix->dimensions[0]; j++) { weightMatrix->at2D(i, j) = randomNumber(-1.0f, 1.0f); } } m_weightMatrixes.push_back(weightMatrix); } for (int i = 0; i < m_networkLength; i++) { std::vector <double> biasMatrix; std::vector <double> errorMatrix; for (int j = 0; j < m_networkStructure.at(i); j++) { biasMatrix.push_back(randomNumber(-1.0f, 1.0f)); errorMatrix.push_back(0); } m_biasMatrixes.push_back(biasMatrix); m_networkErrors.push_back(errorMatrix); } } void NeuralNetwork::initNetwork() { for (int i = 1; i < m_networkStructure.size(); i++) { Matrix <double>* result = new Matrix<double>(1, m_networkStructure[i]); result->m_hardwareMode = m_hardwareMode; m_layerMatrixes.push_back(result); } } Matrix<double>* NeuralNetwork::feed(Matrix<double>* _input) { return feed(_input, 0, m_weightMatrixes.size()); } //TODO Write normalisation kernel and implement biases in previous layer Matrix<double>* NeuralNetwork::feed(Matrix<double>* _input, int _rangeStart, int _rangeEnd) { m_activations.clear(); m_activations.push_back(_input); for (int i = _rangeStart; i < _rangeEnd; i++) { Matrix<double>* result = m_layerMatrixes.at(i); result->m_hardwareMode = m_hardwareMode; result->fill(0); if (m_hardwareMode == CPU) { matrixMultiplyCPU(m_activations[m_activations.size() - 1], m_weightMatrixes[i], result); } else if (m_hardwareMode == CUDA) { m_activations[m_activations.size() - 1]->copyMemoryToDevice(); m_weightMatrixes[i]->copyMemoryToDevice(); matrixMultiplyGPU(m_activations[m_activations.size() - 1], m_weightMatrixes[i], result); //result->copyMemoryToHost(); } for (int j = 0; j < result->dimensions.at(1); j++) { result->at2D(0, j) = normalise(result->at2D(0, j) + m_biasMatrixes.at(i + 1).at(j)); } //if (m_hardwareMode == CUDA) result->copyMemoryToDevice(); m_activations.push_back(result); } return m_activations[m_activations.size() - 1]; } Matrix<double>* NeuralNetwork::train(Matrix<double>* _input, Matrix<double>* _targetOutput) { return train(_input, _targetOutput, 0, m_networkLength - 1); } //TODO Check if this still works (cost reduces) Matrix<double>* NeuralNetwork::train(Matrix<double>* _input, Matrix<double>* _targetOutput, int _rangeStart, int _rangeEnd) { Matrix<double>* feedResult = feed(_input, _rangeStart, _rangeEnd); m_cost = 0; m_globalError = 0; for (int i = 0; i < feedResult->size(); i++) { double gradient = -deriveNormalise(feedResult->at2D(0, i)); double error = feedResult->at2D(0, i) - _targetOutput->at2D(0, i); m_networkErrors.at(_rangeEnd).at(i) = error * gradient; m_cost += error; } if (m_learningRate != 0) { for (int i = _rangeEnd - 1; i >= _rangeStart; i--) { std::vector <double> nextLayerErrors = 
m_networkErrors.at(i + 1); //if(m_hardwareMode == CUDA) m_weightMatrixes.at(i)->copyMemoryToHost(); for (int j = 0; j < m_networkStructure.at(i); j++) { double sum = 0; for (int k = 0; k < nextLayerErrors.size(); k++) { //double* weight = &m_weightMatrixes.at(i).at2D(j, k); // OF INTEREST double* weight = &m_weightMatrixes.at(i)->at2D(k, j); // OF INTEREST *weight += m_learningRate * nextLayerErrors.at(k) * m_activations.at(i)->at2D(0, j); sum += *weight * nextLayerErrors.at(k); } double currentError = sum * deriveNormalise(m_activations.at(i)->at2D(0, j)); m_networkErrors.at(i).at(j) = currentError; m_globalError += abs(currentError); m_biasMatrixes.at(i).at(j) += m_learningRate * currentError; } if(m_hardwareMode == CUDA) m_weightMatrixes.at(i)->copyMemoryToDevice(); } } return feedResult; } double NeuralNetwork::train(ConvexGPU::ImageClassDataset* _dataset, bool _assessPerformance) { double cost = 0; double score = 0; if (_assessPerformance == true) { for (int n = 0; n < _dataset->m_size; n++) { Matrix <double> targetResult(1, 10); targetResult.m_hardwareMode = CPU; targetResult.fill(0); targetResult[_dataset->m_labels[n]] = 1; Matrix<double>* feedResult = train(&_dataset->m_imagesFlattened[n], &targetResult); targetResult.freeMemory(); cost += m_cost / _dataset->m_size; //TODO: Solve with iterators unsigned int maxElementIndex = feedResult->at(0); double maxElement = 0; for (int i = 0; i < feedResult->dimensions[1]; i++) { if (feedResult->at(i) > maxElement) { maxElement = feedResult->at(i); maxElementIndex = i; } } //std::vector<double>::iterator maxElement = std::max_element(feedResult.begin(), feedResult.end()); //int maxElementIndex = std::distance(feedResult.begin(), maxElement); if (maxElementIndex == _dataset->m_labels[n]) { score++; } } this->m_score = score / _dataset->m_size; return cost; } else { for (int n = 0; n < _dataset->m_size; n++) { Matrix<double> targetResult(1, 10); targetResult.fill(0); targetResult[_dataset->m_labels[n]] = 1; Matrix<double>* feedResult = train(&_dataset->m_imagesFlattened[n], &targetResult); if (m_hardwareMode == CUDA) targetResult.freeMemoryGPU(); else if (m_hardwareMode == CPU) targetResult.freeMemoryCPU(); cost += m_cost / _dataset->m_size; } return cost; } } void NeuralNetwork::trainSequence(ConvexGPU::ImageClassDataset* _dataset, int _epochs, const char* _path) { std::cout << "[CONVEX] Training start" << std::endl; double minCost = assess(_dataset); int nonImprovementCount = 0; for (int n = 0; n < _epochs; n++) { //if (n % 2 == 0) m_hardwareMode = ConvexGPU::CPU; //else m_hardwareMode = ConvexGPU::CUDA; double cost = train(_dataset, n % 1 == 0); std::cout << "[CONVEX] Train epoch " << n << ": " << cost << " (" << round((float)m_score * 10000) / 100 << "%)" << std::endl; if (cost < minCost) { //serialise(_path); minCost = cost; nonImprovementCount = 0; } else { nonImprovementCount++; if (nonImprovementCount == 16) { double newLearningRate = m_learningRate * 0.90; //deserialise(_path); m_learningRate = newLearningRate; nonImprovementCount = 0; std::cout << "[CONVEX] Adjusting learning rate to " << m_learningRate << std::endl; } } } } double NeuralNetwork::assess(ConvexGPU::ImageClassDataset* _dataset) { std::cout << "[CONVEX] Assessing network..."; double learningRateTemp = m_learningRate; m_learningRate = 0; double cost = train(_dataset); m_learningRate = learningRateTemp; std::cout << " done" << std::endl; std::cout << "[CONVEX] Cost: " << cost << " (" << round((float)m_score * 10000) / 100 << "%)" << std::endl; return cost; } template 
<typename T> std::vector<T> flattenVector(const std::vector<std::vector<T>>& v) { std::size_t total_size = 0; for (const auto& sub : v) total_size += sub.size(); std::vector<T> result; result.reserve(total_size); for (const auto& sub : v) result.insert(result.end(), sub.begin(), sub.end()); return result; } void NeuralNetwork::freeMemory() { for (int i = 0; i < m_activations.size() - 2; i++) { if (m_hardwareMode == CUDA) m_activations[i]->freeMemoryGPU(); else if (m_hardwareMode == CPU) m_activations[i]->freeMemoryCPU(); } } std::ofstream* NeuralNetwork::serialise(std::ofstream* _stream, bool _swapEndianness) { writeVariable(&m_learningRate, _stream, _swapEndianness); writeVariable(&m_activationFunction, _stream, _swapEndianness); //writeVariable(&m_hardwareMode, _stream, _swapEndianness); writeVector(&m_networkStructure, _stream, _swapEndianness); writeVector(&m_biasMatrixes, _stream, _swapEndianness); writeMatrixVector(&m_weightMatrixes, _stream, _swapEndianness); return _stream; } void NeuralNetwork::serialise(const char * _path, bool _swapEndianness) { std::cout << "[CONVEX] Saving network to " << _path << "..."; std::ofstream file; file.open(_path, std::ios::binary); serialise(&file, _swapEndianness); std::cout << " done" << std::endl; file.close(); } std::ifstream* NeuralNetwork::deserialise(std::ifstream* _stream, bool _swapEndianness) { readVariable(&m_learningRate, _stream, _swapEndianness); readVariable(&m_activationFunction, _stream, _swapEndianness); //readVariable(&m_hardwareMode, _stream, _swapEndianness); m_networkStructure.clear(); readVector(&m_networkStructure, _stream, _swapEndianness); m_biasMatrixes.clear(); readVector(&m_biasMatrixes, _stream, _swapEndianness); for (int i = 0; i < m_weightMatrixes.size(); i++) { if (m_hardwareMode == CUDA) m_weightMatrixes[i]->freeMemoryGPU(); if (m_hardwareMode == CPU) m_weightMatrixes[i]->freeMemoryCPU(); } m_weightMatrixes.clear(); readMatrixVector(&m_weightMatrixes, _stream, _swapEndianness); m_networkErrors.clear(); for (int i = 0; i < m_networkLength; i++) { std::vector <double> errorMatrix; for (int j = 0; j < m_networkStructure.at(i); j++) { errorMatrix.push_back(0); } m_networkErrors.push_back(errorMatrix); } initNetwork(); return _stream; } void NeuralNetwork::deserialise(const char* _path, bool _swapEndianness) { std::cout << "[CONVEX] Loading network from " << _path << "..."; std::ifstream file; file.open(_path, std::ios::binary); if (file.is_open()) { deserialise(&file, _swapEndianness); std::cout << " done" << std::endl; file.close(); } else { std::cout << " failed" << std::endl; std::cerr << "[CONVEX] ERROR: Could not open " << _path << std::endl; } } double NeuralNetwork::normalise(double _input) { switch (m_activationFunction) { case SIGMOID: return 1 / (1 + exp(-_input)); break; case TANH: return 2 / (1 + exp(-2 * _input)); break; case RELU: return (_input < 0) ? 
0 : _input; break; case NONE: return _input; break; default: std::cerr << "[CONVEX ERROR] Invalid activation function" << std::endl; return 0; break; } } double NeuralNetwork::deriveNormalise(double _input) { switch (m_activationFunction) { case SIGMOID: return (1 / (1 + exp(-_input)))*(1 - (1 / (1 + exp(-_input)))); //return _input * (1 - _input); break; case TANH: return 1 - std::pow((2 / (1 + exp(-2 * _input))), 2); break; case RELU: return _input; break; case NONE: return _input; break; default: std::cerr << "[CONVEX ERROR] Invalid activation function" << std::endl; return 0; break; } } double* NeuralNetwork::getWeight(int _n1Layer, int _n1Neuron, int _n2Layer, int _n2Neuron) { return &m_weightMatrixes.at(_n2Layer)->at2D(_n2Neuron, _n1Neuron); } void NeuralNetwork::matrixMultiplyCPU(Matrix <double>* _matrixA, Matrix <double>* _matrixB, Matrix <double>* output) { unsigned int rows = (unsigned int)_matrixA->dimensions[0]; unsigned int cols = (unsigned int)_matrixB->dimensions[1]; #pragma omp parallel for { for (unsigned int i = 0; i < rows; i++) { for (unsigned int j = 0; j < cols; j++) { for (unsigned int k = 0; k < _matrixB->dimensions[0]; k++) { output->at2D(i, j) += _matrixA->at2D(i, k) * _matrixB->at2D(j, k); } } } } } }
e59076adc2bfd46077e8729c41a146f550b2f93c.hip
// !!! This is a file automatically generated by hipify!!! #include <float.h> #include <stdio.h> #include <hip/hip_runtime.h> #include "indices.hpp" #include "params.hpp" // Kernels used for collaborative filtering and aggregation //Sum the passed values in a warp to the first thread of this warp. template<typename T> __device__ inline T warpReduceSum(T val) { for (int offset = warpSize/2; offset > 0; offset /= 2) val += __shfl_down(val,offset); return val; } //Sum the passed values in a block to the first thread of a block. template<typename T> __inline__ __device__ float blockReduceSum(T* shared, T val, int tid, int tcount) { int lane = tid % warpSize; int wid = tid / warpSize; val = warpReduceSum(val); // Each warp performs partial reduction if (lane==0) shared[wid]=val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed val = (tid < tcount / warpSize) ? shared[lane] : 0; if (wid==0) val = warpReduceSum(val); //Final reduce within first warp return val; } //Returns absolute value of the passed real number raised to the power of two __device__ __forceinline__ float abspow2(float & a) { return a * a; } //Integer logarithm base 2. template <typename IntType> __device__ __inline__ uint ilog2(IntType n) { uint l; for (l = 0; n; n >>= 1, ++l); return l; } //Orthogonal transformation. template <typename T> __device__ __inline__ void rotate(T& a, T& b) { T tmp; tmp = a; a = tmp + b; b = tmp - b; } //Fast Walsh-Hadamard transform. template <typename T> __device__ __inline__ void fwht(T *data, uint n) { unsigned l2 = ilog2(n) - 1; for ( uint i = 0; i < l2; ++i ) { for (uint j = 0; j < n; j += (1 << (i + 1))) for (uint k = 0; k < (uint)(1 << i); ++k) rotate(data[j + k], data[j + k + (uint)(1 << i)]); } } //Based on blockIdx it computes the addresses to the arrays in global memory __device__ inline void get_block_addresses( const uint2 & start_point, //IN: first reference patch of a batch const uint & patch_stack_size, //IN: maximal size of a 3D group const uint2 & stacks_dim, //IN: Size of area, where reference patches could be located const Params & params, //IN: Denoising parameters uint2 & outer_address, //OUT: Coordinetes of reference patch in the image uint & start_idx) //OUT: Address of a first element of the 3D group in stacks array { //One block handles one patch_stack, data are in array one after one. start_idx = patch_stack_size * idx2(blockIdx.x,blockIdx.y,gridDim.x); outer_address.x = start_point.x + (blockIdx.x * params.p); outer_address.y = start_point.y + (blockIdx.y * params.p); //Ensure, that the bottom most patches will be taken as reference patches regardless the p parameter. if (outer_address.y >= stacks_dim.y && outer_address.y < stacks_dim.y + params.p - 1) outer_address.y = stacks_dim.y - 1; //Ensure, that the right most patches will be taken as reference patches regardless the p parameter. 
if (outer_address.x >= stacks_dim.x && outer_address.x < stacks_dim.x + params.p - 1) outer_address.x = stacks_dim.x - 1; } /* Gather patches form image based on matching stored in 3D array stacks Used parameters: p,k,N Division: One block handles one patch_stack, threads match to the pixels of a patch */ __global__ void get_block( const uint2 start_point, //IN: first reference patch of a batch const uchar* __restrict image, //IN: image const ushort* __restrict stacks, //IN: array of adresses of similar patches const uint* __restrict g_num_patches_in_stack, //IN: numbers of patches in 3D groups float* __restrict patch_stack, //OUT: assembled 3D groups const uint2 image_dim, //IN: image dimensions const uint2 stacks_dim, //IN: dimensions limiting addresses of reference patches const Params params) //IN: denoising parameters { uint startidx; uint2 outer_address; get_block_addresses(start_point, params.k*params.k*(params.N+1), stacks_dim, params, outer_address, startidx); if (outer_address.x >= stacks_dim.x || outer_address.y >= stacks_dim.y) return; patch_stack += startidx; const ushort* z_ptr = &stacks[ idx3(0, blockIdx.x, blockIdx.y, params.N, gridDim.x) ]; uint num_patches = g_num_patches_in_stack[ idx2(blockIdx.x, blockIdx.y, gridDim.x) ]; patch_stack[ idx3(threadIdx.x, threadIdx.y, 0, params.k, params.k) ] = (float)(image[ idx2(outer_address.x+threadIdx.x, outer_address.y+threadIdx.y, image_dim.x)]); for(uint i = 0; i < num_patches; ++i) { int x = (int)((signed char)(z_ptr[i] & 0xFF)); int y = (int)((signed char)((z_ptr[i] >> 8) & 0xFF)); patch_stack[ idx3(threadIdx.x, threadIdx.y, i+1, params.k, params.k) ] = (float)(image[ idx2(outer_address.x+x+threadIdx.x, outer_address.y+y+threadIdx.y, image_dim.x)]); } } /* 1) Do the Walsh-Hadamard 1D transform on the z axis of 3D stack. 2) Treshold every pixel and count the number of non-zero coefficients 3) Do the inverse Walsh-Hadamard 1D transform on the z axis of 3D stack. Used parameters: L3D,N,k,p Division: Each block delas with one transformed patch stack. (number of threads in block should be k*k) */ __global__ void hard_treshold_block( const uint2 start_point, //IN: first reference patch of a batch float* __restrict patch_stack, //IN/OUT: 3D groups with thransfomed patches float* __restrict w_P, //OUT: weight of each 3D group const uint* __restrict g_num_patches_in_stack, //IN: numbers of patches in 3D groups uint2 stacks_dim, //IN: dimensions limiting addresses of reference patches const Params params, //IN: denoising parameters const uint sigma //IN: noise variance ) { extern __shared__ float data[]; int paramN = params.N+1; uint tcount = blockDim.x*blockDim.y; uint tid = idx2(threadIdx.x, threadIdx.y, blockDim.x); uint patch_stack_size = tcount * paramN; uint startidx; uint2 outer_address; get_block_addresses(start_point, patch_stack_size, stacks_dim, params, outer_address, startidx); if (outer_address.x >= stacks_dim.x || outer_address.y >= stacks_dim.y) return; uint num_patches = g_num_patches_in_stack[ idx2(blockIdx.x, blockIdx.y, gridDim.x) ]+1; //+1 for the reference patch. 
float* s_patch_stack = data + (tid * (num_patches+1)); //+1 for avoiding bank conflicts //TODO:sometimes patch_stack = patch_stack + startidx + tid; //Load to the shared memory for(uint i = 0; i < num_patches; ++i) s_patch_stack[i] = patch_stack[ i*tcount ]; //1D Transform fwht(s_patch_stack, num_patches); //Hard-thresholding + counting of nonzero coefficients uint nonzero = 0; float threshold = params.L3D * sqrtf((float)(num_patches * sigma)); for(int i = 0; i < num_patches; ++i) { if (fabsf(s_patch_stack[ i ]) < threshold) { s_patch_stack[ i ] = 0.0f; } else ++nonzero; } //Inverse 1D Transform fwht(s_patch_stack, num_patches); //Normalize and save to global memory for (uint i = 0; i < num_patches; ++i) { patch_stack[ i*tcount ] = s_patch_stack[i] / num_patches; } //Reuse the shared memory for 32 partial sums __syncthreads(); uint* shared = (uint*)data; //Sum the number of non-zero coefficients for a 3D group nonzero = blockReduceSum<uint>(shared, nonzero, tid, tcount); //Save the weight of a 3D group (1/nonzero coefficients) if (tid == 0) { if (nonzero < 1) nonzero = 1; w_P[ idx2(blockIdx.x, blockIdx.y, gridDim.x ) ] = 1.0f/(float)nonzero; } } /* Fills two buffers: numerator and denominator in order to compute weighted average of pixels Used parameters: k,N,p Division: Each block delas with one transformed patch stack. */ __global__ void aggregate_block( const uint2 start_point, //IN: first reference patch of a batch const float* __restrict patch_stack, //IN: 3D groups with thransfomed patches const float* __restrict w_P, //IN: weight for each 3D group const ushort* __restrict stacks, //IN: array of adresses of similar patches const float* __restrict kaiser_window, //IN: kaiser window float* __restrict numerator, //IN/OUT: numerator aggregation buffer (have to be initialized to 0) float* __restrict denominator, //IN/OUT: denominator aggregation buffer (have to be initialized to 0) const uint* __restrict g_num_patches_in_stack, //IN: numbers of patches in 3D groups const uint2 image_dim, //IN: image dimensions const uint2 stacks_dim, //IN: dimensions limiting addresses of reference patches const Params params //IN: denoising parameters ) { uint startidx; uint2 outer_address; get_block_addresses(start_point, params.k*params.k*(params.N+1), stacks_dim, params, outer_address, startidx); if (outer_address.x >= stacks_dim.x || outer_address.y >= stacks_dim.y) return; patch_stack += startidx; uint num_patches = g_num_patches_in_stack[ idx2(blockIdx.x, blockIdx.y, gridDim.x) ]+1; float wp = w_P[ idx2(blockIdx.x, blockIdx.y, gridDim.x ) ]; const ushort* z_ptr = &stacks[ idx3(0, blockIdx.x, blockIdx.y, params.N, gridDim.x) ]; float kaiser_value = kaiser_window[ idx2(threadIdx.x, threadIdx.y, params.k) ]; for(uint z = 0; z < num_patches; ++z) { int x = 0; int y = 0; if (z > 0) { x = (int)((signed char)(z_ptr[z-1] & 0xFF)); y = (int)((signed char)((z_ptr[z-1] >> 8) & 0xFF)); } float value = ( patch_stack[ idx3(threadIdx.x, threadIdx.y, z, params.k, params.k) ]); int idx = idx2(outer_address.x + x + threadIdx.x, outer_address.y + y + threadIdx.y, image_dim.x); atomicAdd(numerator + idx, value * kaiser_value * wp); atomicAdd(denominator + idx, kaiser_value * wp); } } /* Divide numerator with denominator and round result to image_o */ __global__ void aggregate_final( const float* __restrict numerator, //IN: numerator aggregation buffer const float* __restrict denominator, //IN: denominator aggregation buffer const uint2 image_dim, //IN: image dimensions uchar*__restrict result) //OUT: image estimate { 
uint idx = blockIdx.x * blockDim.x + threadIdx.x; uint idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= image_dim.x || idy >= image_dim.y) return; int value = lrintf(numerator[ idx2(idx,idy,image_dim.x) ] / denominator[ idx2(idx,idy,image_dim.x) ] ); if (value < 0) value = 0; if (value > 255) value = 255; result[ idx2(idx,idy,image_dim.x) ] = (uchar)value; } extern "C" void run_get_block( const uint2 start_point, const uchar* __restrict image, const ushort* __restrict stacks, const uint* __restrict num_patches_in_stack, float* __restrict patch_stack, const uint2 image_dim, const uint2 stacks_dim, const Params params, const dim3 num_threads, const dim3 num_blocks) { hipLaunchKernelGGL(( get_block), dim3(num_blocks),dim3(num_threads), 0, 0, start_point, image, stacks, num_patches_in_stack, patch_stack, image_dim, stacks_dim, params ); } extern "C" void run_hard_treshold_block( const uint2 start_point, float* __restrict patch_stack, float* __restrict w_P, const uint* __restrict num_patches_in_stack, const uint2 stacks_dim, const Params params, const uint sigma, const dim3 num_threads, const dim3 num_blocks, const uint shared_memory_size) { hipLaunchKernelGGL(( hard_treshold_block), dim3(num_blocks), dim3(num_threads), shared_memory_size, 0, start_point, patch_stack, w_P, num_patches_in_stack, stacks_dim, params, sigma ); } extern "C" void run_aggregate_block( const uint2 start_point, const float* __restrict patch_stack, const float* __restrict w_P, const ushort* __restrict stacks, const float* __restrict kaiser_window, float* __restrict numerator, float* __restrict denominator, const uint* __restrict num_patches_in_stack, const uint2 image_dim, const uint2 stacks_dim, const Params params, const dim3 num_threads, const dim3 num_blocks) { hipLaunchKernelGGL(( aggregate_block), dim3(num_blocks),dim3(num_threads), 0, 0, start_point, patch_stack, w_P, stacks, kaiser_window, numerator, denominator, num_patches_in_stack, image_dim, stacks_dim, params ); } extern "C" void run_aggregate_final( const float* __restrict numerator, const float* __restrict denominator, const uint2 image_dim, uchar*__restrict denoised_image, const dim3 num_threads, const dim3 num_blocks ) { hipLaunchKernelGGL(( aggregate_final), dim3(num_blocks),dim3(num_threads), 0, 0, numerator, denominator, image_dim, denoised_image ); }
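hard_treshold_block applies an unnormalised 1-D fast Walsh–Hadamard transform along the z axis of each 3-D group, hard-thresholds the coefficients, applies the same butterflies again as the inverse, and rescales by num_patches on write-back. A host-side reference of the same in-place transform, assuming a power-of-two length (illustrative helper, not part of the file):

#include <cstddef>
#include <vector>

// Unnormalised in-place Walsh–Hadamard transform; applying it twice yields
// n times the original signal, which is why the kernel divides by the group
// size after the inverse pass.
void fwht_ref(std::vector<float>& a) {
    const std::size_t n = a.size();            // assumed to be a power of two
    for (std::size_t h = 1; h < n; h *= 2)
        for (std::size_t i = 0; i < n; i += 2 * h)
            for (std::size_t j = i; j < i + h; ++j) {
                float x = a[j], y = a[j + h];
                a[j]     = x + y;              // butterfly sum
                a[j + h] = x - y;              // butterfly difference
            }
}

The device fwht() above reaches the same result with its rotate() butterflies applied over strides of 1 << i.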
e59076adc2bfd46077e8729c41a146f550b2f93c.cu
#include <float.h> #include <stdio.h> #include <cuda.h> #include "indices.hpp" #include "params.hpp" // Kernels used for collaborative filtering and aggregation //Sum the passed values in a warp to the first thread of this warp. template<typename T> __device__ inline T warpReduceSum(T val) { for (int offset = warpSize/2; offset > 0; offset /= 2) val += __shfl_down(val,offset); return val; } //Sum the passed values in a block to the first thread of a block. template<typename T> __inline__ __device__ float blockReduceSum(T* shared, T val, int tid, int tcount) { int lane = tid % warpSize; int wid = tid / warpSize; val = warpReduceSum(val); // Each warp performs partial reduction if (lane==0) shared[wid]=val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed val = (tid < tcount / warpSize) ? shared[lane] : 0; if (wid==0) val = warpReduceSum(val); //Final reduce within first warp return val; } //Returns absolute value of the passed real number raised to the power of two __device__ __forceinline__ float abspow2(float & a) { return a * a; } //Integer logarithm base 2. template <typename IntType> __device__ __inline__ uint ilog2(IntType n) { uint l; for (l = 0; n; n >>= 1, ++l); return l; } //Orthogonal transformation. template <typename T> __device__ __inline__ void rotate(T& a, T& b) { T tmp; tmp = a; a = tmp + b; b = tmp - b; } //Fast Walsh-Hadamard transform. template <typename T> __device__ __inline__ void fwht(T *data, uint n) { unsigned l2 = ilog2(n) - 1; for ( uint i = 0; i < l2; ++i ) { for (uint j = 0; j < n; j += (1 << (i + 1))) for (uint k = 0; k < (uint)(1 << i); ++k) rotate(data[j + k], data[j + k + (uint)(1 << i)]); } } //Based on blockIdx it computes the addresses to the arrays in global memory __device__ inline void get_block_addresses( const uint2 & start_point, //IN: first reference patch of a batch const uint & patch_stack_size, //IN: maximal size of a 3D group const uint2 & stacks_dim, //IN: Size of area, where reference patches could be located const Params & params, //IN: Denoising parameters uint2 & outer_address, //OUT: Coordinetes of reference patch in the image uint & start_idx) //OUT: Address of a first element of the 3D group in stacks array { //One block handles one patch_stack, data are in array one after one. start_idx = patch_stack_size * idx2(blockIdx.x,blockIdx.y,gridDim.x); outer_address.x = start_point.x + (blockIdx.x * params.p); outer_address.y = start_point.y + (blockIdx.y * params.p); //Ensure, that the bottom most patches will be taken as reference patches regardless the p parameter. if (outer_address.y >= stacks_dim.y && outer_address.y < stacks_dim.y + params.p - 1) outer_address.y = stacks_dim.y - 1; //Ensure, that the right most patches will be taken as reference patches regardless the p parameter. 
if (outer_address.x >= stacks_dim.x && outer_address.x < stacks_dim.x + params.p - 1) outer_address.x = stacks_dim.x - 1; } /* Gather patches form image based on matching stored in 3D array stacks Used parameters: p,k,N Division: One block handles one patch_stack, threads match to the pixels of a patch */ __global__ void get_block( const uint2 start_point, //IN: first reference patch of a batch const uchar* __restrict image, //IN: image const ushort* __restrict stacks, //IN: array of adresses of similar patches const uint* __restrict g_num_patches_in_stack, //IN: numbers of patches in 3D groups float* __restrict patch_stack, //OUT: assembled 3D groups const uint2 image_dim, //IN: image dimensions const uint2 stacks_dim, //IN: dimensions limiting addresses of reference patches const Params params) //IN: denoising parameters { uint startidx; uint2 outer_address; get_block_addresses(start_point, params.k*params.k*(params.N+1), stacks_dim, params, outer_address, startidx); if (outer_address.x >= stacks_dim.x || outer_address.y >= stacks_dim.y) return; patch_stack += startidx; const ushort* z_ptr = &stacks[ idx3(0, blockIdx.x, blockIdx.y, params.N, gridDim.x) ]; uint num_patches = g_num_patches_in_stack[ idx2(blockIdx.x, blockIdx.y, gridDim.x) ]; patch_stack[ idx3(threadIdx.x, threadIdx.y, 0, params.k, params.k) ] = (float)(image[ idx2(outer_address.x+threadIdx.x, outer_address.y+threadIdx.y, image_dim.x)]); for(uint i = 0; i < num_patches; ++i) { int x = (int)((signed char)(z_ptr[i] & 0xFF)); int y = (int)((signed char)((z_ptr[i] >> 8) & 0xFF)); patch_stack[ idx3(threadIdx.x, threadIdx.y, i+1, params.k, params.k) ] = (float)(image[ idx2(outer_address.x+x+threadIdx.x, outer_address.y+y+threadIdx.y, image_dim.x)]); } } /* 1) Do the Walsh-Hadamard 1D transform on the z axis of 3D stack. 2) Treshold every pixel and count the number of non-zero coefficients 3) Do the inverse Walsh-Hadamard 1D transform on the z axis of 3D stack. Used parameters: L3D,N,k,p Division: Each block delas with one transformed patch stack. (number of threads in block should be k*k) */ __global__ void hard_treshold_block( const uint2 start_point, //IN: first reference patch of a batch float* __restrict patch_stack, //IN/OUT: 3D groups with thransfomed patches float* __restrict w_P, //OUT: weight of each 3D group const uint* __restrict g_num_patches_in_stack, //IN: numbers of patches in 3D groups uint2 stacks_dim, //IN: dimensions limiting addresses of reference patches const Params params, //IN: denoising parameters const uint sigma //IN: noise variance ) { extern __shared__ float data[]; int paramN = params.N+1; uint tcount = blockDim.x*blockDim.y; uint tid = idx2(threadIdx.x, threadIdx.y, blockDim.x); uint patch_stack_size = tcount * paramN; uint startidx; uint2 outer_address; get_block_addresses(start_point, patch_stack_size, stacks_dim, params, outer_address, startidx); if (outer_address.x >= stacks_dim.x || outer_address.y >= stacks_dim.y) return; uint num_patches = g_num_patches_in_stack[ idx2(blockIdx.x, blockIdx.y, gridDim.x) ]+1; //+1 for the reference patch. 
float* s_patch_stack = data + (tid * (num_patches+1)); //+1 for avoiding bank conflicts //TODO:sometimes patch_stack = patch_stack + startidx + tid; //Load to the shared memory for(uint i = 0; i < num_patches; ++i) s_patch_stack[i] = patch_stack[ i*tcount ]; //1D Transform fwht(s_patch_stack, num_patches); //Hard-thresholding + counting of nonzero coefficients uint nonzero = 0; float threshold = params.L3D * sqrtf((float)(num_patches * sigma)); for(int i = 0; i < num_patches; ++i) { if (fabsf(s_patch_stack[ i ]) < threshold) { s_patch_stack[ i ] = 0.0f; } else ++nonzero; } //Inverse 1D Transform fwht(s_patch_stack, num_patches); //Normalize and save to global memory for (uint i = 0; i < num_patches; ++i) { patch_stack[ i*tcount ] = s_patch_stack[i] / num_patches; } //Reuse the shared memory for 32 partial sums __syncthreads(); uint* shared = (uint*)data; //Sum the number of non-zero coefficients for a 3D group nonzero = blockReduceSum<uint>(shared, nonzero, tid, tcount); //Save the weight of a 3D group (1/nonzero coefficients) if (tid == 0) { if (nonzero < 1) nonzero = 1; w_P[ idx2(blockIdx.x, blockIdx.y, gridDim.x ) ] = 1.0f/(float)nonzero; } } /* Fills two buffers: numerator and denominator in order to compute weighted average of pixels Used parameters: k,N,p Division: Each block delas with one transformed patch stack. */ __global__ void aggregate_block( const uint2 start_point, //IN: first reference patch of a batch const float* __restrict patch_stack, //IN: 3D groups with thransfomed patches const float* __restrict w_P, //IN: weight for each 3D group const ushort* __restrict stacks, //IN: array of adresses of similar patches const float* __restrict kaiser_window, //IN: kaiser window float* __restrict numerator, //IN/OUT: numerator aggregation buffer (have to be initialized to 0) float* __restrict denominator, //IN/OUT: denominator aggregation buffer (have to be initialized to 0) const uint* __restrict g_num_patches_in_stack, //IN: numbers of patches in 3D groups const uint2 image_dim, //IN: image dimensions const uint2 stacks_dim, //IN: dimensions limiting addresses of reference patches const Params params //IN: denoising parameters ) { uint startidx; uint2 outer_address; get_block_addresses(start_point, params.k*params.k*(params.N+1), stacks_dim, params, outer_address, startidx); if (outer_address.x >= stacks_dim.x || outer_address.y >= stacks_dim.y) return; patch_stack += startidx; uint num_patches = g_num_patches_in_stack[ idx2(blockIdx.x, blockIdx.y, gridDim.x) ]+1; float wp = w_P[ idx2(blockIdx.x, blockIdx.y, gridDim.x ) ]; const ushort* z_ptr = &stacks[ idx3(0, blockIdx.x, blockIdx.y, params.N, gridDim.x) ]; float kaiser_value = kaiser_window[ idx2(threadIdx.x, threadIdx.y, params.k) ]; for(uint z = 0; z < num_patches; ++z) { int x = 0; int y = 0; if (z > 0) { x = (int)((signed char)(z_ptr[z-1] & 0xFF)); y = (int)((signed char)((z_ptr[z-1] >> 8) & 0xFF)); } float value = ( patch_stack[ idx3(threadIdx.x, threadIdx.y, z, params.k, params.k) ]); int idx = idx2(outer_address.x + x + threadIdx.x, outer_address.y + y + threadIdx.y, image_dim.x); atomicAdd(numerator + idx, value * kaiser_value * wp); atomicAdd(denominator + idx, kaiser_value * wp); } } /* Divide numerator with denominator and round result to image_o */ __global__ void aggregate_final( const float* __restrict numerator, //IN: numerator aggregation buffer const float* __restrict denominator, //IN: denominator aggregation buffer const uint2 image_dim, //IN: image dimensions uchar*__restrict result) //OUT: image estimate { 
uint idx = blockIdx.x * blockDim.x + threadIdx.x; uint idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx >= image_dim.x || idy >= image_dim.y) return; int value = lrintf(numerator[ idx2(idx,idy,image_dim.x) ] / denominator[ idx2(idx,idy,image_dim.x) ] ); if (value < 0) value = 0; if (value > 255) value = 255; result[ idx2(idx,idy,image_dim.x) ] = (uchar)value; } extern "C" void run_get_block( const uint2 start_point, const uchar* __restrict image, const ushort* __restrict stacks, const uint* __restrict num_patches_in_stack, float* __restrict patch_stack, const uint2 image_dim, const uint2 stacks_dim, const Params params, const dim3 num_threads, const dim3 num_blocks) { get_block<<<num_blocks,num_threads>>>( start_point, image, stacks, num_patches_in_stack, patch_stack, image_dim, stacks_dim, params ); } extern "C" void run_hard_treshold_block( const uint2 start_point, float* __restrict patch_stack, float* __restrict w_P, const uint* __restrict num_patches_in_stack, const uint2 stacks_dim, const Params params, const uint sigma, const dim3 num_threads, const dim3 num_blocks, const uint shared_memory_size) { hard_treshold_block<<<num_blocks, num_threads, shared_memory_size>>>( start_point, patch_stack, w_P, num_patches_in_stack, stacks_dim, params, sigma ); } extern "C" void run_aggregate_block( const uint2 start_point, const float* __restrict patch_stack, const float* __restrict w_P, const ushort* __restrict stacks, const float* __restrict kaiser_window, float* __restrict numerator, float* __restrict denominator, const uint* __restrict num_patches_in_stack, const uint2 image_dim, const uint2 stacks_dim, const Params params, const dim3 num_threads, const dim3 num_blocks) { aggregate_block<<<num_blocks,num_threads>>>( start_point, patch_stack, w_P, stacks, kaiser_window, numerator, denominator, num_patches_in_stack, image_dim, stacks_dim, params ); } extern "C" void run_aggregate_final( const float* __restrict numerator, const float* __restrict denominator, const uint2 image_dim, uchar*__restrict denoised_image, const dim3 num_threads, const dim3 num_blocks ) { aggregate_final<<<num_blocks,num_threads>>>( numerator, denominator, image_dim, denoised_image ); }
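warpReduceSum above relies on __shfl_down, which has been deprecated since CUDA 9 in favour of the *_sync variants that take an explicit lane mask (the HIP copy keeps __shfl_down, which HIP still accepts). A sketch of the equivalent reduction with the sync intrinsic, assuming the full warp is active at the call site, as it is here because the early return in hard_treshold_block is taken by whole blocks at a time:

// Same shuffle-down reduction as warpReduceSum, but using the mask-taking
// intrinsic required on newer CUDA toolkits.
template <typename T>
__device__ inline T warpReduceSumSync(T val) {
    for (int offset = warpSize / 2; offset > 0; offset /= 2)
        val += __shfl_down_sync(0xffffffffu, val, offset);
    return val;
}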
63138b4c89d7b82321c2a942463fa3c6e7f0de68.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <iostream> #include <vector> int const threadsPerBlock = sizeof(unsigned long long) * 8; namespace { template <typename T> __device__ inline float devIoU( T const* const a, T const* const b) { if (a[4] != b[4]) return 0.0; T left = max(a[0], b[0]), right = min(a[2], b[2]); T top = max(a[1], b[1]), bottom = min(a[3], b[3]); T width = max(right - left, (T)0.0), height = max(bottom - top, (T)0.0); T interS = width * height; T Sa = (a[2] - a[0]) * (a[3] - a[1]); T Sb = (b[2] - b[0]) * (b[3] - b[1]); return interS / (Sa + Sb - interS); } template <typename T> __global__ void ml_voting_kernel( const int n_boxes, const int k_boxes, const T* dev_boxes, const int64_t* dev_labels, const T* dev_query_boxes, const T* dev_query_scores, const int64_t* dev_query_labels, T* dev_dets, const int scoring_method, const float beta, const float threshold) { const int col_start = blockIdx.x; const int row_start = blockIdx.y; const int col_size = min(k_boxes - col_start * threadsPerBlock, threadsPerBlock); const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); __shared__ T block_boxes[threadsPerBlock * 5]; if (threadIdx.x < row_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 4 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 4 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 4 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 4 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_labels[threadsPerBlock * row_start + threadIdx.x]; } __shared__ T block_query_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_query_boxes[threadIdx.x * 5 + 0] = dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 0]; block_query_boxes[threadIdx.x * 5 + 1] = dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 1]; block_query_boxes[threadIdx.x * 5 + 2] = dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 2]; block_query_boxes[threadIdx.x * 5 + 3] = dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 3]; block_query_boxes[threadIdx.x * 5 + 4] = dev_query_labels[threadsPerBlock * col_start + threadIdx.x]; } __syncthreads(); if (threadIdx.x < row_size) { const T* weights = dev_query_scores + threadsPerBlock * col_start; const T* cur_box = block_boxes + threadIdx.x * 5; int start = (row_start * threadsPerBlock + threadIdx.x) * k_boxes + col_start * threadsPerBlock; int offset; T iou, weight; T* query_box; T* dev_x1 = dev_dets + 0; T* dev_y1 = dev_dets + 1; T* dev_x2 = dev_dets + 2; T* dev_y2 = dev_dets + 3; T* dev_score_weight = dev_dets + 4; T* dev_score = dev_dets + 5; T* dev_box_weight = dev_dets + 6; for(int i = 0; i < col_size; i++) { offset = (start + i) * 7; query_box = block_query_boxes + i * 5; iou = devIoU<T>(cur_box, query_box); if (iou >= threshold) { weight = weights[i]; dev_x1[offset] = query_box[0] * weight; dev_y1[offset] = query_box[1] * weight; dev_x2[offset] = query_box[2] * weight; dev_y2[offset] = query_box[3] * weight; dev_score_weight[offset] = (T)1.0; switch (scoring_method) { case 0 : // 'ID' case 2 : // 'AVG' case 5 : // 'QUASI_SUM' dev_score[offset] = weight; break; case 1 : // 'TEMP_AVG' if 
(weight != (T)0.0) { dev_score[offset] = (T)1.0 /((T)1.0 + powf((T)1.0 / weight - (T)1.0, (T)1.0 / (T)beta)); } else { dev_score[offset] = weight; } break; case 3 : // 'IOU_AVG' dev_score_weight[offset] = iou; dev_score[offset] = iou * weight; break; case 4 : // 'GENERALIZED_AVG' dev_score[offset] = powf(weight, (T)beta); break; } dev_box_weight[offset] = weight; } else { dev_x1[offset] = (T)0.0; dev_y1[offset] = (T)0.0; dev_x2[offset] = (T)0.0; dev_y2[offset] = (T)0.0; dev_score_weight[offset] = (T)0.0; dev_score[offset] = (T)0.0; dev_box_weight[offset] = (T)0.0; } } } } } // namespace namespace pet { std::tuple<at::Tensor, at::Tensor, at::Tensor> ml_voting_cuda( const at::Tensor& boxes, const at::Tensor& scores, const at::Tensor& labels, const at::Tensor& query_boxes, const at::Tensor& query_scores, const at::Tensor& query_labels, const int scoring_method, const float beta, const float threshold) { AT_ASSERTM(boxes.is_cuda(), "boxes must be a CUDA tensor"); AT_ASSERTM(scores.is_cuda(), "scores must be a CUDA tensor"); AT_ASSERTM(labels.is_cuda(), "labels must be a CUDA tensor"); AT_ASSERTM(query_boxes.is_cuda(), "query_boxes must be a CUDA tensor"); AT_ASSERTM(query_scores.is_cuda(), "query_scores must be a CUDA tensor"); AT_ASSERTM(query_labels.is_cuda(), "query_labels must be a CUDA tensor"); at::hip::HIPGuardMasqueradingAsCUDA device_guard(boxes.device()); int boxes_num = boxes.size(0); int query_boxes_num = query_boxes.size(0); const int col_blocks = at::cuda::ATenCeilDiv(query_boxes_num, threadsPerBlock); const int row_blocks = at::cuda::ATenCeilDiv(boxes_num, threadsPerBlock); at::Tensor dev_dets = at::empty({boxes_num, query_boxes_num, 7}, boxes.options()); dim3 blocks(col_blocks, row_blocks); dim3 threads(threadsPerBlock); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( boxes.scalar_type(), "ml_voting_kernel_cuda", [&] { hipLaunchKernelGGL(( ml_voting_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream, boxes_num, query_boxes_num, boxes.data_ptr<scalar_t>(), labels.data_ptr<int64_t>(), query_boxes.data_ptr<scalar_t>(), query_scores.data_ptr<scalar_t>(), query_labels.data_ptr<int64_t>(), dev_dets.data_ptr<scalar_t>(), scoring_method, beta, threshold); }); auto dev_sum = dev_dets.sum(1); auto num = dev_sum.select(1, 4); auto boxes_ws = dev_sum.select(1, 6); auto dev_boxes = dev_sum.narrow(1, 0, 4).div(boxes_ws.unsqueeze(-1)).contiguous(); auto score_sum = dev_sum.select(1, 5); auto dev_scores = scores; switch (scoring_method) { case 0 : // 'ID' break; case 1 : // 'TEMP_AVG' case 2 : // 'AVG' case 3 : // 'IOU_AVG' dev_scores = score_sum.div(num); break; case 4 : // 'GENERALIZED_AVG' dev_scores = score_sum.div(num).pow(1 / beta); break; case 5 : // 'QUASI_SUM' dev_scores = score_sum.div(num.pow(beta)); break; } AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(dev_boxes, dev_scores, labels); } } // namespace pet
63138b4c89d7b82321c2a942463fa3c6e7f0de68.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <c10/cuda/CUDAGuard.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <iostream> #include <vector> int const threadsPerBlock = sizeof(unsigned long long) * 8; namespace { template <typename T> __device__ inline float devIoU( T const* const a, T const* const b) { if (a[4] != b[4]) return 0.0; T left = max(a[0], b[0]), right = min(a[2], b[2]); T top = max(a[1], b[1]), bottom = min(a[3], b[3]); T width = max(right - left, (T)0.0), height = max(bottom - top, (T)0.0); T interS = width * height; T Sa = (a[2] - a[0]) * (a[3] - a[1]); T Sb = (b[2] - b[0]) * (b[3] - b[1]); return interS / (Sa + Sb - interS); } template <typename T> __global__ void ml_voting_kernel( const int n_boxes, const int k_boxes, const T* dev_boxes, const int64_t* dev_labels, const T* dev_query_boxes, const T* dev_query_scores, const int64_t* dev_query_labels, T* dev_dets, const int scoring_method, const float beta, const float threshold) { const int col_start = blockIdx.x; const int row_start = blockIdx.y; const int col_size = min(k_boxes - col_start * threadsPerBlock, threadsPerBlock); const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); __shared__ T block_boxes[threadsPerBlock * 5]; if (threadIdx.x < row_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 4 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 4 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 4 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 4 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_labels[threadsPerBlock * row_start + threadIdx.x]; } __shared__ T block_query_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_query_boxes[threadIdx.x * 5 + 0] = dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 0]; block_query_boxes[threadIdx.x * 5 + 1] = dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 1]; block_query_boxes[threadIdx.x * 5 + 2] = dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 2]; block_query_boxes[threadIdx.x * 5 + 3] = dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 3]; block_query_boxes[threadIdx.x * 5 + 4] = dev_query_labels[threadsPerBlock * col_start + threadIdx.x]; } __syncthreads(); if (threadIdx.x < row_size) { const T* weights = dev_query_scores + threadsPerBlock * col_start; const T* cur_box = block_boxes + threadIdx.x * 5; int start = (row_start * threadsPerBlock + threadIdx.x) * k_boxes + col_start * threadsPerBlock; int offset; T iou, weight; T* query_box; T* dev_x1 = dev_dets + 0; T* dev_y1 = dev_dets + 1; T* dev_x2 = dev_dets + 2; T* dev_y2 = dev_dets + 3; T* dev_score_weight = dev_dets + 4; T* dev_score = dev_dets + 5; T* dev_box_weight = dev_dets + 6; for(int i = 0; i < col_size; i++) { offset = (start + i) * 7; query_box = block_query_boxes + i * 5; iou = devIoU<T>(cur_box, query_box); if (iou >= threshold) { weight = weights[i]; dev_x1[offset] = query_box[0] * weight; dev_y1[offset] = query_box[1] * weight; dev_x2[offset] = query_box[2] * weight; dev_y2[offset] = query_box[3] * weight; dev_score_weight[offset] = (T)1.0; switch (scoring_method) { case 0 : // 'ID' case 2 : // 'AVG' case 5 : // 'QUASI_SUM' dev_score[offset] = weight; break; case 1 : // 'TEMP_AVG' if (weight != (T)0.0) { dev_score[offset] = (T)1.0 /((T)1.0 + powf((T)1.0 / weight - (T)1.0, (T)1.0 / (T)beta)); 
} else { dev_score[offset] = weight; } break; case 3 : // 'IOU_AVG' dev_score_weight[offset] = iou; dev_score[offset] = iou * weight; break; case 4 : // 'GENERALIZED_AVG' dev_score[offset] = powf(weight, (T)beta); break; } dev_box_weight[offset] = weight; } else { dev_x1[offset] = (T)0.0; dev_y1[offset] = (T)0.0; dev_x2[offset] = (T)0.0; dev_y2[offset] = (T)0.0; dev_score_weight[offset] = (T)0.0; dev_score[offset] = (T)0.0; dev_box_weight[offset] = (T)0.0; } } } } } // namespace namespace pet { std::tuple<at::Tensor, at::Tensor, at::Tensor> ml_voting_cuda( const at::Tensor& boxes, const at::Tensor& scores, const at::Tensor& labels, const at::Tensor& query_boxes, const at::Tensor& query_scores, const at::Tensor& query_labels, const int scoring_method, const float beta, const float threshold) { AT_ASSERTM(boxes.is_cuda(), "boxes must be a CUDA tensor"); AT_ASSERTM(scores.is_cuda(), "scores must be a CUDA tensor"); AT_ASSERTM(labels.is_cuda(), "labels must be a CUDA tensor"); AT_ASSERTM(query_boxes.is_cuda(), "query_boxes must be a CUDA tensor"); AT_ASSERTM(query_scores.is_cuda(), "query_scores must be a CUDA tensor"); AT_ASSERTM(query_labels.is_cuda(), "query_labels must be a CUDA tensor"); at::cuda::CUDAGuard device_guard(boxes.device()); int boxes_num = boxes.size(0); int query_boxes_num = query_boxes.size(0); const int col_blocks = at::cuda::ATenCeilDiv(query_boxes_num, threadsPerBlock); const int row_blocks = at::cuda::ATenCeilDiv(boxes_num, threadsPerBlock); at::Tensor dev_dets = at::empty({boxes_num, query_boxes_num, 7}, boxes.options()); dim3 blocks(col_blocks, row_blocks); dim3 threads(threadsPerBlock); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( boxes.scalar_type(), "ml_voting_kernel_cuda", [&] { ml_voting_kernel<scalar_t><<<blocks, threads, 0, stream>>>( boxes_num, query_boxes_num, boxes.data_ptr<scalar_t>(), labels.data_ptr<int64_t>(), query_boxes.data_ptr<scalar_t>(), query_scores.data_ptr<scalar_t>(), query_labels.data_ptr<int64_t>(), dev_dets.data_ptr<scalar_t>(), scoring_method, beta, threshold); }); auto dev_sum = dev_dets.sum(1); auto num = dev_sum.select(1, 4); auto boxes_ws = dev_sum.select(1, 6); auto dev_boxes = dev_sum.narrow(1, 0, 4).div(boxes_ws.unsqueeze(-1)).contiguous(); auto score_sum = dev_sum.select(1, 5); auto dev_scores = scores; switch (scoring_method) { case 0 : // 'ID' break; case 1 : // 'TEMP_AVG' case 2 : // 'AVG' case 3 : // 'IOU_AVG' dev_scores = score_sum.div(num); break; case 4 : // 'GENERALIZED_AVG' dev_scores = score_sum.div(num).pow(1 / beta); break; case 5 : // 'QUASI_SUM' dev_scores = score_sum.div(num.pow(beta)); break; } AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(dev_boxes, dev_scores, labels); } } // namespace pet
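A minimal host-side sketch of how the pet::ml_voting_cuda entry point above could be driven from ATen. Only the function signature and the meaning of scoring_method/beta/threshold come from the file itself; the tensor shapes, the random fill, and the helper name run_voting_example are illustrative assumptions.

#include <ATen/ATen.h>
#include <tuple>

// Assumes the declaration of pet::ml_voting_cuda from the file above is visible.
std::tuple<at::Tensor, at::Tensor, at::Tensor> run_voting_example() {
  auto opts_f = at::TensorOptions().dtype(at::kFloat).device(at::kCUDA);
  auto opts_i = at::TensorOptions().dtype(at::kLong).device(at::kCUDA);

  // Kept detections (rows of x1, y1, x2, y2) and the pre-NMS candidates they vote over.
  // Values are random placeholders for shape illustration, not geometrically valid boxes.
  at::Tensor boxes        = at::rand({128, 4}, opts_f);
  at::Tensor scores       = at::rand({128}, opts_f);
  at::Tensor labels       = at::randint(0, 80, {128}, opts_i);
  at::Tensor query_boxes  = at::rand({512, 4}, opts_f);
  at::Tensor query_scores = at::rand({512}, opts_f);
  at::Tensor query_labels = at::randint(0, 80, {512}, opts_i);

  // scoring_method 2 selects the 'AVG' branch in the kernel; beta is ignored on that path.
  return pet::ml_voting_cuda(boxes, scores, labels,
                             query_boxes, query_scores, query_labels,
                             /*scoring_method=*/2, /*beta=*/1.0f, /*threshold=*/0.5f);
}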
0f646fb89347be56651771967faba24f2d15c512.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (C) 2016-2018, Nils Moehrle * All rights reserved. * * This software may be modified and distributed under the terms * of the BSD 3-Clause license. See the LICENSE.txt file for details. */ #include <chrono> #include <iostream> #include "util/system.h" #include "util/arguments.h" #include "util/tokenizer.h" #include "mve/scene.h" #include "mve/depthmap.h" #include "mve/image_io.h" #include "mve/image_tools.h" #include "mve/mesh_io_ply.h" #include "ogl/camera.h" #include "ogl/mesh_renderer.h" #include "ogl/check_gl_error.h" #include "sim/window.h" #define BVHTREE_NUM_BINS 0 #include "acc/bvh_tree.h" #include "cacc/math.h" #include "cacc/util.h" #include "cacc/image.h" #include "cacc/matrix.h" #include "cacc/tracing.h" #include "cacc/bvh_tree.h" #include "cacc/array_texture.h" #include "cacc/graphics_resource.h" #include <cuda_gl_interop.h> typedef unsigned int uint; typedef acc::BVHTree<uint, math::Vec3f> BVHTree; const char *fragment_shader = R"( #version 330 core layout(location=0) out float depth; uniform float znear; uniform float zfar; void main(void) { gl_FragDepth = gl_FragCoord.z; depth = (zfar * znear) / ((znear - zfar) * gl_FragCoord.z + zfar); } )"; const char *vertex_shader = R"( #version 330 core in vec4 pos; uniform mat4 viewmat; uniform mat4 projmat; void main(void) { gl_Position = projmat * (viewmat * pos); } )"; struct Arguments { std::string scene_dir; std::string mesh; std::string image_name = "original"; int width = 1920; int height = 1080; }; Arguments parse_args(int argc, char **argv) { util::Arguments args; args.set_exit_on_error(true); args.set_nonopt_maxnum(2); args.set_nonopt_minnum(2); args.set_helptext_indent(28); args.set_usage("Usage: " + std::string(argv[0]) + " [OPTS] SCENE MESH"); args.set_description("Test app for cuda and opengl interoperability"); args.add_option('r', "resolution", true, "resolution [1920x1080]"); args.parse(argc, argv); Arguments conf; conf.scene_dir = args.get_nth_nonopt(0); conf.mesh = args.get_nth_nonopt(1); for (util::ArgResult const* i = args.next_option(); i != 0; i = args.next_option()) { switch (i->opt->sopt) { case 'r': { util::Tokenizer tok; tok.split(i->arg, 'x'); if (tok.size() != 2) throw std::invalid_argument("Invalid resolution"); conf.width = tok.get_as<int>(0); conf.height = tok.get_as<int>(1); } break; default: throw std::invalid_argument("Invalid option"); } } return conf; } /* TODO extract and merge with capture_trajectory. */ void fill_ogl_camera(mve::CameraInfo const & camera_info, int width, int height, float znear, float zfar, ogl::Camera * ogl_camera) { /* Get all parameters and check them. */ float const dimension_aspect = static_cast<float>(width) / height; float const pixel_aspect = camera_info.paspect; float const image_aspect = dimension_aspect * pixel_aspect; float const focal_length = camera_info.flen; float const ppx = camera_info.ppoint[0]; float const ppy = camera_info.ppoint[1]; /* Fill OpenGL view matrix */ camera_info.fill_world_to_cam(ogl_camera->view.begin()); camera_info.fill_cam_to_world(ogl_camera->inv_view.begin()); /* Construct OpenGL projection matrix. */ math::Matrix4f& proj = ogl_camera->proj; proj.fill(0.0f); proj[0] = 2.0f * focal_length * (image_aspect > 1.0f ? 1.0f : 1.0f / image_aspect); proj[2] = -2.0f * (0.5f - ppx); proj[5] = -2.0f * focal_length * (image_aspect > 1.0f ? 
image_aspect : 1.0f); proj[6] = -2.0f * (ppy - 0.5f); proj[10] = -(-zfar - znear) / (zfar - znear); proj[11] = -2.0f * zfar * znear / (zfar - znear); proj[14] = 1.0f; camera_info.fill_camera_pos(ogl_camera->pos.begin()); ogl_camera->z_near = znear; ogl_camera->z_far = zfar; } void setup_fbo(GLuint *fbo, GLuint *rbo, GLuint * dbo, int width, int height) { glGenRenderbuffers(1, rbo); glBindRenderbuffer(GL_RENDERBUFFER, *rbo); glRenderbufferStorage(GL_RENDERBUFFER, GL_R32F, width, height); ogl::check_gl_error(); glBindRenderbuffer(GL_RENDERBUFFER, 0); glGenRenderbuffers(1, dbo); glBindRenderbuffer(GL_RENDERBUFFER, *dbo); glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT, width, height); ogl::check_gl_error(); glBindRenderbuffer(GL_RENDERBUFFER, 0); glGenFramebuffers(1, fbo); glBindFramebuffer(GL_FRAMEBUFFER, *fbo); glFramebufferRenderbuffer( GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, *rbo); glFramebufferRenderbuffer( GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, *dbo); ogl::check_gl_error(); if(GL_FRAMEBUFFER_COMPLETE != glCheckFramebufferStatus(GL_FRAMEBUFFER)) { std::cerr << "Could not initialize framebuffer" << std::endl; std::exit(EXIT_FAILURE); } glBindFramebuffer(GL_FRAMEBUFFER, 0); } __global__ void copy(cacc::ArrayTexture<float>::Accessor tex, cacc::Image<float, cacc::DEVICE>::Data image) { int const bx = blockIdx.x; int const tx = threadIdx.x; int const by = blockIdx.y; int const ty = threadIdx.y; int const x = bx * blockDim.x + tx; int const y = by * blockDim.y + ty; if (x >= image.width || y >= image.height) return; int const stride = image.pitch / sizeof(float); image.data_ptr[y * stride + x] = tex[x][y]; } __global__ void __launch_bounds__(TRACING_BLOCK_SIZE) raycast(cacc::Vec3f origin, cacc::Mat3f invcalib, cacc::Mat3f c2w_rot, cacc::BVHTree<cacc::DEVICE>::Accessor const bvh_tree, cacc::Image<float, cacc::DEVICE>::Data image) { int const bx = blockIdx.x; int const tx = threadIdx.x; int const by = blockIdx.y; int const ty = threadIdx.y; int const x = bx * blockDim.x + tx; int const y = by * blockDim.y + ty; if (x >= image.width || y >= image.height) return; int const stride = image.pitch / sizeof(float); cacc::Ray ray; ray.origin = origin; cacc::Vec3f v = invcalib * cacc::Vec3f((float)x + 0.5f, (float)y + 0.5f, 1.0f); ray.dir = (c2w_rot * v.normalize()).normalize(); ray.set_tmin(0.001f); ray.set_tmax(1000.0f); uint hit_face_id; if (cacc::tracing::trace(bvh_tree, ray, &hit_face_id)) { //ERROR face_id != tri_id cacc::Tri tri = bvh_tree.load_tri(hit_face_id); image.data_ptr[y * stride + x] = 1000.0f; cacc::intersect(ray, tri, image.data_ptr + y * stride + x); } else { image.data_ptr[y * stride + x] = 0.0f; } } int main(int argc, char **argv) { util::system::register_segfault_handler(); util::system::print_build_timestamp(argv[0]); Arguments args = parse_args(argc, argv); Window window("", 640, 480); int device = cacc::get_cuda_device(3, 5); cacc::set_cuda_device(device); cacc::set_cuda_gl_device(device); mve::TriangleMesh::Ptr mesh; try { mesh = mve::geom::load_ply_mesh(args.mesh); } catch (std::exception& e) { throw std::runtime_error(std::string("Could not load mesh: ") + e.what()); } std::vector<uint> const & faces = mesh->get_faces(); std::vector<math::Vec3f> const & vertices = mesh->get_vertices(); BVHTree::Ptr bvh_tree = BVHTree::create(faces, vertices); ogl::ShaderProgram::Ptr sp = ogl::ShaderProgram::create(); sp->load_vert_code(vertex_shader); sp->load_frag_code(fragment_shader); ogl::MeshRenderer::Ptr mr = ogl::MeshRenderer::create(mesh); int 
width = args.width; int height = args.height; std::string image_name = args.image_name; mve::Scene::Ptr scene; try { scene = mve::Scene::create(args.scene_dir); } catch (std::exception& e) { std::cerr << "Could not open scene: " << e.what() << std::endl; std::exit(EXIT_FAILURE); } GLuint fbo, rbo, dbo; setup_fbo(&fbo, &rbo, &dbo, width, height); ogl::Camera ogl_cam; float const znear = 0.1f; float const zfar = 1000.0f; mve::View::Ptr view = scene->get_view_by_id(27); mve::CameraInfo const & camera = view->get_camera(); math::Vec3f origin; camera.fill_camera_pos(origin.begin()); math::Matrix3f invcalib; camera.fill_inverse_calibration(invcalib.begin(), width, height); math::Matrix3f c2w_rot; camera.fill_cam_to_world_rot(c2w_rot.begin()); fill_ogl_camera(camera, width, height, znear, zfar, &ogl_cam); std::chrono::time_point<std::chrono::system_clock> start, end; start = std::chrono::system_clock::now(); glEnable(GL_DEPTH_TEST); glBindFramebuffer(GL_FRAMEBUFFER, fbo); glViewport(0, 0, width, height); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); sp->bind(); sp->send_uniform("znear", znear); sp->send_uniform("zfar", zfar); sp->send_uniform("viewmat", ogl_cam.view); sp->send_uniform("projmat", ogl_cam.proj); mr->set_shader(sp); mr->draw(); glFlush(); ogl::check_gl_error(); glBindFramebuffer(GL_FRAMEBUFFER, 0); end = std::chrono::system_clock::now(); std::cout << "Rasterization: " << std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() << "us" << std::endl; mve::FloatImage::Ptr depth = mve::FloatImage::create(width, height, 1); { cacc::GraphicsResource res; CHECK(hipGraphicsGLRegisterImage(&res.ptr(), rbo, GL_RENDERBUFFER, hipGraphicsRegisterFlagsNone)); cacc::MappedArrayTexture<float> tex(&res); cacc::Image<float, cacc::DEVICE>::Ptr dimage; dimage = cacc::Image<float, cacc::DEVICE>::create(width, height); cacc::Image<float, cacc::HOST>::Ptr image; image = cacc::Image<float, cacc::HOST>::create(width, height); dim3 block(16, 16); dim3 grid((width + 15) / 16, (height + 15) / 16); hipLaunchKernelGGL(( copy), dim3(grid), dim3(block), 0, 0, tex.accessor(), dimage->cdata()); *image = *dimage; cacc::Image<float, cacc::HOST>::Data data = image->cdata(); for (int y = 0; y < data.height; ++y) { for (int x = 0; x < data.width; ++x) { depth->at(x, y, 0) = data.data_ptr[y * (data.pitch / sizeof(float)) + x]; } } } mve::image::flip<float>(depth, mve::image::FLIP_VERTICAL); mve::image::depthmap_convert_conventions<float>(depth, invcalib, true); mve::FloatImage::Ptr rdepth = mve::FloatImage::create(width, height, 1); start = std::chrono::system_clock::now(); #pragma omp parallel for for (int y = 0; y < rdepth->height(); ++y) { for (int x = 0; x < rdepth->width(); ++x) { BVHTree::Ray ray; ray.origin = origin; math::Vec3f v = invcalib * math::Vec3f ((float)x + 0.5f, (float)y + 0.5f, 1.0f); ray.dir = c2w_rot.mult(v.normalized()).normalize(); ray.tmin = 0.0f; ray.tmax = std::numeric_limits<float>::infinity(); BVHTree::Hit hit; if (!bvh_tree->intersect(ray, &hit)) continue; rdepth->at(x, y, 0) = (hit.t * ray.dir).norm(); } } end = std::chrono::system_clock::now(); std::cout << "CPU tracing: " << std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() << "us" << std::endl; mve::image::depthmap_convert_conventions<float>(rdepth, invcalib, false); cacc::select_cuda_device(3, 5); cacc::BVHTree<cacc::DEVICE>::Ptr dbvh_tree; dbvh_tree = cacc::BVHTree<cacc::DEVICE>::create<uint, math::Vec3f>(bvh_tree); cacc::Image<float, cacc::DEVICE>::Ptr dimage; dimage = cacc::Image<float, 
cacc::DEVICE>::create(width, height); cacc::Image<float, cacc::HOST>::Ptr image; image = cacc::Image<float, cacc::HOST>::create(width, height); start = std::chrono::system_clock::now(); { dim3 block(16, 8); dim3 grid(cacc::divup(width, block.x), cacc::divup(height, block.y)); hipLaunchKernelGGL(( raycast), dim3(grid), dim3(block), 0, 0, cacc::Vec3f(origin.begin()), cacc::Mat3f(invcalib.begin()), cacc::Mat3f(c2w_rot.begin()), dbvh_tree->accessor(), dimage->cdata()); CHECK(hipDeviceSynchronize()); } end = std::chrono::system_clock::now(); std::cout << "GPU tracing: " << std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() << "us" << std::endl; *image = *dimage; CHECK(hipDeviceSynchronize()); mve::FloatImage::Ptr ddepth = mve::FloatImage::create(width, height, 1); cacc::Image<float, cacc::HOST>::Data data = image->cdata(); for (int y = 0; y < data.height; ++y) { for (int x = 0; x < data.width; ++x) { ddepth->at(x, y, 0) = data.data_ptr[y * (data.pitch / sizeof(float)) + x]; } } mve::FloatImage::Ptr error = mve::FloatImage::create(width, height, 1); for (int i = 0; i < depth->get_value_amount(); ++i) { error->at(i) = std::abs(ddepth->at(i) - depth->at(i)); } mve::image::save_pfm_file(error, "/tmp/error.pfm"); mve::image::save_pfm_file(ddepth, "/tmp/depth.pfm"); return EXIT_SUCCESS; }
0f646fb89347be56651771967faba24f2d15c512.cu
/* * Copyright (C) 2016-2018, Nils Moehrle * All rights reserved. * * This software may be modified and distributed under the terms * of the BSD 3-Clause license. See the LICENSE.txt file for details. */ #include <chrono> #include <iostream> #include "util/system.h" #include "util/arguments.h" #include "util/tokenizer.h" #include "mve/scene.h" #include "mve/depthmap.h" #include "mve/image_io.h" #include "mve/image_tools.h" #include "mve/mesh_io_ply.h" #include "ogl/camera.h" #include "ogl/mesh_renderer.h" #include "ogl/check_gl_error.h" #include "sim/window.h" #define BVHTREE_NUM_BINS 0 #include "acc/bvh_tree.h" #include "cacc/math.h" #include "cacc/util.h" #include "cacc/image.h" #include "cacc/matrix.h" #include "cacc/tracing.h" #include "cacc/bvh_tree.h" #include "cacc/array_texture.h" #include "cacc/graphics_resource.h" #include <cuda_gl_interop.h> typedef unsigned int uint; typedef acc::BVHTree<uint, math::Vec3f> BVHTree; const char *fragment_shader = R"( #version 330 core layout(location=0) out float depth; uniform float znear; uniform float zfar; void main(void) { gl_FragDepth = gl_FragCoord.z; depth = (zfar * znear) / ((znear - zfar) * gl_FragCoord.z + zfar); } )"; const char *vertex_shader = R"( #version 330 core in vec4 pos; uniform mat4 viewmat; uniform mat4 projmat; void main(void) { gl_Position = projmat * (viewmat * pos); } )"; struct Arguments { std::string scene_dir; std::string mesh; std::string image_name = "original"; int width = 1920; int height = 1080; }; Arguments parse_args(int argc, char **argv) { util::Arguments args; args.set_exit_on_error(true); args.set_nonopt_maxnum(2); args.set_nonopt_minnum(2); args.set_helptext_indent(28); args.set_usage("Usage: " + std::string(argv[0]) + " [OPTS] SCENE MESH"); args.set_description("Test app for cuda and opengl interoperability"); args.add_option('r', "resolution", true, "resolution [1920x1080]"); args.parse(argc, argv); Arguments conf; conf.scene_dir = args.get_nth_nonopt(0); conf.mesh = args.get_nth_nonopt(1); for (util::ArgResult const* i = args.next_option(); i != 0; i = args.next_option()) { switch (i->opt->sopt) { case 'r': { util::Tokenizer tok; tok.split(i->arg, 'x'); if (tok.size() != 2) throw std::invalid_argument("Invalid resolution"); conf.width = tok.get_as<int>(0); conf.height = tok.get_as<int>(1); } break; default: throw std::invalid_argument("Invalid option"); } } return conf; } /* TODO extract and merge with capture_trajectory. */ void fill_ogl_camera(mve::CameraInfo const & camera_info, int width, int height, float znear, float zfar, ogl::Camera * ogl_camera) { /* Get all parameters and check them. */ float const dimension_aspect = static_cast<float>(width) / height; float const pixel_aspect = camera_info.paspect; float const image_aspect = dimension_aspect * pixel_aspect; float const focal_length = camera_info.flen; float const ppx = camera_info.ppoint[0]; float const ppy = camera_info.ppoint[1]; /* Fill OpenGL view matrix */ camera_info.fill_world_to_cam(ogl_camera->view.begin()); camera_info.fill_cam_to_world(ogl_camera->inv_view.begin()); /* Construct OpenGL projection matrix. */ math::Matrix4f& proj = ogl_camera->proj; proj.fill(0.0f); proj[0] = 2.0f * focal_length * (image_aspect > 1.0f ? 1.0f : 1.0f / image_aspect); proj[2] = -2.0f * (0.5f - ppx); proj[5] = -2.0f * focal_length * (image_aspect > 1.0f ? 
image_aspect : 1.0f); proj[6] = -2.0f * (ppy - 0.5f); proj[10] = -(-zfar - znear) / (zfar - znear); proj[11] = -2.0f * zfar * znear / (zfar - znear); proj[14] = 1.0f; camera_info.fill_camera_pos(ogl_camera->pos.begin()); ogl_camera->z_near = znear; ogl_camera->z_far = zfar; } void setup_fbo(GLuint *fbo, GLuint *rbo, GLuint * dbo, int width, int height) { glGenRenderbuffers(1, rbo); glBindRenderbuffer(GL_RENDERBUFFER, *rbo); glRenderbufferStorage(GL_RENDERBUFFER, GL_R32F, width, height); ogl::check_gl_error(); glBindRenderbuffer(GL_RENDERBUFFER, 0); glGenRenderbuffers(1, dbo); glBindRenderbuffer(GL_RENDERBUFFER, *dbo); glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT, width, height); ogl::check_gl_error(); glBindRenderbuffer(GL_RENDERBUFFER, 0); glGenFramebuffers(1, fbo); glBindFramebuffer(GL_FRAMEBUFFER, *fbo); glFramebufferRenderbuffer( GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, *rbo); glFramebufferRenderbuffer( GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, *dbo); ogl::check_gl_error(); if(GL_FRAMEBUFFER_COMPLETE != glCheckFramebufferStatus(GL_FRAMEBUFFER)) { std::cerr << "Could not initialize framebuffer" << std::endl; std::exit(EXIT_FAILURE); } glBindFramebuffer(GL_FRAMEBUFFER, 0); } __global__ void copy(cacc::ArrayTexture<float>::Accessor tex, cacc::Image<float, cacc::DEVICE>::Data image) { int const bx = blockIdx.x; int const tx = threadIdx.x; int const by = blockIdx.y; int const ty = threadIdx.y; int const x = bx * blockDim.x + tx; int const y = by * blockDim.y + ty; if (x >= image.width || y >= image.height) return; int const stride = image.pitch / sizeof(float); image.data_ptr[y * stride + x] = tex[x][y]; } __global__ void __launch_bounds__(TRACING_BLOCK_SIZE) raycast(cacc::Vec3f origin, cacc::Mat3f invcalib, cacc::Mat3f c2w_rot, cacc::BVHTree<cacc::DEVICE>::Accessor const bvh_tree, cacc::Image<float, cacc::DEVICE>::Data image) { int const bx = blockIdx.x; int const tx = threadIdx.x; int const by = blockIdx.y; int const ty = threadIdx.y; int const x = bx * blockDim.x + tx; int const y = by * blockDim.y + ty; if (x >= image.width || y >= image.height) return; int const stride = image.pitch / sizeof(float); cacc::Ray ray; ray.origin = origin; cacc::Vec3f v = invcalib * cacc::Vec3f((float)x + 0.5f, (float)y + 0.5f, 1.0f); ray.dir = (c2w_rot * v.normalize()).normalize(); ray.set_tmin(0.001f); ray.set_tmax(1000.0f); uint hit_face_id; if (cacc::tracing::trace(bvh_tree, ray, &hit_face_id)) { //ERROR face_id != tri_id cacc::Tri tri = bvh_tree.load_tri(hit_face_id); image.data_ptr[y * stride + x] = 1000.0f; cacc::intersect(ray, tri, image.data_ptr + y * stride + x); } else { image.data_ptr[y * stride + x] = 0.0f; } } int main(int argc, char **argv) { util::system::register_segfault_handler(); util::system::print_build_timestamp(argv[0]); Arguments args = parse_args(argc, argv); Window window("", 640, 480); int device = cacc::get_cuda_device(3, 5); cacc::set_cuda_device(device); cacc::set_cuda_gl_device(device); mve::TriangleMesh::Ptr mesh; try { mesh = mve::geom::load_ply_mesh(args.mesh); } catch (std::exception& e) { throw std::runtime_error(std::string("Could not load mesh: ") + e.what()); } std::vector<uint> const & faces = mesh->get_faces(); std::vector<math::Vec3f> const & vertices = mesh->get_vertices(); BVHTree::Ptr bvh_tree = BVHTree::create(faces, vertices); ogl::ShaderProgram::Ptr sp = ogl::ShaderProgram::create(); sp->load_vert_code(vertex_shader); sp->load_frag_code(fragment_shader); ogl::MeshRenderer::Ptr mr = ogl::MeshRenderer::create(mesh); int 
width = args.width; int height = args.height; std::string image_name = args.image_name; mve::Scene::Ptr scene; try { scene = mve::Scene::create(args.scene_dir); } catch (std::exception& e) { std::cerr << "Could not open scene: " << e.what() << std::endl; std::exit(EXIT_FAILURE); } GLuint fbo, rbo, dbo; setup_fbo(&fbo, &rbo, &dbo, width, height); ogl::Camera ogl_cam; float const znear = 0.1f; float const zfar = 1000.0f; mve::View::Ptr view = scene->get_view_by_id(27); mve::CameraInfo const & camera = view->get_camera(); math::Vec3f origin; camera.fill_camera_pos(origin.begin()); math::Matrix3f invcalib; camera.fill_inverse_calibration(invcalib.begin(), width, height); math::Matrix3f c2w_rot; camera.fill_cam_to_world_rot(c2w_rot.begin()); fill_ogl_camera(camera, width, height, znear, zfar, &ogl_cam); std::chrono::time_point<std::chrono::system_clock> start, end; start = std::chrono::system_clock::now(); glEnable(GL_DEPTH_TEST); glBindFramebuffer(GL_FRAMEBUFFER, fbo); glViewport(0, 0, width, height); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); sp->bind(); sp->send_uniform("znear", znear); sp->send_uniform("zfar", zfar); sp->send_uniform("viewmat", ogl_cam.view); sp->send_uniform("projmat", ogl_cam.proj); mr->set_shader(sp); mr->draw(); glFlush(); ogl::check_gl_error(); glBindFramebuffer(GL_FRAMEBUFFER, 0); end = std::chrono::system_clock::now(); std::cout << "Rasterization: " << std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() << "us" << std::endl; mve::FloatImage::Ptr depth = mve::FloatImage::create(width, height, 1); { cacc::GraphicsResource res; CHECK(cudaGraphicsGLRegisterImage(&res.ptr(), rbo, GL_RENDERBUFFER, cudaGraphicsRegisterFlagsNone)); cacc::MappedArrayTexture<float> tex(&res); cacc::Image<float, cacc::DEVICE>::Ptr dimage; dimage = cacc::Image<float, cacc::DEVICE>::create(width, height); cacc::Image<float, cacc::HOST>::Ptr image; image = cacc::Image<float, cacc::HOST>::create(width, height); dim3 block(16, 16); dim3 grid((width + 15) / 16, (height + 15) / 16); copy<<<grid, block>>>(tex.accessor(), dimage->cdata()); *image = *dimage; cacc::Image<float, cacc::HOST>::Data data = image->cdata(); for (int y = 0; y < data.height; ++y) { for (int x = 0; x < data.width; ++x) { depth->at(x, y, 0) = data.data_ptr[y * (data.pitch / sizeof(float)) + x]; } } } mve::image::flip<float>(depth, mve::image::FLIP_VERTICAL); mve::image::depthmap_convert_conventions<float>(depth, invcalib, true); mve::FloatImage::Ptr rdepth = mve::FloatImage::create(width, height, 1); start = std::chrono::system_clock::now(); #pragma omp parallel for for (int y = 0; y < rdepth->height(); ++y) { for (int x = 0; x < rdepth->width(); ++x) { BVHTree::Ray ray; ray.origin = origin; math::Vec3f v = invcalib * math::Vec3f ((float)x + 0.5f, (float)y + 0.5f, 1.0f); ray.dir = c2w_rot.mult(v.normalized()).normalize(); ray.tmin = 0.0f; ray.tmax = std::numeric_limits<float>::infinity(); BVHTree::Hit hit; if (!bvh_tree->intersect(ray, &hit)) continue; rdepth->at(x, y, 0) = (hit.t * ray.dir).norm(); } } end = std::chrono::system_clock::now(); std::cout << "CPU tracing: " << std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() << "us" << std::endl; mve::image::depthmap_convert_conventions<float>(rdepth, invcalib, false); cacc::select_cuda_device(3, 5); cacc::BVHTree<cacc::DEVICE>::Ptr dbvh_tree; dbvh_tree = cacc::BVHTree<cacc::DEVICE>::create<uint, math::Vec3f>(bvh_tree); cacc::Image<float, cacc::DEVICE>::Ptr dimage; dimage = cacc::Image<float, cacc::DEVICE>::create(width, 
height); cacc::Image<float, cacc::HOST>::Ptr image; image = cacc::Image<float, cacc::HOST>::create(width, height); start = std::chrono::system_clock::now(); { dim3 block(16, 8); dim3 grid(cacc::divup(width, block.x), cacc::divup(height, block.y)); raycast<<<grid, block>>>(cacc::Vec3f(origin.begin()), cacc::Mat3f(invcalib.begin()), cacc::Mat3f(c2w_rot.begin()), dbvh_tree->accessor(), dimage->cdata()); CHECK(cudaDeviceSynchronize()); } end = std::chrono::system_clock::now(); std::cout << "GPU tracing: " << std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() << "us" << std::endl; *image = *dimage; CHECK(cudaDeviceSynchronize()); mve::FloatImage::Ptr ddepth = mve::FloatImage::create(width, height, 1); cacc::Image<float, cacc::HOST>::Data data = image->cdata(); for (int y = 0; y < data.height; ++y) { for (int x = 0; x < data.width; ++x) { ddepth->at(x, y, 0) = data.data_ptr[y * (data.pitch / sizeof(float)) + x]; } } mve::FloatImage::Ptr error = mve::FloatImage::create(width, height, 1); for (int i = 0; i < depth->get_value_amount(); ++i) { error->at(i) = std::abs(ddepth->at(i) - depth->at(i)); } mve::image::save_pfm_file(error, "/tmp/error.pfm"); mve::image::save_pfm_file(ddepth, "/tmp/depth.pfm"); return EXIT_SUCCESS; }
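A standalone sketch (not taken from either file above) of the launch-syntax rewrite this .cu/.hip pair illustrates: hipify turns the triple-chevron launch into hipLaunchKernelGGL and maps cuda* runtime and GL-interop calls to their hip* counterparts. The scale kernel and buffer size here are invented for the example.

#include <hip/hip_runtime.h>

__global__ void scale(float* data, float alpha, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= alpha;
}

int main() {
  const int n = 1 << 20;
  float* d = nullptr;
  hipMalloc(&d, n * sizeof(float));

  dim3 block(256);
  dim3 grid((n + block.x - 1) / block.x);

  // The CUDA source would read: scale<<<grid, block, 0, 0>>>(d, 2.0f, n);
  // hipify emits the equivalent macro form:
  hipLaunchKernelGGL(scale, grid, block, 0, 0, d, 2.0f, n);

  hipDeviceSynchronize();
  hipFree(d);
  return 0;
}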
72ee324c9ebc3987acc53f40993b6116e11a44a0.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "BrokenLineFitOnGPU.h"
#include "CUDACore/device_unique_ptr.h"

void HelixFitOnGPU::launchBrokenLineKernels(HitsView const *hv,
                                            uint32_t hitsInFit,
                                            uint32_t maxNumberOfTuples,
                                            hipStream_t stream) {
  assert(tuples_d);

  auto blockSize = 64;
  auto numberOfBlocks = (maxNumberOfConcurrentFits_ + blockSize - 1) / blockSize;

  // Fit internals
  auto hitsGPU_ = cms::hip::make_device_unique<double[]>(
      maxNumberOfConcurrentFits_ * sizeof(Rfit::Matrix3xNd<4>) / sizeof(double), stream);
  auto hits_geGPU_ = cms::hip::make_device_unique<float[]>(
      maxNumberOfConcurrentFits_ * sizeof(Rfit::Matrix6x4f) / sizeof(float), stream);
  auto fast_fit_resultsGPU_ = cms::hip::make_device_unique<double[]>(
      maxNumberOfConcurrentFits_ * sizeof(Rfit::Vector4d) / sizeof(double), stream);

  for (uint32_t offset = 0; offset < maxNumberOfTuples; offset += maxNumberOfConcurrentFits_) {
    // fit triplets
    hipLaunchKernelGGL((kernelBLFastFit<3>), dim3(numberOfBlocks), dim3(blockSize), 0, stream,
                       tuples_d, tupleMultiplicity_d, hv, hitsGPU_.get(),
                       hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 3, offset);
    cudaCheck(hipGetLastError());

    hipLaunchKernelGGL((kernelBLFit<3>), dim3(numberOfBlocks), dim3(blockSize), 0, stream,
                       tupleMultiplicity_d, bField_, outputSoa_d, hitsGPU_.get(),
                       hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 3, offset);
    cudaCheck(hipGetLastError());

    // fit quads
    hipLaunchKernelGGL((kernelBLFastFit<4>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream,
                       tuples_d, tupleMultiplicity_d, hv, hitsGPU_.get(),
                       hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 4, offset);
    cudaCheck(hipGetLastError());

    hipLaunchKernelGGL((kernelBLFit<4>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream,
                       tupleMultiplicity_d, bField_, outputSoa_d, hitsGPU_.get(),
                       hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 4, offset);
    cudaCheck(hipGetLastError());

    if (fit5as4_) {
      // fit penta (only first 4)
      hipLaunchKernelGGL((kernelBLFastFit<4>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream,
                         tuples_d, tupleMultiplicity_d, hv, hitsGPU_.get(),
                         hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 5, offset);
      cudaCheck(hipGetLastError());

      hipLaunchKernelGGL((kernelBLFit<4>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream,
                         tupleMultiplicity_d, bField_, outputSoa_d, hitsGPU_.get(),
                         hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 5, offset);
      cudaCheck(hipGetLastError());
    } else {
      // fit penta (all 5)
      hipLaunchKernelGGL((kernelBLFastFit<5>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream,
                         tuples_d, tupleMultiplicity_d, hv, hitsGPU_.get(),
                         hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 5, offset);
      cudaCheck(hipGetLastError());

      hipLaunchKernelGGL((kernelBLFit<5>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream,
                         tupleMultiplicity_d, bField_, outputSoa_d, hitsGPU_.get(),
                         hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 5, offset);
      cudaCheck(hipGetLastError());
    }
  }  // loop on concurrent fits
}
72ee324c9ebc3987acc53f40993b6116e11a44a0.cu
#include "hip/hip_runtime.h"
#include "BrokenLineFitOnGPU.h"
#include "CUDACore/device_unique_ptr.h"

void HelixFitOnGPU::launchBrokenLineKernels(HitsView const *hv,
                                            uint32_t hitsInFit,
                                            uint32_t maxNumberOfTuples,
                                            hipStream_t stream) {
  assert(tuples_d);

  auto blockSize = 64;
  auto numberOfBlocks = (maxNumberOfConcurrentFits_ + blockSize - 1) / blockSize;

  // Fit internals
  auto hitsGPU_ = cms::hip::make_device_unique<double[]>(
      maxNumberOfConcurrentFits_ * sizeof(Rfit::Matrix3xNd<4>) / sizeof(double), stream);
  auto hits_geGPU_ = cms::hip::make_device_unique<float[]>(
      maxNumberOfConcurrentFits_ * sizeof(Rfit::Matrix6x4f) / sizeof(float), stream);
  auto fast_fit_resultsGPU_ = cms::hip::make_device_unique<double[]>(
      maxNumberOfConcurrentFits_ * sizeof(Rfit::Vector4d) / sizeof(double), stream);

  for (uint32_t offset = 0; offset < maxNumberOfTuples; offset += maxNumberOfConcurrentFits_) {
    // fit triplets
    kernelBLFastFit<3><<<numberOfBlocks, blockSize, 0, stream>>>(
        tuples_d, tupleMultiplicity_d, hv, hitsGPU_.get(),
        hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 3, offset);
    cudaCheck(hipGetLastError());

    kernelBLFit<3><<<numberOfBlocks, blockSize, 0, stream>>>(
        tupleMultiplicity_d, bField_, outputSoa_d, hitsGPU_.get(),
        hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 3, offset);
    cudaCheck(hipGetLastError());

    // fit quads
    kernelBLFastFit<4><<<numberOfBlocks / 4, blockSize, 0, stream>>>(
        tuples_d, tupleMultiplicity_d, hv, hitsGPU_.get(),
        hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 4, offset);
    cudaCheck(hipGetLastError());

    kernelBLFit<4><<<numberOfBlocks / 4, blockSize, 0, stream>>>(
        tupleMultiplicity_d, bField_, outputSoa_d, hitsGPU_.get(),
        hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 4, offset);
    cudaCheck(hipGetLastError());

    if (fit5as4_) {
      // fit penta (only first 4)
      kernelBLFastFit<4><<<numberOfBlocks / 4, blockSize, 0, stream>>>(
          tuples_d, tupleMultiplicity_d, hv, hitsGPU_.get(),
          hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 5, offset);
      cudaCheck(hipGetLastError());

      kernelBLFit<4><<<numberOfBlocks / 4, blockSize, 0, stream>>>(
          tupleMultiplicity_d, bField_, outputSoa_d, hitsGPU_.get(),
          hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 5, offset);
      cudaCheck(hipGetLastError());
    } else {
      // fit penta (all 5)
      kernelBLFastFit<5><<<numberOfBlocks / 4, blockSize, 0, stream>>>(
          tuples_d, tupleMultiplicity_d, hv, hitsGPU_.get(),
          hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 5, offset);
      cudaCheck(hipGetLastError());

      kernelBLFit<5><<<numberOfBlocks / 4, blockSize, 0, stream>>>(
          tupleMultiplicity_d, bField_, outputSoa_d, hitsGPU_.get(),
          hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 5, offset);
      cudaCheck(hipGetLastError());
    }
  }  // loop on concurrent fits
}
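Both versions above size their grids with the same rounding-up division, (maxNumberOfConcurrentFits_ + blockSize - 1) / blockSize. The standalone snippet below restates that rule with placeholder names; nothing in it comes from the file itself.

#include <cstdint>

// Same rounding-up division as `numberOfBlocks` above: cover n work items with
// blocks of `block` threads, adding one extra block for any remainder.
constexpr uint32_t ceilDiv(uint32_t n, uint32_t block) { return (n + block - 1) / block; }

static_assert(ceilDiv(100, 64) == 2, "a partial remainder still gets its own block");
static_assert(ceilDiv(128, 64) == 2, "exact multiples do not over-allocate");
static_assert(ceilDiv(1, 64) == 1, "at least one block whenever there is work");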
26577e382524d67f54e535c9461b0e3bb33a9713.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2012-14, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. * * * * * * * * */ #include <nvbio/basic/types.h> #include <nvbio/basic/vector.h> #include <nvbio/basic/dna.h> #include <nvbio/io/sequence/sequence.h> #include <nvbio/io/sequence/sequence_access.h> #include <nvbio/io/vcf.h> #include <nvbio/io/sequence/sequence_pac.h> #include <hipcub/hipcub.hpp> #include <mgpuhost.cuh> #include <moderngpu.cuh> #include <nvbio/strings/string_set.h> #include <nvbio/basic/thrust_view.h> #include <nvbio/basic/cuda/sort.h> #include <nvbio/basic/cuda/timer.h> #include <nvbio/basic/cuda/ldg.h> #include <nvbio/basic/cuda/primitives.h> #include <nvbio/io/output/output_types.h> #include <moderngpu.cuh> #include <mgpuhost.cuh> #include "bam_io.h" #include "bam_sort.h" //#ifdef _OPENMP #include <omp.h> //#endif using namespace nvbio; int test_sorted(const H_KVP_batch& result); /** --------- Sorting Modules -------- **/ // generate sort keys void sortkey_gen(bamsort_context* context) { thrust::for_each(context->active_read_ids.begin(), context->active_read_ids.end(), generate_sort_keys(*context)); } // local sort of key-val pairs on the device void sort(bamsort_context* context) { thrust::sort_by_key(context->sort_keys.begin(), context->sort_keys.end(), context->active_read_ids.begin()); } // used by out-of-core merge for searching a sorted array // to find the position until which all the elements are less than the pivot // TODO: ensure thrust uses binary search uint32 find_split_idx(bamsort_context* context, const uint64 len, const uint64 pivot) { return thrust::distance(context->patch_searched.begin(), thrust::partition_point(context->patch_searched.begin(), context->patch_searched.begin() + len, is_less(pivot))); } // out-of-core merge of two sorted batches // for single GPU: sequentially merge the pivot elements on the CPU to determine the partitions // since only one partition at a time can be merged on the device -- this is to avoid // extracting and sorting the pivot elements separately; // TODO: optimize if pivots have the same value void merge_batches_1GPU(bamsort_context* context, const H_KVP_batch* b1, const H_KVP_batch* b2, H_KVP_batch* out, float& merge_time, float& data_time, float& search_time) { cuda::Timer timer; // partition info uint64 b1_npivots = b1->keys.size() / PIVOT_SAMPLING_INTERVAL; uint64 b2_npivots = b2->keys.size() / PIVOT_SAMPLING_INTERVAL; uint64 b1_pivot = 1, b2_pivot = 1; uint64 p1L = 0, p1H = 0; // batch partition limits [L, H) uint64 p2L = 0, p2H = 0; uint64 p1_size = 0, p2_size = 0; uint64 out_idx = 0; // mgpu context int current_device; hipGetDevice(&current_device); mgpu::ContextPtr mgpu_ctxt = mgpu::CreateCudaDevice(current_device); while(1) { // check if we're in the last partition if(b1_pivot > b1_npivots || b2_pivot > b2_npivots) { break; // still need to merge the batch remainders } // find the next partition p1L = p1H; p2L = p2H; timer.start(); if(b1->keys[b1_pivot*PIVOT_SAMPLING_INTERVAL-1] <= b2->keys[b2_pivot*PIVOT_SAMPLING_INTERVAL-1]) { p1H = b1_pivot*PIVOT_SAMPLING_INTERVAL; // only need to search this patch since the pivots are sorted 
NVBIO_CUDA_ASSERT(context->patch_searched.size() <= PIVOT_SAMPLING_INTERVAL); thrust::copy(b2->keys.begin() + p2L, b2->keys.begin() + b2_pivot*PIVOT_SAMPLING_INTERVAL, context->patch_searched.begin()); p2H = p2L + find_split_idx(context, b2_pivot*PIVOT_SAMPLING_INTERVAL - p2L, b1->keys[b1_pivot*PIVOT_SAMPLING_INTERVAL-1]); b1_pivot++; // advance the pivot pointer } else { p2H = b2_pivot*PIVOT_SAMPLING_INTERVAL; NVBIO_CUDA_ASSERT(context->patch_searched.size() <= PIVOT_SAMPLING_INTERVAL); thrust::copy(b1->keys.begin() + p1L, b1->keys.begin() + b1_pivot*PIVOT_SAMPLING_INTERVAL, context->patch_searched.begin()); p1H = p1L + find_split_idx(context, b1_pivot*PIVOT_SAMPLING_INTERVAL - p1L, b2->keys[b2_pivot*PIVOT_SAMPLING_INTERVAL-1]); b2_pivot++; } timer.stop(); search_time += timer.seconds(); p1_size = p1H - p1L; p2_size = p2H - p2L; //printf("Partition sizes: %llu %llu \n", p1_size, p2_size); // if one of the batch partitions is empty, we are done if(p1_size == 0) { thrust::copy(b2->keys.begin() + p2L, b2->keys.begin() + p2H, out->keys.begin()+out_idx); thrust::copy(b2->ids.begin() + p2L, b2->ids.begin() + p2H, out->ids.begin()+out_idx); out_idx += p2_size; continue; } else if(p2_size == 0) { thrust::copy(b1->keys.begin() + p1L, b1->keys.begin() + p1H, out->keys.begin()+out_idx); thrust::copy(b1->ids.begin() + p1L, b1->ids.begin() + p1H, out->ids.begin()+out_idx); out_idx += p1_size; continue; } // TODO: if the sizes are less than a given threshold, merge on the CPU // transfer the partitions to the device NVBIO_CUDA_ASSERT(context->p1.keys.size() <= 2*PIVOT_SAMPLING_INTERVAL); NVBIO_CUDA_ASSERT(context->p2.keys.size() <= 2*PIVOT_SAMPLING_INTERVAL); NVBIO_CUDA_ASSERT(context->r.keys.size() <= 2*PIVOT_SAMPLING_INTERVAL); timer.start(); thrust::copy(b1->keys.begin() + p1L, b1->keys.begin() + p1H, context->p1.keys.begin()); thrust::copy(b2->keys.begin() + p2L, b2->keys.begin() + p2H, context->p2.keys.begin()); thrust::copy(b1->ids.begin() + p1L, b1->ids.begin() + p1H, context->p1.ids.begin()); thrust::copy(b2->ids.begin() + p2L, b2->ids.begin() + p2H, context->p2.ids.begin()); timer.stop(); data_time += timer.seconds(); // merge timer.start(); mgpu::MergePairs(context->p1.keys.begin(), context->p1.ids.begin(), p1_size, context->p2.keys.begin(), context->p2.ids.begin(), p2_size, context->r.keys.begin(), context->r.ids.begin(), *mgpu_ctxt); timer.stop(); merge_time += timer.seconds(); // transfer the results to the host timer.start(); thrust::copy(context->r.keys.begin(), context->r.keys.begin() + p1_size + p2_size, out->keys.begin()+out_idx); thrust::copy(context->r.ids.begin(), context->r.ids.begin() + p1_size + p2_size, out->ids.begin()+out_idx); timer.stop(); data_time += timer.seconds(); out_idx += p1_size + p2_size; } // merge the final pieces p1_size = b1->keys.size() - p1H; p2_size = b2->keys.size() - p2H; //printf("Final partition sizes: %llu %llu \n", p1_size, p2_size); // if one of the batch remainders is empty, we are done if(p1_size == 0) { thrust::copy(b2->keys.begin() + p2H, b2->keys.end(), out->keys.begin()+out_idx); thrust::copy(b2->ids.begin() + p2H, b2->ids.end(), out->ids.begin()+out_idx); return; } else if(p2_size == 0) { thrust::copy(b1->keys.begin() + p1H, b1->keys.end(), out->keys.begin()+out_idx); thrust::copy(b1->ids.begin() + p1H, b1->ids.end(), out->ids.begin()+out_idx); return; } NVBIO_CUDA_ASSERT(context->p1.keys.size() <= 2*PIVOT_SAMPLING_INTERVAL); NVBIO_CUDA_ASSERT(context->p2.keys.size() <= 2*PIVOT_SAMPLING_INTERVAL); NVBIO_CUDA_ASSERT(context->r.keys.size() <= 
2*PIVOT_SAMPLING_INTERVAL); timer.start(); thrust::copy(b1->keys.begin() + p1H, b1->keys.end(), context->p1.keys.begin()); thrust::copy(b2->keys.begin() + p2H, b2->keys.end(), context->p2.keys.begin()); thrust::copy(b1->ids.begin() + p1H, b1->ids.end(), context->p1.ids.begin()); thrust::copy(b2->ids.begin() + p2H, b2->ids.end(), context->p2.ids.begin()); timer.stop(); data_time += timer.seconds(); timer.start(); mgpu::MergePairs(context->p1.keys.begin(), context->p1.ids.begin(), p1_size, context->p2.keys.begin(), context->p2.ids.begin(), p2_size, context->r.keys.begin(), context->r.ids.begin(), *mgpu_ctxt); timer.stop(); merge_time += timer.seconds(); timer.start(); thrust::copy(context->r.keys.begin(), context->r.keys.begin() + p1_size + p2_size, out->keys.begin()+out_idx); thrust::copy(context->r.ids.begin(), context->r.ids.begin() + p1_size + p2_size, out->ids.begin()+out_idx); timer.stop(); data_time += timer.seconds(); } // out-of-core merge of two sorted batches // two GPUs void merge_batches_2GPU(H_KVP_batch* b1, H_KVP_batch* b2, H_KVP_batch* out) { // partition info uint64 b1_npivots = (b1->keys.size()-1) / PIVOT_SAMPLING_INTERVAL; uint64 b2_npivots = (b2->keys.size()-1) / PIVOT_SAMPLING_INTERVAL; // 1. sort the pivots H_VectorU64 pivots(b1_npivots + b2_npivots); for(uint64 i = 0; i < b1_npivots; i++) { pivots[i] = b1->keys[(i+1)*PIVOT_SAMPLING_INTERVAL-1]; } for(uint64 i = 0; i < b2_npivots; i++) { pivots[b1_npivots + i] = b2->keys[(i+1)*PIVOT_SAMPLING_INTERVAL-1]; } thrust::sort(pivots.begin(), pivots.end()); printf("Merge: found and sorted pivots. Num pivots %llu \n", (uint64) pivots.size()); std::vector<H_KVP_batch*> batches(2); std::vector<H_VectorU64> pivot_idx(2); batches[0] = b1; batches[1] = b2; // 2. search each batch for the partition delimiters omp_set_num_threads(2); #pragma omp parallel { int tid = omp_get_thread_num(); hipSetDevice(tid); H_KVP_batch* b = batches[tid]; D_VectorU64 d_bkeys(D_BATCH_SIZE); pivot_idx[tid].resize(pivots.size()); uint64 num_processed = 0; uint64 pid = 0; while(num_processed < b->keys.size() && pid < pivots.size()) { uint64 batch_size = D_BATCH_SIZE; if(b->keys.size() - num_processed < D_BATCH_SIZE) { batch_size = b->keys.size() - num_processed; } thrust::copy(b->keys.begin() + num_processed, b->keys.begin() + num_processed + batch_size, d_bkeys.begin()); // find as many pivots as possible in the loaded partition while(1) { if(pid >= pivots.size() || pivots[pid] > b->keys[num_processed + batch_size - 1]) { break; // load the next batch } // pivot is in the loaded section uint64 offset = thrust::distance(d_bkeys.begin(), thrust::partition_point(d_bkeys.begin(), d_bkeys.begin() + batch_size, is_less(pivots[pid]))); pivot_idx[tid][pid] = num_processed + offset; pid++; } num_processed += batch_size; } if(pid < pivots.size()) { // if pid == 0, all elements in this batch are smaller than the elements in the second batch for(uint64 i = pid; i < pivots.size(); i++) { pivot_idx[tid][i] = b->keys.size(); } } printf("Thread %d processed %llu elements \n", tid, num_processed); } // 3. 
find partition offsets into output // TODO: optimize out empty partitions (when pivots are equal) uint64 num_partitions = pivots.size() + 1; H_VectorU64 p_offsets(num_partitions); p_offsets[0] = 0; for(uint64 i = 1; i < num_partitions; i++) { p_offsets[i] = pivot_idx[0][i-1] + pivot_idx[1][i-1]; } printf("Total number of partitions: %llu \n", num_partitions); std::vector<mgpu::ContextPtr> mgpu_ctxt(2); mgpu_ctxt[0] = mgpu::CreateCudaDevice(0); mgpu_ctxt[1] = mgpu::CreateCudaDevice(1); // 4. merge the partitions omp_set_num_threads(2); #pragma omp parallel { int tid = omp_get_thread_num(); hipSetDevice(tid); bamsort_context context; context.allocate_partition(); uint64 p1L, p1H; // batch partition limits [L, H) uint64 p2L, p2H; uint64 p1_size, p2_size; uint64 part_id = tid; while(part_id < num_partitions) { uint64 out_idx = p_offsets[part_id]; if(part_id == 0) { p1L = 0; p2L = 0; } else { p1L = pivot_idx[0][part_id-1]; p2L = pivot_idx[1][part_id-1]; } if(part_id == num_partitions - 1) { p1H = b1->keys.size(); p2H = b2->keys.size(); } else { p1H = pivot_idx[0][part_id]; p2H = pivot_idx[1][part_id]; } p1_size = p1H - p1L; p2_size = p2H - p2L; printf("Thread %d. Partition sizes: %llu %llu\n", tid, p1_size, p2_size); // if one of the batch partitions is empty, we are done if(p1_size == 0) { thrust::copy(b2->keys.begin() + p2L, b2->keys.begin() + p2H, out->keys.begin()+out_idx); thrust::copy(b2->ids.begin() + p2L, b2->ids.begin() + p2H, out->ids.begin()+out_idx); part_id += 2; continue; } else if(p2_size == 0) { thrust::copy(b1->keys.begin() + p1L, b1->keys.begin() + p1H, out->keys.begin()+out_idx); thrust::copy(b1->ids.begin() + p1L, b1->ids.begin() + p1H, out->ids.begin()+out_idx); part_id += 2; continue; } // transfer the partitions to the device thrust::copy(b1->keys.begin() + p1L, b1->keys.begin() + p1H, context.p1.keys.begin()); thrust::copy(b2->keys.begin() + p2L, b2->keys.begin() + p2H, context.p2.keys.begin()); thrust::copy(b1->ids.begin() + p1L, b1->ids.begin() + p1H, context.p1.ids.begin()); thrust::copy(b2->ids.begin() + p2L, b2->ids.begin() + p2H, context.p2.ids.begin()); // merge mgpu::MergePairs(context.p1.keys.begin(), context.p1.ids.begin(), p1_size, context.p2.keys.begin(), context.p2.ids.begin(), p2_size, context.r.keys.begin(), context.r.ids.begin(), *mgpu_ctxt[tid]); // transfer the results to the host thrust::copy(context.r.keys.begin(), context.r.keys.begin() + p1_size + p2_size, out->keys.begin()+out_idx); thrust::copy(context.r.ids.begin(), context.r.ids.begin() + p1_size + p2_size, out->ids.begin()+out_idx); part_id += 2; } } } /** ------ Sorting Pipelines ---------- **/ // full load -> sort -> store (no IO-compute overlapping) // single GPU void bamsort_pipeline_basic(const char* in_fname, const char* out_fname) { cuda::Timer timer, timer_alloc, timer_all; float sort_time = 0, keygen_time = 0, data_time = 0, merge_time = 0, merge_data_time = 0, merge_search_time = 0; //timer_all.start(); // 1. load BAM timer.start(); HTSBAMReader bam_reader(in_fname); BAM_alignment_batch_SoA h_batch(BAM_POSITIONS | BAM_REFIDS | BAM_FLAGS); bam_reader.read_aln_batch(h_batch, H_BATCH_SIZE); timer.stop(); printf("BAM load time: %.4fs\n", timer.seconds()); printf("Total number of alignments: %llu \n", h_batch.num_alns); timer_all.start(); // 2. 
split and sort int num_batches = 0; uint64 num_aln_loaded = 0; std::list<H_KVP_batch*> sorted_kvp_batches; // container for the sorted batches bamsort_context device_context; // device data for sorting while(num_aln_loaded < h_batch.num_alns) { // transfer the next batch to the device timer.start(); device_context.load_batch(h_batch, num_aln_loaded, D_BATCH_SIZE); timer.stop(); data_time += timer.seconds(); // generate the sort keys timer.start(); sortkey_gen(&device_context); timer.stop(); keygen_time += timer.seconds(); // sort timer.start(); sort(&device_context); timer.stop(); sort_time += timer.seconds(); // save sorted batches on the host timer.start(); H_KVP_batch* sorted_batch = new H_KVP_batch(); sorted_batch->keys.resize(device_context.sort_keys.size()); sorted_batch->ids.resize(device_context.sort_keys.size()); thrust::copy(device_context.sort_keys.begin(), device_context.sort_keys.end(), sorted_batch->keys.begin()); thrust::copy(device_context.active_read_ids.begin(), device_context.active_read_ids.end(), sorted_batch->ids.begin()); sorted_kvp_batches.push_back(sorted_batch); timer.stop(); data_time += timer.seconds(); num_batches += 1; num_aln_loaded += sorted_batch->ids.size(); printf("Processed %d batches and %llu reads \n", num_batches, num_aln_loaded); } printf("Local keygen-only time : %.4fs\n", keygen_time); printf("Local batch sorting-only time : %.4fs\n", sort_time); printf("Local device data allocation and transfer time : %.4fs\n", data_time); // free device data device_context.free_local_sort_batch(); // 3. merge H_KVP_batch* final_result = sorted_kvp_batches.front(); if(sorted_kvp_batches.size() != 1) { device_context.allocate_partition(); } timer.start(); float alloc_time = 0; while(sorted_kvp_batches.size() > 1) { timer_alloc.start(); H_KVP_batch* out = new H_KVP_batch(); H_KVP_batch* b1 = sorted_kvp_batches.front(); sorted_kvp_batches.pop_front(); H_KVP_batch* b2 = sorted_kvp_batches.front(); sorted_kvp_batches.pop_front(); // allocate space for the merged batches out->keys.resize(b1->keys.size() + b2->keys.size()); out->ids.resize(out->keys.size()); timer_alloc.stop(); alloc_time += timer_alloc.seconds(); // merge merge_batches_1GPU(&device_context, b1, b2, out, merge_time, merge_data_time, merge_search_time); sorted_kvp_batches.push_back(out); // free batch memory b1->free(); b2->free(); free(b1); free(b2); if(sorted_kvp_batches.size() == 1) { // merged down to one sequence final_result = out; break; } } timer.stop(); printf("Device merge-only time : %.4fs\n", merge_time); printf("Device merge data time : %.4fs\n", merge_data_time); printf("Device merge search time : %.4fs\n", merge_search_time); printf("Merge out alloc time : %.4fs\n", alloc_time); printf("Total merge time : %.4fs\n", timer.seconds()); timer_all.stop(); test_sorted(*final_result); timer.start(); //BAM_alignment_batch_SoA out_batch(h_batch.field_mask); //h_batch.shuffle(out_batch, final_result->ids); timer.stop(); printf("Shuffle time : %.4fs\n", timer.seconds()); // 4. write BAM output timer.start(); HTSBAMWriter bam_writer(out_fname); //bam_writer.write_hdr(bam_reader.header); //bam_writer.write_aln_batch(h_batch, final_result->ids, bam_reader.header); timer.stop(); printf("BAM write time : %.4fs\n", timer.seconds()); //timer_all.stop(); printf("Total BAMSORT time : %.4fs\n", timer_all.seconds()); } // full load -> sort -> store (no overlapping) // multi-GPU void bamsort_pipeline_multigpu(const char* in_fname, const char* out_fname) { cuda::Timer timer, timer_all; // 1. 
load BAM timer.start(); HTSBAMReader bam_reader(in_fname); BAM_alignment_batch_SoA h_batch(BAM_POSITIONS | BAM_REFIDS | BAM_FLAGS); bam_reader.read_aln_batch(h_batch, H_BATCH_SIZE); timer.stop(); printf("BAM load time: %.4fs\n", timer.seconds()); printf("Total number of alignments: %llu \n", h_batch.num_alns); int num_dev = 0; hipGetDeviceCount(&num_dev); printf("Total number of CPUs: %d\n", omp_get_num_procs()); printf("Total number of GPUs: %d\n", num_dev); for (int i = 0; i < num_dev; i++) { hipDeviceProp_t dprop; hipGetDeviceProperties(&dprop, i); printf(" %d: %s\n", i, dprop.name); } // 2. split and sort //TODO: figure out # devices that is appropriate to use based on load size (when num_dev > 2) if(h_batch.num_alns <= D_MIN_BATCH_SIZE) { printf("Running on a single device (load size is <= minimum single device size) \n"); num_dev = 1; } uint64 thread_batch_size = h_batch.num_alns/num_dev; std::vector<H_KVP_batch*> thread_sorted_batches(num_dev); timer_all.start(); omp_set_num_threads(num_dev); #pragma omp parallel { int tid = omp_get_thread_num(); hipSetDevice(tid); int devId; hipGetDevice(&devId); printf("Thread %d device %d\n", tid, devId); cuda::Timer ttimer; uint64 toffset = tid * thread_batch_size; uint64 tsize = thread_batch_size; if(tid == num_dev-1) { tsize = h_batch.num_alns - toffset; // remainder } printf("Thread %d offset %llu size %llu\n", tid, toffset, tsize); std::list<H_KVP_batch*> sorted_kvp_batches; // host container for the sorted batches bamsort_context device_context; uint64 num_aln_loaded = 0; ttimer.start(); while(num_aln_loaded < tsize) { // transfer the next batch to the device uint64 batch_size = D_BATCH_SIZE; if(tsize - num_aln_loaded < D_BATCH_SIZE) { batch_size = tsize - num_aln_loaded; } device_context.load_batch(h_batch, toffset + num_aln_loaded, batch_size); sortkey_gen(&device_context); sort(&device_context); // save sorted batches on the host H_KVP_batch* sorted_batch = new H_KVP_batch(); sorted_batch->keys.resize(device_context.sort_keys.size()); sorted_batch->ids.resize(device_context.sort_keys.size()); thrust::copy(device_context.sort_keys.begin(), device_context.sort_keys.end(), sorted_batch->keys.begin()); thrust::copy(device_context.active_read_ids.begin(), device_context.active_read_ids.end(), sorted_batch->ids.begin()); sorted_kvp_batches.push_back(sorted_batch); num_aln_loaded += sorted_batch->ids.size(); printf("Thread %d processed %llu records \n", tid, (uint64) sorted_batch->ids.size()); } ttimer.stop(); printf("Thread %d done with local sorting. Time %.4fs \n", tid, ttimer.seconds()); device_context.free_local_sort_batch(); // merge down to a single batch on each device if(sorted_kvp_batches.size() == 1) { thread_sorted_batches[tid] = sorted_kvp_batches.front(); } else { device_context.allocate_partition(); } ttimer.start(); while(sorted_kvp_batches.size() > 1) { H_KVP_batch* out = new H_KVP_batch(); H_KVP_batch* b1 = sorted_kvp_batches.front(); sorted_kvp_batches.pop_front(); H_KVP_batch* b2 = sorted_kvp_batches.front(); sorted_kvp_batches.pop_front(); // allocate space for the merged batches out->keys.resize(b1->keys.size() + b2->keys.size()); out->ids.resize(out->keys.size()); float t1, t2, t3; merge_batches_1GPU(&device_context, b1, b2, out, t1, t2, t3); sorted_kvp_batches.push_back(out); b1->free(); b2->free(); free(b1); free(b2); if(sorted_kvp_batches.size() == 1) { // merged down to one sequence thread_sorted_batches[tid] = out; break; } } ttimer.stop(); printf("Thread %d done with merging. 
Time %.4fs \n", tid, ttimer.seconds()); } H_KVP_batch* final_result = new H_KVP_batch(); if(num_dev == 2) { //TODO: generalize to any number of devices final_result->keys.resize(thread_sorted_batches[0]->keys.size() + thread_sorted_batches[1]->keys.size()); final_result->ids.resize(final_result->keys.size()); merge_batches_2GPU(thread_sorted_batches[0], thread_sorted_batches[1], final_result); } timer_all.stop(); printf("Total sort time : %.4fs\n", timer_all.seconds()); test_sorted(*final_result); // 4. write BAM output timer.start(); HTSBAMWriter bam_writer(out_fname); //bam_writer.write_header(bam_reader.header); //bam_writer.write_aln_batch(h_batch, final_result.ids, bam_reader.header); timer.stop(); printf("BAM write time : %.4fs\n", timer.seconds()); } // permute the sorted BAM file void generate_unsorted_bam(const char* in_fname, const char* out_fname) { try { // load BAMReader bam_reader(in_fname); BAM_header hdr = bam_reader.header; BAM_alignment_batch_raw sorted_batch; bam_reader.read_aln_batch_raw(sorted_batch, H_BATCH_SIZE); uint64 num_loaded = sorted_batch.offsets.size()-1; printf("Total number of loaded alignments: %llu \n", num_loaded); // generate permutation std::vector<uint64> perm(num_loaded); for (uint64 i=0; i<num_loaded; ++i) perm.push_back(i); std::srand(0); std::random_shuffle(perm.begin(), perm.end()); H_VectorU64 ids (num_loaded); for (uint64 i=0; i<num_loaded; ++i) ids.push_back(perm[i]); // write BAMWriter bam_writer(out_fname); bam_writer.write_header(hdr); bam_writer.write_aln_batch_raw(sorted_batch, ids); } catch (nvbio::runtime_error& e) { printf("%s\n", e.what()); exit(1); } } // concatenate BAM file contents multiple times void duplicate_unsorted_bam(const char* in_fname, const char* out_fname, int num_repeats) { try { BAMReader bam_reader(in_fname); BAM_header hdr = bam_reader.header; BAM_alignment_batch_raw sorted_batch; bam_reader.read_aln_batch_raw(sorted_batch, H_BATCH_SIZE); uint64 num_loaded = sorted_batch.offsets.size()-1; printf("Total number of loaded alignments: %llu \n", num_loaded); H_VectorU64 ids (num_loaded); thrust::sequence(ids.begin(), ids.end()); BAMWriter bam_writer(out_fname); bam_writer.write_header(hdr); for(int i = 0; i < num_repeats; i++) { bam_writer.write_aln_batch_raw(sorted_batch, ids); } } catch (nvbio::runtime_error& e) { printf("%s\n", e.what()); exit(1); } } //TODO: overlap load with device compute int main(int argc, char **argv) { if(argc < 3) { printf("Usage: ./bamsort <bam_file> <out_file> \n"); exit(1); } try { //generate_unsorted_bam(argv[1], argv[2]); //duplicate_unsorted_bam(argv[1], argv[2], 1000); bamsort_pipeline_basic(argv[1], argv[2]); //bamsort_pipeline_multigpu(argv[1], argv[2]); } catch (nvbio::runtime_error& e) { printf("%s\n", e.what()); exit(1); } return 0; } int test_sorted(const H_KVP_batch& result) { // check that all the keys are in ascending order uint64 k = 0; for(uint64 i = 0; i < result.keys.size(); i++) { if(k > result.keys[i]) { printf("Failed test; out of order: %llu %llu %llu %llu %llu \n", (uint64) k, (uint64) result.keys[i], (uint64) result.keys[i-1], i, i-1); return 0; } k = result.keys[i]; } printf("Passed test! \n"); return 1; }
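find_split_idx in the file above leans on thrust::partition_point over an already-sorted range. The standalone sketch below shows the same call pattern on a small device vector; the is_less_than functor name and the sample values are invented for illustration and are not part of the original source.

#include <thrust/device_vector.h>
#include <thrust/partition.h>
#include <thrust/sequence.h>
#include <thrust/distance.h>
#include <cstdint>
#include <cstdio>

// Mirrors the role of is_less above: true for keys strictly below the pivot.
struct is_less_than {
  uint64_t pivot;
  __host__ __device__ bool operator()(uint64_t x) const { return x < pivot; }
};

int main() {
  thrust::device_vector<uint64_t> keys(8);
  thrust::sequence(keys.begin(), keys.end(), (uint64_t)10, (uint64_t)10);  // 10, 20, ..., 80

  // On a sorted range, partition_point returns the first key failing the predicate,
  // so the distance from begin() is the count of keys below the pivot.
  auto split = thrust::partition_point(keys.begin(), keys.end(), is_less_than{45});
  std::printf("keys below pivot: %ld\n", (long)thrust::distance(keys.begin(), split));  // prints 4
  return 0;
}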
26577e382524d67f54e535c9461b0e3bb33a9713.cu
/* * Copyright (c) 2012-14, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. * * * * * * * * */ #include <nvbio/basic/types.h> #include <nvbio/basic/vector.h> #include <nvbio/basic/dna.h> #include <nvbio/io/sequence/sequence.h> #include <nvbio/io/sequence/sequence_access.h> #include <nvbio/io/vcf.h> #include <nvbio/io/sequence/sequence_pac.h> #include <cub/cub.cuh> #include <mgpuhost.cuh> #include <moderngpu.cuh> #include <nvbio/strings/string_set.h> #include <nvbio/basic/thrust_view.h> #include <nvbio/basic/cuda/sort.h> #include <nvbio/basic/cuda/timer.h> #include <nvbio/basic/cuda/ldg.h> #include <nvbio/basic/cuda/primitives.h> #include <nvbio/io/output/output_types.h> #include <moderngpu.cuh> #include <mgpuhost.cuh> #include "bam_io.h" #include "bam_sort.h" //#ifdef _OPENMP #include <omp.h> //#endif using namespace nvbio; int test_sorted(const H_KVP_batch& result); /** --------- Sorting Modules -------- **/ // generate sort keys void sortkey_gen(bamsort_context* context) { thrust::for_each(context->active_read_ids.begin(), context->active_read_ids.end(), generate_sort_keys(*context)); } // local sort of key-val pairs on the device void sort(bamsort_context* context) { thrust::sort_by_key(context->sort_keys.begin(), context->sort_keys.end(), context->active_read_ids.begin()); } // used by out-of-core merge for searching a sorted array // to find the position until which all the elements are less than the pivot // TODO: ensure thrust uses binary search uint32 find_split_idx(bamsort_context* context, const uint64 len, const uint64 pivot) { return thrust::distance(context->patch_searched.begin(), thrust::partition_point(context->patch_searched.begin(), context->patch_searched.begin() + len, is_less(pivot))); } // out-of-core merge of two sorted batches // for single GPU: sequentially merge the pivot elements on the CPU to determine the partitions // since only one partition at a time can be merged on the device -- this is to avoid // extracting and sorting the pivot elements separately; // TODO: optimize if pivots have the same value void merge_batches_1GPU(bamsort_context* context, const H_KVP_batch* b1, const H_KVP_batch* b2, H_KVP_batch* out, float& merge_time, float& data_time, float& search_time) { cuda::Timer timer; // partition info uint64 b1_npivots = b1->keys.size() / PIVOT_SAMPLING_INTERVAL; uint64 b2_npivots = b2->keys.size() / PIVOT_SAMPLING_INTERVAL; uint64 b1_pivot = 1, b2_pivot = 1; uint64 p1L = 0, p1H = 0; // batch partition limits [L, H) uint64 p2L = 0, p2H = 0; uint64 p1_size = 0, p2_size = 0; uint64 out_idx = 0; // mgpu context int current_device; cudaGetDevice(&current_device); mgpu::ContextPtr mgpu_ctxt = mgpu::CreateCudaDevice(current_device); while(1) { // check if we're in the last partition if(b1_pivot > b1_npivots || b2_pivot > b2_npivots) { break; // still need to merge the batch remainders } // find the next partition p1L = p1H; p2L = p2H; timer.start(); if(b1->keys[b1_pivot*PIVOT_SAMPLING_INTERVAL-1] <= b2->keys[b2_pivot*PIVOT_SAMPLING_INTERVAL-1]) { p1H = b1_pivot*PIVOT_SAMPLING_INTERVAL; // only need to search this patch since the pivots are sorted NVBIO_CUDA_ASSERT(context->patch_searched.size() <= 
PIVOT_SAMPLING_INTERVAL); thrust::copy(b2->keys.begin() + p2L, b2->keys.begin() + b2_pivot*PIVOT_SAMPLING_INTERVAL, context->patch_searched.begin()); p2H = p2L + find_split_idx(context, b2_pivot*PIVOT_SAMPLING_INTERVAL - p2L, b1->keys[b1_pivot*PIVOT_SAMPLING_INTERVAL-1]); b1_pivot++; // advance the pivot pointer } else { p2H = b2_pivot*PIVOT_SAMPLING_INTERVAL; NVBIO_CUDA_ASSERT(context->patch_searched.size() <= PIVOT_SAMPLING_INTERVAL); thrust::copy(b1->keys.begin() + p1L, b1->keys.begin() + b1_pivot*PIVOT_SAMPLING_INTERVAL, context->patch_searched.begin()); p1H = p1L + find_split_idx(context, b1_pivot*PIVOT_SAMPLING_INTERVAL - p1L, b2->keys[b2_pivot*PIVOT_SAMPLING_INTERVAL-1]); b2_pivot++; } timer.stop(); search_time += timer.seconds(); p1_size = p1H - p1L; p2_size = p2H - p2L; //printf("Partition sizes: %llu %llu \n", p1_size, p2_size); // if one of the batch partitions is empty, we are done if(p1_size == 0) { thrust::copy(b2->keys.begin() + p2L, b2->keys.begin() + p2H, out->keys.begin()+out_idx); thrust::copy(b2->ids.begin() + p2L, b2->ids.begin() + p2H, out->ids.begin()+out_idx); out_idx += p2_size; continue; } else if(p2_size == 0) { thrust::copy(b1->keys.begin() + p1L, b1->keys.begin() + p1H, out->keys.begin()+out_idx); thrust::copy(b1->ids.begin() + p1L, b1->ids.begin() + p1H, out->ids.begin()+out_idx); out_idx += p1_size; continue; } // TODO: if the sizes are less than a given threshold, merge on the CPU // transfer the partitions to the device NVBIO_CUDA_ASSERT(context->p1.keys.size() <= 2*PIVOT_SAMPLING_INTERVAL); NVBIO_CUDA_ASSERT(context->p2.keys.size() <= 2*PIVOT_SAMPLING_INTERVAL); NVBIO_CUDA_ASSERT(context->r.keys.size() <= 2*PIVOT_SAMPLING_INTERVAL); timer.start(); thrust::copy(b1->keys.begin() + p1L, b1->keys.begin() + p1H, context->p1.keys.begin()); thrust::copy(b2->keys.begin() + p2L, b2->keys.begin() + p2H, context->p2.keys.begin()); thrust::copy(b1->ids.begin() + p1L, b1->ids.begin() + p1H, context->p1.ids.begin()); thrust::copy(b2->ids.begin() + p2L, b2->ids.begin() + p2H, context->p2.ids.begin()); timer.stop(); data_time += timer.seconds(); // merge timer.start(); mgpu::MergePairs(context->p1.keys.begin(), context->p1.ids.begin(), p1_size, context->p2.keys.begin(), context->p2.ids.begin(), p2_size, context->r.keys.begin(), context->r.ids.begin(), *mgpu_ctxt); timer.stop(); merge_time += timer.seconds(); // transfer the results to the host timer.start(); thrust::copy(context->r.keys.begin(), context->r.keys.begin() + p1_size + p2_size, out->keys.begin()+out_idx); thrust::copy(context->r.ids.begin(), context->r.ids.begin() + p1_size + p2_size, out->ids.begin()+out_idx); timer.stop(); data_time += timer.seconds(); out_idx += p1_size + p2_size; } // merge the final pieces p1_size = b1->keys.size() - p1H; p2_size = b2->keys.size() - p2H; //printf("Final partition sizes: %llu %llu \n", p1_size, p2_size); // if one of the batch remainders is empty, we are done if(p1_size == 0) { thrust::copy(b2->keys.begin() + p2H, b2->keys.end(), out->keys.begin()+out_idx); thrust::copy(b2->ids.begin() + p2H, b2->ids.end(), out->ids.begin()+out_idx); return; } else if(p2_size == 0) { thrust::copy(b1->keys.begin() + p1H, b1->keys.end(), out->keys.begin()+out_idx); thrust::copy(b1->ids.begin() + p1H, b1->ids.end(), out->ids.begin()+out_idx); return; } NVBIO_CUDA_ASSERT(context->p1.keys.size() <= 2*PIVOT_SAMPLING_INTERVAL); NVBIO_CUDA_ASSERT(context->p2.keys.size() <= 2*PIVOT_SAMPLING_INTERVAL); NVBIO_CUDA_ASSERT(context->r.keys.size() <= 2*PIVOT_SAMPLING_INTERVAL); timer.start(); 
thrust::copy(b1->keys.begin() + p1H, b1->keys.end(), context->p1.keys.begin()); thrust::copy(b2->keys.begin() + p2H, b2->keys.end(), context->p2.keys.begin()); thrust::copy(b1->ids.begin() + p1H, b1->ids.end(), context->p1.ids.begin()); thrust::copy(b2->ids.begin() + p2H, b2->ids.end(), context->p2.ids.begin()); timer.stop(); data_time += timer.seconds(); timer.start(); mgpu::MergePairs(context->p1.keys.begin(), context->p1.ids.begin(), p1_size, context->p2.keys.begin(), context->p2.ids.begin(), p2_size, context->r.keys.begin(), context->r.ids.begin(), *mgpu_ctxt); timer.stop(); merge_time += timer.seconds(); timer.start(); thrust::copy(context->r.keys.begin(), context->r.keys.begin() + p1_size + p2_size, out->keys.begin()+out_idx); thrust::copy(context->r.ids.begin(), context->r.ids.begin() + p1_size + p2_size, out->ids.begin()+out_idx); timer.stop(); data_time += timer.seconds(); } // out-of-core merge of two sorted batches // two GPUs void merge_batches_2GPU(H_KVP_batch* b1, H_KVP_batch* b2, H_KVP_batch* out) { // partition info uint64 b1_npivots = (b1->keys.size()-1) / PIVOT_SAMPLING_INTERVAL; uint64 b2_npivots = (b2->keys.size()-1) / PIVOT_SAMPLING_INTERVAL; // 1. sort the pivots H_VectorU64 pivots(b1_npivots + b2_npivots); for(uint64 i = 0; i < b1_npivots; i++) { pivots[i] = b1->keys[(i+1)*PIVOT_SAMPLING_INTERVAL-1]; } for(uint64 i = 0; i < b2_npivots; i++) { pivots[b1_npivots + i] = b2->keys[(i+1)*PIVOT_SAMPLING_INTERVAL-1]; } thrust::sort(pivots.begin(), pivots.end()); printf("Merge: found and sorted pivots. Num pivots %llu \n", (uint64) pivots.size()); std::vector<H_KVP_batch*> batches(2); std::vector<H_VectorU64> pivot_idx(2); batches[0] = b1; batches[1] = b2; // 2. search each batch for the partition delimiters omp_set_num_threads(2); #pragma omp parallel { int tid = omp_get_thread_num(); cudaSetDevice(tid); H_KVP_batch* b = batches[tid]; D_VectorU64 d_bkeys(D_BATCH_SIZE); pivot_idx[tid].resize(pivots.size()); uint64 num_processed = 0; uint64 pid = 0; while(num_processed < b->keys.size() && pid < pivots.size()) { uint64 batch_size = D_BATCH_SIZE; if(b->keys.size() - num_processed < D_BATCH_SIZE) { batch_size = b->keys.size() - num_processed; } thrust::copy(b->keys.begin() + num_processed, b->keys.begin() + num_processed + batch_size, d_bkeys.begin()); // find as many pivots as possible in the loaded partition while(1) { if(pid >= pivots.size() || pivots[pid] > b->keys[num_processed + batch_size - 1]) { break; // load the next batch } // pivot is in the loaded section uint64 offset = thrust::distance(d_bkeys.begin(), thrust::partition_point(d_bkeys.begin(), d_bkeys.begin() + batch_size, is_less(pivots[pid]))); pivot_idx[tid][pid] = num_processed + offset; pid++; } num_processed += batch_size; } if(pid < pivots.size()) { // if pid == 0, all elements in this batch are smaller than the elements in the second batch for(uint64 i = pid; i < pivots.size(); i++) { pivot_idx[tid][i] = b->keys.size(); } } printf("Thread %d processed %llu elements \n", tid, num_processed); } // 3. find partition offsets into output // TODO: optimize out empty partitions (when pivots are equal) uint64 num_partitions = pivots.size() + 1; H_VectorU64 p_offsets(num_partitions); p_offsets[0] = 0; for(uint64 i = 1; i < num_partitions; i++) { p_offsets[i] = pivot_idx[0][i-1] + pivot_idx[1][i-1]; } printf("Total number of partitions: %llu \n", num_partitions); std::vector<mgpu::ContextPtr> mgpu_ctxt(2); mgpu_ctxt[0] = mgpu::CreateCudaDevice(0); mgpu_ctxt[1] = mgpu::CreateCudaDevice(1); // 4. 
merge the partitions omp_set_num_threads(2); #pragma omp parallel { int tid = omp_get_thread_num(); cudaSetDevice(tid); bamsort_context context; context.allocate_partition(); uint64 p1L, p1H; // batch partition limits [L, H) uint64 p2L, p2H; uint64 p1_size, p2_size; uint64 part_id = tid; while(part_id < num_partitions) { uint64 out_idx = p_offsets[part_id]; if(part_id == 0) { p1L = 0; p2L = 0; } else { p1L = pivot_idx[0][part_id-1]; p2L = pivot_idx[1][part_id-1]; } if(part_id == num_partitions - 1) { p1H = b1->keys.size(); p2H = b2->keys.size(); } else { p1H = pivot_idx[0][part_id]; p2H = pivot_idx[1][part_id]; } p1_size = p1H - p1L; p2_size = p2H - p2L; printf("Thread %d. Partition sizes: %llu %llu\n", tid, p1_size, p2_size); // if one of the batch partitions is empty, we are done if(p1_size == 0) { thrust::copy(b2->keys.begin() + p2L, b2->keys.begin() + p2H, out->keys.begin()+out_idx); thrust::copy(b2->ids.begin() + p2L, b2->ids.begin() + p2H, out->ids.begin()+out_idx); part_id += 2; continue; } else if(p2_size == 0) { thrust::copy(b1->keys.begin() + p1L, b1->keys.begin() + p1H, out->keys.begin()+out_idx); thrust::copy(b1->ids.begin() + p1L, b1->ids.begin() + p1H, out->ids.begin()+out_idx); part_id += 2; continue; } // transfer the partitions to the device thrust::copy(b1->keys.begin() + p1L, b1->keys.begin() + p1H, context.p1.keys.begin()); thrust::copy(b2->keys.begin() + p2L, b2->keys.begin() + p2H, context.p2.keys.begin()); thrust::copy(b1->ids.begin() + p1L, b1->ids.begin() + p1H, context.p1.ids.begin()); thrust::copy(b2->ids.begin() + p2L, b2->ids.begin() + p2H, context.p2.ids.begin()); // merge mgpu::MergePairs(context.p1.keys.begin(), context.p1.ids.begin(), p1_size, context.p2.keys.begin(), context.p2.ids.begin(), p2_size, context.r.keys.begin(), context.r.ids.begin(), *mgpu_ctxt[tid]); // transfer the results to the host thrust::copy(context.r.keys.begin(), context.r.keys.begin() + p1_size + p2_size, out->keys.begin()+out_idx); thrust::copy(context.r.ids.begin(), context.r.ids.begin() + p1_size + p2_size, out->ids.begin()+out_idx); part_id += 2; } } } /** ------ Sorting Pipelines ---------- **/ // full load -> sort -> store (no IO-compute overlapping) // single GPU void bamsort_pipeline_basic(const char* in_fname, const char* out_fname) { cuda::Timer timer, timer_alloc, timer_all; float sort_time = 0, keygen_time = 0, data_time = 0, merge_time = 0, merge_data_time = 0, merge_search_time = 0; //timer_all.start(); // 1. load BAM timer.start(); HTSBAMReader bam_reader(in_fname); BAM_alignment_batch_SoA h_batch(BAM_POSITIONS | BAM_REFIDS | BAM_FLAGS); bam_reader.read_aln_batch(h_batch, H_BATCH_SIZE); timer.stop(); printf("BAM load time: %.4fs\n", timer.seconds()); printf("Total number of alignments: %llu \n", h_batch.num_alns); timer_all.start(); // 2. 
split and sort int num_batches = 0; uint64 num_aln_loaded = 0; std::list<H_KVP_batch*> sorted_kvp_batches; // container for the sorted batches bamsort_context device_context; // device data for sorting while(num_aln_loaded < h_batch.num_alns) { // transfer the next batch to the device timer.start(); device_context.load_batch(h_batch, num_aln_loaded, D_BATCH_SIZE); timer.stop(); data_time += timer.seconds(); // generate the sort keys timer.start(); sortkey_gen(&device_context); timer.stop(); keygen_time += timer.seconds(); // sort timer.start(); sort(&device_context); timer.stop(); sort_time += timer.seconds(); // save sorted batches on the host timer.start(); H_KVP_batch* sorted_batch = new H_KVP_batch(); sorted_batch->keys.resize(device_context.sort_keys.size()); sorted_batch->ids.resize(device_context.sort_keys.size()); thrust::copy(device_context.sort_keys.begin(), device_context.sort_keys.end(), sorted_batch->keys.begin()); thrust::copy(device_context.active_read_ids.begin(), device_context.active_read_ids.end(), sorted_batch->ids.begin()); sorted_kvp_batches.push_back(sorted_batch); timer.stop(); data_time += timer.seconds(); num_batches += 1; num_aln_loaded += sorted_batch->ids.size(); printf("Processed %d batches and %llu reads \n", num_batches, num_aln_loaded); } printf("Local keygen-only time : %.4fs\n", keygen_time); printf("Local batch sorting-only time : %.4fs\n", sort_time); printf("Local device data allocation and transfer time : %.4fs\n", data_time); // free device data device_context.free_local_sort_batch(); // 3. merge H_KVP_batch* final_result = sorted_kvp_batches.front(); if(sorted_kvp_batches.size() != 1) { device_context.allocate_partition(); } timer.start(); float alloc_time = 0; while(sorted_kvp_batches.size() > 1) { timer_alloc.start(); H_KVP_batch* out = new H_KVP_batch(); H_KVP_batch* b1 = sorted_kvp_batches.front(); sorted_kvp_batches.pop_front(); H_KVP_batch* b2 = sorted_kvp_batches.front(); sorted_kvp_batches.pop_front(); // allocate space for the merged batches out->keys.resize(b1->keys.size() + b2->keys.size()); out->ids.resize(out->keys.size()); timer_alloc.stop(); alloc_time += timer_alloc.seconds(); // merge merge_batches_1GPU(&device_context, b1, b2, out, merge_time, merge_data_time, merge_search_time); sorted_kvp_batches.push_back(out); // free batch memory b1->free(); b2->free(); free(b1); free(b2); if(sorted_kvp_batches.size() == 1) { // merged down to one sequence final_result = out; break; } } timer.stop(); printf("Device merge-only time : %.4fs\n", merge_time); printf("Device merge data time : %.4fs\n", merge_data_time); printf("Device merge search time : %.4fs\n", merge_search_time); printf("Merge out alloc time : %.4fs\n", alloc_time); printf("Total merge time : %.4fs\n", timer.seconds()); timer_all.stop(); test_sorted(*final_result); timer.start(); //BAM_alignment_batch_SoA out_batch(h_batch.field_mask); //h_batch.shuffle(out_batch, final_result->ids); timer.stop(); printf("Shuffle time : %.4fs\n", timer.seconds()); // 4. write BAM output timer.start(); HTSBAMWriter bam_writer(out_fname); //bam_writer.write_hdr(bam_reader.header); //bam_writer.write_aln_batch(h_batch, final_result->ids, bam_reader.header); timer.stop(); printf("BAM write time : %.4fs\n", timer.seconds()); //timer_all.stop(); printf("Total BAMSORT time : %.4fs\n", timer_all.seconds()); } // full load -> sort -> store (no overlapping) // multi-GPU void bamsort_pipeline_multigpu(const char* in_fname, const char* out_fname) { cuda::Timer timer, timer_all; // 1. 
load BAM timer.start(); HTSBAMReader bam_reader(in_fname); BAM_alignment_batch_SoA h_batch(BAM_POSITIONS | BAM_REFIDS | BAM_FLAGS); bam_reader.read_aln_batch(h_batch, H_BATCH_SIZE); timer.stop(); printf("BAM load time: %.4fs\n", timer.seconds()); printf("Total number of alignments: %llu \n", h_batch.num_alns); int num_dev = 0; cudaGetDeviceCount(&num_dev); printf("Total number of CPUs: %d\n", omp_get_num_procs()); printf("Total number of GPUs: %d\n", num_dev); for (int i = 0; i < num_dev; i++) { cudaDeviceProp dprop; cudaGetDeviceProperties(&dprop, i); printf(" %d: %s\n", i, dprop.name); } // 2. split and sort //TODO: figure out # devices that is appropriate to use based on load size (when num_dev > 2) if(h_batch.num_alns <= D_MIN_BATCH_SIZE) { printf("Running on a single device (load size is <= minimum single device size) \n"); num_dev = 1; } uint64 thread_batch_size = h_batch.num_alns/num_dev; std::vector<H_KVP_batch*> thread_sorted_batches(num_dev); timer_all.start(); omp_set_num_threads(num_dev); #pragma omp parallel { int tid = omp_get_thread_num(); cudaSetDevice(tid); int devId; cudaGetDevice(&devId); printf("Thread %d device %d\n", tid, devId); cuda::Timer ttimer; uint64 toffset = tid * thread_batch_size; uint64 tsize = thread_batch_size; if(tid == num_dev-1) { tsize = h_batch.num_alns - toffset; // remainder } printf("Thread %d offset %llu size %llu\n", tid, toffset, tsize); std::list<H_KVP_batch*> sorted_kvp_batches; // host container for the sorted batches bamsort_context device_context; uint64 num_aln_loaded = 0; ttimer.start(); while(num_aln_loaded < tsize) { // transfer the next batch to the device uint64 batch_size = D_BATCH_SIZE; if(tsize - num_aln_loaded < D_BATCH_SIZE) { batch_size = tsize - num_aln_loaded; } device_context.load_batch(h_batch, toffset + num_aln_loaded, batch_size); sortkey_gen(&device_context); sort(&device_context); // save sorted batches on the host H_KVP_batch* sorted_batch = new H_KVP_batch(); sorted_batch->keys.resize(device_context.sort_keys.size()); sorted_batch->ids.resize(device_context.sort_keys.size()); thrust::copy(device_context.sort_keys.begin(), device_context.sort_keys.end(), sorted_batch->keys.begin()); thrust::copy(device_context.active_read_ids.begin(), device_context.active_read_ids.end(), sorted_batch->ids.begin()); sorted_kvp_batches.push_back(sorted_batch); num_aln_loaded += sorted_batch->ids.size(); printf("Thread %d processed %llu records \n", tid, (uint64) sorted_batch->ids.size()); } ttimer.stop(); printf("Thread %d done with local sorting. Time %.4fs \n", tid, ttimer.seconds()); device_context.free_local_sort_batch(); // merge down to a single batch on each device if(sorted_kvp_batches.size() == 1) { thread_sorted_batches[tid] = sorted_kvp_batches.front(); } else { device_context.allocate_partition(); } ttimer.start(); while(sorted_kvp_batches.size() > 1) { H_KVP_batch* out = new H_KVP_batch(); H_KVP_batch* b1 = sorted_kvp_batches.front(); sorted_kvp_batches.pop_front(); H_KVP_batch* b2 = sorted_kvp_batches.front(); sorted_kvp_batches.pop_front(); // allocate space for the merged batches out->keys.resize(b1->keys.size() + b2->keys.size()); out->ids.resize(out->keys.size()); float t1, t2, t3; merge_batches_1GPU(&device_context, b1, b2, out, t1, t2, t3); sorted_kvp_batches.push_back(out); b1->free(); b2->free(); free(b1); free(b2); if(sorted_kvp_batches.size() == 1) { // merged down to one sequence thread_sorted_batches[tid] = out; break; } } ttimer.stop(); printf("Thread %d done with merging. 
Time %.4fs \n", tid, ttimer.seconds()); } H_KVP_batch* final_result = new H_KVP_batch(); if(num_dev == 2) { //TODO: generalize to any number of devices final_result->keys.resize(thread_sorted_batches[0]->keys.size() + thread_sorted_batches[1]->keys.size()); final_result->ids.resize(final_result->keys.size()); merge_batches_2GPU(thread_sorted_batches[0], thread_sorted_batches[1], final_result); } timer_all.stop(); printf("Total sort time : %.4fs\n", timer_all.seconds()); test_sorted(*final_result); // 4. write BAM output timer.start(); HTSBAMWriter bam_writer(out_fname); //bam_writer.write_header(bam_reader.header); //bam_writer.write_aln_batch(h_batch, final_result.ids, bam_reader.header); timer.stop(); printf("BAM write time : %.4fs\n", timer.seconds()); } // permute the sorted BAM file void generate_unsorted_bam(const char* in_fname, const char* out_fname) { try { // load BAMReader bam_reader(in_fname); BAM_header hdr = bam_reader.header; BAM_alignment_batch_raw sorted_batch; bam_reader.read_aln_batch_raw(sorted_batch, H_BATCH_SIZE); uint64 num_loaded = sorted_batch.offsets.size()-1; printf("Total number of loaded alignments: %llu \n", num_loaded); // generate permutation std::vector<uint64> perm(num_loaded); for (uint64 i=0; i<num_loaded; ++i) perm.push_back(i); std::srand(0); std::random_shuffle(perm.begin(), perm.end()); H_VectorU64 ids (num_loaded); for (uint64 i=0; i<num_loaded; ++i) ids.push_back(perm[i]); // write BAMWriter bam_writer(out_fname); bam_writer.write_header(hdr); bam_writer.write_aln_batch_raw(sorted_batch, ids); } catch (nvbio::runtime_error& e) { printf("%s\n", e.what()); exit(1); } } // concatenate BAM file contents multiple times void duplicate_unsorted_bam(const char* in_fname, const char* out_fname, int num_repeats) { try { BAMReader bam_reader(in_fname); BAM_header hdr = bam_reader.header; BAM_alignment_batch_raw sorted_batch; bam_reader.read_aln_batch_raw(sorted_batch, H_BATCH_SIZE); uint64 num_loaded = sorted_batch.offsets.size()-1; printf("Total number of loaded alignments: %llu \n", num_loaded); H_VectorU64 ids (num_loaded); thrust::sequence(ids.begin(), ids.end()); BAMWriter bam_writer(out_fname); bam_writer.write_header(hdr); for(int i = 0; i < num_repeats; i++) { bam_writer.write_aln_batch_raw(sorted_batch, ids); } } catch (nvbio::runtime_error& e) { printf("%s\n", e.what()); exit(1); } } //TODO: overlap load with device compute int main(int argc, char **argv) { if(argc < 3) { printf("Usage: ./bamsort <bam_file> <out_file> \n"); exit(1); } try { //generate_unsorted_bam(argv[1], argv[2]); //duplicate_unsorted_bam(argv[1], argv[2], 1000); bamsort_pipeline_basic(argv[1], argv[2]); //bamsort_pipeline_multigpu(argv[1], argv[2]); } catch (nvbio::runtime_error& e) { printf("%s\n", e.what()); exit(1); } return 0; } int test_sorted(const H_KVP_batch& result) { // check that all the keys are in ascending order uint64 k = 0; for(uint64 i = 0; i < result.keys.size(); i++) { if(k > result.keys[i]) { printf("Failed test; out of order: %llu %llu %llu %llu %llu \n", (uint64) k, (uint64) result.keys[i], (uint64) result.keys[i-1], i, i-1); return 0; } k = result.keys[i]; } printf("Passed test! \n"); return 1; }
f5203f801367bc4637bee689e1c6ac9c630335fc.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
#define GPU
//#define CUDNN
extern "C" {
#include "activations.h"
#include "hip/hip_runtime.h"
}

__device__ float lhtan_activate_kernel(float x)
{
    if(x < 0) return .001*x;
    if(x > 1) return .001*(x-1) + 1;
    return x;
}
__device__ float lhtan_gradient_kernel(float x)
{
    if(x > 0 && x < 1) return 1;
    return .001;
}

__device__ float hardtan_activate_kernel(float x)
{
    if (x < -1) return -1;
    if (x > 1) return 1;
    return x;
}
__device__ float linear_activate_kernel(float x){return x;}
__device__ float logistic_activate_kernel(float x){return 1./(1. + exp(-x));}
__device__ float loggy_activate_kernel(float x){return 2./(1. + exp(-x)) - 1;}
__device__ float relu_activate_kernel(float x){return x*(x>0);}
__device__ float elu_activate_kernel(float x){return (x >= 0)*x + (x < 0)*(exp(x)-1);}
__device__ float relie_activate_kernel(float x){return (x>0) ? x : .01*x;}
__device__ float ramp_activate_kernel(float x){return x*(x>0)+.1*x;}
__device__ float leaky_activate_kernel(float x){return (x>0) ? x : .1*x;}
__device__ float tanh_activate_kernel(float x){return (2/(1 + exp(-2*x)) - 1);}
__device__ float plse_activate_kernel(float x)
{
    if(x < -4) return .01 * (x + 4);
    if(x > 4) return .01 * (x - 4) + 1;
    return .125*x + .5;
}
__device__ float stair_activate_kernel(float x)
{
    int n = floor(x);
    if (n%2 == 0) return floor(x/2.);
    else return (x - n) + floor(x/2.);
}

__device__ float hardtan_gradient_kernel(float x)
{
    if (x > -1 && x < 1) return 1;
    return 0;
}
__device__ float linear_gradient_kernel(float x){return 1;}
__device__ float logistic_gradient_kernel(float x){return (1-x)*x;}
__device__ float loggy_gradient_kernel(float x)
{
    float y = (x+1.)/2.;
    return 2*(1-y)*y;
}
__device__ float relu_gradient_kernel(float x){return (x>0);}
__device__ float elu_gradient_kernel(float x){return (x >= 0) + (x < 0)*(x + 1);}
__device__ float relie_gradient_kernel(float x){return (x>0) ? 1 : .01;}
__device__ float ramp_gradient_kernel(float x){return (x>0)+.1;}
__device__ float leaky_gradient_kernel(float x){return (x>0) ? 1 : .1;}
__device__ float tanh_gradient_kernel(float x){return 1-x*x;}
__device__ float plse_gradient_kernel(float x){return (x < 0 || x > 1) ? .01 : .125;}
__device__ float stair_gradient_kernel(float x)
{
    if (floor(x) == x) return 0;
    return 1;
}

__device__ float activate_kernel(float x, ACTIVATION a)
{
    switch(a){
        case LINEAR:
            return linear_activate_kernel(x);
        case LOGISTIC:
            return logistic_activate_kernel(x);
        case LOGGY:
            return loggy_activate_kernel(x);
        case RELU:
            return relu_activate_kernel(x);
        case ELU:
            return elu_activate_kernel(x);
        case RELIE:
            return relie_activate_kernel(x);
        case RAMP:
            return ramp_activate_kernel(x);
        case LEAKY:
            return leaky_activate_kernel(x);
        case TANH:
            return tanh_activate_kernel(x);
        case PLSE:
            return plse_activate_kernel(x);
        case STAIR:
            return stair_activate_kernel(x);
        case HARDTAN:
            return hardtan_activate_kernel(x);
        case LHTAN:
            return lhtan_activate_kernel(x);
    }
    return 0;
}

__device__ float gradient_kernel(float x, ACTIVATION a)
{
    switch(a){
        case LINEAR:
            return linear_gradient_kernel(x);
        case LOGISTIC:
            return logistic_gradient_kernel(x);
        case LOGGY:
            return loggy_gradient_kernel(x);
        case RELU:
            return relu_gradient_kernel(x);
        case ELU:
            return elu_gradient_kernel(x);
        case RELIE:
            return relie_gradient_kernel(x);
        case RAMP:
            return ramp_gradient_kernel(x);
        case LEAKY:
            return leaky_gradient_kernel(x);
        case TANH:
            return tanh_gradient_kernel(x);
        case PLSE:
            return plse_gradient_kernel(x);
        case STAIR:
            return stair_gradient_kernel(x);
        case HARDTAN:
            return hardtan_gradient_kernel(x);
        case LHTAN:
            return lhtan_gradient_kernel(x);
    }
    return 0;
}

__global__ void activate_array_kernel(float *x, int n, ACTIVATION a)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < n) x[i] = activate_kernel(x[i], a);
}

__global__ void gradient_array_kernel(float *x, int n, ACTIVATION a, float *delta)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < n) delta[i] *= gradient_kernel(x[i], a);
}

extern "C" void activate_array_ongpu(float *x, int n, ACTIVATION a)
{
    hipLaunchKernelGGL(( activate_array_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, x, n, a);
    check_error(hipPeekAtLastError());
}

extern "C" void gradient_array_ongpu(float *x, int n, ACTIVATION a, float *delta)
{
    hipLaunchKernelGGL(( gradient_array_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, x, n, a, delta);
    check_error(hipPeekAtLastError());
}
f5203f801367bc4637bee689e1c6ac9c630335fc.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" #define GPU //#define CUDNN extern "C" { #include "activations.h" #include "cuda.h" } __device__ float lhtan_activate_kernel(float x) { if(x < 0) return .001*x; if(x > 1) return .001*(x-1) + 1; return x; } __device__ float lhtan_gradient_kernel(float x) { if(x > 0 && x < 1) return 1; return .001; } __device__ float hardtan_activate_kernel(float x) { if (x < -1) return -1; if (x > 1) return 1; return x; } __device__ float linear_activate_kernel(float x){return x;} __device__ float logistic_activate_kernel(float x){return 1./(1. + exp(-x));} __device__ float loggy_activate_kernel(float x){return 2./(1. + exp(-x)) - 1;} __device__ float relu_activate_kernel(float x){return x*(x>0);} __device__ float elu_activate_kernel(float x){return (x >= 0)*x + (x < 0)*(exp(x)-1);} __device__ float relie_activate_kernel(float x){return (x>0) ? x : .01*x;} __device__ float ramp_activate_kernel(float x){return x*(x>0)+.1*x;} __device__ float leaky_activate_kernel(float x){return (x>0) ? x : .1*x;} __device__ float tanh_activate_kernel(float x){return (2/(1 + exp(-2*x)) - 1);} __device__ float plse_activate_kernel(float x) { if(x < -4) return .01 * (x + 4); if(x > 4) return .01 * (x - 4) + 1; return .125*x + .5; } __device__ float stair_activate_kernel(float x) { int n = floor(x); if (n%2 == 0) return floor(x/2.); else return (x - n) + floor(x/2.); } __device__ float hardtan_gradient_kernel(float x) { if (x > -1 && x < 1) return 1; return 0; } __device__ float linear_gradient_kernel(float x){return 1;} __device__ float logistic_gradient_kernel(float x){return (1-x)*x;} __device__ float loggy_gradient_kernel(float x) { float y = (x+1.)/2.; return 2*(1-y)*y; } __device__ float relu_gradient_kernel(float x){return (x>0);} __device__ float elu_gradient_kernel(float x){return (x >= 0) + (x < 0)*(x + 1);} __device__ float relie_gradient_kernel(float x){return (x>0) ? 1 : .01;} __device__ float ramp_gradient_kernel(float x){return (x>0)+.1;} __device__ float leaky_gradient_kernel(float x){return (x>0) ? 1 : .1;} __device__ float tanh_gradient_kernel(float x){return 1-x*x;} __device__ float plse_gradient_kernel(float x){return (x < 0 || x > 1) ? 
.01 : .125;} __device__ float stair_gradient_kernel(float x) { if (floor(x) == x) return 0; return 1; } __device__ float activate_kernel(float x, ACTIVATION a) { switch(a){ case LINEAR: return linear_activate_kernel(x); case LOGISTIC: return logistic_activate_kernel(x); case LOGGY: return loggy_activate_kernel(x); case RELU: return relu_activate_kernel(x); case ELU: return elu_activate_kernel(x); case RELIE: return relie_activate_kernel(x); case RAMP: return ramp_activate_kernel(x); case LEAKY: return leaky_activate_kernel(x); case TANH: return tanh_activate_kernel(x); case PLSE: return plse_activate_kernel(x); case STAIR: return stair_activate_kernel(x); case HARDTAN: return hardtan_activate_kernel(x); case LHTAN: return lhtan_activate_kernel(x); } return 0; } __device__ float gradient_kernel(float x, ACTIVATION a) { switch(a){ case LINEAR: return linear_gradient_kernel(x); case LOGISTIC: return logistic_gradient_kernel(x); case LOGGY: return loggy_gradient_kernel(x); case RELU: return relu_gradient_kernel(x); case ELU: return elu_gradient_kernel(x); case RELIE: return relie_gradient_kernel(x); case RAMP: return ramp_gradient_kernel(x); case LEAKY: return leaky_gradient_kernel(x); case TANH: return tanh_gradient_kernel(x); case PLSE: return plse_gradient_kernel(x); case STAIR: return stair_gradient_kernel(x); case HARDTAN: return hardtan_gradient_kernel(x); case LHTAN: return lhtan_gradient_kernel(x); } return 0; } __global__ void activate_array_kernel(float *x, int n, ACTIVATION a) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n) x[i] = activate_kernel(x[i], a); } __global__ void gradient_array_kernel(float *x, int n, ACTIVATION a, float *delta) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n) delta[i] *= gradient_kernel(x[i], a); } extern "C" void activate_array_ongpu(float *x, int n, ACTIVATION a) { activate_array_kernel<<<cuda_gridsize(n), BLOCK>>>(x, n, a); check_error(cudaPeekAtLastError()); } extern "C" void gradient_array_ongpu(float *x, int n, ACTIVATION a, float *delta) { gradient_array_kernel<<<cuda_gridsize(n), BLOCK>>>(x, n, a, delta); check_error(cudaPeekAtLastError()); }
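Taken together with the .hip record above, this pair is a compact illustration of what the hipify pass does mechanically: headers are swapped (cuda_runtime.h to hip/hip_runtime.h, curand.h to hiprand/hiprand.h, cublas_v2.h to rocblas.h, cuda.h to hip/hip_runtime.h), error queries are renamed (cudaPeekAtLastError to hipPeekAtLastError), and triple-chevron launches become hipLaunchKernelGGL calls with explicit dim3 arguments plus the shared-memory and stream slots written out as 0, 0. A reduced sketch of the same translation on a toy kernel; the kernel and wrapper names here are illustrative, not part of darknet:

#include <cuda_runtime.h>

// CUDA form, as it would appear in a .cu file:
__global__ void scale_kernel(float *x, int n, float s)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= s;
}

void scale_gpu(float *x, int n, float s)
{
    // grid, block; dynamic shared memory and stream default to 0
    scale_kernel<<<(n + 255)/256, 256>>>(x, n, s);
}

// Hipified form of the same wrapper, as it would appear in the .hip file
// (shown as a comment because the two launch syntaxes target different toolchains):
//
// void scale_gpu(float *x, int n, float s)
// {
//     hipLaunchKernelGGL(( scale_kernel), dim3((n + 255)/256), dim3(256), 0, 0, x, n, s);
// }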
d47783ee1ccd14e9c27dd19414c93ca0caa40747.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zmdot.cu normal z -> d, Wed Sep 17 15:08:43 2014 @author Hartwig Anzt */ #include "common_magma.h" #define BLOCK_SIZE 256 #define PRECISION_d // initialize arrays with zero __global__ void magma_dgpumemzero( double *d, int n, int k ){ int i = blockIdx.x * blockDim.x + threadIdx.x; if( i < n ){ for( int j=0; j<k; j++) d[ i+j*n ] = MAGMA_D_MAKE( 0.0, 0.0 ); } } // dot product __global__ void magma_ddot_kernel( int Gs, int n, double *v, double *r, double *vtmp){ extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_D_MAKE( 0.0, 0.0); __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // dot product for multiple vectors __global__ void magma_dblockdot_kernel( int Gs, int n, int k, double *v, double *r, double *vtmp){ extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; // k vectors v(i) if (i<n){ for( j=0; j<k; j++) temp[Idx+j*blockDim.x] = v[i+j*n] * r[i]; } else{ for( j=0; j<k; j++) temp[Idx+j*blockDim.x] =MAGMA_D_MAKE( 0.0, 0.0); } __syncthreads(); if ( Idx < 128 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ 
Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<k; j++){ vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } // block reduction for multiple vectors __global__ void magma_dblockreduce_kernel( int Gs, int n, int k, double *vtmp, double *vtmp2 ){ extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] = ( i < n ) ? vtmp[ i+j*n ] : MAGMA_D_MAKE( 0.0, 0.0); } __syncthreads(); if ( Idx < 128 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<k; j++){ vtmp2[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } // accelerated reduction for one vector __global__ void magma_dreduce_kernel_fast( int Gs, int n, double *vtmp, double *vtmp2 ){ extern __shared__ double temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; temp[Idx] = MAGMA_D_MAKE( 0.0, 0.0); int i = blockIdx.x * ( blockSize * 2 ) + Idx; while (i < Gs ) { temp[ Idx ] += vtmp[ i ]; temp[ Idx ] += ( i + blockSize < Gs ) ? 
vtmp[ i + blockSize ] : MAGMA_D_MAKE( 0.0, 0.0); i += gridSize; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp2[ blockIdx.x ] = temp[ 0 ]; } } // accelerated block reduction for multiple vectors __global__ void magma_dblockreduce_kernel_fast( int Gs, int n, int k, double *vtmp, double *vtmp2 ){ extern __shared__ double temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; int j; for( j=0; j<k; j++){ int i = blockIdx.x * ( blockSize * 2 ) + Idx; temp[Idx+j*(blockSize)] = MAGMA_D_MAKE( 0.0, 0.0); while (i < Gs ) { temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ]; temp[ Idx+j*(blockSize) ] += ( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ] : MAGMA_D_MAKE( 0.0, 0.0); i += gridSize; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<k; j++){ temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<k; j++){ vtmp2[ blockIdx.x+j*n ] = temp[ 
j*(blockSize) ]; } } } /** Purpose ------- Computes the scalar product of a set of vectors v_i such that skp = ( <v_0,r>, <v_1,r>, .. ) Returns the vector skp. Arguments --------- @param n int length of v_i and r @param k int # vectors v_i @param v double* v = (v_0 .. v_i.. v_k) @param r double* r @param d1 double* workspace @param d2 double* workspace @param skp double* vector[k] of scalar products (<v_i,r>...) @ingroup magmasparse_dblas ********************************************************************/ extern "C" magma_int_t magma_dmdotc( int n, int k, double *v, double *r, double *d1, double *d2, double *skp ){ int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( (n+local_block_size-1)/local_block_size ); dim3 Gs_next; int Ms = (k)* (local_block_size) * sizeof( double ); // k vecs double *aux1 = d1, *aux2 = d2; int b = 1; if(k>1){ hipLaunchKernelGGL(( magma_dblockdot_kernel), dim3(Gs), dim3(Bs), Ms, 0, Gs.x, n, k, v, r, d1 ); } else{ hipLaunchKernelGGL(( magma_ddot_kernel), dim3(Gs), dim3(Bs), Ms, 0, Gs.x, n, v, r, d1 ); } /* // not necessary to zero GPU mem magma_dgpumemzero<<<Gs, Bs, 0>>>( d1, n*k,1 ); magma_dgpumemzero<<<Gs, Bs, 0>>>( d2, n*k,1 ); //magmablas_dlaset( MagmaUpperLower, n, k, d1, n ); //magmablas_dlaset( MagmaUpperLower, n, k, d2, n ); while( Gs.x > 1 ){ Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ; magma_dblockreduce_kernel<<< Gs_next.x, Bs.x, Ms >>> ( Gs.x, n, k, aux1, aux2 ); Gs.x = Gs_next.x; b = 1 - b; if( b ){ aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } for( int j=0; j<k; j++){ magma_dcopyvector( 1, aux1+j*n, 1, skp+j, 1 ); } */ if( k>1){ while( Gs.x > 1 ){ Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ; if( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_dblockreduce_kernel_fast), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2 , 0, Gs.x, n, k, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if( b ){ aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } } else{ while( Gs.x > 1 ){ Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ; if( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_dreduce_kernel_fast), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2 , 0, Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if( b ){ aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } } for( int j=0; j<k; j++){ magma_dcopyvector( 1, aux1+j*n, 1, skp+j, 1 ); } return MAGMA_SUCCESS; } /** Purpose ------- This is an extension of the merged dot product above by chunking the set of vectors v_i such that the data always fits into cache. It is equivalent to a matrix vecor product Vr where V contains few rows and many columns. The computation is the same: skp = ( <v_0,r>, <v_1,r>, .. ) Returns the vector skp. Arguments --------- @param n int length of v_i and r @param k int # vectors v_i @param v double* v = (v_0 .. v_i.. v_k) @param r double* r @param d1 double* workspace @param d2 double* workspace @param skp double* vector[k] of scalar products (<v_i,r>...) 
@ingroup magmasparse_d ********************************************************************/ extern "C" magma_int_t magma_dgemvmdot( int n, int k, double *v, double *r, double *d1, double *d2, double *skp ){ int rows_left = k; int offset = 0; int chunk_size = 4; // process in chunks of 10 - has to be adapted to hardware and precision while( rows_left > (chunk_size) ){ magma_dmdotc( n, chunk_size, v+offset*n, r, d1, d2, skp+offset ); offset = offset + chunk_size; rows_left = rows_left-chunk_size; } // process rest magma_dmdotc( n, rows_left, v+offset*n, r, d1, d2, skp+offset ); return MAGMA_SUCCESS; }
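One detail worth noting in this hipified version: the dynamic shared-memory size Ms (k vectors times the block size times sizeof(double)) and the stream travel through the third and fourth slots of hipLaunchKernelGGL, which correspond to the last two triple-chevron parameters in the .cu record that follows. A toy, self-contained sketch of that correspondence for a kernel that uses extern __shared__ storage; the names are illustrative, not MAGMA's:

#include <hip/hip_runtime.h>

// Sums the first min(n, blockDim.x) elements of v into out[blockIdx.x].
__global__ void block_sum_dyn(int n, const double *v, double *out)
{
    extern __shared__ double smem[];                 // sized at launch time
    int idx = threadIdx.x;
    smem[idx] = (idx < n) ? v[idx] : 0.0;
    __syncthreads();
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (idx < stride) smem[idx] += smem[idx + stride];
        __syncthreads();
    }
    if (idx == 0) out[blockIdx.x] = smem[0];
}

void launch_block_sum(int n, const double *v, double *out, hipStream_t stream)
{
    size_t smem_bytes = 256 * sizeof(double);
    // CUDA source form:   block_sum_dyn<<< 1, 256, smem_bytes, stream >>>( n, v, out );
    // hipify output form (shared-memory bytes and stream occupy slots 3 and 4):
    hipLaunchKernelGGL(block_sum_dyn, dim3(1), dim3(256), smem_bytes, stream, n, v, out);
}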
d47783ee1ccd14e9c27dd19414c93ca0caa40747.cu
/* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zmdot.cu normal z -> d, Wed Sep 17 15:08:43 2014 @author Hartwig Anzt */ #include "common_magma.h" #define BLOCK_SIZE 256 #define PRECISION_d // initialize arrays with zero __global__ void magma_dgpumemzero( double *d, int n, int k ){ int i = blockIdx.x * blockDim.x + threadIdx.x; if( i < n ){ for( int j=0; j<k; j++) d[ i+j*n ] = MAGMA_D_MAKE( 0.0, 0.0 ); } } // dot product __global__ void magma_ddot_kernel( int Gs, int n, double *v, double *r, double *vtmp){ extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_D_MAKE( 0.0, 0.0); __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // dot product for multiple vectors __global__ void magma_dblockdot_kernel( int Gs, int n, int k, double *v, double *r, double *vtmp){ extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; // k vectors v(i) if (i<n){ for( j=0; j<k; j++) temp[Idx+j*blockDim.x] = v[i+j*n] * r[i]; } else{ for( j=0; j<k; j++) temp[Idx+j*blockDim.x] =MAGMA_D_MAKE( 0.0, 0.0); } __syncthreads(); if ( Idx < 128 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ 
Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<k; j++){ vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } // block reduction for multiple vectors __global__ void magma_dblockreduce_kernel( int Gs, int n, int k, double *vtmp, double *vtmp2 ){ extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] = ( i < n ) ? vtmp[ i+j*n ] : MAGMA_D_MAKE( 0.0, 0.0); } __syncthreads(); if ( Idx < 128 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<k; j++){ vtmp2[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } // accelerated reduction for one vector __global__ void magma_dreduce_kernel_fast( int Gs, int n, double *vtmp, double *vtmp2 ){ extern __shared__ double temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; temp[Idx] = MAGMA_D_MAKE( 0.0, 0.0); int i = blockIdx.x * ( blockSize * 2 ) + Idx; while (i < Gs ) { temp[ Idx ] += vtmp[ i ]; temp[ Idx ] += ( i + blockSize < Gs ) ? 
vtmp[ i + blockSize ] : MAGMA_D_MAKE( 0.0, 0.0); i += gridSize; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp2[ blockIdx.x ] = temp[ 0 ]; } } // accelerated block reduction for multiple vectors __global__ void magma_dblockreduce_kernel_fast( int Gs, int n, int k, double *vtmp, double *vtmp2 ){ extern __shared__ double temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; int j; for( j=0; j<k; j++){ int i = blockIdx.x * ( blockSize * 2 ) + Idx; temp[Idx+j*(blockSize)] = MAGMA_D_MAKE( 0.0, 0.0); while (i < Gs ) { temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ]; temp[ Idx+j*(blockSize) ] += ( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ] : MAGMA_D_MAKE( 0.0, 0.0); i += gridSize; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<k; j++){ temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<k; j++){ vtmp2[ blockIdx.x+j*n ] = temp[ 
j*(blockSize) ]; } } } /** Purpose ------- Computes the scalar product of a set of vectors v_i such that skp = ( <v_0,r>, <v_1,r>, .. ) Returns the vector skp. Arguments --------- @param n int length of v_i and r @param k int # vectors v_i @param v double* v = (v_0 .. v_i.. v_k) @param r double* r @param d1 double* workspace @param d2 double* workspace @param skp double* vector[k] of scalar products (<v_i,r>...) @ingroup magmasparse_dblas ********************************************************************/ extern "C" magma_int_t magma_dmdotc( int n, int k, double *v, double *r, double *d1, double *d2, double *skp ){ int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( (n+local_block_size-1)/local_block_size ); dim3 Gs_next; int Ms = (k)* (local_block_size) * sizeof( double ); // k vecs double *aux1 = d1, *aux2 = d2; int b = 1; if(k>1){ magma_dblockdot_kernel<<<Gs, Bs, Ms>>>( Gs.x, n, k, v, r, d1 ); } else{ magma_ddot_kernel<<<Gs, Bs, Ms>>>( Gs.x, n, v, r, d1 ); } /* // not necessary to zero GPU mem magma_dgpumemzero<<<Gs, Bs, 0>>>( d1, n*k,1 ); magma_dgpumemzero<<<Gs, Bs, 0>>>( d2, n*k,1 ); //magmablas_dlaset( MagmaUpperLower, n, k, d1, n ); //magmablas_dlaset( MagmaUpperLower, n, k, d2, n ); while( Gs.x > 1 ){ Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ; magma_dblockreduce_kernel<<< Gs_next.x, Bs.x, Ms >>> ( Gs.x, n, k, aux1, aux2 ); Gs.x = Gs_next.x; b = 1 - b; if( b ){ aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } for( int j=0; j<k; j++){ magma_dcopyvector( 1, aux1+j*n, 1, skp+j, 1 ); } */ if( k>1){ while( Gs.x > 1 ){ Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ; if( Gs_next.x == 1 ) Gs_next.x = 2; magma_dblockreduce_kernel_fast<<< Gs_next.x/2, Bs.x/2, Ms/2 >>> ( Gs.x, n, k, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if( b ){ aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } } else{ while( Gs.x > 1 ){ Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ; if( Gs_next.x == 1 ) Gs_next.x = 2; magma_dreduce_kernel_fast<<< Gs_next.x/2, Bs.x/2, Ms/2 >>> ( Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if( b ){ aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } } for( int j=0; j<k; j++){ magma_dcopyvector( 1, aux1+j*n, 1, skp+j, 1 ); } return MAGMA_SUCCESS; } /** Purpose ------- This is an extension of the merged dot product above by chunking the set of vectors v_i such that the data always fits into cache. It is equivalent to a matrix vecor product Vr where V contains few rows and many columns. The computation is the same: skp = ( <v_0,r>, <v_1,r>, .. ) Returns the vector skp. Arguments --------- @param n int length of v_i and r @param k int # vectors v_i @param v double* v = (v_0 .. v_i.. v_k) @param r double* r @param d1 double* workspace @param d2 double* workspace @param skp double* vector[k] of scalar products (<v_i,r>...) @ingroup magmasparse_d ********************************************************************/ extern "C" magma_int_t magma_dgemvmdot( int n, int k, double *v, double *r, double *d1, double *d2, double *skp ){ int rows_left = k; int offset = 0; int chunk_size = 4; // process in chunks of 10 - has to be adapted to hardware and precision while( rows_left > (chunk_size) ){ magma_dmdotc( n, chunk_size, v+offset*n, r, d1, d2, skp+offset ); offset = offset + chunk_size; rows_left = rows_left-chunk_size; } // process rest magma_dmdotc( n, rows_left, v+offset*n, r, d1, d2, skp+offset ); return MAGMA_SUCCESS; }
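Every kernel in this pair uses the same in-block reduction skeleton: strided adds down to 64 active threads with __syncthreads() between steps, then a warp-synchronous tail through a volatile shared-memory pointer once only 32 threads remain (the preprocessor branches merely repeat that tail per precision). A stripped-down version of the skeleton for a single vector, with the block size fixed at 256 as in the file above; this is a sketch of the pattern, not MAGMA's API:

#define SKETCH_BLOCK 256

__global__ void block_sum_sketch(int n, const double *v, double *partial)
{
    __shared__ double temp[SKETCH_BLOCK];
    int Idx = threadIdx.x;
    int i   = blockIdx.x * blockDim.x + Idx;

    temp[Idx] = (i < n) ? v[i] : 0.0;
    __syncthreads();

    if (Idx < 128) temp[Idx] += temp[Idx + 128];
    __syncthreads();
    if (Idx <  64) temp[Idx] += temp[Idx +  64];
    __syncthreads();

    if (Idx < 32) {
        // Warp-synchronous tail: relies on implicit lockstep within a warp, as the
        // original kernels do (a pre-Volta assumption; newer architectures would
        // want __syncwarp() between these adds).
        volatile double *t = temp;
        t[Idx] += t[Idx + 32];
        t[Idx] += t[Idx + 16];
        t[Idx] += t[Idx +  8];
        t[Idx] += t[Idx +  4];
        t[Idx] += t[Idx +  2];
        t[Idx] += t[Idx +  1];
    }
    if (Idx == 0) partial[blockIdx.x] = temp[0];     // one partial sum per block, like vtmp[]
}

A second pass (magma_dreduce_kernel_fast above) then folds the per-block partials down to a single value, halving the grid size until one block remains.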
1243c9ba786d814d742775fca02e4af57295a5e4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * See the NOTICE file distributed with this work for additional
 * information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author GS <[email protected]>
//

#include <ops/declarable/helpers/sequence_mask.h>

namespace sd {
namespace ops {
namespace helpers {

    template <typename I, typename B>
    static __global__ void sequenceMaskKernel(const void* inputBuf, const Nd4jLong* inputShape,
                                              void* outputBuf, const Nd4jLong* outputShape, int maxIndex) {
        __shared__ const I* input;
        __shared__ B* output;
        __shared__ Nd4jLong inputLen, outputLen;

        if (threadIdx.x == 0) {
            input = reinterpret_cast<const I*>(inputBuf);
            output = reinterpret_cast<B*>(outputBuf);
            inputLen = shape::length(inputShape);
            outputLen = shape::length(outputShape);
        }
        __syncthreads();

        for (auto i = blockIdx.x; i < maxIndex; i += gridDim.x)
            for(auto k = threadIdx.x; k < inputLen; k += blockDim.x)
                if (i < input[shape::getIndexOffset(k, inputShape)])
                    output[shape::getIndexOffset(k * maxIndex + i, outputShape)] = B(true);
    }

    template <typename I, typename B>
    static void sequenceMask_(LaunchContext* context, NDArray* input, NDArray* output, int maxIndex) {
        dim3 launchDims(maxIndex, input->lengthOf(), 128);
        NDArray::prepareSpecialUse({output}, {input});
        auto stream = context->getCudaStream();
        hipLaunchKernelGGL(( sequenceMaskKernel<I, B>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream,
            input->specialBuffer(), input->specialShapeInfo(),
            output->specialBuffer(), output->specialShapeInfo(), maxIndex);
        NDArray::registerSpecialUse({output}, {input});
    }

    ND4J_LOCAL void sequenceMask(sd::LaunchContext * context, NDArray* input, NDArray* output, int maxIndex) {
        BUILD_DOUBLE_SELECTOR(input->dataType(), output->dataType(), sequenceMask_, (context, input, output, maxIndex), INTEGER_TYPES, LIBND4J_TYPES_EXTENDED);
    }

    BUILD_DOUBLE_TEMPLATE(template ND4J_LOCAL void sequenceMask_, (sd::LaunchContext* context, NDArray* input, NDArray* output, int maxIndex), INTEGER_TYPES, LIBND4J_TYPES_EXTENDED);
}
}
}
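/* -------------------------------------------------------------------------
   Reference-semantics sketch (not part of the original file): a plain
   host-side loop added only to illustrate what sequenceMaskKernel computes
   for dense, row-major buffers. Note that the device kernel above writes only
   the "true" entries (it presumably relies on the output being
   zero-initialised) and uses shape::getIndexOffset to handle arbitrary
   strides, whereas this sketch writes both values and assumes contiguous
   C-ordered data.
   ------------------------------------------------------------------------- */
template <typename I, typename B>
static void sequenceMaskReference(const I* input, B* output, int inputLen, int maxIndex) {
    for (int k = 0; k < inputLen; k++)
        for (int i = 0; i < maxIndex; i++)
            output[k * maxIndex + i] = B(i < static_cast<int>(input[k]));
}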
1243c9ba786d814d742775fca02e4af57295a5e4.cu
/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * See the NOTICE file distributed with this work for additional
 * information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author GS <[email protected]>
//

#include <ops/declarable/helpers/sequence_mask.h>

namespace sd {
namespace ops {
namespace helpers {

    template <typename I, typename B>
    static __global__ void sequenceMaskKernel(const void* inputBuf, const Nd4jLong* inputShape,
                                              void* outputBuf, const Nd4jLong* outputShape, int maxIndex) {
        __shared__ const I* input;
        __shared__ B* output;
        __shared__ Nd4jLong inputLen, outputLen;

        if (threadIdx.x == 0) {
            input = reinterpret_cast<const I*>(inputBuf);
            output = reinterpret_cast<B*>(outputBuf);
            inputLen = shape::length(inputShape);
            outputLen = shape::length(outputShape);
        }
        __syncthreads();

        for (auto i = blockIdx.x; i < maxIndex; i += gridDim.x)
            for(auto k = threadIdx.x; k < inputLen; k += blockDim.x)
                if (i < input[shape::getIndexOffset(k, inputShape)])
                    output[shape::getIndexOffset(k * maxIndex + i, outputShape)] = B(true);
    }

    template <typename I, typename B>
    static void sequenceMask_(LaunchContext* context, NDArray* input, NDArray* output, int maxIndex) {
        dim3 launchDims(maxIndex, input->lengthOf(), 128);
        NDArray::prepareSpecialUse({output}, {input});
        auto stream = context->getCudaStream();
        sequenceMaskKernel<I, B><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
            input->specialBuffer(), input->specialShapeInfo(),
            output->specialBuffer(), output->specialShapeInfo(), maxIndex);
        NDArray::registerSpecialUse({output}, {input});
    }

    ND4J_LOCAL void sequenceMask(sd::LaunchContext * context, NDArray* input, NDArray* output, int maxIndex) {
        BUILD_DOUBLE_SELECTOR(input->dataType(), output->dataType(), sequenceMask_, (context, input, output, maxIndex), INTEGER_TYPES, LIBND4J_TYPES_EXTENDED);
    }

    BUILD_DOUBLE_TEMPLATE(template ND4J_LOCAL void sequenceMask_, (sd::LaunchContext* context, NDArray* input, NDArray* output, int maxIndex), INTEGER_TYPES, LIBND4J_TYPES_EXTENDED);
}
}
}
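/* -------------------------------------------------------------------------
   Illustrative direct-launch sketch (an assumption, not part of the original
   file): invoking sequenceMaskKernel with raw device buffers instead of going
   through NDArray, instantiated here with I=int and B=bool. d_input/d_output
   are assumed device data buffers and d_inputShape/d_outputShape assumed
   device Nd4jLong shape-info descriptors for them. The launch geometry
   mirrors the dim3 used in sequenceMask_ above: gridDim.x = maxIndex,
   blockDim.x = number of input elements (so this only works while that count
   stays within the device's threads-per-block limit), and 128 bytes of
   dynamic shared memory, as in the original launch.
   ------------------------------------------------------------------------- */
static void launchSequenceMask(const void* d_input, const Nd4jLong* d_inputShape,
                               void* d_output, const Nd4jLong* d_outputShape,
                               int inputLen, int maxIndex, cudaStream_t stream) {
    sd::ops::helpers::sequenceMaskKernel<int, bool><<<maxIndex, inputLen, 128, stream>>>(
        d_input, d_inputShape, d_output, d_outputShape, maxIndex);
    cudaStreamSynchronize(stream);
}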
1ef20092bedbda2aa88b60528db7e0736c50e77b.hip
// !!! This is a file automatically generated by hipify!!! /*----------------------------------------------------------------------------------* * Copyright (c) 2010-2018 Pauli Parkkinen, Eelis Solala, Wen-Hua Xu, * * Sergio Losilla, Elias Toivanen, Jonas Juselius * * * * Permission is hereby granted, free of charge, to any person obtaining a copy * * of this software and associated documentation files (the "Software"), to deal * * in the Software without restriction, including without limitation the rights * * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * * copies of the Software, and to permit persons to whom the Software is * * furnished to do so, subject to the following conditions: * * * * The above copyright notice and this permission notice shall be included in all* * copies or substantial portions of the Software. * * * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * * SOFTWARE. * *----------------------------------------------------------------------------------*/ /*! @file bubbles_cuda.cu *! @brief CUDA implementation of the Bubbles. */ #include <stdio.h> #include <hip/hip_runtime.h> #include <stdlib.h> //#include <algorithm> *std::max_element(result_cube, result_cube + totalPointCount) #include "bubbles_cuda.h" #include "streamcontainer.h" #include "grid.h" #include "spherical_harmonics_cuda.h" #include "cube.h" #include "function3d_multiplier.h" #include "memory_leak_operators.h" #include "evaluators.h" #define X_ 0 #define Y_ 1 #define Z_ 2 #define R_ 3 #define FOURPI_ 12.566370614359173 #if (__CUDA_ARCH__ > 350) #define INJECT_BLOCK_SIZE 256 #else #define INJECT_BLOCK_SIZE 128 #endif #define NLIP 7 /** \brief Size of the CUDA blocks in the X dimension */ #define BLOCKDIMX 8 /** \brief Size of the CUDA blocks in the Y dimension */ #define BLOCKDIMY 4 /** \brief Size of the CUDA blocks in the Z dimension */ #define BLOCKDIMZ 4 #define STR_HELPER(x) #x #define STR(x) STR_HELPER(x) hipError_t cudastat; __constant__ int shape_x_, shape_y_, shape_z_, ncell_, nlip_, lmax_, ilmmin_, lmin_, ilmmax_, first_term_, normalization_, ijk_max_; __constant__ double charge_, r_max_; hipStream_t **streams; int streams_inited = 0; int allocated = 0; extern __shared__ double shared_memory[]; __host__ inline void check_memory(const char *filename, const int line_number) { size_t mem_tot_0 = 0; size_t mem_free_0 = 0; hipMemGetInfo (&mem_free_0, &mem_tot_0); printf("Free memory after: %ld, total: %ld\n ", mem_free_0, mem_tot_0); } template<typename T> __device__ __forceinline__ T ldg(const T* ptr) { #if __CUDA_ARCH__ >= 350 return __ldg(ptr); #else return *ptr; #endif } void cube_download(double *hstPtr, int width, int height ,int depth, void *devPtr, size_t pitch) { // Define copy "from device to host" parameters hipMemcpy3DParms d2h={0}; d2h.srcPtr = make_hipPitchedPtr(devPtr, pitch,width,height); d2h.dstPtr = make_hipPitchedPtr((void *)hstPtr, width*sizeof(double),width,height); d2h.extent = make_hipExtent(width * sizeof(double), height, depth); // hipMemset3D( d2h.srcPtr, 999, d2h.extent); d2h.kind = hipMemcpyDeviceToHost; // cudastat=hipMemset3D( 
d2h.srcPtr, 0, d2h.extent); // Copy to host cudastat = hipMemcpy3D( &d2h ); check_errors(__FILE__, __LINE__); return; } void cube_upload(double *hstPtr, int *width ,int *height ,int *depth, void *devPtr, size_t pitch) { // Define copy "from host to device" parameters hipMemcpy3DParms h2d={0}; h2d.srcPtr = make_hipPitchedPtr((void *)hstPtr, *width*sizeof(double),*width,*height); h2d.dstPtr = make_hipPitchedPtr(devPtr, pitch,*width,*height); h2d.extent = make_hipExtent(*width * sizeof(double), *height, *depth); h2d.kind = hipMemcpyHostToDevice; // Copy to device hipMemcpy3D( &h2d ); return; } __device__ int icell(double x, double *d, int n){ if ( ( x > d[n] ) || ( x < d[0] ) ) { return -1; } int i[2]; i[0]=0; i[1]=n; int im=(i[0]+i[1])/2; int j; int max=log((float)n)/log(2.)+1; for(j=0;j<max;j++){ i[ x<d[im] ] = im; im=(i[0]+i[1])/2; } return im; } __device__ void calc_rc(double dist_vec[3], double *dist, double ref[3],double x, double y, double z){ dist_vec[X_]=x-ref[X_]; dist_vec[Y_]=y-ref[Y_]; dist_vec[Z_]=z-ref[Z_]; *dist=sqrt(dist_vec[X_]*dist_vec[X_]+ dist_vec[Y_]*dist_vec[Y_]+ dist_vec[Z_]*dist_vec[Z_]); dist_vec[X_]/=*dist; dist_vec[Y_]/=*dist; dist_vec[Z_]/=*dist; return; } __device__ double eval_lip(int n, double *lip, double *f, double x){ short i,j; double out=0.0; for (j=0;j<n;j++){ double tmp=0.0; for (i=0;i<n;i++){ tmp*= x; tmp+= *(lip++); } out+=tmp*f[j]; } return out; } __device__ double eval_poly(int n, double *c, double x){ double r=0.0; while (n-- > 0) { r *= x; r += *(c++); } return r; } /* * the following function precalculates some common values for the injection. * * NOTE: We are setting the cf-array to have 8 * (lmax+1) * (lmax+1) size * This has several advantages (even if we are using more space and have * blank spots in the array). 1) Every cell read is coalesced and we don't * have overlapping requests! Additionally, we avoid divergence of the threads * of one warp in the injection. 
*/ __global__ void calc_cf(Bubble *bub, int offset, int number_of_points, size_t device_f_pitch) { // get the index within this kernel call const int index = blockIdx.x * blockDim.x + threadIdx.x; // get the global index const int id= index + offset; const int icell=id%bub->grid->ncell; const int ilm=id/bub->grid->ncell; const int nlip = bub->grid->nlip; __shared__ double shared_lip[49]; __shared__ double derivative_lip[42]; __shared__ double lower_derivative_lip[30]; __shared__ double cf_results[8*64]; __shared__ double df_results[8*64]; double f_i; // load the Lagrange interpolation polynomials coefficients to // the shared memory if (threadIdx.x < (nlip) * (nlip)) { shared_lip[threadIdx.x] = bub->grid->lip[threadIdx.x]; } if (threadIdx.x < (nlip) * (nlip-1)) { derivative_lip[threadIdx.x] = bub->grid->derivative_lip[threadIdx.x]; } if (threadIdx.x < (nlip-2) * (nlip-1)) { lower_derivative_lip[threadIdx.x] = bub->grid->lower_derivative_lip[threadIdx.x]; } __syncthreads(); if ( index < number_of_points && ilm < ((bub->lmax+1)*(bub->lmax+1)) ) { double *f = bub->f + ilm * device_f_pitch / sizeof(double) + (icell * (bub->grid->nlip-1)); double *cf = bub->cf + ( ilm * bub->grid->ncell + icell ) * 8; double *df = bub->df + ( ilm * bub->grid->ncell + icell ) * 8; short i,j; double one_per_cell_step = 1.0 / bub->grid->h[icell]; double *lip=&shared_lip[0]; double *dlip=&derivative_lip[0]; double *ldlip=&lower_derivative_lip[0]; // set the shared memory result array to zero for (i=0; i < 8; i++) { cf_results[threadIdx.x * 8 + i]=0.0; df_results[threadIdx.x * 8 + i]=0.0; } // evaluate the cf to shared memory for (i=0; i < nlip; i++) { f_i = f[i]; for (j=0; j < nlip ;j++){ cf_results[threadIdx.x * 8 + j] += f_i* (*(lip++)); } // handle the special case of the first cell, where the first // data item most likely is not valid if (icell == 0) { if (i != 0) { for (j = 1 ; j <= nlip-2; j++) { df_results[threadIdx.x * 8 + j] += f_i* (*(ldlip++)); } } else { df_results[threadIdx.x * 8] = 0.0; } } else { for (j=0; j < nlip-1 ;j++) { df_results[threadIdx.x * 8 + j] += f_i* (*(dlip++)); } } } // copy the result to device memory for (i=0; i < 8; i++) { cf[i] = cf_results[threadIdx.x * 8 + i]; df[i] = one_per_cell_step * df_results[threadIdx.x * 8 + i]; } } return; } __device__ inline double evaluate_polynomials(int n, const double* __restrict__ c, const double x){ double result=0.0; while (n-- > 0) { result *= x; result += *(c++); } return result; } //#ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 350 /* * Evaluates one granular polynomial for coefficients, and x * NOTE: each thread is different value for coefficient, when entering the function * NOTE: each x value must be the same for 8 consecutive threads * NOTE: upon return each thread has the same value. 
*/ __inline__ __device__ double evaluate_polynomials_unit_shuffle(double coefficient, const double x) { double result = coefficient; for (int i = 1; i < 7; i++) { result *= x; result += __shfl_down(coefficient, i, 8); } return result; } __inline__ __device__ double evaluate_polynomials_unit_register(const double * __restrict__ coefficients, const double x, int nlip) { double result = 0.0; while (nlip-- > 0) { result *= x; result += *(coefficients++); } return result; } __device__ inline void horizontal_rotate_8f(double coefficients[8], unsigned int order_number) { coefficients[1] = __shfl(coefficients[1], (order_number+1)%8, 8); coefficients[2] = __shfl(coefficients[2], (order_number+2)%8, 8); coefficients[3] = __shfl(coefficients[3], (order_number+3)%8, 8); coefficients[4] = __shfl(coefficients[4], (order_number+4)%8, 8); coefficients[5] = __shfl(coefficients[5], (order_number+5)%8, 8); coefficients[6] = __shfl(coefficients[6], (order_number+6)%8, 8); coefficients[7] = __shfl(coefficients[7], (order_number+7)%8, 8); } __device__ inline void horizontal_rotate_8b(double coefficients[8], unsigned int order_number) { coefficients[1] = __shfl(coefficients[1], (order_number+7)%8, 8); coefficients[2] = __shfl(coefficients[2], (order_number+6)%8, 8); coefficients[3] = __shfl(coefficients[3], (order_number+5)%8, 8); coefficients[4] = __shfl(coefficients[4], (order_number+4)%8, 8); coefficients[5] = __shfl(coefficients[5], (order_number+3)%8, 8); coefficients[6] = __shfl(coefficients[6], (order_number+2)%8, 8); coefficients[7] = __shfl(coefficients[7], (order_number+1)%8, 8); } __device__ inline void vertical_rotate_8(double src[8], unsigned int order_number) { double tmp = src[0]; src[0] = (order_number == 1) ? src[7] : src[0]; src[7] = (order_number == 1) ? src[6] : src[7]; src[6] = (order_number == 1) ? src[5] : src[6]; src[5] = (order_number == 1) ? src[4] : src[5]; src[4] = (order_number == 1) ? src[3] : src[4]; src[3] = (order_number == 1) ? src[2] : src[3]; src[2] = (order_number == 1) ? src[1] : src[2]; src[1] = (order_number == 1) ? tmp : src[1]; src[1] = (order_number == 2) ? src[7] : src[1]; src[0] = (order_number == 2) ? src[6] : src[0]; src[7] = (order_number == 2) ? src[5] : src[7]; src[6] = (order_number == 2) ? src[4] : src[6]; src[5] = (order_number == 2) ? src[3] : src[5]; src[4] = (order_number == 2) ? src[2] : src[4]; src[3] = (order_number == 2) ? src[1] : src[3]; src[2] = (order_number == 2) ? tmp : src[2]; src[2] = (order_number == 3) ? src[7] : src[2]; src[1] = (order_number == 3) ? src[6] : src[1]; src[0] = (order_number == 3) ? src[5] : src[0]; src[7] = (order_number == 3) ? src[4] : src[7]; src[6] = (order_number == 3) ? src[3] : src[6]; src[5] = (order_number == 3) ? src[2] : src[5]; src[4] = (order_number == 3) ? src[1] : src[4]; src[3] = (order_number == 2) ? tmp : src[3]; src[3] = (order_number == 4) ? src[7] : src[3]; src[2] = (order_number == 4) ? src[6] : src[2]; src[1] = (order_number == 4) ? src[5] : src[1]; src[0] = (order_number == 4) ? src[4] : src[0]; src[7] = (order_number == 4) ? src[3] : src[7]; src[6] = (order_number == 4) ? src[2] : src[6]; src[5] = (order_number == 4) ? src[1] : src[5]; src[4] = (order_number == 4) ? tmp : src[4]; src[4] = (order_number == 5) ? src[7] : src[4]; src[3] = (order_number == 5) ? src[6] : src[3]; src[2] = (order_number == 5) ? src[5] : src[2]; src[1] = (order_number == 5) ? src[4] : src[1]; src[0] = (order_number == 5) ? src[3] : src[0]; src[7] = (order_number == 5) ? src[2] : src[7]; src[6] = (order_number == 5) ? 
src[1] : src[6]; src[5] = (order_number == 5) ? tmp : src[5]; src[5] = (order_number == 6) ? src[7] : src[5]; src[4] = (order_number == 6) ? src[6] : src[4]; src[3] = (order_number == 6) ? src[5] : src[3]; src[2] = (order_number == 6) ? src[4] : src[2]; src[1] = (order_number == 6) ? src[3] : src[1]; src[0] = (order_number == 6) ? src[2] : src[0]; src[7] = (order_number == 6) ? src[1] : src[7]; src[6] = (order_number == 6) ? tmp : src[6]; src[6] = (order_number == 7) ? src[7] : src[6]; src[5] = (order_number == 7) ? src[6] : src[5]; src[4] = (order_number == 7) ? src[5] : src[4]; src[3] = (order_number == 7) ? src[4] : src[3]; src[2] = (order_number == 7) ? src[3] : src[2]; src[1] = (order_number == 7) ? src[2] : src[1]; src[0] = (order_number == 7) ? src[1] : src[0]; src[7] = (order_number == 7) ? tmp : src[7]; } __device__ inline void transpose8(double coefficients[8], int order_number) { //printf("Original coefficients %d: %f, %f, %f, %f, %f, %f, %f, %f\n", order_number, coefficients[0], coefficients[1], coefficients[2], coefficients[3], coefficients[4], coefficients[5], coefficients[6], coefficients[7]); horizontal_rotate_8f(coefficients, order_number); vertical_rotate_8(coefficients, order_number); horizontal_rotate_8b(coefficients, order_number); //printf("Transposed coefficients coefficients %d: %f, %f, %f, %f, %f, %f, %f, %f\n", order_number, coefficients[0], coefficients[1], coefficients[2], coefficients[3], coefficients[4], coefficients[5], coefficients[6], coefficients[7]); } /* * Evaluates the polynomials using shuffle actions. This saves the shared_memory significantly and allows * the increase of the occupancy of the devices. * * This function only needs blockDim.x * 8 bytes of shared memory. This allows the usage of any sized blocks * that are practically useful. * * The number of arithmetic operations is larger than for the version using shared memory only, and thus * the effect to the execution speed remains to be seen. */ __device__ inline double evaluate_polynomials_shuffle(const int address, const double * __restrict__ c, const double x, const int nlip) { double *result = &shared_memory[0]; //double coefficients[8]; //double res; int remainder = threadIdx.x%8; int base_address = 8*(threadIdx.x/8); double res; for (int i = 0; i < 8; i ++) { // evaluate the polynomials // NOTE: __shfl(address, i, width=8) gets the address needed by the thread i/8 in the thread group // NOTE: __shfl(x, i, width = 8) gets the coordinate x of the thread i/8 in the thread group // NOTE: the c access (global memory is coalesced), // NOTE: shared memorybank conflict should not occur, as every thread in the 8 thread group access // the same address, thus resulting in broadcast. //coefficients[i] = c[__shfl(address, i, 8) + remainder]; res = evaluate_polynomials_unit_shuffle( c[__shfl(address, i, 8) + remainder], __shfl(x, i, 8)); if (remainder == 0) result[base_address + i] = res; } // swap the coefficients to be with their rightful owners //transpose8(coefficients, remainder); return result[threadIdx.x]; //return evaluate_polynomials_unit_register(coefficients, x, nlip); } #endif //#endif /* * Get the thread-id within block. 
*/ __device__ inline int getThreadId() { return threadIdx.x + blockDim.x * threadIdx.y + blockDim.x * blockDim.y * threadIdx.z; } /* * @param c, bubbles coefficients in the global memory * @param x, the coordinate of the point in cell coordinates * * NOTE: The parameter 'c' must be pitched for this function to be useful * NOTE: This function is made for NLIP:7, with other nlip values, the function must be remade */ template<int nlip> __device__ inline double evaluate_polynomials_shared(const int address, const double* __restrict__ c, const double x) { double *coefficients = &shared_memory[0]; //const float *fc = (const float *)c; int threadId = getThreadId(); int remainder = threadId%8; int base_address = 8*(threadId/8); int id = base_address * 7 + remainder; /*int remainder = threadId%16; base_address = 16*(threadId/16); id = base_address * 16 + remainder; int faddress = 2 * address;*/ #if (__CUDA_ARCH__ >= 350) // read the coefficients in the shared memory, 8 threads // neighbouring each other are reading the global memory // coefficients for one thread at the time, starting from 0 // and going to 7 int address_7 = __shfl(address, 7, 8); if (remainder < 7) { coefficients[id] = ldg<double>(&c[__shfl(address, 0, 8) + remainder]); coefficients[id+7] = ldg<double>(&c[__shfl(address, 1, 8) + remainder]); coefficients[id+7*2] = ldg<double>(&c[__shfl(address, 2, 8) + remainder]); coefficients[id+7*3] = ldg<double>(&c[__shfl(address, 3, 8) + remainder]); coefficients[id+7*4] = ldg<double>(&c[__shfl(address, 4, 8) + remainder]); coefficients[id+7*5] = ldg<double>(&c[__shfl(address, 5, 8) + remainder]); coefficients[id+7*6] = ldg<double>(&c[__shfl(address, 6, 8) + remainder]); coefficients[id+7*7] = ldg<double>(&c[address_7 + remainder]); } /*coefficients[id] = c[__shfl(address, 0, 8) + remainder]; coefficients[id+8] = c[__shfl(address, 1, 8) + remainder]; coefficients[id+16] = c[__shfl(address, 2, 8) + remainder]; coefficients[id+24] = c[__shfl(address, 3, 8) + remainder]; coefficients[id+32] = c[__shfl(address, 4, 8) + remainder]; coefficients[id+40] = c[__shfl(address, 5, 8) + remainder]; coefficients[id+48] = c[__shfl(address, 6, 8) + remainder]; coefficients[id+56] = c[__shfl(address, 7, 8) + remainder];*/ /*fcoefficients[id] = fc[__shfl(faddress, 0, 16) + remainder]; fcoefficients[id+16] = fc[__shfl(faddress, 1, 16) + remainder]; fcoefficients[id+32] = fc[__shfl(faddress, 2, 16) + remainder]; fcoefficients[id+48] = fc[__shfl(faddress, 3, 16) + remainder]; fcoefficients[id+64] = fc[__shfl(faddress, 4, 16) + remainder]; fcoefficients[id+80] = fc[__shfl(faddress, 5, 16) + remainder]; fcoefficients[id+96] = fc[__shfl(faddress, 6, 16) + remainder]; fcoefficients[id+112] = fc[__shfl(faddress, 7, 16) + remainder]; fcoefficients[id+128] = fc[__shfl(faddress, 8, 16) + remainder]; fcoefficients[id+144] = fc[__shfl(faddress, 9, 16) + remainder]; fcoefficients[id+160] = fc[__shfl(faddress, 10, 16) + remainder]; fcoefficients[id+176] = fc[__shfl(faddress, 11, 16) + remainder]; fcoefficients[id+192] = fc[__shfl(faddress, 12, 16) + remainder]; fcoefficients[id+208] = fc[__shfl(faddress, 13, 16) + remainder]; fcoefficients[id+224] = fc[__shfl(faddress, 14, 16) + remainder]; fcoefficients[id+240] = fc[__shfl(faddress, 15, 16) + remainder];*/ #else // store the addresses to the shared memory int *address_array = (int *) &shared_memory[8*blockDim.x * blockDim.y * blockDim.z]; address_array[threadIdx.x] = address; coefficients[id] = c[address_array[base_address] + remainder]; coefficients[id+8] = 
c[address_array[base_address +1] + remainder]; coefficients[id+16] = c[address_array[base_address +2] + remainder]; coefficients[id+24] = c[address_array[base_address +3] + remainder]; coefficients[id+32] = c[address_array[base_address +4] + remainder]; coefficients[id+40] = c[address_array[base_address +5] + remainder]; coefficients[id+48] = c[address_array[base_address +6] + remainder]; coefficients[id+56] = c[address_array[base_address +7] + remainder]; #endif double *coeff = &coefficients[threadId * 7]; double result = coeff[0]; if (nlip > 1) { result *= x; result += coeff[1]; } if (nlip > 2) { result *= x; result += coeff[2]; } if (nlip > 3) { result *= x; result += coeff[3]; } if (nlip > 4) { result *= x; result += coeff[4]; } if (nlip > 5) { result *= x; result += coeff[5]; } if (nlip > 6) { result *= x; result += coeff[6]; } return result; } __device__ inline int calculate_icell(double x, double *d, int n){ if ( ( x > d[n] ) || ( x < d[0] ) ) { return -1; } int i[2]; i[0]=0; i[1]=n; int im=(i[0]+i[1])/2; int j; int max=log((float)n)/log(2.)+1; for(j=0;j<max;j++){ i[ x<d[im] ] = im; im=(i[0]+i[1])/2; } return im; } __device__ inline void calculate_icell_radial(const double x, const double charge, const double r_max, const int ncell, const int nlip, int *icell, double *in_cell_position) { const double dx = r_max/(double)ncell; const double c=8.0*rsqrt(charge)/charge; const double a = r_max + c; *icell = (int)(x * a / ((c + x)*dx)); double x1 = c / (a/((*icell+1) * dx) - 1.0); double x0 = c / (a/(*icell * dx) - 1.0); if (icell == 0) { x0 = 0.0; } double grid_step = (x1-x0) / (nlip-1); double center = (x1+x0) / (2.0); *in_cell_position= (x - center)/grid_step; } inline __device__ void calculate_distance(double &dist_vec_x, double &dist_vec_y, double &dist_vec_z, double &dist, const double reference_point_x, const double reference_point_y, const double reference_point_z, const double x, const double y, const double z){ // calculate the vector relative to reference_point dist_vec_x=x-reference_point_x; dist_vec_y=y-reference_point_y; dist_vec_z=z-reference_point_z; // evaluate the length of the dist_vector, i.e., the distance between dist_vec and reference_point dist=sqrt(dist_vec_x * dist_vec_x + dist_vec_y * dist_vec_y + dist_vec_z * dist_vec_z); return; } /* * Evaluates value of single bubble at a point. 
This is very similar with the * SolidHarmonics simple evaluation, but the results are multiplied with the * polynomial evaluations */ __device__ inline double Bubbles_evaluate_point_lmin( // x-coordinate relative to the center of the bubble const double &x, // y-coordinate relative to the center of the bubble const double &y, // z-coordinate relative to the center of the bubble const double &z, // relative distance from the center of the bubble const double &distance, // minimum quantum number 'l' const int &lmin, // maximum quantum number 'l' const int &lmax, // number of cells const int &ncell, // number of lagrange integration polyniomials per // cell, i.e., the number of grid points per cell const int &nlip, // position inside the cell const double &r, // k value for the bubble const int &k, // the first address value in bubble for the selected cell const int &address, const double* __restrict__ cf ) { double result = 0.0; int lm_address = address, address2 = address; // NOTE: Here the nlip is replaced by 8 because this gives advantages in loading the stuff // also *cf this should be done const int ncell_nlip = ncell * 8; int l, m, l2; double top = 0.0, bottom = 0.0, new_bottom = 0.0, prev1 = 0.0, prev2 = 0.0, current = 0.0; double multiplier = 0.0, multiplier2 = 0.0, one_per_r = 1.0 / distance; double r2 = x*x+y*y+z*z; l = 0; // set value for l=0, m=0 if (lmin == 0) { //printf("x: %f, y: %f, z: %f, nlip: %d, ncell: %d, l: 0, address: %d, cf: %ld, r: %f\n", x, y, z, nlip, ncell, 0, lm_address, cf, r); //printf("shared_memory address: %ld\n"); //printf("shared memory first value: %f", shared_memory[0]); result = evaluate_polynomials_shared<NLIP>(lm_address, cf, r); } if (lmax >= 1) { l = 1; multiplier = one_per_r; // set value for l=1, m=-1 lm_address += ncell_nlip; if (lmin <= 1) { result += y * evaluate_polynomials_shared<NLIP>(lm_address, cf, r) * multiplier; } // set all values where m=-1 m = -1; prev1 = y; // the starting address has 1 item before from the l=0, 3 from l=1, and 1 from l=2 address2 = address + ncell_nlip * 5; multiplier2 = multiplier * one_per_r; for (l = 2; l <= lmax; l++) { current = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l+m)*(l-m)) ) * z*prev1; if (l > 2) { current -= sqrt( (double)((l+m-1)*(l-m-1)) / (double)((l+m)*(l-m)) ) * r2 * prev2; } prev2 = prev1; prev1 = current; if (l >= lmin) { result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r) * multiplier2; } // add the address2 to get to the next item with m=-1 address2 += ncell_nlip * (2*l+2); multiplier2 *= one_per_r; } // set value for l=1, m=0 lm_address += ncell_nlip; if (lmin <= 1) { result += z * evaluate_polynomials_shared<NLIP>(lm_address, cf, r) * multiplier; } // set all values where m=0 prev1 = z; prev2 = 1.0; m = 0; // the starting address has 1 item before from the l=0, 3 from l=1, and 2 from l=2 address2 = address + ncell_nlip * 6; multiplier2 = multiplier * one_per_r; for (l = 2; l <= lmax; l++) { current = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l+m)*(l-m)) ) * z * prev1; current -= sqrt( (double)((l+m-1)*(l-m-1)) / (double)((l+m)*(l-m)) ) * r2 * prev2; prev2 = prev1; prev1 = current; if (l >= lmin) { result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r) * multiplier2; } // add the address2 to get to the next item with m=0 address2 += ncell_nlip * (2*l+2); multiplier2 *= one_per_r; } // set value for l=1, m=1 lm_address += ncell_nlip; if (lmin <= 1) { result += x * evaluate_polynomials_shared<NLIP>(lm_address, cf, r) * multiplier; } // set all values 
where m=1 prev1 = x; m = 1; // the starting address has 1 item before from the l=0, 3 from l=1, and 3 from l=2 address2 = address + ncell_nlip * 7; multiplier2 = multiplier * one_per_r; for (l = 2; l <= lmax; l++) { current = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l+m)*(l-m)) ) * z*prev1; if (l > 2) { current -= sqrt( (double)((l+m-1)*(l-m-1)) / (double)((l+m)*(l-m)) ) * r2 * prev2; } prev2 = prev1; prev1 = current; if (l >= lmin) { result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r) * multiplier2; } // add the address2 to get to the next item with m=1 address2 += ncell_nlip * (2*l+2); multiplier2 *= one_per_r; } // go through the rest of the stuff bottom = y; // bottom refers to solid harmonics value with l=l-1 and m=-(l-1) top = x; // top refers to solid harmonics value with l=l-1 and m=l-1 lm_address += ncell_nlip; multiplier *= one_per_r; for (l=2; l <= lmax; l++) { new_bottom = sqrt((2.0*(double)l - 1.0) / (2.0*(double)l)) * ( y*top + x*bottom); if (l >= lmin) { result += new_bottom * evaluate_polynomials_shared<NLIP>(lm_address, cf, r) * multiplier; } // set all values where m=-l m = -l; prev1 = new_bottom; address2 = lm_address + (2*l+2) * ncell_nlip; multiplier2 = multiplier * one_per_r; for (l2 = l+1; l2 <= lmax; l2++) { current = ( 2.0*(double)l2-1.0) * rsqrt( 1.0*(double)((l2+m)*(l2-m)) ) * z*prev1; if (l2 > l+1) { current -= sqrt( (double)((l2+m-1)*(l2-m-1)) / (double)((l2+m)*(l2-m)) ) * r2 * prev2; } prev2 = prev1; prev1 = current; if (l2 >= lmin) { result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r) * multiplier2; } // add the address2 to get to the next item with m=l address2 += ncell_nlip * (2*l2+2); multiplier2 *= one_per_r; } // get value for l=l, m=l. The address is 2*l items away from l=l, m=-l lm_address += 2*l * ncell_nlip; top = sqrt((2.0*(double)l - 1.0) / (2.0*(double)l)) * ( x*top-y*bottom ); // set all values where m=l m = l; prev1 = top; address2 = lm_address + (2*l+2) * ncell_nlip; multiplier2 = multiplier * one_per_r; for (l2 = l+1; l2 <= lmax; l2++) { current = ( 2.0*(double)l2-1.0) * rsqrt( 1.0*(double)((l2+m)*(l2-m)) ) * z*prev1; if (l2 > l+1) { current -= sqrt( (double)((l2+m-1)*(l2-m-1)) / (double)((l2+m)*(l2-m)) ) * r2 * prev2; } prev2 = prev1; prev1 = current; if (l2 >= lmin) { result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r) * multiplier2; } // add the address2 to get to the next item with m=l address2 += ncell_nlip * (2*l2+2); multiplier2 *= one_per_r; } // store the new bottom: l=l, m=-l (we need the old bottom in calculation of top) bottom = new_bottom; if (l >= lmin) { result += top * evaluate_polynomials_shared<NLIP>(lm_address, cf, r) * multiplier; } // get next address lm_address += ncell_nlip; multiplier *= one_per_r; } } // multiply the result with r^k, if k is not 0 // the distance is not too close to 0.0 as this is checked // earlier in this function if (k != 0 && distance > 1e-12) { result *= pow(distance, (double)k); } if (distance < 1e-8) { result = 1.0 * cf[0]; //evaluate_polynomials(nlip, &cf[address], r); } return result; } /* * (int nlip, int ncell, int l, int address, double *c, const double x) * Evaluates the value of gradient of a single bubble at a point. This is very similar with the * SolidHarmonics simple evaluation, but the results are multiplied with the * polynomial evaluations and summed together. 
*/ template <bool evaluate_gradients_x, bool evaluate_gradients_y, bool evaluate_gradients_z > __device__ inline void Bubbles_evaluate_gradient_point( // x-coordinate relative to the center of the bubble const double &x, // y-coordinate relative to the center of the bubble const double &y, // z-coordinate relative to the center of the bubble const double &z, // relative distance from the center of the bubble const double &distance, // maximum quantum number 'l' const int &lmax, // number of cells const int &ncell, // number of lagrange integration polyniomials per // cell, i.e., the number of grid points per cell const int &nlip, // position inside the cell const double &r, // k value for the bubble const int &k, // the first address value in bubble for the selected cell const int &address, // constant pointer to a variable double array const double* __restrict__ cf, // constant pointer to a derivative variable double array const double* __restrict__ df, // if only the l = 0 is evaluated const bool only_spherical, // result double result[3] ) { int lm_address = address, address2; // NOTE: Here the nlip is replaced by 8 because this gives advantages in loading the stuff // also *cf this should be done const int ncell_nlip = ncell * 8; int l, l2; double top, bottom, new_bottom, prev1, prev2, current, current_gradient[3], prev1_gradient[3], prev2_gradient[3], bottom_gradient[3], new_bottom_gradient, top_gradient[3]; double one_per_r = 1.0 / distance;; double one_per_r_gradient[3] = {(-x) * one_per_r * one_per_r, (-y) * one_per_r * one_per_r, (-z) * one_per_r * one_per_r}; l = 0; // set value for l=0, m=0 double radial_value, radial_derivative; radial_derivative = evaluate_polynomials_shared<NLIP-1>(lm_address, df, r); if (evaluate_gradients_x) result[X_] = radial_derivative * x;// * one_per_r; if (evaluate_gradients_y) result[Y_] = radial_derivative * y;// * one_per_r; if (evaluate_gradients_z) result[Z_] = radial_derivative * z;// * one_per_r; if (distance >= 0.0 && distance < 1e-12) { one_per_r = 0.0; if (evaluate_gradients_x) one_per_r_gradient[X_] = 0.0; if (evaluate_gradients_y) one_per_r_gradient[Y_] = 0.0; if (evaluate_gradients_z) one_per_r_gradient[Z_] = 0.0; if (evaluate_gradients_x) result[X_] = 0.0; //radial_derivative; if (evaluate_gradients_y) result[Y_] = 0.0; //radial_derivative; if (evaluate_gradients_z) result[Z_] = 0.0;//radial_derivative; } /*if (only_spherical) { one_per_r = 0.0; if (evaluate_gradients_x) one_per_r_gradient[X_] = 0.0; if (evaluate_gradients_y) one_per_r_gradient[Y_] = 0.0; if (evaluate_gradients_z) one_per_r_gradient[Z_] = 0.0; }*/ if (lmax >= 1) { // set all values where m=-1 prev1 = y * one_per_r; if (evaluate_gradients_x) prev1_gradient[X_] = one_per_r_gradient[X_] * y; if (evaluate_gradients_y) prev1_gradient[Y_] = 1.0 + one_per_r_gradient[Y_] * y; if (evaluate_gradients_z) prev1_gradient[Z_] = one_per_r_gradient[Z_] * y; // set value for l=1, m=-1 radial_value = evaluate_polynomials_shared<NLIP>(address+ncell_nlip, cf, r); radial_derivative = evaluate_polynomials_shared<NLIP-1>(address+ncell_nlip, df, r); if (evaluate_gradients_x) result[X_] += radial_value * prev1_gradient[X_] + radial_derivative * prev1 * x;// * one_per_r; if (evaluate_gradients_y) result[Y_] += radial_value * prev1_gradient[Y_] + radial_derivative * prev1 * y;// * one_per_r; if (evaluate_gradients_z) result[Z_] += radial_value * prev1_gradient[Z_] + radial_derivative * prev1 * z;// * one_per_r; //if (only_spherical) printf("radial_value: %e, radial_derivative: %e, prev1, i.e., 
y/r: %e\n", radial_value, radial_derivative, prev1); //if (only_spherical && evaluate_gradients_x) printf("prev1-gradient-x: %e, x/r: %e\n", prev1_gradient[X_], x * one_per_r); //if (only_spherical && evaluate_gradients_y) printf("prev1-gradient-y: %e, y/r: %e\n", prev1_gradient[Y_], y * one_per_r); //if (only_spherical && evaluate_gradients_z) printf("prev1-gradient-z: %e, z/r: %e\n", prev1_gradient[Z_], z * one_per_r); // the starting address has 1 item before from the l=0, 3 from l=1, and 1 from l=2 address2 = address + ncell_nlip * 5; for (l = 2; l <= lmax; l++) { double a = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l-1)*(l+1)) ); current = a * z*prev1 * one_per_r; if (evaluate_gradients_x) current_gradient[X_] = a *(z * prev1 * one_per_r_gradient[X_] + z * one_per_r * prev1_gradient[X_]); if (evaluate_gradients_y) current_gradient[Y_] = a *(z * prev1 * one_per_r_gradient[Y_] + z * one_per_r * prev1_gradient[Y_]); if (evaluate_gradients_z) current_gradient[Z_] = a *(z * prev1 * one_per_r_gradient[Z_] + prev1 + z * one_per_r * prev1_gradient[Z_]); if (l > 2) { double b = sqrt( (double)((l-2)*(l)) / (double)((l-1)*(l+1)) ); current -= b * prev2; if (evaluate_gradients_x) current_gradient[X_] -= b * prev2_gradient[X_]; if (evaluate_gradients_y) current_gradient[Y_] -= b * prev2_gradient[Y_]; if (evaluate_gradients_z) current_gradient[Z_] -= b * prev2_gradient[Z_]; } radial_value = evaluate_polynomials_shared<NLIP>(address2, cf, r); radial_derivative = evaluate_polynomials_shared<NLIP-1>(address2, df, r); if (evaluate_gradients_x) result[X_] += radial_value * current_gradient[X_] + radial_derivative * current * x;// * one_per_r; if (evaluate_gradients_y) result[Y_] += radial_value * current_gradient[Y_] + radial_derivative * current * y;// * one_per_r; if (evaluate_gradients_z) result[Z_] += radial_value * current_gradient[Z_] + radial_derivative * current * z;// * one_per_r; prev2 = prev1; if (evaluate_gradients_x) prev2_gradient[X_] = prev1_gradient[X_]; if (evaluate_gradients_y) prev2_gradient[Y_] = prev1_gradient[Y_]; if (evaluate_gradients_z) prev2_gradient[Z_] = prev1_gradient[Z_]; prev1 = current; if (evaluate_gradients_x) prev1_gradient[X_] = current_gradient[X_]; if (evaluate_gradients_y) prev1_gradient[Y_] = current_gradient[Y_]; if (evaluate_gradients_z) prev1_gradient[Z_] = current_gradient[Z_]; // add the address2 to get to the next item with m=-1 address2 += ncell_nlip * (2*l+2); } prev2 = 1.0; if (evaluate_gradients_x) prev2_gradient[X_] = 0.0; if (evaluate_gradients_y) prev2_gradient[Y_] = 0.0; if (evaluate_gradients_z) prev2_gradient[Z_] = 0.0; // set all values where m=0 prev1 = z * one_per_r; if (evaluate_gradients_x) prev1_gradient[X_] = one_per_r_gradient[X_] * z; if (evaluate_gradients_y) prev1_gradient[Y_] = one_per_r_gradient[Y_] * z; if (evaluate_gradients_z) prev1_gradient[Z_] = 1.0 + one_per_r_gradient[Z_] * z; // set value for l=1, m=0 radial_value = evaluate_polynomials_shared<NLIP>(address+2*ncell_nlip, cf, r); radial_derivative = evaluate_polynomials_shared<NLIP-1>(address+2*ncell_nlip, df, r); if (evaluate_gradients_x) result[X_] += radial_value * prev1_gradient[X_] + radial_derivative * prev1 * x;// * one_per_r; if (evaluate_gradients_y) result[Y_] += radial_value * prev1_gradient[Y_] + radial_derivative * prev1 * y;// * one_per_r; if (evaluate_gradients_z) result[Z_] += radial_value * prev1_gradient[Z_] + radial_derivative * prev1 * z;// * one_per_r; //if (only_spherical) printf("radial_value: %e, radial_derivative: %e, prev1, i.e., z/r: %e\n", 
radial_value, radial_derivative, prev1); //if (only_spherical && evaluate_gradients_x) printf("prev1-gradient-x: %e, x/r: %e\n", prev1_gradient[X_], x * one_per_r); //if (only_spherical && evaluate_gradients_y) printf("prev1-gradient-y: %e, y/r: %e\n", prev1_gradient[Y_], y * one_per_r); //if (only_spherical && evaluate_gradients_z) printf("prev1-gradient-z: %e, z/r: %e\n", prev1_gradient[Z_], z * one_per_r); // the starting address has 1 item before from the l=0, 3 from l=1, and 2 from l=2 address2 = address + ncell_nlip * 6; for (l = 2; l <= lmax; l++) { double a = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l)*(l)) ); double b = sqrt( (double)((l-1)*(l-1)) / (double)((l)*(l)) ); current = a * z * prev1 * one_per_r; if (evaluate_gradients_x) current_gradient[X_] = a *(z * prev1 * one_per_r_gradient[X_] + z * one_per_r * prev1_gradient[X_]); if (evaluate_gradients_y) current_gradient[Y_] = a *(z * prev1 * one_per_r_gradient[Y_] + z * one_per_r * prev1_gradient[Y_]); if (evaluate_gradients_z) current_gradient[Z_] = a *(z * prev1 * one_per_r_gradient[Z_] + prev1 + z * one_per_r * prev1_gradient[Z_]); current -= b * prev2; if (evaluate_gradients_x) current_gradient[X_] -= b * prev2_gradient[X_]; if (evaluate_gradients_y) current_gradient[Y_] -= b * prev2_gradient[Y_]; if (evaluate_gradients_z) current_gradient[Z_] -= b * prev2_gradient[Z_]; radial_value = evaluate_polynomials_shared<NLIP>(address2, cf, r); radial_derivative = evaluate_polynomials_shared<NLIP-1>(address2, df, r); if (evaluate_gradients_x) result[X_] += radial_value * current_gradient[X_] + radial_derivative * current * x;// * one_per_r; if (evaluate_gradients_y) result[Y_] += radial_value * current_gradient[Y_] + radial_derivative * current * y;// * one_per_r; if (evaluate_gradients_z) result[Z_] += radial_value * current_gradient[Z_] + radial_derivative * current * z;// * one_per_r; prev2 = prev1; if (evaluate_gradients_x) prev2_gradient[X_] = prev1_gradient[X_]; if (evaluate_gradients_y) prev2_gradient[Y_] = prev1_gradient[Y_]; if (evaluate_gradients_z) prev2_gradient[Z_] = prev1_gradient[Z_]; prev1 = current; if (evaluate_gradients_x) prev1_gradient[X_] = current_gradient[X_]; if (evaluate_gradients_y) prev1_gradient[Y_] = current_gradient[Y_]; if (evaluate_gradients_z) prev1_gradient[Z_] = current_gradient[Z_]; // add the address2 to get to the next item with m=0 address2 += ncell_nlip * (2*l+2); } // set all values where m=1 prev1 = x * one_per_r; if (evaluate_gradients_x) prev1_gradient[X_] = 1.0 + one_per_r_gradient[X_] * x; if (evaluate_gradients_y) prev1_gradient[Y_] = one_per_r_gradient[Y_] * x; if (evaluate_gradients_z) prev1_gradient[Z_] = one_per_r_gradient[Z_] * x; // set value for l=1, m=1 radial_value = evaluate_polynomials_shared<NLIP>(address+3*ncell_nlip, cf, r); radial_derivative = evaluate_polynomials_shared<NLIP-1>(address+3*ncell_nlip, df, r); if (evaluate_gradients_x) result[X_] += radial_value * prev1_gradient[X_] + radial_derivative * prev1 * x;// * one_per_r; if (evaluate_gradients_y) result[Y_] += radial_value * prev1_gradient[Y_] + radial_derivative * prev1 * y;// * one_per_r; if (evaluate_gradients_z) result[Z_] += radial_value * prev1_gradient[Z_] + radial_derivative * prev1 * z;// * one_per_r; //if (only_spherical) printf("radial_value: %e, radial_derivative: %e, prev1, i.e., x/r: %e\n", radial_value, radial_derivative, prev1); //if (only_spherical && evaluate_gradients_x) printf("prev1-gradient-x: %e, x/r: %e\n", prev1_gradient[X_], x * one_per_r); //if (only_spherical && evaluate_gradients_y) 
printf("prev1-gradient-y: %e, y/r: %e\n", prev1_gradient[Y_], y * one_per_r); //if (only_spherical && evaluate_gradients_z) printf("prev1-gradient-z: %e, z/r: %e\n", prev1_gradient[Z_], z * one_per_r); // the starting address has 1 item before from the l=0, 3 from l=1, and 3 from l=2 address2 = address + ncell_nlip * 7; for (l = 2; l <= lmax; l++) { double a = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l+1)*(l-1)) ); current = a * z*prev1 * one_per_r; if (evaluate_gradients_x) current_gradient[X_] = a *(z * prev1 * one_per_r_gradient[X_] + z * one_per_r * prev1_gradient[X_]); if (evaluate_gradients_y) current_gradient[Y_] = a *(z * prev1 * one_per_r_gradient[Y_] + z * one_per_r * prev1_gradient[Y_]); if (evaluate_gradients_z) current_gradient[Z_] = a *(z * prev1 * one_per_r_gradient[Z_] + prev1 + z * one_per_r * prev1_gradient[Z_]); if (l > 2) { double b = sqrt( (double)((l)*(l-2)) / (double)((l+1)*(l-1)) ); current -= b * prev2; if (evaluate_gradients_x) current_gradient[X_] -= b * prev2_gradient[X_]; if (evaluate_gradients_y) current_gradient[Y_] -= b * prev2_gradient[Y_]; if (evaluate_gradients_z) current_gradient[Z_] -= b * prev2_gradient[Z_]; } radial_value = evaluate_polynomials_shared<NLIP>(address2, cf, r); radial_derivative = evaluate_polynomials_shared<NLIP-1>(address2, df, r); if (evaluate_gradients_x) result[X_] += radial_value * current_gradient[X_] + radial_derivative * current * x;// * one_per_r; if (evaluate_gradients_y) result[Y_] += radial_value * current_gradient[Y_] + radial_derivative * current * y;// * one_per_r; if (evaluate_gradients_z) result[Z_] += radial_value * current_gradient[Z_] + radial_derivative * current * z;// * one_per_r; prev2 = prev1; if (evaluate_gradients_x) prev2_gradient[X_] = prev1_gradient[X_]; if (evaluate_gradients_y) prev2_gradient[Y_] = prev1_gradient[Y_]; if (evaluate_gradients_z) prev2_gradient[Z_] = prev1_gradient[Z_]; prev1 = current; if (evaluate_gradients_x) prev1_gradient[X_] = current_gradient[X_]; if (evaluate_gradients_y) prev1_gradient[Y_] = current_gradient[Y_]; if (evaluate_gradients_z) prev1_gradient[Z_] = current_gradient[Z_]; // add the address2 to get to the next item with m=-1 address2 += ncell_nlip * (2*l+2); } // go through the rest of the stuff bottom = y * one_per_r; // bottom refers to solid harmonics value with l=l-1 and m=-(l-1) if (evaluate_gradients_x) bottom_gradient[X_] = one_per_r_gradient[X_] * y; if (evaluate_gradients_y) bottom_gradient[Y_] = 1.0 + one_per_r_gradient[Y_] * y; if (evaluate_gradients_z) bottom_gradient[Z_] = one_per_r_gradient[Z_] * y; top = x * one_per_r; // top refers to solid harmonics value with l=l-1 and m=l-1 if (evaluate_gradients_x) top_gradient[X_] = 1.0 + one_per_r_gradient[X_] * x; if (evaluate_gradients_y) top_gradient[Y_] = one_per_r_gradient[Y_] * x; if (evaluate_gradients_z) top_gradient[Z_] = one_per_r_gradient[Z_] * x; lm_address += 4 * ncell_nlip; for (l=2; l <= lmax; l++) { double c = sqrt((2.0*(double)l - 1.0) / (2.0*(double)l)); new_bottom = c * one_per_r * ( y*top + x*bottom); // get the gradients to x direction if (evaluate_gradients_x) new_bottom_gradient = c * (one_per_r_gradient[X_] * (y * top + x * bottom) + one_per_r * (y * top_gradient[X_] + x * bottom_gradient[X_] + bottom)) ; if (evaluate_gradients_x) top_gradient[X_] = c * (one_per_r_gradient[X_] * (x * top - y * bottom) + one_per_r * (x * top_gradient[X_] + top - y * bottom_gradient[X_])); if (evaluate_gradients_x) bottom_gradient[X_] = new_bottom_gradient; // get the gradients to y direction if 
(evaluate_gradients_y) new_bottom_gradient = c * (one_per_r_gradient[Y_] * (y * top + x * bottom) + one_per_r * (y * top_gradient[Y_] + top + x * bottom_gradient[Y_])); if (evaluate_gradients_y) top_gradient[Y_] = c * (one_per_r_gradient[Y_] * (x * top - y * bottom) + one_per_r * (x * top_gradient[Y_] - y * bottom_gradient[Y_] - bottom)); if (evaluate_gradients_y) bottom_gradient[Y_] = new_bottom_gradient; // get the gradients to z direction if (evaluate_gradients_z) new_bottom_gradient = c * (one_per_r_gradient[Z_] * (y * top + x * bottom) + one_per_r * (y * top_gradient[Z_] + x * bottom_gradient[Z_])); if (evaluate_gradients_z) top_gradient[Z_] = c * (one_per_r_gradient[Z_] * (x * top - y * bottom) + one_per_r * (x * top_gradient[Z_] - y * bottom_gradient[Z_])); if (evaluate_gradients_z) bottom_gradient[Z_] = new_bottom_gradient; top = c * one_per_r * ( x*top-y*bottom ); // store the new bottom: l=l, m=-l (we need the old bottom in calculation of top previously, so we // have to sacrifice one register temporarily) bottom = new_bottom; radial_value = evaluate_polynomials_shared<NLIP>(lm_address, cf, r); radial_derivative = evaluate_polynomials_shared<NLIP-1>(lm_address, df, r); // get value for l=l, m=-l. if (evaluate_gradients_x) result[X_] += radial_value * bottom_gradient[X_] + radial_derivative * bottom * x;// * one_per_r; if (evaluate_gradients_y) result[Y_] += radial_value * bottom_gradient[Y_] + radial_derivative * bottom * y;// * one_per_r; if (evaluate_gradients_z) result[Z_] += radial_value * bottom_gradient[Z_] + radial_derivative * bottom * z;// * one_per_r; radial_value = evaluate_polynomials_shared<NLIP>(lm_address + 2*l * ncell_nlip, cf, r); radial_derivative = evaluate_polynomials_shared<NLIP-1>(lm_address + 2*l * ncell_nlip, df, r); // get value for l=l, m=l. 
The address is 2*l items away from l=l, m=-l if (evaluate_gradients_x) result[X_] += radial_value * top_gradient[X_] + radial_derivative * top * x;// * one_per_r; if (evaluate_gradients_y) result[Y_] += radial_value * top_gradient[Y_] + radial_derivative * top * y;// * one_per_r; if (evaluate_gradients_z) result[Z_] += radial_value * top_gradient[Z_] + radial_derivative * top * z;// * one_per_r; // set all values where m=-l prev1 = bottom; if (evaluate_gradients_x) prev1_gradient[X_] = bottom_gradient[X_]; if (evaluate_gradients_y) prev1_gradient[Y_] = bottom_gradient[Y_]; if (evaluate_gradients_z) prev1_gradient[Z_] = bottom_gradient[Z_]; address2 = lm_address + (2*l+2) * ncell_nlip; for (l2 = l+1; l2 <= lmax; l2++) { // evaluate spherical harmonics for l=l2, m=-l double a = ( 2.0*(double)l2-1.0) * rsqrt( 1.0*(double)((l2-l)*(l2+l)) ); current = a * z*prev1 * one_per_r; if (evaluate_gradients_x) current_gradient[X_] = a *(z * prev1 * one_per_r_gradient[X_] + z * one_per_r * prev1_gradient[X_]); if (evaluate_gradients_y) current_gradient[Y_] = a *(z * prev1 * one_per_r_gradient[Y_] + z * one_per_r * prev1_gradient[Y_]); if (evaluate_gradients_z) current_gradient[Z_] = a *(z * prev1 * one_per_r_gradient[Z_] + prev1 + z * one_per_r * prev1_gradient[Z_]); if (l2 > l+1) { double b = sqrt( (double)((l2-l-1)*(l2+l-1)) / (double)((l2-l)*(l2+l)) ); current -= b * prev2; if (evaluate_gradients_x) current_gradient[X_] -= b * prev2_gradient[X_]; if (evaluate_gradients_y) current_gradient[Y_] -= b * prev2_gradient[Y_]; if (evaluate_gradients_z) current_gradient[Z_] -= b * prev2_gradient[Z_]; } radial_value = evaluate_polynomials_shared<NLIP>(address2, cf, r); radial_derivative = evaluate_polynomials_shared<NLIP-1>(address2, df, r); if (evaluate_gradients_x) result[X_] += radial_value * current_gradient[X_] + radial_derivative * current * x;// * one_per_r; if (evaluate_gradients_y) result[Y_] += radial_value * current_gradient[Y_] + radial_derivative * current * y;// * one_per_r; if (evaluate_gradients_z) result[Z_] += radial_value * current_gradient[Z_] + radial_derivative * current * z;// * one_per_r; prev2 = prev1; if (evaluate_gradients_x) prev2_gradient[X_] = prev1_gradient[X_]; if (evaluate_gradients_y) prev2_gradient[Y_] = prev1_gradient[Y_]; if (evaluate_gradients_z) prev2_gradient[Z_] = prev1_gradient[Z_]; prev1 = current; if (evaluate_gradients_x) prev1_gradient[X_] = current_gradient[X_]; if (evaluate_gradients_y) prev1_gradient[Y_] = current_gradient[Y_]; if (evaluate_gradients_z) prev1_gradient[Z_] = current_gradient[Z_]; // add the address2 to get to the next item with m=-1 address2 += ncell_nlip * (2*l2+2); } // set all values where m=l lm_address += 2*l * ncell_nlip; prev1 = top; if (evaluate_gradients_x) prev1_gradient[X_] = top_gradient[X_]; if (evaluate_gradients_y) prev1_gradient[Y_] = top_gradient[Y_]; if (evaluate_gradients_z) prev1_gradient[Z_] = top_gradient[Z_]; address2 = lm_address + (2*l+2) * ncell_nlip; for (l2 = l+1; l2 <= lmax; l2++) { // evaluate spherical harmonics for l=l2, m=l double a = ( 2.0*(double)l2-1.0) * rsqrt( 1.0*(double)((l2+l)*(l2-l)) ); current = a * z*prev1 * one_per_r; if (evaluate_gradients_x) current_gradient[X_] = a *(z * prev1 * one_per_r_gradient[X_] + z * one_per_r * prev1_gradient[X_]); if (evaluate_gradients_y) current_gradient[Y_] = a *(z * prev1 * one_per_r_gradient[Y_] + z * one_per_r * prev1_gradient[Y_]); if (evaluate_gradients_z) current_gradient[Z_] = a *(z * prev1 * one_per_r_gradient[Z_] + prev1 + z * one_per_r * prev1_gradient[Z_]); if 
(l2 > l+1) { double b = sqrt( (double)((l2+l-1)*(l2-l-1)) / (double)((l2+l)*(l2-l)) ); current -= b * prev2; if (evaluate_gradients_x) current_gradient[X_] -= b * prev2_gradient[X_]; if (evaluate_gradients_y) current_gradient[Y_] -= b * prev2_gradient[Y_]; if (evaluate_gradients_z) current_gradient[Z_] -= b * prev2_gradient[Z_]; } radial_value = evaluate_polynomials_shared<NLIP>(address2, cf, r); radial_derivative = evaluate_polynomials_shared<NLIP-1>(address2, df, r); if (evaluate_gradients_x) result[X_] += radial_value * current_gradient[X_] + radial_derivative * current * x;// * one_per_r; if (evaluate_gradients_y) result[Y_] += radial_value * current_gradient[Y_] + radial_derivative * current * y;// * one_per_r; if (evaluate_gradients_z) result[Z_] += radial_value * current_gradient[Z_] + radial_derivative * current * z;// * one_per_r; prev2 = prev1; if (evaluate_gradients_x) prev2_gradient[X_] = prev1_gradient[X_]; if (evaluate_gradients_y) prev2_gradient[Y_] = prev1_gradient[Y_]; if (evaluate_gradients_z) prev2_gradient[Z_] = prev1_gradient[Z_]; prev1 = current; if (evaluate_gradients_x) prev1_gradient[X_] = current_gradient[X_]; if (evaluate_gradients_y) prev1_gradient[Y_] = current_gradient[Y_]; if (evaluate_gradients_z) prev1_gradient[Z_] = current_gradient[Z_]; // add the address2 to get to the next item with m=-1 address2 += ncell_nlip * (2*l2+2); } // get next address lm_address += ncell_nlip; } } result[X_] *= one_per_r; result[Y_] *= one_per_r; result[Z_] *= one_per_r; // multiply the result with r^k, if k is not 0 // the distance is not too close to 0.0 as this is checked // earlier in this function, NOTE: should never happen, thus // commented away //if (k != 0 && distance > 1e-12) { /*for (int i = 0; i < k; i ++) { result *= distance; } for (int i = 0; i < -k; i ++) { result *= one_per_r; }*/ //} if (distance < 1e-12) { result[X_] = 0.0; // * evaluate_polynomials_shared<NLIP-1>(address, df, r); result[Y_] = 0.0; result[Z_] = 0.0; } } /* * Evaluates value of single bubble at a point. 
This is very similar with the * SolidHarmonics simple evaluation, but the results are multiplied with the * polynomial evaluations */ __device__ inline double Bubbles_evaluate_point( // x-coordinate relative to the center of the bubble const double &x, // y-coordinate relative to the center of the bubble const double &y, // z-coordinate relative to the center of the bubble const double &z, // relative distance from the center of the bubble const double &distance, // maximum quantum number 'l' const int &lmax, // number of cells const int &ncell, // number of lagrange integration polyniomials per // cell, i.e., the number of grid points per cell const int &nlip, // position inside the cell const double &r, // k value for the bubble const int &k, // the first address value in bubble for the selected cell const int &address, // constant pointer to a variable double array const double* __restrict__ cf ) { double result = 0.0; int lm_address = address, address2; // NOTE: Here the nlip is replaced by 8 because this gives advantages in loading the stuff // also *cf this should be done const int ncell_nlip = ncell * 8; int l, l2; double top, bottom, new_bottom, prev1, prev2, current, a, b, a2; const double one_per_r = 1.0 / distance;; l = 0; // set value for l=0, m=0 //printf("x: %f, y: %f, z: %f, nlip: %d, ncell: %d, l: 0, address: %d, cf: %ld, r: %f\n", x, y, z, nlip, ncell, 0, lm_address, cf, r); //printf("shared_memory address: %ld\n"); //printf("shared memory first value: %f", shared_memory[0]); result = evaluate_polynomials_shared<NLIP>(lm_address, cf, r); if (lmax >= 1) { // set value for l=1, m=-1 result += y * evaluate_polynomials_shared<NLIP>(address+ncell_nlip, cf, r) * one_per_r; // set value for l=1, m=0 result += z * evaluate_polynomials_shared<NLIP>(address+2*ncell_nlip, cf, r) * one_per_r; // set value for l=1, m=1 result += x * evaluate_polynomials_shared<NLIP>(address+3*ncell_nlip, cf, r) * one_per_r; // set all values where m=-1 prev2 = 0.0; prev1 = y * one_per_r; // the starting address has 1 item before from the l=0, 3 from l=1, and 1 from l=2 address2 = address + ncell_nlip * 5; l = threadIdx.x % 32; a = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l-1)*(l+1)) ); b = (l > 2) ? sqrt( (double)((l-2)*(l)) / (double)((l-1)*(l+1)) ) : 0.0; for (l = 2; l <= lmax; l++) { current = __shfl(a, l) * z*prev1 * one_per_r - __shfl(b, l) * prev2; result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r) ; prev2 = prev1; prev1 = current; // add the address2 to get to the next item with m=-1 address2 += ncell_nlip * (2*l+2); } // set all values where m=0 prev1 = z * one_per_r; prev2 = 1.0; // the starting address has 1 item before from the l=0, 3 from l=1, and 2 from l=2 address2 = address + ncell_nlip * 6; l = threadIdx.x % 32; a = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l)*(l)) ); b = sqrt( (double)((l-1)*(l-1)) / (double)((l)*(l)) ); for (l = 2; l <= lmax; l++) { current = __shfl(a, l) * z * prev1 * one_per_r - __shfl(b, l) * prev2; result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r); prev2 = prev1; prev1 = current; // add the address2 to get to the next item with m=0 address2 += ncell_nlip * (2*l+2); } // set all values where m=1 prev1 = x * one_per_r; // the starting address has 1 item before from the l=0, 3 from l=1, and 3 from l=2 address2 = address + ncell_nlip * 7; l = threadIdx.x % 32; a = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l+1)*(l-1)) ); b = (l > 2) ? 
sqrt( (double)((l)*(l-2)) / (double)((l+1)*(l-1)) ) : 0.0; for (l = 2; l <= lmax; l++) { current = __shfl(a, l) * z*prev1 * one_per_r - __shfl(b, l) * prev2; result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r); prev2 = prev1; prev1 = current; // add the address2 to get to the next item with m=1 address2 += ncell_nlip * (2*l+2); } // go through the rest of the stuff bottom = y * one_per_r; // bottom refers to spherical harmonics value with l=l-1 and m=-(l-1) top = x * one_per_r; // top refers to spherical harmonics value with l=l-1 and m=l-1 lm_address += 4 * ncell_nlip; l = threadIdx.x % 32; a = sqrt((2.0*(double)l - 1.0) / (2.0*(double)l)); for (l=2; l <= lmax; l++) { new_bottom = __shfl(a, l) * one_per_r * ( y*top + x*bottom); top = __shfl(a, l) * one_per_r * ( x*top - y*bottom ); // store the new bottom: l=l, m=-l (we need the old bottom in calculation of top previously, so we // have to sacrifice one register temporarily) bottom = new_bottom; result += bottom * evaluate_polynomials_shared<NLIP>(lm_address, cf, r); // get value for l=l, m=l. The address is 2*l items away from l=l, m=-l result += top * evaluate_polynomials_shared<NLIP>(lm_address + 2*l * ncell_nlip, cf, r); // set all values where m=-l prev2 = 0.0; prev1 = bottom; address2 = lm_address + (2*l+2) * ncell_nlip; // set all values where m=l lm_address += 2*l * ncell_nlip; l2 = threadIdx.x % 32; a2 = ( 2.0*(double)l2-1.0) * rsqrt( 1.0*(double)((l2-l)*(l2+l)) ); b = (l2 > l+1) ? sqrt( (double)((l2-l-1)*(l2+l-1)) / (double)((l2-l)*(l2+l)) ) : 0.0; for (l2 = l+1; l2 <= lmax; l2++) { // evaluate spherical harmonics for l=l2, m=-l current = __shfl(a2, l2) * z*prev1 * one_per_r - __shfl(b, l2) * prev2; result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r); prev2 = prev1; prev1 = current; // add the address2 to get to the next item with m=-l address2 += ncell_nlip * (2*l2+2); } prev2 = 0.0; prev1 = top; address2 = lm_address + (2*l+2) * ncell_nlip; l2 = threadIdx.x % 32; a2 = ( 2.0*(double)l2-1.0) * rsqrt( 1.0*(double)((l2+l)*(l2-l)) ) ; b = (l2 > l+1) ? 
sqrt( (double)((l2+l-1)*(l2-l-1)) / (double)((l2+l)*(l2-l)) ) : 0.0; for (l2 = l+1; l2 <= lmax; l2++) { // evaluate spherical harmonics for l=l2, m=l current = __shfl(a2, l2) * z*prev1 * one_per_r - __shfl(b, l2) * prev2; // the latter term will go to zero, if l2 <= l+1 result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r); prev2 = prev1; prev1 = current; // add the address3 to get to the next item with m=l address2 += ncell_nlip * (2*l2+2); } // get next address lm_address += ncell_nlip; } } // multiply the result with r^k, if k is not 0 // the distance is not too close to 0.0 as this is checked // earlier in this function, NOTE: should never happen, thus // commented away //if (k != 0 && distance > 1e-12) { if (distance < 1e-14) { result = 1.0 * evaluate_polynomials_shared<NLIP>(address, cf, r); } for (int i = 0; i < k; i ++) { result *= distance; } for (int i = 0; i < -k; i ++) { result *= one_per_r; } //} return result; } __device__ int getGlobalIdx_1D_1D() { int id=threadIdx.x + blockIdx.x * blockDim.x; return id; } __device__ int getGlobalIdx_3D_3D() { int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x; return threadId; } /* * Get the minimum/maximum and overwrite values with -1 */ __device__ inline void minmax(int *first, int *second) { int temp; if (*first == -1) { *first = *second; } if (*second == -1) { *second = *first; } if (*second < *first) { temp = *second; *second = *first; *first = temp; } } /* * Find the minimum and maximum in array that is as large as a block, and store them as the first * and last value of the input array. NOTE: The arrayLength must be a power of 2. */ __device__ void calculateMinimumMaximum(int *array, int blockThreadId, int arrayLength) { int division = arrayLength / 2; // order so that the larger values of pairs are at the second part of the array // and the smaller are at the end of the array if (blockThreadId < division) { // rearrange the values so that the larger is in the &array[blockThreadId + division] // and smaller is in &array[blockThreadId] minmax(&array[blockThreadId], &array[blockThreadId + division]); } __syncthreads(); division = arrayLength / 4; // if the block while (division >= 1) { if (blockThreadId < division) { minmax(&array[blockThreadId], &array[blockThreadId + division]); } else if (blockThreadId > arrayLength - division) { minmax(&array[blockThreadId - division], &array[blockThreadId]); } division /= 2; __syncthreads(); } } /* * Evaluate Bubbles on a grid * */ template <bool lmin_zero, bool evaluate_value, bool evaluate_gradients_x, bool evaluate_gradients_y, bool evaluate_gradients_z > __device__ inline void Bubbles_evaluate_grid(const Bubble* __restrict__ bubble, double* __restrict__ cube, double* __restrict__ gradient_cube_x, double* __restrict__ gradient_cube_y, double* __restrict__ gradient_cube_z, const double* __restrict__ grid_points_x, const double* __restrict__ grid_points_y, const double* __restrict__ grid_points_z, const int shape_x, const int shape_y, const int shape_z, const double zero_point_x, const double zero_point_y, const double zero_point_z, const int k, const int slice_offset, const size_t pitch, const int memory_y_shape, const int slice_count, const int lmin, const double multiplier) { // The result array will be in fortran with indices l, x, y, z. 
// This means that the x index will be the fastest to change. int x, y, z; getXYZ(&x, &y, &z); // get the offset from the input cube pointer const int id = getCubeOffset3D(x, y, z, pitch, memory_y_shape); double value, gradient[3]; double in_cell_position = 0.0; const int ncell = bubble->grid->ncell, nlip = bubble->grid->nlip; int icell; double relative_position_x, relative_position_y, relative_position_z, distance; //printf("X: %f, cell_spacing: %f, ncell: %d", distance, bubble->cell_spacing, ncell); // Check that the point is within the block if (x < shape_x && y < shape_y && z+slice_offset < shape_z && z < slice_count) { // calculate relative position to the zero-point and distance to it calculate_distance(relative_position_x, relative_position_y, relative_position_z, distance, zero_point_x, zero_point_y, zero_point_z, grid_points_x[x], ldg<double>(&grid_points_y[y]), ldg<double>(&grid_points_z[z+slice_offset])); // get the order number of cell the point resides in //icell = calculate_icell(distance, bubble->d, bubble->ncell); calculate_icell_radial(distance, bubble->charge, bubble->grid->r_max, ncell, nlip, &icell, &in_cell_position); //printf("x: %d, y: %d, z:%d, id:%d, vector_id: %d, vector_offset:%d, blockId: %d, blocks_per_vector: %d, %f, %f, %f, %d\n", x, y, z, id, vector_id, vector_offset, blockIdx.x, blocks_per_vector, grid_points_x[x], ldg(&grid_points_y[y]), ldg(&grid_points_z[z]), icell); } else { icell = 1; distance = 0.1; } if (lmin_zero) { // calculate the bubble value for the point with lmin = 0 if (evaluate_value) { value = Bubbles_evaluate_point( relative_position_x, relative_position_y, relative_position_z, distance, bubble->lmax, ncell, nlip, in_cell_position, k, icell * 8, bubble->cf); } // evaluate gradients if we are evaluating any if (evaluate_gradients_x || evaluate_gradients_y || evaluate_gradients_z) { Bubbles_evaluate_gradient_point <evaluate_gradients_x, evaluate_gradients_y, evaluate_gradients_z> (relative_position_x, relative_position_y, relative_position_z, distance, bubble->lmax, ncell, nlip, in_cell_position, k, icell * 8, bubble->cf, bubble->df, false, gradient ); } } else { if (evaluate_value) { // calculate the bubble value for the point with lmin > 0 value = Bubbles_evaluate_point_lmin( relative_position_x, relative_position_y, relative_position_z, distance, lmin, bubble->lmax, ncell, nlip, in_cell_position, k, icell * 8, bubble->cf ); } } if (x < shape_x && y < shape_y && z+slice_offset < shape_z && z < slice_count && icell < ncell) { /*if (x == 0 && y == 0) { printf("%d: [x, y, z], id : [%d, %d, %d], %d, icell: %d, in_cell_position:%f, first_bubble-value:%e, distance:%f, coord: [%f, %f, %f] old-value: %e, value: %e, multiplier: %f\n", slice_offset, x, y, z+slice_offset, id, icell, in_cell_position, bubble->cf[icell*8], distance, relative_position_x, relative_position_y, relative_position_z, cube[id], value, multiplier); }*/ if (evaluate_value) cube[id] += multiplier * value; if (evaluate_gradients_x) gradient_cube_x[id] += multiplier * gradient[X_]; if (evaluate_gradients_y) gradient_cube_y[id] += multiplier * gradient[Y_]; if (evaluate_gradients_z) gradient_cube_z[id] += multiplier * gradient[Z_]; } return; } /* * Evaluate Bubbles on a grid */ __global__ void #if (__CUDA_ARCH__ <= 350) __launch_bounds__(128, 6) #else __launch_bounds__(256) #endif Bubbles_evaluate_grid_lmin(const Bubble* __restrict__ bubble, double* __restrict__ cube, const double* __restrict__ grid_points_x, const double* __restrict__ grid_points_y, const double* __restrict__ 
grid_points_z, const int shape_x, const int shape_y, const int shape_z, const double zero_point_x, const double zero_point_y, const double zero_point_z, const int k, const int slice_offset, const size_t pitch, const int memory_y_shape, const int slice_count, const int lmin, const double multiplier) { Bubbles_evaluate_grid <false, true, false, false, false> ( bubble, cube, /*gradient_cube_x = */NULL, /*gradient_cube_y = */NULL, /*gradient_cube_z = */NULL, grid_points_x, grid_points_y, grid_points_z, shape_x, shape_y, shape_z, zero_point_x, zero_point_y, zero_point_z, k, slice_offset, pitch, memory_y_shape, slice_count, lmin, multiplier); } __global__ void #if (__CUDA_ARCH__ > 350) __launch_bounds__(256) #else __launch_bounds__(128, 8) #endif Bubbles_evaluate_grid_pitched(const Bubble* __restrict__ bubble, double* __restrict__ cube, const double* __restrict__ grid_points_x, const double* __restrict__ grid_points_y, const double* __restrict__ grid_points_z, const int shape_x, const int shape_y, const int shape_z, const double zero_point_x, const double zero_point_y, const double zero_point_z, const int k, const int slice_offset, const size_t pitch, const int memory_y_shape, const int slice_count, const double multiplier) { Bubbles_evaluate_grid <true, true, false, false, false> ( bubble, cube, /*gradient_cube_x = */NULL, /*gradient_cube_y = */NULL, /*gradient_cube_z = */NULL, grid_points_x, grid_points_y, grid_points_z, shape_x, shape_y, shape_z, zero_point_x, zero_point_y, zero_point_z, k, slice_offset, pitch, memory_y_shape, slice_count, /*lmin = */0, multiplier); } template <bool lmin_zero, bool evaluate_value, bool evaluate_gradients_x, bool evaluate_gradients_y, bool evaluate_gradients_z > __global__ void #if (__CUDA_ARCH__ > 350) __launch_bounds__(256) #else __launch_bounds__(128, 5) #endif Bubbles_evaluate_grid_gradients(const Bubble* __restrict__ bubble, double* __restrict__ cube, double* __restrict__ gradient_cube_x, double* __restrict__ gradient_cube_y, double* __restrict__ gradient_cube_z, const double* __restrict__ grid_points_x, const double* __restrict__ grid_points_y, const double* __restrict__ grid_points_z, const int shape_x, const int shape_y, const int shape_z, const double zero_point_x, const double zero_point_y, const double zero_point_z, const int k, const int slice_offset, const size_t pitch, const int memory_y_shape, const int slice_count, const double multiplier) { Bubbles_evaluate_grid <lmin_zero, evaluate_value, evaluate_gradients_x, evaluate_gradients_y, evaluate_gradients_z> ( bubble, cube, gradient_cube_x, gradient_cube_y, gradient_cube_z, grid_points_x, grid_points_y, grid_points_z, shape_x, shape_y, shape_z, zero_point_x, zero_point_y, zero_point_z, k, slice_offset, pitch, memory_y_shape, slice_count, /*lmin = */0, multiplier); } /* * Evaluate Bubbles at points */ template <bool lmin_zero, bool evaluate_value, bool evaluate_gradients_x, bool evaluate_gradients_y, bool evaluate_gradients_z> __device__ inline void Bubbles_evaluate_points(const Bubble* __restrict__ bubble, double* __restrict__ result_array, double* __restrict__ device_gradients_x, double* __restrict__ device_gradients_y, double* __restrict__ device_gradients_z, // a 3d array, where the x coordinates are first, // then y coordinates, and finally the z coordinates. 
This ordering // is selected to get coalesced memory reads const double* __restrict__ points, // total number of points evaluated by this device const int device_number_of_points, // the zero point x-coordinate of bubbles const double zero_point_x, // the zero point y-coordinate of bubbles const double zero_point_y, // the zero point z-coordinate of bubbles const double zero_point_z, // the k value of the bubbles const int k, // the lmin value evaluated const int lmin, // number of points in this kernel call const int point_count, // device_point_offset const int device_point_offset, const double multiplier ) { // Get the point order number within this kernel call int id = blockIdx.x * blockDim.x + threadIdx.x; double value, gradient[3]; double in_cell_position = 0.0; const int ncell = bubble->grid->ncell, nlip = bubble->grid->nlip; int icell = -1; double relative_position_x, relative_position_y, relative_position_z, distance, r_max = bubble->grid->r_max; //printf("X: %f, cell_spacing: %f, ncell: %d", distance, bubble->cell_spacing, ncell); // Check that the point is within the block if (id + device_point_offset < device_number_of_points && id < point_count ) { // calculate relative position to the zero-point and distance to it calculate_distance(relative_position_x, relative_position_y, relative_position_z, distance, zero_point_x, zero_point_y, zero_point_z, points[id + device_point_offset], points[id + device_point_offset + device_number_of_points], points[id + device_point_offset + device_number_of_points*2]); // get the order number of cell the point resides in calculate_icell_radial(distance, bubble->charge, bubble->grid->r_max, ncell, nlip, &icell, &in_cell_position); } else { icell = 1; distance = 0.1; } // calculate the bubble value for the point if (!lmin_zero) { if (evaluate_value) { value = Bubbles_evaluate_point_lmin( relative_position_x, relative_position_y, relative_position_z, distance, lmin, bubble->lmax, ncell, nlip, in_cell_position, k, icell * 8, bubble->cf ); } } else { if (evaluate_gradients_x || evaluate_gradients_y || evaluate_gradients_z) { Bubbles_evaluate_gradient_point <evaluate_gradients_x, evaluate_gradients_y, evaluate_gradients_z> (relative_position_x, relative_position_y, relative_position_z, distance, bubble->lmax, ncell, nlip, in_cell_position, k, icell * 8, bubble->cf, bubble->df, false, //(evaluate_gradients_x != evaluate_gradients_y || evaluate_gradients_x != evaluate_gradients_z) && icell == 0, //evaluate_gradients_x != evaluate_gradients_y || evaluate_gradients_x != evaluate_gradients_z, gradient ); } if (evaluate_value) { value = Bubbles_evaluate_point( relative_position_x, relative_position_y, relative_position_z, distance, bubble->lmax, ncell, nlip, in_cell_position, k, icell * 8, bubble->cf ); } } // store the result to the result array if (id + device_point_offset < device_number_of_points && id < point_count && distance < r_max && icell < ncell ) { if (evaluate_value) result_array[id+device_point_offset] += multiplier * value; //if ((evaluate_gradients_x) && (id + device_point_offset <= 7)) printf("%%%%#%# X: %d: pos: %f, %f, %f, val: %e, remainder: %e\n", id + device_point_offset, relative_position_x, relative_position_y, relative_position_z, gradient[X_], device_gradients_x[id+device_point_offset]); //if ((evaluate_gradients_y) && (id + device_point_offset <= 7)) printf("%%%%#%# Y: %d: pos: %f, %f, %f, val: %e, remainder: %e\n", id + device_point_offset, relative_position_x, relative_position_y, relative_position_z, gradient[Y_], 
device_gradients_y[id+device_point_offset]); //if ((evaluate_gradients_z) && (id + device_point_offset <= 7)) printf("%%%%#%# Z: %d: pos: %f, %f, %f, val: %e, remainder: %e\n", id + device_point_offset, relative_position_x, relative_position_y, relative_position_z, gradient[Z_], device_gradients_z[id+device_point_offset]); // add also the gradient value, if we are evaluating them if (evaluate_gradients_x) device_gradients_x[id+device_point_offset] += multiplier * gradient[X_]; if (evaluate_gradients_y) device_gradients_y[id+device_point_offset] += multiplier * gradient[Y_]; if (evaluate_gradients_z) device_gradients_z[id+device_point_offset] += multiplier * gradient[Z_]; } return; } __device__ inline double get_damping_factor(double r) { double result; // erfc: error function if (r > 1e-12) { result = 0.5*erfc(r-2.0/r); } else { result = 1.0; } return result; } template <bool lmin_zero, bool evaluate_value, bool evaluate_gradients_x, bool evaluate_gradients_y, bool evaluate_gradients_z > #if (__CUDA_ARCH__ <= 350) __launch_bounds__(128, 4) #else __launch_bounds__(256) #endif __global__ void Bubbles_evaluate_gradient_points( const Bubble* __restrict__ bubble, double* __restrict__ result_array, double* __restrict__ device_gradients_x, double* __restrict__ device_gradients_y, double* __restrict__ device_gradients_z, // a 3d array, where the x coordinates are first, // then y coordinates, and finally the z coordinates. This ordering // is selected to get coalesced memory reads const double* __restrict__ points, // total number of points evaluated by this device const int device_number_of_points, // the zero point x-coordinate of bubbles const double zero_point_x, // the zero point y-coordinate of bubbles const double zero_point_y, // the zero point z-coordinate of bubbles const double zero_point_z, // the k value of the bubbles const int k, // number of points in this kernel call const int point_count, // device_point_offset const int device_point_offset, const double multiplier ) { Bubbles_evaluate_points<lmin_zero, evaluate_value, evaluate_gradients_x, evaluate_gradients_y, evaluate_gradients_z>( bubble, result_array, device_gradients_x, device_gradients_y, device_gradients_z, points, device_number_of_points, zero_point_x, zero_point_y, zero_point_z, k, 0, point_count, device_point_offset, multiplier ); } #if (__CUDA_ARCH__ <= 350) __launch_bounds__(128, 7) #else __launch_bounds__(256) #endif __global__ void Bubbles_evaluate_points_simple( const Bubble* __restrict__ bubble, double* __restrict__ result_array, // a 3d array, where the x coordinates are first, // then y coordinates, and finally the z coordinates. 
This ordering // is selected to get coalesced memory reads const double* __restrict__ points, // total number of points evaluated by this device const int device_number_of_points, // the zero point x-coordinate of bubbles const double zero_point_x, // the zero point y-coordinate of bubbles const double zero_point_y, // the zero point z-coordinate of bubbles const double zero_point_z, // the k value of the bubbles const int k, // number of points in this kernel call const int point_count, // device_point_offset const int device_point_offset, const double multiplier ) { Bubbles_evaluate_points<true, true, false, false, false>( bubble, result_array, /*device_gradients_x*/NULL, /*device_gradients_y*/NULL, /*device_gradients_z*/NULL, points, device_number_of_points, zero_point_x, zero_point_y, zero_point_z, k, 0, point_count, device_point_offset, multiplier ); } /*__global__ void Bubble_make_taylor_kernel(Bubble_t *result_bubble, int maximum_taylor_order, double *contaminants, double *c2s_coefficients, int *c2s_lm_ids, int *c2s_term_starts, int offset) { const int index=threadIdx.x + blockIdx.x * blockDim.x + offset; extern __shared__ double shared_memory[]; double *one_per_kappa_factorial = &shared_memory[0]; double *shared_contaminants = &shared_memory[maximum_taylor_order]; int contaminants_size = (maximum_taylor_order+1)*(maximum_taylor_order+2)*(maximum_taylor_order+3)/6; // calculate the 1/kappa! terms to the shared memory if (threadIdx.x < maximum_taylor_order) { int kappa = 1; for (int i = 1; i <= threadIdx.x; i++) { kappa *= i+1; } one_per_kappa_factorial[threadIdx.x] = 1.0 / ((double) kappa); } // load the contaminats to the shared memory if (threadIdx.x < contaminants_size) { int id = threadIdx.x; while (id < contaminants_size) { shared_contaminants[id] = contaminants[id]; id += blockDim.x; } } __syncthreads(); // do the actual calculation double r = result_bubble->gridpoints[index]; double prefactor; double damping_factor = get_damping_factor(r); int k = result_bubble->k, ncell= result_bubble->ncell, nlip = result_bubble->nlip; int result_index = 0, counter = 0, term_counter = 0; for (int x = 0; x <= maximum_taylor_order; x++) { for (int y = 0; y <= maximum_taylor_order - x; y++) { for (int z = 0; z <= maximum_taylor_order - x - y; z++) { prefactor = one_per_kappa_factorial[x+y+z]// 1/[x+y+z] * pow(r, (double)(x+y+z - k)) // r^x+y+z-k * shared_contaminants[counter] // c * damping_factor; // go through all l,m terms which get contribution from x,y,z -term while (term_counter < c2s_term_starts[counter+1]) { // get the index in the result array, note: the -1 is because the indices are in // fortran format, starting from 1 result_index = (c2s_lm_ids[term_counter]-1) * (ncell * (nlip-1) +1) + index; // add the prefactor times the coefficient from cartesion to spherical conversion result_bubble->f[result_index] += c2s_coefficients[term_counter] * prefactor; // add the counter value used to follow the c2s conversion term_counter++; } // add the conter value used to follow cartesian terms counter ++; } } } } */ /* * Kernel that sums the f-values of two bubble objects together. The summation happens * pointwise so that each thread calculates all l,m values for each point. The result * is stored to the bubble_f. 
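 *
 * A rough host-side sketch of the same indexing (illustrative only; 'n_points' is a
 * hypothetical name for the number of radial points and pitch_doubles stands for
 * device_f_pitch / sizeof(double)):
 *
 *   for (int ilm = 0; ilm < (lmax+1)*(lmax+1); ilm++) {
 *       for (int id = 0; id < n_points; id++) {
 *           bubble_f[ilm * pitch_doubles + id] += bubble1_f[ilm * pitch_doubles + id];
 *       }
 *   }
 *
 * i.e., each row of the pitched 2D allocation holds one (l,m) radial function, and the
 * kernel assigns one thread per radial grid point.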
 */
__global__ void Bubble_sum_kernel(double* __restrict__ bubble_f,
                                  const double* __restrict__ bubble1_f,
                                  const int lmax,
                                  const int max_id,
                                  const size_t device_f_pitch) {
    const int id = threadIdx.x + blockIdx.x * blockDim.x;

    if (id < max_id) {
        // go through all l, m values of input bubble 'bubble'
        for (int ilm = 0; ilm < (lmax+1)*(lmax+1); ilm++) {
            bubble_f[ilm * device_f_pitch / sizeof(double) + id] +=
                bubble1_f[ilm * device_f_pitch / sizeof(double) + id];
        }
    }
}

/*
 * Decreases the k-value of a bubble by k_decrease. The operation happens
 * pointwise so that each thread calculates all l,m values for each point. The result
 * is stored to bubble_f.
 *
 * k_decrease is the number by which the k-value is decreased.
 */
__global__ void Bubble_decrease_k_kernel(double* __restrict__ bubble_f,
                                         const double* __restrict__ r,
                                         const int k_decrease,
                                         const int lmax,
                                         const int max_id,
                                         const size_t device_f_pitch) {
    const int id = threadIdx.x + blockIdx.x * blockDim.x;

    if (id < max_id) {
        const double rpow = pow(r[id], (double) k_decrease);
        // go through all l, m values of input bubble 'bubble'
        for (int ilm = 0; ilm < (lmax+1)*(lmax+1); ilm++) {
            bubble_f[ilm * device_f_pitch / sizeof(double) + id] *= rpow;
        }
    }
}

/*
 * Multiply cubes 1 and 2 pointwise and store the result in cube1.
 */
__global__ void multiply_cubes(double *cube1, double *cube2, const int cube_size, const int offset) {
    // get the id of the point (only the first thread-index dimension is used)
    const int index = threadIdx.x + blockIdx.x * blockDim.x + offset;
    if (index < cube_size) {
        cube1[index] *= cube2[index];
    }
}

/**************************************************************
 *                   Bubble-implementation                    *
 **************************************************************/

/*
 * Evaluate the cf at ALL devices. This is a crucial preparation function for injection.
 * For correct results, the Bubble must have all of its f-values present when this is called.
 *
 * NOTE: the streaming is structured over the number of l,m-pairs, as in uploadAll.
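 *
 * A small worked example of the split (illustrative numbers only): with lmax = 2 there are
 * ilmmax = (lmax+1)^2 = 9 l,m-pairs; with 4 streams per device, stream 'stream' gets
 *
 *   ilm_per_stream = ilmmax / 4 + ((ilmmax % 4) > stream)
 *
 * i.e. 3, 2, 2, 2 pairs for streams 0..3, and each stream then launches calc_cf for
 * ilm_per_stream * ncell cell-polynomials.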
*/ void Bubble::calculateCf() { // calculate the cf int ilmmax = (this->lmax+1)*(this->lmax+1); int block_size = 64; int grid_size; int offset; check_errors(__FILE__, __LINE__); for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { this->streamContainer->setDevice(device); offset = 0; for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) { int ilm_per_stream = ilmmax / this->streamContainer->getStreamsPerDevice() + (( ilmmax % this->streamContainer->getStreamsPerDevice()) > stream); int number_of_points = ilm_per_stream * this->grid->ncell; // verify that there is something to calculate the cf for (for instance if ilmmax is 1, some streams // can be left without any points, resulting to a cuda error) if (number_of_points > 0) { grid_size = (number_of_points + block_size - 1) / block_size; hipLaunchKernelGGL(( calc_cf) , dim3(grid_size), dim3(block_size), 0, *this->streamContainer->getStream(device, stream) , this->device_copies[device], offset, number_of_points, this->device_f_pitch[device]); offset += number_of_points; } check_errors(__FILE__, __LINE__); } } } void Bubble::initDeviceMemory(int ibub, Grid1D *grid, double center[3], int lmax, int k, double charge, StreamContainer *streamContainer) { //hipHostRegister(this, sizeof(Bubble), hipHostRegisterPortable); //check_errors(__FILE__, __LINE__); this->ibub = ibub; this->lmax = lmax; this->device_memory_lmax = lmax; this->k = k; this->charge = charge; this->streamContainer = streamContainer; this->crd[X_] = center[X_]; this->crd[Y_] = center[Y_]; this->crd[Z_] = center[Z_]; this->integrator = NULL; this->uploaded_events = new hipEvent_t*[this->streamContainer->getNumberOfDevices()]; this->device_copies = new Bubble * [this->streamContainer->getNumberOfDevices()]; this->device_f = new double *[this->streamContainer->getNumberOfDevices()]; this->device_f_pitch = new size_t [this->streamContainer->getNumberOfDevices()]; this->device_cf = new double * [this->streamContainer->getNumberOfDevices()]; this->device_df = new double * [this->streamContainer->getNumberOfDevices()]; for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { this->streamContainer->setDevice(device); size_t sz=sizeof(double)*(grid->ncell*(grid->nlip-1)+1); hipMallocPitch((void**)&device_f[device], &device_f_pitch[device], sz, (lmax+1)*(lmax+1)); check_errors(__FILE__, __LINE__); hipMemset(device_f[device], 0, device_f_pitch[device]*(lmax+1)*(lmax+1)); check_errors(__FILE__, __LINE__); sz=sizeof(double)*grid->ncell*8*(lmax+1)*(lmax+1); hipMalloc(&this->device_cf[device], sz); hipMalloc(&this->device_df[device], sz); check_errors(__FILE__, __LINE__); // copy the bubble to the device, for which set the device pointers // to be the main-pointers this->f = this->device_f[device]; this->cf = this->device_cf[device]; this->df = this->device_df[device]; this->grid = grid->device_copies[device]; // allocate & copy the bubble to device hipMalloc(&this->device_copies[device], sizeof(Bubble)); hipMemcpy(this->device_copies[device], this, sizeof(Bubble), hipMemcpyHostToDevice); check_errors(__FILE__, __LINE__); } this->grid = grid; } Bubble::Bubble(int ibub, Grid1D *grid, double center[3], int lmax, int k, double *bf, double charge, StreamContainer *streamContainer) { this->initDeviceMemory(ibub, grid, center, lmax, k, charge, streamContainer); // set the host variables and register them for faster data transfer this->f = bf; /*hipHostRegister(this->f, 
sizeof(double)*(grid->ncell*(grid->nlip-1)+1)*(lmax+1)*(lmax+1), hipHostRegisterPortable); check_errors(__FILE__, __LINE__);*/ } Bubble::Bubble(int ibub, Grid1D *grid, double center[3], int lmax, int k, double charge, StreamContainer *streamContainer) { this->initDeviceMemory(ibub, grid, center, lmax, k, charge, streamContainer); } Bubble::Bubble(Bubble *old_bubble, int lmax, int k) { this->initDeviceMemory(old_bubble->ibub, old_bubble->grid, old_bubble->crd, lmax, old_bubble->k, old_bubble->charge, old_bubble->streamContainer); } /* * Uploads all bubble data to all devices (gpus) on all nodes. This kind of approach * is needed when injecting bubbles to cuda. With bubble-multiplication - the upload * -method is preferred. */ void Bubble::uploadAll(double *f, int lmax) { // set the host variables and register them for faster data transfer this->f = f; this->lmax = lmax; size_t host_pitch = (this->grid->ncell * (this->grid->nlip - 1) + 1) * sizeof(double); int ilmmax = (lmax+1)*(lmax+1); check_errors(__FILE__, __LINE__); Grid1D* host_grid = this->grid; // register the host array array //hipHostRegister(this->f, host_pitch * ilmmax, hipHostRegisterPortable); check_errors(__FILE__, __LINE__); double *device_f, *host_f; for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { this->streamContainer->setDevice(device); // get the preallocated device pointers device_f = this->device_f[device]; // NOTE: for all devices the first pointer points to the first value of each array host_f = this->f; for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream++) { int ilm_per_stream = ilmmax / this->streamContainer->getStreamsPerDevice() + (( ilmmax % this->streamContainer->getStreamsPerDevice()) > stream); // upload the stream data to device hipMemcpy2DAsync((void *) device_f, this->device_f_pitch[device], (void *) host_f, host_pitch, host_pitch, ilm_per_stream, hipMemcpyHostToDevice, *this->streamContainer->getStream(device, stream) ); check_errors(__FILE__, __LINE__); // add to the pointers device_f += ilm_per_stream * this->device_f_pitch[device] / sizeof(double); host_f += ilm_per_stream * host_pitch / sizeof(double); } // copy the bubble to the device, for which set the device pointers // to be the main-pointers this->f = this->device_f[device]; this->cf = this->device_cf[device]; this->df = this->device_df[device]; this->grid = host_grid->device_copies[device]; this->lmax = lmax; // copy the bubble to device hipMemcpyAsync(this->device_copies[device], this, sizeof(Bubble), hipMemcpyHostToDevice, *this->streamContainer->getStream(device, 0)); check_errors(__FILE__, __LINE__); this->f = f; this->grid = host_grid; } check_errors(__FILE__, __LINE__); this->streamContainer->synchronizeAllDevices(); // calculate the cf this->calculateCf(); // and synchronize the host with the device for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { this->uploaded_events[device] = this->streamContainer->recordDeviceEvent(device); } // we are not in any case downloading the data back, so we can unregister the array //hipHostUnregister(this->f); check_errors(__FILE__, __LINE__); } /* * Uploads part of a bubble to the device * * NOTE: in order to use this, the bubble uploaded (i.e., the f-array given as input) * must have the same lmax value as the Bubble-object we are uploading to. 
* NOTE: registers the input array but does not unregister it, thus after calling this * the user must unregister the f elsewhere, for instance by calling the unregister function. * NOTE: this function is designed to function together with the bubble multiplication */ void Bubble::upload(double *f, int lmax, bool register_host) { // set the host variables and register them for faster data transfer this->f = f; check_errors(__FILE__, __LINE__); this->lmax = lmax; int ilmmax = (lmax + 1) * (lmax + 1); // calculate the total number of points in the bubbles each l,m -pair, int total_point_count = this->grid->ncell * (this->grid->nlip - 1) +1; Grid1D* host_grid = this->grid; // register the host array, if not explicitly telling not to /*if (register_host) { hipHostRegister(this->f, sizeof(double)*ilmmax*total_point_count, hipHostRegisterPortable); check_errors(__FILE__, __LINE__); }*/ // store the processor variables to be used at downloading time this->processor_order_number = processor_order_number; this->number_of_processors = number_of_processors; size_t host_pitch = total_point_count * sizeof(double); // determine how many of the points belong to the current mpi-node int processor_point_count = total_point_count / this->number_of_processors + ((total_point_count % number_of_processors) > processor_order_number); // get the offset to the f-array caused by other processors int remainder = total_point_count % this->number_of_processors; int offset = processor_order_number * total_point_count / number_of_processors + ((remainder < processor_order_number) ? remainder : processor_order_number); double *device_f; double *host_f = &this->f[offset]; for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { this->streamContainer->setDevice(device); // get the preallocated device pointers, // NOTE: The memory of bubble is allocated for its entire // length, thus we have to go to the part we want to upload device_f = this->device_f[device]; device_f = &device_f[offset]; // detemine how many of the mpi-nodes points belong to this device (gpu) int device_point_count = processor_point_count / this->streamContainer->getNumberOfDevices() + ((processor_point_count % this->streamContainer->getNumberOfDevices()) > device); for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) { // detemine the number of the points handled by this stream int stream_point_count = device_point_count / this->streamContainer->getStreamsPerDevice() + ((device_point_count % this->streamContainer->getStreamsPerDevice()) > stream); // upload the data to device, copy all ilmmax-rows for stream_point_count columns hipMemcpy2DAsync((void *) device_f, this->device_f_pitch[device], (void *) host_f, host_pitch, stream_point_count * sizeof(double), ilmmax, hipMemcpyHostToDevice, *this->streamContainer->getStream(device, stream) ); check_errors(__FILE__, __LINE__); offset += stream_point_count; device_f += stream_point_count; host_f += stream_point_count; } // copy the bubble to the device, for which set the device pointers // to be the main-pointers this->f = this->device_f[device]; this->cf = this->device_cf[device]; this->df = this->device_df[device]; this->grid = host_grid->device_copies[device]; this->lmax = lmax; // copy the bubble to device hipMemcpyAsync(this->device_copies[device], this, sizeof(Bubble), hipMemcpyHostToDevice, *this->streamContainer->getStream(device, 0)); check_errors(__FILE__, __LINE__); this->f = f; this->grid = host_grid; } // and synchronize the host 
with the device for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { this->uploaded_events[device] = this->streamContainer->recordDeviceEvent(device); } } void Bubble::waitBubbleUploaded(int device, hipStream_t *stream) { hipStreamWaitEvent(*stream, *this->uploaded_events[device], 0); } void Bubble::waitBubbleUploaded(int device) { hipStreamWaitEvent(0, *this->uploaded_events[device], 0); } /* * Sets bubble values to zero * * NOTE: in order to use this, the bubble uploaded (i.e., the f-array given as input) * must have the same lmax value as the Bubble-object we are uploading to. * NOTE: registers the input array but does not unregister it, thus after calling this * the user must unregister the f elsewhere, for instance by calling the unregister function. * NOTE: this function is designed to function together with the bubble multiplication */ void Bubble::setToZero() { // set the host variables and register them for faster data transfer this->f = f; check_errors(__FILE__, __LINE__); int ilmmax = (this->device_memory_lmax + 1) * (this->device_memory_lmax + 1); // calculate the total number of points in the bubbles each l,m -pair, int total_point_count = this->grid->ncell * (this->grid->nlip - 1) +1; // determine how many of the points belong to the current mpi-node int processor_point_count = total_point_count / this->number_of_processors + ((total_point_count % this->number_of_processors) > this->processor_order_number); // get the offset to the f-array caused by other processors int remainder = total_point_count % this->number_of_processors; int offset = this->processor_order_number * total_point_count / this->number_of_processors + ((remainder < this->processor_order_number) ? remainder : this->processor_order_number); double *device_f; for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { this->streamContainer->setDevice(device); // get the preallocated device pointers, // NOTE: The memory of bubble is allocated for its entire // length, thus we have to go to the part we want to upload device_f = this->device_f[device]; device_f = &device_f[offset]; // detemine how many of the mpi-nodes points belong to this device (gpu) int device_point_count = processor_point_count / this->streamContainer->getNumberOfDevices() + ((processor_point_count % this->streamContainer->getNumberOfDevices()) > device); for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) { // detemine the number of the points handled by this stream int stream_point_count = device_point_count / this->streamContainer->getStreamsPerDevice() + ((device_point_count % this->streamContainer->getStreamsPerDevice()) > stream); // upload the data to device, copy all ilmmax-rows for stream_point_count columns hipMemset2DAsync((void *) device_f, this->device_f_pitch[device], 0, stream_point_count * sizeof(double), ilmmax, *this->streamContainer->getStream(device, stream) ); check_errors(__FILE__, __LINE__); offset += stream_point_count; device_f += stream_point_count; } } } /* * Downloads part of a bubble from the device. Downloads to host exactly the same * part as the upload function above uploads to device. 
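 *
 * A worked example of how that part is selected (illustrative numbers only): with
 * total_point_count = 10 and number_of_processors = 3, processor p handles
 *
 *   point_count(p) = 10 / 3 + ((10 % 3) > p)          ->  4, 3, 3  points
 *   offset(p)      = p * 10 / 3 + min(10 % 3, p)      ->  0, 4, 7
 *
 * so in this example the processors cover disjoint, contiguous slices of the radial grid,
 * and the same slices are used by upload, setToZero and download.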
* * NOTE: this function is designed to function together with the bubble multiplication & * summation */ void Bubble::download(int lmax) { // calculate the total number of points in the bubbles each l,m -pair, int total_point_count = this->grid->ncell * (this->grid->nlip - 1) +1; size_t host_pitch = total_point_count * sizeof(double); int ilmmax = (lmax + 1) * (lmax + 1); // determine how many of the points belong to the current mpi-node int processor_point_count = total_point_count / this->number_of_processors + ((total_point_count % this->number_of_processors) > this->processor_order_number); // get the offset to the f-array caused by other processors int remainder = total_point_count % this->number_of_processors; int offset = this->processor_order_number * total_point_count / this->number_of_processors + ((remainder < this->processor_order_number) ? remainder : this->processor_order_number); double *device_f; double *host_f = &this->f[offset]; check_errors(__FILE__, __LINE__); for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { this->streamContainer->setDevice(device); // get the preallocated device pointers, // NOTE: The memory of bubble is allocated for its entire // length, thus we have to go to the part we want to upload device_f = this->device_f[device]; device_f = &device_f[offset]; // detemine how many of the mpi-nodes points belong to this device (gpu) int device_point_count = processor_point_count / this->streamContainer->getNumberOfDevices() + ((processor_point_count % this->streamContainer->getNumberOfDevices()) > device); for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) { // detemine the number of the points handled by this stream int stream_point_count = device_point_count / this->streamContainer->getStreamsPerDevice() + ((device_point_count % this->streamContainer->getStreamsPerDevice()) > stream); // upload the data to device, copy all ilmmax-rows for stream_point_count columns hipMemcpy2DAsync((void *) host_f, host_pitch, (void *) device_f, this->device_f_pitch[device], stream_point_count * sizeof(double), ilmmax, hipMemcpyDeviceToHost, *this->streamContainer->getStream(device, stream) ); check_errors(__FILE__, __LINE__); offset += stream_point_count; device_f += stream_point_count; host_f += stream_point_count; check_errors(__FILE__, __LINE__); } } } /* * Adds together the f-values of 'this' and input bubble 'bubble' * * NOTE: this function is designed to function together with the bubble multiplication * NOTE: this function assumes that the bubbles have identical grids and with that, * identical f_pitches */ void Bubble::add(Bubble *bubble) { // make sure that the k-values of the input functions are the same // this is done by decreasing the larger k-value to be equal // with the smaller check_errors(__FILE__, __LINE__); if (this->k > bubble->k) { this->decreaseK(this->k - bubble->k); } else if (this->k < bubble->k) { bubble->decreaseK(bubble->k - this->k); } check_errors(__FILE__, __LINE__); // calculate the total number of points in the bubbles each l,m -pair, int total_point_count = this->grid->ncell * (this->grid->nlip - 1) +1; int smaller_lmax = min(this->lmax, bubble->lmax); // determine how many of the points belong to the current mpi-node int processor_point_count = total_point_count / this->number_of_processors + ((total_point_count % this->number_of_processors) > this->processor_order_number); // get the offset to the f-array caused by other processors int remainder = total_point_count 
% this->number_of_processors; int offset = this->processor_order_number * total_point_count / this->number_of_processors + ((remainder < this->processor_order_number) ? remainder : this->processor_order_number); double *device_f; double *device_f1; int block_size = 256; for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { this->streamContainer->setDevice(device); this->waitBubbleUploaded(device); bubble->waitBubbleUploaded(device); // get the preallocated device pointers, // NOTE: The memory of bubble is allocated for its entire // length, thus we have to go to the part we want to upload device_f = this->device_f[device]; device_f = &device_f[offset]; device_f1 = bubble->device_f[device]; device_f1 = &device_f1[offset]; // detemine how many of the mpi-nodes points belong to this device (gpu) int device_point_count = processor_point_count / this->streamContainer->getNumberOfDevices() + ((processor_point_count % this->streamContainer->getNumberOfDevices()) > device); for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) { // detemine the number of the points handled by this stream int stream_point_count = device_point_count / this->streamContainer->getStreamsPerDevice() + ((device_point_count % this->streamContainer->getStreamsPerDevice()) > stream); int grid_size = (stream_point_count + block_size - 1) / block_size; // call the kernel hipLaunchKernelGGL(( Bubble_sum_kernel) , dim3(grid_size), dim3(block_size), 0, *this->streamContainer->getStream(device, stream), device_f, device_f1, smaller_lmax, stream_point_count, this->device_f_pitch[device]); check_errors(__FILE__, __LINE__); // add the device pointers and the offset offset += stream_point_count; device_f += stream_point_count; device_f1 += stream_point_count; } } } /* * Decreases the k-value of a bubble by k_decrease * * NOTE: this function is designed to function together with the bubble multiplication * NOTE: this function assumes that the bubbles have identical grids and with that, * identical f_pitches */ void Bubble::decreaseK(int k_decrease) { // calculate the total number of points in the bubbles each l,m -pair, int total_point_count = this->grid->ncell * (this->grid->nlip - 1) +1; // determine how many of the points belong to the current mpi-node int processor_point_count = total_point_count / this->number_of_processors + ((total_point_count % this->number_of_processors) > this->processor_order_number); // get the offset to the f-array caused by other processors int remainder = total_point_count % this->number_of_processors; int offset = this->processor_order_number * total_point_count / this->number_of_processors + ((remainder < this->processor_order_number) ? 
remainder : this->processor_order_number); double *device_f; double *device_r; int block_size = 256; for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { this->streamContainer->setDevice(device); this->waitBubbleUploaded(device); // get the preallocated device pointers, // NOTE: The memory of bubble is allocated for its entire // length, thus we have to go to the part we want to upload device_f = this->device_f[device]; device_f = &device_f[offset]; device_r = this->grid->device_gridpoints[device]; device_r = &device_r[offset]; // detemine how many of the mpi-nodes points belong to this device (gpu) int device_point_count = processor_point_count / this->streamContainer->getNumberOfDevices() + ((processor_point_count % this->streamContainer->getNumberOfDevices()) > device); for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) { // detemine the number of the points handled by this stream int stream_point_count = device_point_count / this->streamContainer->getStreamsPerDevice() + ((device_point_count % this->streamContainer->getStreamsPerDevice()) > stream); int grid_size = (stream_point_count + block_size - 1) / block_size; // call the kernel hipLaunchKernelGGL(( Bubble_decrease_k_kernel) , dim3(grid_size), dim3(block_size), 0, *this->streamContainer->getStream(device, stream), device_f, device_r, k_decrease, this->lmax, stream_point_count, this->device_f_pitch[device]); check_errors(__FILE__, __LINE__); // add the device pointers and the offset offset += stream_point_count; device_f += stream_point_count; device_r += stream_point_count; } } } /* * Integrates over the bubble. We only need to integrate over the s-bubble. */ double Bubble::integrate() { // calculate the total number of points in the bubbles each l,m -pair, int total_point_count = this->grid->getShape(); // check if the integrator has been inited, if not, init it if (!this->integrator) { this->integrator = new Integrator1D(this->streamContainer, this->grid, this->processor_order_number, this->number_of_processors); } // upload the l,m=0 radial function f to the integrator this->integrator->upload(this->f); check_errors(__FILE__, __LINE__); // determine how many of the points belong to the current mpi-node int processor_point_count = total_point_count / this->number_of_processors + ((total_point_count % this->number_of_processors) > this->processor_order_number); // get the offset to the f-array caused by other processors int remainder = total_point_count % this->number_of_processors; int offset = this->processor_order_number * total_point_count / this->number_of_processors + ((remainder < this->processor_order_number) ? 
remainder : this->processor_order_number); // get the partial s-bubble device vectors residing now in the integrators device memory double **device_vectors = this->integrator->getDeviceVectors(); double *device_vector; double *device_r; // multiply the integration vector with r^(2+this->k) // get the times we have to multiply the vector with r, i.e., 2+this->k // NOTE: this must be larger or equal to zero int k_change = 2 + this->k; if (k_change > 0) { int block_size = 256; for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { this->streamContainer->setDevice(device); // get the preallocated device pointers, // NOTE: The memory of gridpoints is allocated for its entire // length, thus we have to go to the part we want to upload // however, the integrator only has the memory it needs, the we don't need to // offset the device_vector device_vector = device_vectors[device]; device_r = this->grid->device_gridpoints[device]; device_r = &device_r[offset]; // detemine how many of the mpi-nodes points belong to this device (gpu) int device_point_count = processor_point_count / this->streamContainer->getNumberOfDevices() + ((processor_point_count % this->streamContainer->getNumberOfDevices()) > device); for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) { // detemine the number of the points handled by this stream int stream_point_count = device_point_count / this->streamContainer->getStreamsPerDevice() + ((device_point_count % this->streamContainer->getStreamsPerDevice()) > stream); int grid_size = (stream_point_count + block_size - 1) / block_size; // call the decrease_k- kernel by using lmax = 0 hipLaunchKernelGGL(( Bubble_decrease_k_kernel) , dim3(grid_size), dim3(block_size), 0, *this->streamContainer->getStream(device, stream), device_vector, device_r, k_change, 0, stream_point_count, 0); check_errors(__FILE__, __LINE__); // add the device pointers and the offset offset += stream_point_count; device_vector += stream_point_count; device_r += stream_point_count; check_errors(__FILE__, __LINE__); } } } else if (k_change < 0) { printf("Invalid k-value (%d) at bubble-integrate, must be larger or equal with -2. 
At file '%s', line number %d", this->k, __FILE__, __LINE__); exit(-1); } return FOURPI_ * this->integrator->integrate(); // } void Bubble::registerHost(double *f) { check_errors(__FILE__, __LINE__); this->f = f; /*int ilmmax = (this->lmax + 1) * (this->lmax + 1); // calculate the total number of points in the bubbles each l,m -pair, int total_point_count = this->grid->ncell * (this->grid->nlip - 1) +1; hipHostRegister(this->f, sizeof(double)*ilmmax*total_point_count, hipHostRegisterPortable);*/ check_errors(__FILE__, __LINE__); } void Bubble::destroy() { //this->grid->destroy(); //check_errors(__FILE__, __LINE__); //delete this->grid; this->grid = NULL; for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device++) { this->streamContainer->setDevice(device); hipFree(this->device_f[device]); check_errors(__FILE__, __LINE__); hipFree(this->device_cf[device]); check_errors(__FILE__, __LINE__); hipFree(this->device_df[device]); check_errors(__FILE__, __LINE__); hipFree(this->device_copies[device]); check_errors(__FILE__, __LINE__); } delete[] this->device_copies; delete[] this->device_f; delete[] this->device_df; delete[] this->device_f_pitch; delete[] this->device_cf; delete[] this->uploaded_events; // check if integrator is null pointer, if not // delete the integrator if (this->integrator) { this->integrator->destroy(); delete this->integrator; this->integrator = NULL; } check_errors(__FILE__, __LINE__); //hipHostUnregister(this); } /* * Set MPI-configuration used by the bubble object. */ void Bubble::setProcessorConfiguration( int processor_order_number, int number_of_processors) { this->number_of_processors = number_of_processors; this->processor_order_number = processor_order_number; } /************************************************************** * Bubbles-implementation * **************************************************************/ int Bubbles::getBubbleCount() { return this->nbub; } Bubbles::Bubbles(int nbub) { this->nbub = nbub; this->bubbles = new Bubble*[nbub]; this->is_sub_bubbles = false; } /* * Init new Bubbles by making a copy of the old. * * NOTE: This makes a deep copy of the old bubbles, meaning that * new memory places are allocated for the underlying Bubble objects. */ Bubbles::Bubbles(Bubbles *old_bubbles, int lmax, int k) { this->is_sub_bubbles = false; this->nbub = old_bubbles->nbub; this->bubbles = new Bubble*[nbub]; for (int i = 0; i < old_bubbles->getBubbleCount(); i++) { this->bubbles[i] = new Bubble(old_bubbles->bubbles[i], lmax, k); } } /* * Get new bubbles object containing some of the original bubbles. * The bubbles selected in the new objects are the ones with * the ibub values matching to those in input parameter 'ibubs'. * NOTE: this function makes a shallow copy of the input bubbles 'this', * i.e., the underlying Bubble objects are copied as references only */ Bubbles *Bubbles::getSubBubbles(int *ibubs, int nbub) { Bubbles *new_bubbles = new Bubbles(nbub); new_bubbles->is_sub_bubbles = true; // copy the references to the wanted Bubble-objects specified // in ibubs for (int i = 0; i < new_bubbles->getBubbleCount(); i++) { new_bubbles->bubbles[i] = this->getBubble(ibubs[i]); } return new_bubbles; } /* * Get the pointer to the Bubble with local order number 'i' equal to * input parameter 'i'. If not found NULL is returned. 
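 *
 * (Example, with hypothetical numbers: for a sub-Bubbles created via getSubBubbles with
 * ibubs = {5, 2}, local order number i = 0 returns the Bubble whose global order number
 * is 5, and i = 1 the one whose global order number is 2.)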
 *
 * @param i - The local order number of the bubble
 */
Bubble *Bubbles::getBubbleWithLocalOrderNumber(int i) {
    if (i < this->nbub) {
        return this->bubbles[i];
    }
    return NULL;
}

/*
 * Get the pointer to the Bubble with global order number 'ibub' equal to
 * input parameter 'ibub'. If not found, NULL is returned.
 *
 * @param ibub - The global order number of the bubble
 */
Bubble *Bubbles::getBubble(int ibub) {
    for (int i = 0; i < this->getBubbleCount(); i ++) {
        if (this->bubbles[i]->ibub == ibub) {
            return this->bubbles[i];
        }
    }
    return NULL;
}

/*
 * Check if the Bubbles contains a Bubble with global order number 'ibub'.
 *
 * @param ibub - The global order number of the bubble
 */
bool Bubbles::containsBubble(int ibub) {
    Bubble *bubble = this->getBubble(ibub);
    return (bubble != NULL);
}

/*
 * Init a bubble with global order number 'ibub' to the 'i':th slot in the
 * internal bubbles array. This variant also takes the values of the bubble.
 *
 * @param grid            - The grid used in the bubble
 * @param i               - The internal order number of the bubble
 * @param ibub            - The global order number of the bubble
 * @param center          - The global center point of the bubble
 * @param lmax            - The maximum value of quantum number 'l' for the bubble
 * @param k               - The parameter k for the r^k multiplier of the values
 * @param bf              - The values of the bubble
 * @param charge          - The charge of the atom at the center of the bubble
 * @param streamContainer - The container holding the streams used in CUDA evaluation of anything
 *                          related to this object
 */
void Bubbles::initBubble(Grid1D *grid, int i, int ibub, double center[3], int lmax, int k,
                         double *bf, double charge, StreamContainer *streamContainer) {
    this->bubbles[i] = new Bubble(ibub, grid, center, lmax, k, bf, charge, streamContainer);
}

/*
 * Init a bubble with global order number 'ibub' to the 'i':th slot in the
 * internal bubbles array. This variant does not take the values of the bubble.
 *
 * @param grid            - The grid used in the bubble
 * @param i               - The internal order number of the bubble
 * @param ibub            - The global order number of the bubble
 * @param center          - The global center point of the bubble
 * @param lmax            - The maximum value of quantum number 'l' for the bubble
 * @param k               - The parameter k for the r^k multiplier of the values
 * @param charge          - The charge of the atom at the center of the bubble
 * @param streamContainer - The container holding the streams used in CUDA evaluation of anything
 *                          related to this object
 */
void Bubbles::initBubble(Grid1D *grid, int i, int ibub, double center[3], int lmax, int k,
                         double charge, StreamContainer *streamContainer) {
    check_errors(__FILE__, __LINE__);
    this->bubbles[i] = new Bubble(ibub, grid, center, lmax, k, charge, streamContainer);
}

void Bubbles::unregister() {
    /*for (int ibub = 0; ibub < this->getBubbleCount(); ibub ++) {
        hipHostUnregister(this->getBubble(ibub)->f);
        check_errors(__FILE__, __LINE__);
    }*/
}

void Bubbles::waitBubblesUploaded(int device) {
    for (int i = 0; i < this->getBubbleCount(); i ++) {
        this->bubbles[i]->waitBubbleUploaded(device);
    }
}

/*
 * Set the MPI configuration used by all Bubble objects of this Bubbles collection.
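 *
 * A minimal usage sketch (the rank/size variables are hypothetical, e.g. obtained from
 * MPI_Comm_rank and MPI_Comm_size by the caller):
 *
 *   bubbles->setProcessorConfiguration(rank, size);
 *
 * after which every contained Bubble uploads/downloads only its own slice of the radial grid.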
*/ void Bubbles::setProcessorConfiguration( int processor_order_number, int number_of_processors) { for (int i = 0; i < this->getBubbleCount(); i ++) { this->bubbles[i]->setProcessorConfiguration(processor_order_number, number_of_processors); } } double Bubbles::integrate() { double result = 0.0; for (int i = 0; i < this->getBubbleCount(); i ++) { result += this->getBubbleWithLocalOrderNumber(i)->integrate(); } return result; } void Bubbles::download() { for (int i = 0; i < this->getBubbleCount(); i ++) { this->bubbles[i]->download(this->bubbles[i]->lmax); } } void Bubbles::add(Bubbles *bubbles) { // go through all the Bubble-objects present in this for (int i = 0; i < bubbles->getBubbleCount(); i ++) { // get the matching bubble in the added bubbles Bubble * bubble = bubbles->getBubble(this->bubbles[i]->ibub); // if the corresponding Bubble exists in both the Bubbles, do the add if (bubble) { this->bubbles[i]->add(bubble); } } check_errors(__FILE__, __LINE__); } void Bubbles::destroy() { if (!this->is_sub_bubbles) { for (int ibub = 0; ibub < this->getBubbleCount(); ibub ++) { this->bubbles[ibub]->destroy(); delete this->bubbles[ibub]; } } delete[] this->bubbles; } void Bubbles::inject(Grid3D *grid3d, CudaCube *cube, int lmin, CudaCube *gradients_cube_x, CudaCube *gradients_cube_y, CudaCube *gradients_cube_z, bool evaluate_value, bool evaluate_gradients_x, bool evaluate_gradients_y, bool evaluate_gradients_z) { check_errors(__FILE__, __LINE__); int total_slice_count = cube->getShape(Z_); // the minimum l is 0 always in the multiplication int device_slice_count; // get the pointer arrays from the cubes double **device_cubes = cube->getDeviceCubes(); double **device_gradients_x, **device_gradients_y, **device_gradients_z; // get the device gradient result pointers if (evaluate_gradients_x) device_gradients_x = gradients_cube_x->getDeviceCubes(); if (evaluate_gradients_y) device_gradients_y = gradients_cube_y->getDeviceCubes(); if (evaluate_gradients_z) device_gradients_z = gradients_cube_z->getDeviceCubes(); size_t *device_pitches = cube->getDevicePitches(); int *device_memory_shape = cube->getDeviceMemoryShape(); int slice_offset = 0; Bubble *bubble; StreamContainer *streamContainer = cube->getStreamContainer(); // copy the cubes to the device & execute the kernels for (int device = 0; device < streamContainer->getNumberOfDevices(); device ++) { // set the used device (gpu) streamContainer->setDevice(device); double *dev_cube = device_cubes[device]; double *dev_gradient_x, *dev_gradient_y, *dev_gradient_z; // get the gradient addresses for the device if (evaluate_gradients_x) dev_gradient_x = device_gradients_x[device]; if (evaluate_gradients_y) dev_gradient_y = device_gradients_y[device]; if (evaluate_gradients_z) dev_gradient_z = device_gradients_z[device]; // calculate the number of vectors this device handles device_slice_count = total_slice_count / streamContainer->getNumberOfDevices() + ((total_slice_count % streamContainer->getNumberOfDevices()) > device); for (int stream = 0; stream < streamContainer->getStreamsPerDevice(); stream++) { // determine the count of vectors handled by this stream int slice_count = device_slice_count / streamContainer->getStreamsPerDevice() + ((device_slice_count % streamContainer->getStreamsPerDevice()) > stream); check_errors(__FILE__, __LINE__); // get the launch configuration for the f1-inject dim3 block, grid; cube->getLaunchConfiguration(&grid, &block, slice_count, INJECT_BLOCK_SIZE); if (slice_count > 0) { // inject bubbles to the cube for (int i = 
0; i < this->getBubbleCount(); i++) { bubble = this->getBubbleWithLocalOrderNumber(i); // wait that the bubble is uploaded to the device before starting if (stream == 0) bubble->waitBubbleUploaded(device); // call the kernel if (lmin == 0) { if (evaluate_gradients_x && evaluate_gradients_y && evaluate_gradients_z) { if (evaluate_value) { hipLaunchKernelGGL(( Bubbles_evaluate_grid_gradients < true, true, true, true, true>) , dim3(grid), dim3(block), INJECT_BLOCK_SIZE * sizeof(double) * 8, *streamContainer->getStream(device, stream) , bubble->device_copies[device], dev_cube, dev_gradient_x, dev_gradient_y, dev_gradient_z, grid3d->axis[X_]->device_gridpoints[device], grid3d->axis[Y_]->device_gridpoints[device], grid3d->axis[Z_]->device_gridpoints[device], grid3d->shape[X_], grid3d->shape[Y_], grid3d->shape[Z_], bubble->crd[X_], bubble->crd[Y_], bubble->crd[Z_], bubble->k, slice_offset, device_pitches[device], device_memory_shape[Y_], slice_count, 1.0); } else { hipLaunchKernelGGL(( Bubbles_evaluate_grid_gradients < true, false, true, true, true>) , dim3(grid), dim3(block), INJECT_BLOCK_SIZE * sizeof(double) * 8, *streamContainer->getStream(device, stream) , bubble->device_copies[device], dev_cube, dev_gradient_x, dev_gradient_y, dev_gradient_z, grid3d->axis[X_]->device_gridpoints[device], grid3d->axis[Y_]->device_gridpoints[device], grid3d->axis[Z_]->device_gridpoints[device], grid3d->shape[X_], grid3d->shape[Y_], grid3d->shape[Z_], bubble->crd[X_], bubble->crd[Y_], bubble->crd[Z_], bubble->k, slice_offset, device_pitches[device], device_memory_shape[Y_], slice_count, 1.0); } } else if (evaluate_gradients_x) { hipLaunchKernelGGL(( Bubbles_evaluate_grid_gradients < true, false, true, false, false>) , dim3(grid), dim3(block), INJECT_BLOCK_SIZE * sizeof(double) * 8, *streamContainer->getStream(device, stream) , bubble->device_copies[device], dev_cube, dev_gradient_x, dev_gradient_y, dev_gradient_z, grid3d->axis[X_]->device_gridpoints[device], grid3d->axis[Y_]->device_gridpoints[device], grid3d->axis[Z_]->device_gridpoints[device], grid3d->shape[X_], grid3d->shape[Y_], grid3d->shape[Z_], bubble->crd[X_], bubble->crd[Y_], bubble->crd[Z_], bubble->k, slice_offset, device_pitches[device], device_memory_shape[Y_], slice_count, 1.0); } else if (evaluate_gradients_y) { hipLaunchKernelGGL(( Bubbles_evaluate_grid_gradients < true, false, false, true, false>) , dim3(grid), dim3(block), INJECT_BLOCK_SIZE * sizeof(double) * 8, *streamContainer->getStream(device, stream) , bubble->device_copies[device], dev_cube, dev_gradient_x, dev_gradient_y, dev_gradient_z, grid3d->axis[X_]->device_gridpoints[device], grid3d->axis[Y_]->device_gridpoints[device], grid3d->axis[Z_]->device_gridpoints[device], grid3d->shape[X_], grid3d->shape[Y_], grid3d->shape[Z_], bubble->crd[X_], bubble->crd[Y_], bubble->crd[Z_], bubble->k, slice_offset, device_pitches[device], device_memory_shape[Y_], slice_count, 1.0); } else if (evaluate_gradients_z) { hipLaunchKernelGGL(( Bubbles_evaluate_grid_gradients < true, false, false, false, true>) , dim3(grid), dim3(block), INJECT_BLOCK_SIZE * sizeof(double) * 8, *streamContainer->getStream(device, stream) , bubble->device_copies[device], dev_cube, dev_gradient_x, dev_gradient_y, dev_gradient_z, grid3d->axis[X_]->device_gridpoints[device], grid3d->axis[Y_]->device_gridpoints[device], grid3d->axis[Z_]->device_gridpoints[device], grid3d->shape[X_], grid3d->shape[Y_], grid3d->shape[Z_], bubble->crd[X_], bubble->crd[Y_], bubble->crd[Z_], bubble->k, slice_offset, device_pitches[device], 
device_memory_shape[Y_], slice_count, 1.0); } else if (evaluate_value) { hipLaunchKernelGGL(( Bubbles_evaluate_grid_pitched) , dim3(grid), dim3(block), INJECT_BLOCK_SIZE * sizeof(double) * 8, *streamContainer->getStream(device, stream) , bubble->device_copies[device], dev_cube, grid3d->axis[X_]->device_gridpoints[device], grid3d->axis[Y_]->device_gridpoints[device], grid3d->axis[Z_]->device_gridpoints[device], grid3d->shape[X_], grid3d->shape[Y_], grid3d->shape[Z_], bubble->crd[X_], bubble->crd[Y_], bubble->crd[Z_], bubble->k, slice_offset, device_pitches[device], device_memory_shape[Y_], slice_count, 1.0); } } else if (evaluate_value) { hipLaunchKernelGGL(( Bubbles_evaluate_grid_lmin) , dim3(grid), dim3(block), INJECT_BLOCK_SIZE * sizeof(double) * 8, *streamContainer->getStream(device, stream) , bubble->device_copies[device], dev_cube, grid3d->axis[X_]->device_gridpoints[device], grid3d->axis[Y_]->device_gridpoints[device], grid3d->axis[Z_]->device_gridpoints[device], grid3d->shape[X_], grid3d->shape[Y_], grid3d->shape[Z_], bubble->crd[X_], bubble->crd[Y_], bubble->crd[Z_], bubble->k, slice_offset, device_pitches[device], device_memory_shape[Y_], slice_count, lmin, 1.0); } check_errors(__FILE__, __LINE__); } } // increase the address by the number of vectors in this array if (evaluate_value) dev_cube += slice_count * device_pitches[device] / sizeof(double) * device_memory_shape[Y_]; if (evaluate_gradients_x) dev_gradient_x += slice_count * device_pitches[device] / sizeof(double) * device_memory_shape[Y_]; if (evaluate_gradients_y) dev_gradient_y += slice_count * device_pitches[device] / sizeof(double) * device_memory_shape[Y_]; if (evaluate_gradients_z) dev_gradient_z += slice_count * device_pitches[device] / sizeof(double) * device_memory_shape[Y_]; slice_offset += slice_count; } } } /************************************************************** * BubblesEvaluator function implementations * **************************************************************/ /* * Evaluate the bubbles at preset points. The results are stored in the device memory. 
 *
 * @param gradient_direction - possible values: X_ = 0, Y_ = 1, Z_ = 2, or 3 for all of (X_, Y_, Z_);
 *                             any other value: no gradients are evaluated
 */
void BubblesEvaluator::evaluatePoints(Points *result_points, Points *gradient_points_x, Points *gradient_points_y, Points *gradient_points_z, int gradient_direction) {
    int warp_size = 32;
    int total_warp_count = result_points->point_coordinates->number_of_points / warp_size + ((result_points->point_coordinates->number_of_points % warp_size) > 0);
    int point_offset = 0;

    for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
        this->streamContainer->setDevice(device);

        // allocate space for device results and device points
        int device_warp_count = total_warp_count / this->streamContainer->getNumberOfDevices()
                                + ((total_warp_count % this->streamContainer->getNumberOfDevices()) > device);
        int device_point_count = device_warp_count * warp_size;
        int device_point_offset = 0;
        check_errors(__FILE__, __LINE__);

        // get the pointers to the device points & results
        double *device_points_ptr = result_points->point_coordinates->device_coordinates[device];
        double *device_results_ptr = result_points->device_values[device];
        double *device_gradients_x_ptr = NULL;
        double *device_gradients_y_ptr = NULL;
        double *device_gradients_z_ptr = NULL;

        if (gradient_direction == 3) {
            device_gradients_x_ptr = gradient_points_x->device_values[device];
            device_gradients_y_ptr = gradient_points_y->device_values[device];
            device_gradients_z_ptr = gradient_points_z->device_values[device];
        } else if (gradient_direction < 3 && gradient_direction >= 0) {
            device_gradients_x_ptr = result_points->device_values[device];
            device_gradients_y_ptr = result_points->device_values[device];
            device_gradients_z_ptr = result_points->device_values[device];
        }

        for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) {
            // get the number of points that are in the responsibility of this stream
            int stream_warp_count = device_warp_count / this->streamContainer->getStreamsPerDevice()
                                    + ((device_warp_count % streamContainer->getStreamsPerDevice()) > stream);
            int stream_point_count = stream_warp_count * warp_size;

            // make sure that the last stream does not go over board
            if (stream_point_count + point_offset > result_points->point_coordinates->number_of_points) {
                stream_point_count = result_points->point_coordinates->number_of_points - point_offset;
            }
            check_errors(__FILE__, __LINE__);

            if (stream_point_count > 0) {
                for (int i = 0; i < this->bubbles->getBubbleCount(); i++) {
                    Bubble *bubble = this->bubbles->getBubbleWithLocalOrderNumber(i);

                    // wait that the bubble is uploaded before calling the kernel
                    if (stream == 0) bubble->waitBubbleUploaded(device);

                    int grid_size = (stream_point_count + INJECT_BLOCK_SIZE - 1) / INJECT_BLOCK_SIZE;
                    //printf("ibub: %d, device: %d, stream: %d, grid_size: %d, block_size: %d, stream_point_count: %d, device_point_offset: %d, device_point_count: %d, point_count: %d\n",
                    //       ibub, device, stream, grid_size, INJECT_BLOCK_SIZE, stream_point_count, device_point_offset, device_point_count, this->point_count);

                    if (gradient_direction == X_) {
                        hipLaunchKernelGGL(( Bubbles_evaluate_gradient_points <true, false, true, false, false>) , dim3(grid_size), dim3(INJECT_BLOCK_SIZE), INJECT_BLOCK_SIZE * sizeof(double) * 7, *this->streamContainer->getStream(device, stream) ,
                            bubble->device_copies[device],
                            device_results_ptr,
                            device_gradients_x_ptr,
                            device_gradients_y_ptr,
                            device_gradients_z_ptr,
                            device_points_ptr,
                            device_point_count,
                            bubble->crd[X_],
bubble->crd[Y_], bubble->crd[Z_], bubble->k, stream_point_count, device_point_offset, 1.0 ); } else if (gradient_direction == Y_) { hipLaunchKernelGGL(( Bubbles_evaluate_gradient_points <true, false, false, true, false>) , dim3(grid_size), dim3(INJECT_BLOCK_SIZE), INJECT_BLOCK_SIZE * sizeof(double) * 7, *this->streamContainer->getStream(device, stream) , bubble->device_copies[device], device_results_ptr, device_gradients_x_ptr, device_gradients_y_ptr, device_gradients_z_ptr, device_points_ptr, device_point_count, bubble->crd[X_], bubble->crd[Y_], bubble->crd[Z_], bubble->k, stream_point_count, device_point_offset, 1.0 ); } else if (gradient_direction == Z_) { hipLaunchKernelGGL(( Bubbles_evaluate_gradient_points <true, false, false, false, true>) , dim3(grid_size), dim3(INJECT_BLOCK_SIZE), INJECT_BLOCK_SIZE * sizeof(double) * 7, *this->streamContainer->getStream(device, stream) , bubble->device_copies[device], device_results_ptr, device_gradients_x_ptr, device_gradients_y_ptr, device_gradients_z_ptr, device_points_ptr, device_point_count, bubble->crd[X_], bubble->crd[Y_], bubble->crd[Z_], bubble->k, stream_point_count, device_point_offset, 1.0 ); } else if (gradient_direction == 3) { hipLaunchKernelGGL(( Bubbles_evaluate_gradient_points <true, true, true, true, true>) , dim3(grid_size), dim3(INJECT_BLOCK_SIZE), INJECT_BLOCK_SIZE * sizeof(double) * 7, *this->streamContainer->getStream(device, stream) , bubble->device_copies[device], device_results_ptr, device_gradients_x_ptr, device_gradients_y_ptr, device_gradients_z_ptr, device_points_ptr, device_point_count, bubble->crd[X_], bubble->crd[Y_], bubble->crd[Z_], bubble->k, stream_point_count, device_point_offset, 1.0 ); } else { hipLaunchKernelGGL(( Bubbles_evaluate_points_simple) , dim3(grid_size), dim3(INJECT_BLOCK_SIZE), INJECT_BLOCK_SIZE * sizeof(double) * 7, *this->streamContainer->getStream(device, stream) , bubble->device_copies[device], device_results_ptr, device_points_ptr, device_point_count, bubble->crd[X_], bubble->crd[Y_], bubble->crd[Z_], bubble->k, stream_point_count, device_point_offset, 1.0 ); } check_errors(__FILE__, __LINE__); } } // add the pointers point_offset += stream_point_count; device_point_offset += stream_point_count; } check_errors(__FILE__, __LINE__); } } /************************************************************** * Function3DMultiplier-implementation * **************************************************************/ /* * Injects the f1_bubbles to this->cube1 and f2_bubbles to this->cube2, * multiplies this->cube1 with this->cube2 and de-injects the 'result_bubbles' * from 'this->cube1' * * @param f1_bubbles * @param f2_bubbles * @param result_bubbles */ void Function3DMultiplier::multiply(Bubbles *f1_bubbles, Bubbles *f2_bubbles, Bubbles *result_bubbles) { int total_slice_count = this->cube1->getShape(Z_); // the minimum l is 0 always in the multiplication int device_slice_count; // get the pointer arrays from the cubes double **f1_device_cubes = this->cube1->getDeviceCubes(); size_t *f1_device_pitches = this->cube1->getDevicePitches(); double **f2_device_cubes = this->cube2->getDeviceCubes(); size_t *f2_device_pitches = this->cube2->getDevicePitches(); int *f1_device_memory_shape = this->cube1->getDeviceMemoryShape(); int *f2_device_memory_shape = this->cube2->getDeviceMemoryShape(); int f1_shape[3]; f1_shape[X_] = this->cube1->getShape(X_); f1_shape[Y_] = this->cube1->getShape(Y_); f1_shape[Z_] = this->cube1->getShape(Z_); int f2_shape[3]; f2_shape[X_] = this->cube2->getShape(X_); f2_shape[Y_] = 
this->cube2->getShape(Y_); f2_shape[Z_] = this->cube2->getShape(Z_); int slice_offset = 0; Bubble *bubble; // copy the cubes to the device & execute the kernels for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { // set the used device (gpu) this->streamContainer->setDevice(device); //hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte); //int first_block = 0; double *dev_f1_cube = f1_device_cubes[device]; double *dev_f2_cube = f2_device_cubes[device]; // calculate the number of vectors this device handles device_slice_count = total_slice_count / this->streamContainer->getNumberOfDevices() + ((total_slice_count % this->streamContainer->getNumberOfDevices()) > device); for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream++) { // determine the count of vectors handled by this stream int slice_count = device_slice_count / this->streamContainer->getStreamsPerDevice() + ((device_slice_count % this->streamContainer->getStreamsPerDevice()) > stream); if (slice_count > 0) { // get the launch configuration for the f1-inject dim3 block, grid; this->cube1->getLaunchConfiguration(&grid, &block, slice_count, INJECT_BLOCK_SIZE); check_errors(__FILE__, __LINE__); // inject the f1 bubbles to the f1_cube (and sum) for (int i = 0; i < f1_bubbles->getBubbleCount(); i++) { bubble = f1_bubbles->getBubbleWithLocalOrderNumber(i); // wait that the bubble is uploaded to the device before starting if (stream == 0) bubble->waitBubbleUploaded(device); hipLaunchKernelGGL(( Bubbles_evaluate_grid_pitched) , dim3(grid), dim3(block), INJECT_BLOCK_SIZE * sizeof(double) * 7, *this->streamContainer->getStream(device, stream) , bubble->device_copies[device], dev_f1_cube, this->grid->axis[X_]->device_gridpoints[device], this->grid->axis[Y_]->device_gridpoints[device], this->grid->axis[Z_]->device_gridpoints[device], f1_shape[X_], f1_shape[Y_], f1_shape[Z_], bubble->crd[X_], bubble->crd[Y_], bubble->crd[Z_], bubble->k, slice_offset, f1_device_pitches[device], f1_device_memory_shape[Y_], slice_count, 1.0); check_errors(__FILE__, __LINE__); } check_errors(__FILE__, __LINE__); // get the launch configuration for the f2-inject this->cube2->getLaunchConfiguration(&grid, &block, slice_count, INJECT_BLOCK_SIZE); // inject the f2 bubbles to the f1_cube (and sum) for (int i = 0; i < f2_bubbles->getBubbleCount(); i++) { bubble = f2_bubbles->getBubbleWithLocalOrderNumber(i); // wait that the bubble is uploaded to the device before starting if (stream == 0) bubble->waitBubbleUploaded(device); // call the kernel hipLaunchKernelGGL(( Bubbles_evaluate_grid_pitched) , dim3(grid), dim3(block), INJECT_BLOCK_SIZE * sizeof(double) * 7, *this->streamContainer->getStream(device, stream) , bubble->device_copies[device], dev_f2_cube, this->grid->axis[X_]->device_gridpoints[device], this->grid->axis[Y_]->device_gridpoints[device], this->grid->axis[Z_]->device_gridpoints[device], f2_shape[X_], f2_shape[Y_], f2_shape[Z_], bubble->crd[X_], bubble->crd[Y_], bubble->crd[Z_], bubble->k, slice_offset, f2_device_pitches[device], f2_device_memory_shape[Y_], slice_count, 1.0); check_errors(__FILE__, __LINE__); } // get the launch configuration for the multiplication and result-inject this->cube2->getLaunchConfiguration(&grid, &block, slice_count, INJECT_BLOCK_SIZE); // multiply dev_f1_cube with dev_f2_cube and store the result to dev_f1_cube multiply_3d_cubes(dev_f1_cube, f1_shape[X_], f1_shape[Y_], f1_device_memory_shape[Y_], f1_device_pitches[device], dev_f2_cube, f2_shape[X_], 
                   f2_shape[Y_], f2_device_memory_shape[Y_], f2_device_pitches[device],
                                  slice_count, &grid, &block,
                                  this->streamContainer->getStream(device, stream));
                check_errors(__FILE__, __LINE__);

                // de-inject (deduct) the result bubbles from the dev_f1_cube
                for (int i = 0; i < result_bubbles->getBubbleCount(); i++) {
                    bubble = result_bubbles->getBubbleWithLocalOrderNumber(i);

                    // wait that the bubble is uploaded to the device before starting
                    if (stream == 0) bubble->waitBubbleUploaded(device);

                    // call the kernel
                    hipLaunchKernelGGL(( Bubbles_evaluate_grid_pitched) , dim3(grid), dim3(block), INJECT_BLOCK_SIZE * sizeof(double) * 7, *this->streamContainer->getStream(device, stream) ,
                        bubble->device_copies[device], dev_f1_cube,
                        this->grid->axis[X_]->device_gridpoints[device],
                        this->grid->axis[Y_]->device_gridpoints[device],
                        this->grid->axis[Z_]->device_gridpoints[device],
                        f1_shape[X_], f1_shape[Y_], f1_shape[Z_],
                        bubble->crd[X_], bubble->crd[Y_], bubble->crd[Z_],
                        bubble->k, slice_offset, f1_device_pitches[device],
                        f1_device_memory_shape[Y_], slice_count, -1.0);
                    check_errors(__FILE__, __LINE__);
                }

                // advance the cube pointers by the number of slices handled by this stream
                dev_f1_cube += slice_count * f1_device_pitches[device] / sizeof(double) * f1_device_memory_shape[Y_];
                dev_f2_cube += slice_count * f2_device_pitches[device] / sizeof(double) * f2_device_memory_shape[Y_];
                slice_offset += slice_count;
            }
        }
    }
}

/********************************************
 *            Fortran interfaces            *
 ********************************************/

extern "C" void bubbles_add_cuda(Bubbles *bubbles, Bubbles *bubbles1) {
    bubbles->add(bubbles1);
}

extern "C" Bubbles* bubbles_get_sub_bubbles_cuda(Bubbles *bubbles, int *ibubs, int nbub) {
    return bubbles->getSubBubbles(ibubs, nbub);
}

extern "C" Bubbles *bubbles_init_cuda(int nbub) {
    Bubbles *new_bubbles = new Bubbles(nbub);
    check_errors(__FILE__, __LINE__);
    return new_bubbles;
}

/*
 * @param i - local index of the bubble initialized, in Fortran format: first index is 1.
 */
extern "C" void bubble_init_cuda(Bubbles *bubbles, Grid1D *grid, int i, int ibub, double center[3],
                                 int lmax, int k, double charge, StreamContainer *streamContainer) {
    bubbles->initBubble(grid, i-1, ibub, center, lmax, k, charge, streamContainer);
    check_errors(__FILE__, __LINE__);
}

/*
 * Upload the content ('bf') of the Bubble with global order number 'ibub' to the device.
 *
 * @param ibub - the global order number of the bubble
 */
extern "C" void bubble_upload_all_cuda(Bubbles *bubbles, int ibub, int lmax, int k, double *bf) {
    if (bubbles->containsBubble(ibub)) {
        bubbles->getBubble(ibub)->k = k;
        bubbles->getBubble(ibub)->uploadAll(bf, lmax);
        check_errors(__FILE__, __LINE__);
    }
}

extern "C" void bubble_upload_cuda(Bubbles *bubbles, int ibub, int lmax, double *bf) {
    if (bubbles->containsBubble(ibub)) {
        bubbles->getBubble(ibub)->upload(bf, lmax);
        check_errors(__FILE__, __LINE__);
    }
}

extern "C" void bubble_add_cuda(Bubbles *bubbles, Bubbles *bubbles1, int ibub) {
    bubbles->getBubble(ibub)->add(bubbles1->getBubble(ibub));
    check_errors(__FILE__, __LINE__);
}

extern "C" void bubbles_destroy_cuda(Bubbles* bubbles){
    if (bubbles) {
        bubbles->destroy();
        delete bubbles;
        check_errors(__FILE__, __LINE__);
    }
}

extern "C" double bubbles_integrate_cuda(Bubbles *bubbles) {
    return bubbles->integrate();
}

extern "C" void bubbles_set_processor_configuration_cuda(Bubbles *bubbles, int processor_order_number, int number_of_processors) {
    bubbles->setProcessorConfiguration(processor_order_number, number_of_processors);
}

extern "C" void bubbles_inject_cuda(Bubbles *bubbles, Grid3D *grid, int lmin, CudaCube *cube) {
    bubbles->inject(grid, cube, lmin);
}

extern "C" void bubbles_inject_to_cuda(Bubbles *bubbles, Grid3D *grid, int lmin, CudaCube *cudaCube,
                                       double *cube, int offset, int cube_host_shape[3]) {
    cudaCube->initHost(&cube[offset], cube_host_shape, true);
    cudaCube->upload();
    bubbles->inject(grid, cudaCube, lmin);
}

extern "C" double *bubbles_init_page_locked_f_cuda(int lmax, int shape){
    //allocated += 1;
    double * result_f;
    check_errors(__FILE__, __LINE__);
    hipHostMalloc((void **)&result_f, sizeof(double) * (lmax+1) * (lmax+1) * shape, hipHostMallocPortable);
    check_errors(__FILE__, __LINE__);
    //printf("Allocated 1, Now allocated %d, address: %ld\n", allocated, result_f);
    return result_f;
}

extern "C" void bubbles_destroy_page_locked_f_cuda(double * f){
    //allocated -= 1;
    //printf("Deallocated 1, Now allocated %d, address: %ld\n", allocated, f);
    hipHostFree(f);
    check_errors(__FILE__, __LINE__);
}
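/*
 * Illustrative usage sketch (not part of the original interface): a minimal example of how the
 * extern "C" entry points above could be driven from host code, assuming the Grid1D/Grid3D,
 * CudaCube and StreamContainer instances as well as the radial coefficient array 'radial_data'
 * are prepared elsewhere (normally by the Fortran side). The guard macro BUBBLES_USAGE_EXAMPLE
 * and the function name example_bubbles_roundtrip are hypothetical and exist only for this sketch.
 */
#ifdef BUBBLES_USAGE_EXAMPLE
static double example_bubbles_roundtrip(Grid1D *radial_grid, Grid3D *grid, CudaCube *cube,
                                        StreamContainer *stream_container,
                                        double *radial_data, int lmax) {
    // a container holding a single bubble; local index 1 (Fortran convention), global index 1
    double center[3] = {0.0, 0.0, 0.0};
    Bubbles *bubbles = bubbles_init_cuda(1);
    bubble_init_cuda(bubbles, radial_grid, /*i=*/1, /*ibub=*/1, center,
                     lmax, /*k=*/0, /*charge=*/1.0, stream_container);

    // upload the radial expansion data of the bubble and inject the bubble sum to the cube
    bubble_upload_all_cuda(bubbles, /*ibub=*/1, lmax, /*k=*/0, radial_data);
    bubbles_inject_cuda(bubbles, grid, /*lmin=*/0, cube);

    // integrate over the bubbles and release the device-side resources
    double integral = bubbles_integrate_cuda(bubbles);
    bubbles_destroy_cuda(bubbles);
    return integral;
}
#endif /* BUBBLES_USAGE_EXAMPLE */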
1ef20092bedbda2aa88b60528db7e0736c50e77b.cu
/*----------------------------------------------------------------------------------* * Copyright (c) 2010-2018 Pauli Parkkinen, Eelis Solala, Wen-Hua Xu, * * Sergio Losilla, Elias Toivanen, Jonas Juselius * * * * Permission is hereby granted, free of charge, to any person obtaining a copy * * of this software and associated documentation files (the "Software"), to deal * * in the Software without restriction, including without limitation the rights * * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * * copies of the Software, and to permit persons to whom the Software is * * furnished to do so, subject to the following conditions: * * * * The above copyright notice and this permission notice shall be included in all* * copies or substantial portions of the Software. * * * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * * SOFTWARE. * *----------------------------------------------------------------------------------*/ /*! @file bubbles_cuda.cu *! @brief CUDA implementation of the Bubbles. */ #include <stdio.h> #include <cuda.h> #include <stdlib.h> //#include <algorithm> *std::max_element(result_cube, result_cube + totalPointCount) #include "bubbles_cuda.h" #include "streamcontainer.h" #include "grid.h" #include "spherical_harmonics_cuda.h" #include "cube.h" #include "function3d_multiplier.h" #include "memory_leak_operators.h" #include "evaluators.h" #define X_ 0 #define Y_ 1 #define Z_ 2 #define R_ 3 #define FOURPI_ 12.566370614359173 #if (__CUDA_ARCH__ > 350) #define INJECT_BLOCK_SIZE 256 #else #define INJECT_BLOCK_SIZE 128 #endif #define NLIP 7 /** \brief Size of the CUDA blocks in the X dimension */ #define BLOCKDIMX 8 /** \brief Size of the CUDA blocks in the Y dimension */ #define BLOCKDIMY 4 /** \brief Size of the CUDA blocks in the Z dimension */ #define BLOCKDIMZ 4 #define STR_HELPER(x) #x #define STR(x) STR_HELPER(x) cudaError_t cudastat; __constant__ int shape_x_, shape_y_, shape_z_, ncell_, nlip_, lmax_, ilmmin_, lmin_, ilmmax_, first_term_, normalization_, ijk_max_; __constant__ double charge_, r_max_; cudaStream_t **streams; int streams_inited = 0; int allocated = 0; extern __shared__ double shared_memory[]; __host__ inline void check_memory(const char *filename, const int line_number) { size_t mem_tot_0 = 0; size_t mem_free_0 = 0; cudaMemGetInfo (&mem_free_0, &mem_tot_0); printf("Free memory after: %ld, total: %ld\n ", mem_free_0, mem_tot_0); } template<typename T> __device__ __forceinline__ T ldg(const T* ptr) { #if __CUDA_ARCH__ >= 350 return __ldg(ptr); #else return *ptr; #endif } void cube_download(double *hstPtr, int width, int height ,int depth, void *devPtr, size_t pitch) { // Define copy "from device to host" parameters cudaMemcpy3DParms d2h={0}; d2h.srcPtr = make_cudaPitchedPtr(devPtr, pitch,width,height); d2h.dstPtr = make_cudaPitchedPtr((void *)hstPtr, width*sizeof(double),width,height); d2h.extent = make_cudaExtent(width * sizeof(double), height, depth); // cudaMemset3D( d2h.srcPtr, 999, d2h.extent); d2h.kind = cudaMemcpyDeviceToHost; // cudastat=cudaMemset3D( d2h.srcPtr, 0, d2h.extent); // Copy to host cudastat = 
cudaMemcpy3D( &d2h ); check_errors(__FILE__, __LINE__); return; } void cube_upload(double *hstPtr, int *width ,int *height ,int *depth, void *devPtr, size_t pitch) { // Define copy "from host to device" parameters cudaMemcpy3DParms h2d={0}; h2d.srcPtr = make_cudaPitchedPtr((void *)hstPtr, *width*sizeof(double),*width,*height); h2d.dstPtr = make_cudaPitchedPtr(devPtr, pitch,*width,*height); h2d.extent = make_cudaExtent(*width * sizeof(double), *height, *depth); h2d.kind = cudaMemcpyHostToDevice; // Copy to device cudaMemcpy3D( &h2d ); return; } __device__ int icell(double x, double *d, int n){ if ( ( x > d[n] ) || ( x < d[0] ) ) { return -1; } int i[2]; i[0]=0; i[1]=n; int im=(i[0]+i[1])/2; int j; int max=log((float)n)/log(2.)+1; for(j=0;j<max;j++){ i[ x<d[im] ] = im; im=(i[0]+i[1])/2; } return im; } __device__ void calc_rc(double dist_vec[3], double *dist, double ref[3],double x, double y, double z){ dist_vec[X_]=x-ref[X_]; dist_vec[Y_]=y-ref[Y_]; dist_vec[Z_]=z-ref[Z_]; *dist=sqrt(dist_vec[X_]*dist_vec[X_]+ dist_vec[Y_]*dist_vec[Y_]+ dist_vec[Z_]*dist_vec[Z_]); dist_vec[X_]/=*dist; dist_vec[Y_]/=*dist; dist_vec[Z_]/=*dist; return; } __device__ double eval_lip(int n, double *lip, double *f, double x){ short i,j; double out=0.0; for (j=0;j<n;j++){ double tmp=0.0; for (i=0;i<n;i++){ tmp*= x; tmp+= *(lip++); } out+=tmp*f[j]; } return out; } __device__ double eval_poly(int n, double *c, double x){ double r=0.0; while (n-- > 0) { r *= x; r += *(c++); } return r; } /* * the following function precalculates some common values for the injection. * * NOTE: We are setting the cf-array to have 8 * (lmax+1) * (lmax+1) size * This has several advantages (even if we are using more space and have * blank spots in the array). 1) Every cell read is coalesced and we don't * have overlapping requests! Additionally, we avoid divergence of the threads * of one warp in the injection. 
*/ __global__ void calc_cf(Bubble *bub, int offset, int number_of_points, size_t device_f_pitch) { // get the index within this kernel call const int index = blockIdx.x * blockDim.x + threadIdx.x; // get the global index const int id= index + offset; const int icell=id%bub->grid->ncell; const int ilm=id/bub->grid->ncell; const int nlip = bub->grid->nlip; __shared__ double shared_lip[49]; __shared__ double derivative_lip[42]; __shared__ double lower_derivative_lip[30]; __shared__ double cf_results[8*64]; __shared__ double df_results[8*64]; double f_i; // load the Lagrange interpolation polynomials coefficients to // the shared memory if (threadIdx.x < (nlip) * (nlip)) { shared_lip[threadIdx.x] = bub->grid->lip[threadIdx.x]; } if (threadIdx.x < (nlip) * (nlip-1)) { derivative_lip[threadIdx.x] = bub->grid->derivative_lip[threadIdx.x]; } if (threadIdx.x < (nlip-2) * (nlip-1)) { lower_derivative_lip[threadIdx.x] = bub->grid->lower_derivative_lip[threadIdx.x]; } __syncthreads(); if ( index < number_of_points && ilm < ((bub->lmax+1)*(bub->lmax+1)) ) { double *f = bub->f + ilm * device_f_pitch / sizeof(double) + (icell * (bub->grid->nlip-1)); double *cf = bub->cf + ( ilm * bub->grid->ncell + icell ) * 8; double *df = bub->df + ( ilm * bub->grid->ncell + icell ) * 8; short i,j; double one_per_cell_step = 1.0 / bub->grid->h[icell]; double *lip=&shared_lip[0]; double *dlip=&derivative_lip[0]; double *ldlip=&lower_derivative_lip[0]; // set the shared memory result array to zero for (i=0; i < 8; i++) { cf_results[threadIdx.x * 8 + i]=0.0; df_results[threadIdx.x * 8 + i]=0.0; } // evaluate the cf to shared memory for (i=0; i < nlip; i++) { f_i = f[i]; for (j=0; j < nlip ;j++){ cf_results[threadIdx.x * 8 + j] += f_i* (*(lip++)); } // handle the special case of the first cell, where the first // data item most likely is not valid if (icell == 0) { if (i != 0) { for (j = 1 ; j <= nlip-2; j++) { df_results[threadIdx.x * 8 + j] += f_i* (*(ldlip++)); } } else { df_results[threadIdx.x * 8] = 0.0; } } else { for (j=0; j < nlip-1 ;j++) { df_results[threadIdx.x * 8 + j] += f_i* (*(dlip++)); } } } // copy the result to device memory for (i=0; i < 8; i++) { cf[i] = cf_results[threadIdx.x * 8 + i]; df[i] = one_per_cell_step * df_results[threadIdx.x * 8 + i]; } } return; } __device__ inline double evaluate_polynomials(int n, const double* __restrict__ c, const double x){ double result=0.0; while (n-- > 0) { result *= x; result += *(c++); } return result; } //#ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 350 /* * Evaluates one granular polynomial for coefficients, and x * NOTE: each thread is different value for coefficient, when entering the function * NOTE: each x value must be the same for 8 consecutive threads * NOTE: upon return each thread has the same value. 
 */
__inline__ __device__ double evaluate_polynomials_unit_shuffle(double coefficient, const double x) {
    double result = coefficient;
    for (int i = 1; i < 7; i++) {
        result *= x;
        result += __shfl_down(coefficient, i, 8);
    }
    return result;
}

__inline__ __device__ double evaluate_polynomials_unit_register(const double * __restrict__ coefficients, const double x, int nlip) {
    double result = 0.0;
    while (nlip-- > 0) {
        result *= x;
        result += *(coefficients++);
    }
    return result;
}

__device__ inline void horizontal_rotate_8f(double coefficients[8], unsigned int order_number) {
    coefficients[1] = __shfl(coefficients[1], (order_number+1)%8, 8);
    coefficients[2] = __shfl(coefficients[2], (order_number+2)%8, 8);
    coefficients[3] = __shfl(coefficients[3], (order_number+3)%8, 8);
    coefficients[4] = __shfl(coefficients[4], (order_number+4)%8, 8);
    coefficients[5] = __shfl(coefficients[5], (order_number+5)%8, 8);
    coefficients[6] = __shfl(coefficients[6], (order_number+6)%8, 8);
    coefficients[7] = __shfl(coefficients[7], (order_number+7)%8, 8);
}

__device__ inline void horizontal_rotate_8b(double coefficients[8], unsigned int order_number) {
    coefficients[1] = __shfl(coefficients[1], (order_number+7)%8, 8);
    coefficients[2] = __shfl(coefficients[2], (order_number+6)%8, 8);
    coefficients[3] = __shfl(coefficients[3], (order_number+5)%8, 8);
    coefficients[4] = __shfl(coefficients[4], (order_number+4)%8, 8);
    coefficients[5] = __shfl(coefficients[5], (order_number+3)%8, 8);
    coefficients[6] = __shfl(coefficients[6], (order_number+2)%8, 8);
    coefficients[7] = __shfl(coefficients[7], (order_number+1)%8, 8);
}

__device__ inline void vertical_rotate_8(double src[8], unsigned int order_number) {
    double tmp = src[0];

    src[0] = (order_number == 1) ? src[7] : src[0];
    src[7] = (order_number == 1) ? src[6] : src[7];
    src[6] = (order_number == 1) ? src[5] : src[6];
    src[5] = (order_number == 1) ? src[4] : src[5];
    src[4] = (order_number == 1) ? src[3] : src[4];
    src[3] = (order_number == 1) ? src[2] : src[3];
    src[2] = (order_number == 1) ? src[1] : src[2];
    src[1] = (order_number == 1) ? tmp : src[1];

    src[1] = (order_number == 2) ? src[7] : src[1];
    src[0] = (order_number == 2) ? src[6] : src[0];
    src[7] = (order_number == 2) ? src[5] : src[7];
    src[6] = (order_number == 2) ? src[4] : src[6];
    src[5] = (order_number == 2) ? src[3] : src[5];
    src[4] = (order_number == 2) ? src[2] : src[4];
    src[3] = (order_number == 2) ? src[1] : src[3];
    src[2] = (order_number == 2) ? tmp : src[2];

    src[2] = (order_number == 3) ? src[7] : src[2];
    src[1] = (order_number == 3) ? src[6] : src[1];
    src[0] = (order_number == 3) ? src[5] : src[0];
    src[7] = (order_number == 3) ? src[4] : src[7];
    src[6] = (order_number == 3) ? src[3] : src[6];
    src[5] = (order_number == 3) ? src[2] : src[5];
    src[4] = (order_number == 3) ? src[1] : src[4];
    src[3] = (order_number == 3) ? tmp : src[3];

    src[3] = (order_number == 4) ? src[7] : src[3];
    src[2] = (order_number == 4) ? src[6] : src[2];
    src[1] = (order_number == 4) ? src[5] : src[1];
    src[0] = (order_number == 4) ? src[4] : src[0];
    src[7] = (order_number == 4) ? src[3] : src[7];
    src[6] = (order_number == 4) ? src[2] : src[6];
    src[5] = (order_number == 4) ? src[1] : src[5];
    src[4] = (order_number == 4) ? tmp : src[4];

    src[4] = (order_number == 5) ? src[7] : src[4];
    src[3] = (order_number == 5) ? src[6] : src[3];
    src[2] = (order_number == 5) ? src[5] : src[2];
    src[1] = (order_number == 5) ? src[4] : src[1];
    src[0] = (order_number == 5) ? src[3] : src[0];
    src[7] = (order_number == 5) ? src[2] : src[7];
    src[6] = (order_number == 5) ?
src[1] : src[6]; src[5] = (order_number == 5) ? tmp : src[5]; src[5] = (order_number == 6) ? src[7] : src[5]; src[4] = (order_number == 6) ? src[6] : src[4]; src[3] = (order_number == 6) ? src[5] : src[3]; src[2] = (order_number == 6) ? src[4] : src[2]; src[1] = (order_number == 6) ? src[3] : src[1]; src[0] = (order_number == 6) ? src[2] : src[0]; src[7] = (order_number == 6) ? src[1] : src[7]; src[6] = (order_number == 6) ? tmp : src[6]; src[6] = (order_number == 7) ? src[7] : src[6]; src[5] = (order_number == 7) ? src[6] : src[5]; src[4] = (order_number == 7) ? src[5] : src[4]; src[3] = (order_number == 7) ? src[4] : src[3]; src[2] = (order_number == 7) ? src[3] : src[2]; src[1] = (order_number == 7) ? src[2] : src[1]; src[0] = (order_number == 7) ? src[1] : src[0]; src[7] = (order_number == 7) ? tmp : src[7]; } __device__ inline void transpose8(double coefficients[8], int order_number) { //printf("Original coefficients %d: %f, %f, %f, %f, %f, %f, %f, %f\n", order_number, coefficients[0], coefficients[1], coefficients[2], coefficients[3], coefficients[4], coefficients[5], coefficients[6], coefficients[7]); horizontal_rotate_8f(coefficients, order_number); vertical_rotate_8(coefficients, order_number); horizontal_rotate_8b(coefficients, order_number); //printf("Transposed coefficients coefficients %d: %f, %f, %f, %f, %f, %f, %f, %f\n", order_number, coefficients[0], coefficients[1], coefficients[2], coefficients[3], coefficients[4], coefficients[5], coefficients[6], coefficients[7]); } /* * Evaluates the polynomials using shuffle actions. This saves the shared_memory significantly and allows * the increase of the occupancy of the devices. * * This function only needs blockDim.x * 8 bytes of shared memory. This allows the usage of any sized blocks * that are practically useful. * * The number of arithmetic operations is larger than for the version using shared memory only, and thus * the effect to the execution speed remains to be seen. */ __device__ inline double evaluate_polynomials_shuffle(const int address, const double * __restrict__ c, const double x, const int nlip) { double *result = &shared_memory[0]; //double coefficients[8]; //double res; int remainder = threadIdx.x%8; int base_address = 8*(threadIdx.x/8); double res; for (int i = 0; i < 8; i ++) { // evaluate the polynomials // NOTE: __shfl(address, i, width=8) gets the address needed by the thread i/8 in the thread group // NOTE: __shfl(x, i, width = 8) gets the coordinate x of the thread i/8 in the thread group // NOTE: the c access (global memory is coalesced), // NOTE: shared memorybank conflict should not occur, as every thread in the 8 thread group access // the same address, thus resulting in broadcast. //coefficients[i] = c[__shfl(address, i, 8) + remainder]; res = evaluate_polynomials_unit_shuffle( c[__shfl(address, i, 8) + remainder], __shfl(x, i, 8)); if (remainder == 0) result[base_address + i] = res; } // swap the coefficients to be with their rightful owners //transpose8(coefficients, remainder); return result[threadIdx.x]; //return evaluate_polynomials_unit_register(coefficients, x, nlip); } #endif //#endif /* * Get the thread-id within block. 
*/ __device__ inline int getThreadId() { return threadIdx.x + blockDim.x * threadIdx.y + blockDim.x * blockDim.y * threadIdx.z; } /* * @param c, bubbles coefficients in the global memory * @param x, the coordinate of the point in cell coordinates * * NOTE: The parameter 'c' must be pitched for this function to be useful * NOTE: This function is made for NLIP:7, with other nlip values, the function must be remade */ template<int nlip> __device__ inline double evaluate_polynomials_shared(const int address, const double* __restrict__ c, const double x) { double *coefficients = &shared_memory[0]; //const float *fc = (const float *)c; int threadId = getThreadId(); int remainder = threadId%8; int base_address = 8*(threadId/8); int id = base_address * 7 + remainder; /*int remainder = threadId%16; base_address = 16*(threadId/16); id = base_address * 16 + remainder; int faddress = 2 * address;*/ #if (__CUDA_ARCH__ >= 350) // read the coefficients in the shared memory, 8 threads // neighbouring each other are reading the global memory // coefficients for one thread at the time, starting from 0 // and going to 7 int address_7 = __shfl(address, 7, 8); if (remainder < 7) { coefficients[id] = ldg<double>(&c[__shfl(address, 0, 8) + remainder]); coefficients[id+7] = ldg<double>(&c[__shfl(address, 1, 8) + remainder]); coefficients[id+7*2] = ldg<double>(&c[__shfl(address, 2, 8) + remainder]); coefficients[id+7*3] = ldg<double>(&c[__shfl(address, 3, 8) + remainder]); coefficients[id+7*4] = ldg<double>(&c[__shfl(address, 4, 8) + remainder]); coefficients[id+7*5] = ldg<double>(&c[__shfl(address, 5, 8) + remainder]); coefficients[id+7*6] = ldg<double>(&c[__shfl(address, 6, 8) + remainder]); coefficients[id+7*7] = ldg<double>(&c[address_7 + remainder]); } /*coefficients[id] = c[__shfl(address, 0, 8) + remainder]; coefficients[id+8] = c[__shfl(address, 1, 8) + remainder]; coefficients[id+16] = c[__shfl(address, 2, 8) + remainder]; coefficients[id+24] = c[__shfl(address, 3, 8) + remainder]; coefficients[id+32] = c[__shfl(address, 4, 8) + remainder]; coefficients[id+40] = c[__shfl(address, 5, 8) + remainder]; coefficients[id+48] = c[__shfl(address, 6, 8) + remainder]; coefficients[id+56] = c[__shfl(address, 7, 8) + remainder];*/ /*fcoefficients[id] = fc[__shfl(faddress, 0, 16) + remainder]; fcoefficients[id+16] = fc[__shfl(faddress, 1, 16) + remainder]; fcoefficients[id+32] = fc[__shfl(faddress, 2, 16) + remainder]; fcoefficients[id+48] = fc[__shfl(faddress, 3, 16) + remainder]; fcoefficients[id+64] = fc[__shfl(faddress, 4, 16) + remainder]; fcoefficients[id+80] = fc[__shfl(faddress, 5, 16) + remainder]; fcoefficients[id+96] = fc[__shfl(faddress, 6, 16) + remainder]; fcoefficients[id+112] = fc[__shfl(faddress, 7, 16) + remainder]; fcoefficients[id+128] = fc[__shfl(faddress, 8, 16) + remainder]; fcoefficients[id+144] = fc[__shfl(faddress, 9, 16) + remainder]; fcoefficients[id+160] = fc[__shfl(faddress, 10, 16) + remainder]; fcoefficients[id+176] = fc[__shfl(faddress, 11, 16) + remainder]; fcoefficients[id+192] = fc[__shfl(faddress, 12, 16) + remainder]; fcoefficients[id+208] = fc[__shfl(faddress, 13, 16) + remainder]; fcoefficients[id+224] = fc[__shfl(faddress, 14, 16) + remainder]; fcoefficients[id+240] = fc[__shfl(faddress, 15, 16) + remainder];*/ #else // store the addresses to the shared memory int *address_array = (int *) &shared_memory[8*blockDim.x * blockDim.y * blockDim.z]; address_array[threadIdx.x] = address; coefficients[id] = c[address_array[base_address] + remainder]; coefficients[id+8] = 
c[address_array[base_address +1] + remainder]; coefficients[id+16] = c[address_array[base_address +2] + remainder]; coefficients[id+24] = c[address_array[base_address +3] + remainder]; coefficients[id+32] = c[address_array[base_address +4] + remainder]; coefficients[id+40] = c[address_array[base_address +5] + remainder]; coefficients[id+48] = c[address_array[base_address +6] + remainder]; coefficients[id+56] = c[address_array[base_address +7] + remainder]; #endif double *coeff = &coefficients[threadId * 7]; double result = coeff[0]; if (nlip > 1) { result *= x; result += coeff[1]; } if (nlip > 2) { result *= x; result += coeff[2]; } if (nlip > 3) { result *= x; result += coeff[3]; } if (nlip > 4) { result *= x; result += coeff[4]; } if (nlip > 5) { result *= x; result += coeff[5]; } if (nlip > 6) { result *= x; result += coeff[6]; } return result; } __device__ inline int calculate_icell(double x, double *d, int n){ if ( ( x > d[n] ) || ( x < d[0] ) ) { return -1; } int i[2]; i[0]=0; i[1]=n; int im=(i[0]+i[1])/2; int j; int max=log((float)n)/log(2.)+1; for(j=0;j<max;j++){ i[ x<d[im] ] = im; im=(i[0]+i[1])/2; } return im; } __device__ inline void calculate_icell_radial(const double x, const double charge, const double r_max, const int ncell, const int nlip, int *icell, double *in_cell_position) { const double dx = r_max/(double)ncell; const double c=8.0*rsqrt(charge)/charge; const double a = r_max + c; *icell = (int)(x * a / ((c + x)*dx)); double x1 = c / (a/((*icell+1) * dx) - 1.0); double x0 = c / (a/(*icell * dx) - 1.0); if (icell == 0) { x0 = 0.0; } double grid_step = (x1-x0) / (nlip-1); double center = (x1+x0) / (2.0); *in_cell_position= (x - center)/grid_step; } inline __device__ void calculate_distance(double &dist_vec_x, double &dist_vec_y, double &dist_vec_z, double &dist, const double reference_point_x, const double reference_point_y, const double reference_point_z, const double x, const double y, const double z){ // calculate the vector relative to reference_point dist_vec_x=x-reference_point_x; dist_vec_y=y-reference_point_y; dist_vec_z=z-reference_point_z; // evaluate the length of the dist_vector, i.e., the distance between dist_vec and reference_point dist=sqrt(dist_vec_x * dist_vec_x + dist_vec_y * dist_vec_y + dist_vec_z * dist_vec_z); return; } /* * Evaluates value of single bubble at a point. 
This is very similar with the * SolidHarmonics simple evaluation, but the results are multiplied with the * polynomial evaluations */ __device__ inline double Bubbles_evaluate_point_lmin( // x-coordinate relative to the center of the bubble const double &x, // y-coordinate relative to the center of the bubble const double &y, // z-coordinate relative to the center of the bubble const double &z, // relative distance from the center of the bubble const double &distance, // minimum quantum number 'l' const int &lmin, // maximum quantum number 'l' const int &lmax, // number of cells const int &ncell, // number of lagrange integration polyniomials per // cell, i.e., the number of grid points per cell const int &nlip, // position inside the cell const double &r, // k value for the bubble const int &k, // the first address value in bubble for the selected cell const int &address, const double* __restrict__ cf ) { double result = 0.0; int lm_address = address, address2 = address; // NOTE: Here the nlip is replaced by 8 because this gives advantages in loading the stuff // also *cf this should be done const int ncell_nlip = ncell * 8; int l, m, l2; double top = 0.0, bottom = 0.0, new_bottom = 0.0, prev1 = 0.0, prev2 = 0.0, current = 0.0; double multiplier = 0.0, multiplier2 = 0.0, one_per_r = 1.0 / distance; double r2 = x*x+y*y+z*z; l = 0; // set value for l=0, m=0 if (lmin == 0) { //printf("x: %f, y: %f, z: %f, nlip: %d, ncell: %d, l: 0, address: %d, cf: %ld, r: %f\n", x, y, z, nlip, ncell, 0, lm_address, cf, r); //printf("shared_memory address: %ld\n"); //printf("shared memory first value: %f", shared_memory[0]); result = evaluate_polynomials_shared<NLIP>(lm_address, cf, r); } if (lmax >= 1) { l = 1; multiplier = one_per_r; // set value for l=1, m=-1 lm_address += ncell_nlip; if (lmin <= 1) { result += y * evaluate_polynomials_shared<NLIP>(lm_address, cf, r) * multiplier; } // set all values where m=-1 m = -1; prev1 = y; // the starting address has 1 item before from the l=0, 3 from l=1, and 1 from l=2 address2 = address + ncell_nlip * 5; multiplier2 = multiplier * one_per_r; for (l = 2; l <= lmax; l++) { current = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l+m)*(l-m)) ) * z*prev1; if (l > 2) { current -= sqrt( (double)((l+m-1)*(l-m-1)) / (double)((l+m)*(l-m)) ) * r2 * prev2; } prev2 = prev1; prev1 = current; if (l >= lmin) { result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r) * multiplier2; } // add the address2 to get to the next item with m=-1 address2 += ncell_nlip * (2*l+2); multiplier2 *= one_per_r; } // set value for l=1, m=0 lm_address += ncell_nlip; if (lmin <= 1) { result += z * evaluate_polynomials_shared<NLIP>(lm_address, cf, r) * multiplier; } // set all values where m=0 prev1 = z; prev2 = 1.0; m = 0; // the starting address has 1 item before from the l=0, 3 from l=1, and 2 from l=2 address2 = address + ncell_nlip * 6; multiplier2 = multiplier * one_per_r; for (l = 2; l <= lmax; l++) { current = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l+m)*(l-m)) ) * z * prev1; current -= sqrt( (double)((l+m-1)*(l-m-1)) / (double)((l+m)*(l-m)) ) * r2 * prev2; prev2 = prev1; prev1 = current; if (l >= lmin) { result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r) * multiplier2; } // add the address2 to get to the next item with m=0 address2 += ncell_nlip * (2*l+2); multiplier2 *= one_per_r; } // set value for l=1, m=1 lm_address += ncell_nlip; if (lmin <= 1) { result += x * evaluate_polynomials_shared<NLIP>(lm_address, cf, r) * multiplier; } // set all values 
where m=1 prev1 = x; m = 1; // the starting address has 1 item before from the l=0, 3 from l=1, and 3 from l=2 address2 = address + ncell_nlip * 7; multiplier2 = multiplier * one_per_r; for (l = 2; l <= lmax; l++) { current = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l+m)*(l-m)) ) * z*prev1; if (l > 2) { current -= sqrt( (double)((l+m-1)*(l-m-1)) / (double)((l+m)*(l-m)) ) * r2 * prev2; } prev2 = prev1; prev1 = current; if (l >= lmin) { result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r) * multiplier2; } // add the address2 to get to the next item with m=1 address2 += ncell_nlip * (2*l+2); multiplier2 *= one_per_r; } // go through the rest of the stuff bottom = y; // bottom refers to solid harmonics value with l=l-1 and m=-(l-1) top = x; // top refers to solid harmonics value with l=l-1 and m=l-1 lm_address += ncell_nlip; multiplier *= one_per_r; for (l=2; l <= lmax; l++) { new_bottom = sqrt((2.0*(double)l - 1.0) / (2.0*(double)l)) * ( y*top + x*bottom); if (l >= lmin) { result += new_bottom * evaluate_polynomials_shared<NLIP>(lm_address, cf, r) * multiplier; } // set all values where m=-l m = -l; prev1 = new_bottom; address2 = lm_address + (2*l+2) * ncell_nlip; multiplier2 = multiplier * one_per_r; for (l2 = l+1; l2 <= lmax; l2++) { current = ( 2.0*(double)l2-1.0) * rsqrt( 1.0*(double)((l2+m)*(l2-m)) ) * z*prev1; if (l2 > l+1) { current -= sqrt( (double)((l2+m-1)*(l2-m-1)) / (double)((l2+m)*(l2-m)) ) * r2 * prev2; } prev2 = prev1; prev1 = current; if (l2 >= lmin) { result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r) * multiplier2; } // add the address2 to get to the next item with m=l address2 += ncell_nlip * (2*l2+2); multiplier2 *= one_per_r; } // get value for l=l, m=l. The address is 2*l items away from l=l, m=-l lm_address += 2*l * ncell_nlip; top = sqrt((2.0*(double)l - 1.0) / (2.0*(double)l)) * ( x*top-y*bottom ); // set all values where m=l m = l; prev1 = top; address2 = lm_address + (2*l+2) * ncell_nlip; multiplier2 = multiplier * one_per_r; for (l2 = l+1; l2 <= lmax; l2++) { current = ( 2.0*(double)l2-1.0) * rsqrt( 1.0*(double)((l2+m)*(l2-m)) ) * z*prev1; if (l2 > l+1) { current -= sqrt( (double)((l2+m-1)*(l2-m-1)) / (double)((l2+m)*(l2-m)) ) * r2 * prev2; } prev2 = prev1; prev1 = current; if (l2 >= lmin) { result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r) * multiplier2; } // add the address2 to get to the next item with m=l address2 += ncell_nlip * (2*l2+2); multiplier2 *= one_per_r; } // store the new bottom: l=l, m=-l (we need the old bottom in calculation of top) bottom = new_bottom; if (l >= lmin) { result += top * evaluate_polynomials_shared<NLIP>(lm_address, cf, r) * multiplier; } // get next address lm_address += ncell_nlip; multiplier *= one_per_r; } } // multiply the result with r^k, if k is not 0 // the distance is not too close to 0.0 as this is checked // earlier in this function if (k != 0 && distance > 1e-12) { result *= pow(distance, (double)k); } if (distance < 1e-8) { result = 1.0 * cf[0]; //evaluate_polynomials(nlip, &cf[address], r); } return result; } /* * (int nlip, int ncell, int l, int address, double *c, const double x) * Evaluates the value of gradient of a single bubble at a point. This is very similar with the * SolidHarmonics simple evaluation, but the results are multiplied with the * polynomial evaluations and summed together. 
*/ template <bool evaluate_gradients_x, bool evaluate_gradients_y, bool evaluate_gradients_z > __device__ inline void Bubbles_evaluate_gradient_point( // x-coordinate relative to the center of the bubble const double &x, // y-coordinate relative to the center of the bubble const double &y, // z-coordinate relative to the center of the bubble const double &z, // relative distance from the center of the bubble const double &distance, // maximum quantum number 'l' const int &lmax, // number of cells const int &ncell, // number of lagrange integration polyniomials per // cell, i.e., the number of grid points per cell const int &nlip, // position inside the cell const double &r, // k value for the bubble const int &k, // the first address value in bubble for the selected cell const int &address, // constant pointer to a variable double array const double* __restrict__ cf, // constant pointer to a derivative variable double array const double* __restrict__ df, // if only the l = 0 is evaluated const bool only_spherical, // result double result[3] ) { int lm_address = address, address2; // NOTE: Here the nlip is replaced by 8 because this gives advantages in loading the stuff // also *cf this should be done const int ncell_nlip = ncell * 8; int l, l2; double top, bottom, new_bottom, prev1, prev2, current, current_gradient[3], prev1_gradient[3], prev2_gradient[3], bottom_gradient[3], new_bottom_gradient, top_gradient[3]; double one_per_r = 1.0 / distance;; double one_per_r_gradient[3] = {(-x) * one_per_r * one_per_r, (-y) * one_per_r * one_per_r, (-z) * one_per_r * one_per_r}; l = 0; // set value for l=0, m=0 double radial_value, radial_derivative; radial_derivative = evaluate_polynomials_shared<NLIP-1>(lm_address, df, r); if (evaluate_gradients_x) result[X_] = radial_derivative * x;// * one_per_r; if (evaluate_gradients_y) result[Y_] = radial_derivative * y;// * one_per_r; if (evaluate_gradients_z) result[Z_] = radial_derivative * z;// * one_per_r; if (distance >= 0.0 && distance < 1e-12) { one_per_r = 0.0; if (evaluate_gradients_x) one_per_r_gradient[X_] = 0.0; if (evaluate_gradients_y) one_per_r_gradient[Y_] = 0.0; if (evaluate_gradients_z) one_per_r_gradient[Z_] = 0.0; if (evaluate_gradients_x) result[X_] = 0.0; //radial_derivative; if (evaluate_gradients_y) result[Y_] = 0.0; //radial_derivative; if (evaluate_gradients_z) result[Z_] = 0.0;//radial_derivative; } /*if (only_spherical) { one_per_r = 0.0; if (evaluate_gradients_x) one_per_r_gradient[X_] = 0.0; if (evaluate_gradients_y) one_per_r_gradient[Y_] = 0.0; if (evaluate_gradients_z) one_per_r_gradient[Z_] = 0.0; }*/ if (lmax >= 1) { // set all values where m=-1 prev1 = y * one_per_r; if (evaluate_gradients_x) prev1_gradient[X_] = one_per_r_gradient[X_] * y; if (evaluate_gradients_y) prev1_gradient[Y_] = 1.0 + one_per_r_gradient[Y_] * y; if (evaluate_gradients_z) prev1_gradient[Z_] = one_per_r_gradient[Z_] * y; // set value for l=1, m=-1 radial_value = evaluate_polynomials_shared<NLIP>(address+ncell_nlip, cf, r); radial_derivative = evaluate_polynomials_shared<NLIP-1>(address+ncell_nlip, df, r); if (evaluate_gradients_x) result[X_] += radial_value * prev1_gradient[X_] + radial_derivative * prev1 * x;// * one_per_r; if (evaluate_gradients_y) result[Y_] += radial_value * prev1_gradient[Y_] + radial_derivative * prev1 * y;// * one_per_r; if (evaluate_gradients_z) result[Z_] += radial_value * prev1_gradient[Z_] + radial_derivative * prev1 * z;// * one_per_r; //if (only_spherical) printf("radial_value: %e, radial_derivative: %e, prev1, i.e., 
y/r: %e\n", radial_value, radial_derivative, prev1); //if (only_spherical && evaluate_gradients_x) printf("prev1-gradient-x: %e, x/r: %e\n", prev1_gradient[X_], x * one_per_r); //if (only_spherical && evaluate_gradients_y) printf("prev1-gradient-y: %e, y/r: %e\n", prev1_gradient[Y_], y * one_per_r); //if (only_spherical && evaluate_gradients_z) printf("prev1-gradient-z: %e, z/r: %e\n", prev1_gradient[Z_], z * one_per_r); // the starting address has 1 item before from the l=0, 3 from l=1, and 1 from l=2 address2 = address + ncell_nlip * 5; for (l = 2; l <= lmax; l++) { double a = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l-1)*(l+1)) ); current = a * z*prev1 * one_per_r; if (evaluate_gradients_x) current_gradient[X_] = a *(z * prev1 * one_per_r_gradient[X_] + z * one_per_r * prev1_gradient[X_]); if (evaluate_gradients_y) current_gradient[Y_] = a *(z * prev1 * one_per_r_gradient[Y_] + z * one_per_r * prev1_gradient[Y_]); if (evaluate_gradients_z) current_gradient[Z_] = a *(z * prev1 * one_per_r_gradient[Z_] + prev1 + z * one_per_r * prev1_gradient[Z_]); if (l > 2) { double b = sqrt( (double)((l-2)*(l)) / (double)((l-1)*(l+1)) ); current -= b * prev2; if (evaluate_gradients_x) current_gradient[X_] -= b * prev2_gradient[X_]; if (evaluate_gradients_y) current_gradient[Y_] -= b * prev2_gradient[Y_]; if (evaluate_gradients_z) current_gradient[Z_] -= b * prev2_gradient[Z_]; } radial_value = evaluate_polynomials_shared<NLIP>(address2, cf, r); radial_derivative = evaluate_polynomials_shared<NLIP-1>(address2, df, r); if (evaluate_gradients_x) result[X_] += radial_value * current_gradient[X_] + radial_derivative * current * x;// * one_per_r; if (evaluate_gradients_y) result[Y_] += radial_value * current_gradient[Y_] + radial_derivative * current * y;// * one_per_r; if (evaluate_gradients_z) result[Z_] += radial_value * current_gradient[Z_] + radial_derivative * current * z;// * one_per_r; prev2 = prev1; if (evaluate_gradients_x) prev2_gradient[X_] = prev1_gradient[X_]; if (evaluate_gradients_y) prev2_gradient[Y_] = prev1_gradient[Y_]; if (evaluate_gradients_z) prev2_gradient[Z_] = prev1_gradient[Z_]; prev1 = current; if (evaluate_gradients_x) prev1_gradient[X_] = current_gradient[X_]; if (evaluate_gradients_y) prev1_gradient[Y_] = current_gradient[Y_]; if (evaluate_gradients_z) prev1_gradient[Z_] = current_gradient[Z_]; // add the address2 to get to the next item with m=-1 address2 += ncell_nlip * (2*l+2); } prev2 = 1.0; if (evaluate_gradients_x) prev2_gradient[X_] = 0.0; if (evaluate_gradients_y) prev2_gradient[Y_] = 0.0; if (evaluate_gradients_z) prev2_gradient[Z_] = 0.0; // set all values where m=0 prev1 = z * one_per_r; if (evaluate_gradients_x) prev1_gradient[X_] = one_per_r_gradient[X_] * z; if (evaluate_gradients_y) prev1_gradient[Y_] = one_per_r_gradient[Y_] * z; if (evaluate_gradients_z) prev1_gradient[Z_] = 1.0 + one_per_r_gradient[Z_] * z; // set value for l=1, m=0 radial_value = evaluate_polynomials_shared<NLIP>(address+2*ncell_nlip, cf, r); radial_derivative = evaluate_polynomials_shared<NLIP-1>(address+2*ncell_nlip, df, r); if (evaluate_gradients_x) result[X_] += radial_value * prev1_gradient[X_] + radial_derivative * prev1 * x;// * one_per_r; if (evaluate_gradients_y) result[Y_] += radial_value * prev1_gradient[Y_] + radial_derivative * prev1 * y;// * one_per_r; if (evaluate_gradients_z) result[Z_] += radial_value * prev1_gradient[Z_] + radial_derivative * prev1 * z;// * one_per_r; //if (only_spherical) printf("radial_value: %e, radial_derivative: %e, prev1, i.e., z/r: %e\n", 
radial_value, radial_derivative, prev1); //if (only_spherical && evaluate_gradients_x) printf("prev1-gradient-x: %e, x/r: %e\n", prev1_gradient[X_], x * one_per_r); //if (only_spherical && evaluate_gradients_y) printf("prev1-gradient-y: %e, y/r: %e\n", prev1_gradient[Y_], y * one_per_r); //if (only_spherical && evaluate_gradients_z) printf("prev1-gradient-z: %e, z/r: %e\n", prev1_gradient[Z_], z * one_per_r); // the starting address has 1 item before from the l=0, 3 from l=1, and 2 from l=2 address2 = address + ncell_nlip * 6; for (l = 2; l <= lmax; l++) { double a = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l)*(l)) ); double b = sqrt( (double)((l-1)*(l-1)) / (double)((l)*(l)) ); current = a * z * prev1 * one_per_r; if (evaluate_gradients_x) current_gradient[X_] = a *(z * prev1 * one_per_r_gradient[X_] + z * one_per_r * prev1_gradient[X_]); if (evaluate_gradients_y) current_gradient[Y_] = a *(z * prev1 * one_per_r_gradient[Y_] + z * one_per_r * prev1_gradient[Y_]); if (evaluate_gradients_z) current_gradient[Z_] = a *(z * prev1 * one_per_r_gradient[Z_] + prev1 + z * one_per_r * prev1_gradient[Z_]); current -= b * prev2; if (evaluate_gradients_x) current_gradient[X_] -= b * prev2_gradient[X_]; if (evaluate_gradients_y) current_gradient[Y_] -= b * prev2_gradient[Y_]; if (evaluate_gradients_z) current_gradient[Z_] -= b * prev2_gradient[Z_]; radial_value = evaluate_polynomials_shared<NLIP>(address2, cf, r); radial_derivative = evaluate_polynomials_shared<NLIP-1>(address2, df, r); if (evaluate_gradients_x) result[X_] += radial_value * current_gradient[X_] + radial_derivative * current * x;// * one_per_r; if (evaluate_gradients_y) result[Y_] += radial_value * current_gradient[Y_] + radial_derivative * current * y;// * one_per_r; if (evaluate_gradients_z) result[Z_] += radial_value * current_gradient[Z_] + radial_derivative * current * z;// * one_per_r; prev2 = prev1; if (evaluate_gradients_x) prev2_gradient[X_] = prev1_gradient[X_]; if (evaluate_gradients_y) prev2_gradient[Y_] = prev1_gradient[Y_]; if (evaluate_gradients_z) prev2_gradient[Z_] = prev1_gradient[Z_]; prev1 = current; if (evaluate_gradients_x) prev1_gradient[X_] = current_gradient[X_]; if (evaluate_gradients_y) prev1_gradient[Y_] = current_gradient[Y_]; if (evaluate_gradients_z) prev1_gradient[Z_] = current_gradient[Z_]; // add the address2 to get to the next item with m=0 address2 += ncell_nlip * (2*l+2); } // set all values where m=1 prev1 = x * one_per_r; if (evaluate_gradients_x) prev1_gradient[X_] = 1.0 + one_per_r_gradient[X_] * x; if (evaluate_gradients_y) prev1_gradient[Y_] = one_per_r_gradient[Y_] * x; if (evaluate_gradients_z) prev1_gradient[Z_] = one_per_r_gradient[Z_] * x; // set value for l=1, m=1 radial_value = evaluate_polynomials_shared<NLIP>(address+3*ncell_nlip, cf, r); radial_derivative = evaluate_polynomials_shared<NLIP-1>(address+3*ncell_nlip, df, r); if (evaluate_gradients_x) result[X_] += radial_value * prev1_gradient[X_] + radial_derivative * prev1 * x;// * one_per_r; if (evaluate_gradients_y) result[Y_] += radial_value * prev1_gradient[Y_] + radial_derivative * prev1 * y;// * one_per_r; if (evaluate_gradients_z) result[Z_] += radial_value * prev1_gradient[Z_] + radial_derivative * prev1 * z;// * one_per_r; //if (only_spherical) printf("radial_value: %e, radial_derivative: %e, prev1, i.e., x/r: %e\n", radial_value, radial_derivative, prev1); //if (only_spherical && evaluate_gradients_x) printf("prev1-gradient-x: %e, x/r: %e\n", prev1_gradient[X_], x * one_per_r); //if (only_spherical && evaluate_gradients_y) 
printf("prev1-gradient-y: %e, y/r: %e\n", prev1_gradient[Y_], y * one_per_r); //if (only_spherical && evaluate_gradients_z) printf("prev1-gradient-z: %e, z/r: %e\n", prev1_gradient[Z_], z * one_per_r); // the starting address has 1 item before from the l=0, 3 from l=1, and 3 from l=2 address2 = address + ncell_nlip * 7; for (l = 2; l <= lmax; l++) { double a = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l+1)*(l-1)) ); current = a * z*prev1 * one_per_r; if (evaluate_gradients_x) current_gradient[X_] = a *(z * prev1 * one_per_r_gradient[X_] + z * one_per_r * prev1_gradient[X_]); if (evaluate_gradients_y) current_gradient[Y_] = a *(z * prev1 * one_per_r_gradient[Y_] + z * one_per_r * prev1_gradient[Y_]); if (evaluate_gradients_z) current_gradient[Z_] = a *(z * prev1 * one_per_r_gradient[Z_] + prev1 + z * one_per_r * prev1_gradient[Z_]); if (l > 2) { double b = sqrt( (double)((l)*(l-2)) / (double)((l+1)*(l-1)) ); current -= b * prev2; if (evaluate_gradients_x) current_gradient[X_] -= b * prev2_gradient[X_]; if (evaluate_gradients_y) current_gradient[Y_] -= b * prev2_gradient[Y_]; if (evaluate_gradients_z) current_gradient[Z_] -= b * prev2_gradient[Z_]; } radial_value = evaluate_polynomials_shared<NLIP>(address2, cf, r); radial_derivative = evaluate_polynomials_shared<NLIP-1>(address2, df, r); if (evaluate_gradients_x) result[X_] += radial_value * current_gradient[X_] + radial_derivative * current * x;// * one_per_r; if (evaluate_gradients_y) result[Y_] += radial_value * current_gradient[Y_] + radial_derivative * current * y;// * one_per_r; if (evaluate_gradients_z) result[Z_] += radial_value * current_gradient[Z_] + radial_derivative * current * z;// * one_per_r; prev2 = prev1; if (evaluate_gradients_x) prev2_gradient[X_] = prev1_gradient[X_]; if (evaluate_gradients_y) prev2_gradient[Y_] = prev1_gradient[Y_]; if (evaluate_gradients_z) prev2_gradient[Z_] = prev1_gradient[Z_]; prev1 = current; if (evaluate_gradients_x) prev1_gradient[X_] = current_gradient[X_]; if (evaluate_gradients_y) prev1_gradient[Y_] = current_gradient[Y_]; if (evaluate_gradients_z) prev1_gradient[Z_] = current_gradient[Z_]; // add the address2 to get to the next item with m=-1 address2 += ncell_nlip * (2*l+2); } // go through the rest of the stuff bottom = y * one_per_r; // bottom refers to solid harmonics value with l=l-1 and m=-(l-1) if (evaluate_gradients_x) bottom_gradient[X_] = one_per_r_gradient[X_] * y; if (evaluate_gradients_y) bottom_gradient[Y_] = 1.0 + one_per_r_gradient[Y_] * y; if (evaluate_gradients_z) bottom_gradient[Z_] = one_per_r_gradient[Z_] * y; top = x * one_per_r; // top refers to solid harmonics value with l=l-1 and m=l-1 if (evaluate_gradients_x) top_gradient[X_] = 1.0 + one_per_r_gradient[X_] * x; if (evaluate_gradients_y) top_gradient[Y_] = one_per_r_gradient[Y_] * x; if (evaluate_gradients_z) top_gradient[Z_] = one_per_r_gradient[Z_] * x; lm_address += 4 * ncell_nlip; for (l=2; l <= lmax; l++) { double c = sqrt((2.0*(double)l - 1.0) / (2.0*(double)l)); new_bottom = c * one_per_r * ( y*top + x*bottom); // get the gradients to x direction if (evaluate_gradients_x) new_bottom_gradient = c * (one_per_r_gradient[X_] * (y * top + x * bottom) + one_per_r * (y * top_gradient[X_] + x * bottom_gradient[X_] + bottom)) ; if (evaluate_gradients_x) top_gradient[X_] = c * (one_per_r_gradient[X_] * (x * top - y * bottom) + one_per_r * (x * top_gradient[X_] + top - y * bottom_gradient[X_])); if (evaluate_gradients_x) bottom_gradient[X_] = new_bottom_gradient; // get the gradients to y direction if 
(evaluate_gradients_y) new_bottom_gradient = c * (one_per_r_gradient[Y_] * (y * top + x * bottom) + one_per_r * (y * top_gradient[Y_] + top + x * bottom_gradient[Y_])); if (evaluate_gradients_y) top_gradient[Y_] = c * (one_per_r_gradient[Y_] * (x * top - y * bottom) + one_per_r * (x * top_gradient[Y_] - y * bottom_gradient[Y_] - bottom)); if (evaluate_gradients_y) bottom_gradient[Y_] = new_bottom_gradient; // get the gradients to z direction if (evaluate_gradients_z) new_bottom_gradient = c * (one_per_r_gradient[Z_] * (y * top + x * bottom) + one_per_r * (y * top_gradient[Z_] + x * bottom_gradient[Z_])); if (evaluate_gradients_z) top_gradient[Z_] = c * (one_per_r_gradient[Z_] * (x * top - y * bottom) + one_per_r * (x * top_gradient[Z_] - y * bottom_gradient[Z_])); if (evaluate_gradients_z) bottom_gradient[Z_] = new_bottom_gradient; top = c * one_per_r * ( x*top-y*bottom ); // store the new bottom: l=l, m=-l (we need the old bottom in calculation of top previously, so we // have to sacrifice one register temporarily) bottom = new_bottom; radial_value = evaluate_polynomials_shared<NLIP>(lm_address, cf, r); radial_derivative = evaluate_polynomials_shared<NLIP-1>(lm_address, df, r); // get value for l=l, m=-l. if (evaluate_gradients_x) result[X_] += radial_value * bottom_gradient[X_] + radial_derivative * bottom * x;// * one_per_r; if (evaluate_gradients_y) result[Y_] += radial_value * bottom_gradient[Y_] + radial_derivative * bottom * y;// * one_per_r; if (evaluate_gradients_z) result[Z_] += radial_value * bottom_gradient[Z_] + radial_derivative * bottom * z;// * one_per_r; radial_value = evaluate_polynomials_shared<NLIP>(lm_address + 2*l * ncell_nlip, cf, r); radial_derivative = evaluate_polynomials_shared<NLIP-1>(lm_address + 2*l * ncell_nlip, df, r); // get value for l=l, m=l. 
The address is 2*l items away from l=l, m=-l if (evaluate_gradients_x) result[X_] += radial_value * top_gradient[X_] + radial_derivative * top * x;// * one_per_r; if (evaluate_gradients_y) result[Y_] += radial_value * top_gradient[Y_] + radial_derivative * top * y;// * one_per_r; if (evaluate_gradients_z) result[Z_] += radial_value * top_gradient[Z_] + radial_derivative * top * z;// * one_per_r; // set all values where m=-l prev1 = bottom; if (evaluate_gradients_x) prev1_gradient[X_] = bottom_gradient[X_]; if (evaluate_gradients_y) prev1_gradient[Y_] = bottom_gradient[Y_]; if (evaluate_gradients_z) prev1_gradient[Z_] = bottom_gradient[Z_]; address2 = lm_address + (2*l+2) * ncell_nlip; for (l2 = l+1; l2 <= lmax; l2++) { // evaluate spherical harmonics for l=l2, m=-l double a = ( 2.0*(double)l2-1.0) * rsqrt( 1.0*(double)((l2-l)*(l2+l)) ); current = a * z*prev1 * one_per_r; if (evaluate_gradients_x) current_gradient[X_] = a *(z * prev1 * one_per_r_gradient[X_] + z * one_per_r * prev1_gradient[X_]); if (evaluate_gradients_y) current_gradient[Y_] = a *(z * prev1 * one_per_r_gradient[Y_] + z * one_per_r * prev1_gradient[Y_]); if (evaluate_gradients_z) current_gradient[Z_] = a *(z * prev1 * one_per_r_gradient[Z_] + prev1 + z * one_per_r * prev1_gradient[Z_]); if (l2 > l+1) { double b = sqrt( (double)((l2-l-1)*(l2+l-1)) / (double)((l2-l)*(l2+l)) ); current -= b * prev2; if (evaluate_gradients_x) current_gradient[X_] -= b * prev2_gradient[X_]; if (evaluate_gradients_y) current_gradient[Y_] -= b * prev2_gradient[Y_]; if (evaluate_gradients_z) current_gradient[Z_] -= b * prev2_gradient[Z_]; } radial_value = evaluate_polynomials_shared<NLIP>(address2, cf, r); radial_derivative = evaluate_polynomials_shared<NLIP-1>(address2, df, r); if (evaluate_gradients_x) result[X_] += radial_value * current_gradient[X_] + radial_derivative * current * x;// * one_per_r; if (evaluate_gradients_y) result[Y_] += radial_value * current_gradient[Y_] + radial_derivative * current * y;// * one_per_r; if (evaluate_gradients_z) result[Z_] += radial_value * current_gradient[Z_] + radial_derivative * current * z;// * one_per_r; prev2 = prev1; if (evaluate_gradients_x) prev2_gradient[X_] = prev1_gradient[X_]; if (evaluate_gradients_y) prev2_gradient[Y_] = prev1_gradient[Y_]; if (evaluate_gradients_z) prev2_gradient[Z_] = prev1_gradient[Z_]; prev1 = current; if (evaluate_gradients_x) prev1_gradient[X_] = current_gradient[X_]; if (evaluate_gradients_y) prev1_gradient[Y_] = current_gradient[Y_]; if (evaluate_gradients_z) prev1_gradient[Z_] = current_gradient[Z_]; // add the address2 to get to the next item with m=-1 address2 += ncell_nlip * (2*l2+2); } // set all values where m=l lm_address += 2*l * ncell_nlip; prev1 = top; if (evaluate_gradients_x) prev1_gradient[X_] = top_gradient[X_]; if (evaluate_gradients_y) prev1_gradient[Y_] = top_gradient[Y_]; if (evaluate_gradients_z) prev1_gradient[Z_] = top_gradient[Z_]; address2 = lm_address + (2*l+2) * ncell_nlip; for (l2 = l+1; l2 <= lmax; l2++) { // evaluate spherical harmonics for l=l2, m=l double a = ( 2.0*(double)l2-1.0) * rsqrt( 1.0*(double)((l2+l)*(l2-l)) ); current = a * z*prev1 * one_per_r; if (evaluate_gradients_x) current_gradient[X_] = a *(z * prev1 * one_per_r_gradient[X_] + z * one_per_r * prev1_gradient[X_]); if (evaluate_gradients_y) current_gradient[Y_] = a *(z * prev1 * one_per_r_gradient[Y_] + z * one_per_r * prev1_gradient[Y_]); if (evaluate_gradients_z) current_gradient[Z_] = a *(z * prev1 * one_per_r_gradient[Z_] + prev1 + z * one_per_r * prev1_gradient[Z_]); if 
(l2 > l+1) {
                    double b = sqrt( (double)((l2+l-1)*(l2-l-1)) / (double)((l2+l)*(l2-l)) );
                    current -= b * prev2;
                    if (evaluate_gradients_x) current_gradient[X_] -= b * prev2_gradient[X_];
                    if (evaluate_gradients_y) current_gradient[Y_] -= b * prev2_gradient[Y_];
                    if (evaluate_gradients_z) current_gradient[Z_] -= b * prev2_gradient[Z_];
                }
                radial_value      = evaluate_polynomials_shared<NLIP>(address2, cf, r);
                radial_derivative = evaluate_polynomials_shared<NLIP-1>(address2, df, r);
                if (evaluate_gradients_x) result[X_] += radial_value * current_gradient[X_] + radial_derivative * current * x;// * one_per_r;
                if (evaluate_gradients_y) result[Y_] += radial_value * current_gradient[Y_] + radial_derivative * current * y;// * one_per_r;
                if (evaluate_gradients_z) result[Z_] += radial_value * current_gradient[Z_] + radial_derivative * current * z;// * one_per_r;
                prev2 = prev1;
                if (evaluate_gradients_x) prev2_gradient[X_] = prev1_gradient[X_];
                if (evaluate_gradients_y) prev2_gradient[Y_] = prev1_gradient[Y_];
                if (evaluate_gradients_z) prev2_gradient[Z_] = prev1_gradient[Z_];
                prev1 = current;
                if (evaluate_gradients_x) prev1_gradient[X_] = current_gradient[X_];
                if (evaluate_gradients_y) prev1_gradient[Y_] = current_gradient[Y_];
                if (evaluate_gradients_z) prev1_gradient[Z_] = current_gradient[Z_];

                // advance address2 to the next item with m=l
                address2 += ncell_nlip * (2*l2+2);
            }
            // get next address
            lm_address += ncell_nlip;
        }
    }
    result[X_] *= one_per_r;
    result[Y_] *= one_per_r;
    result[Z_] *= one_per_r;

    // multiply the result with r^k, if k is not 0
    // (the distance is not too close to 0.0 as this is checked
    // earlier in this function; NOTE: should never happen, thus
    // commented away)
    //if (k != 0 && distance > 1e-12) {
        /*for (int i = 0; i < k; i ++) {
            result *= distance;
        }
        for (int i = 0; i < -k; i ++) {
            result *= one_per_r;
        }*/
    //}

    if (distance < 1e-12) {
        result[X_] = 0.0; // * evaluate_polynomials_shared<NLIP-1>(address, df, r);
        result[Y_] = 0.0;
        result[Z_] = 0.0;
    }
}

/*
 * Evaluates value of single bubble at a point.
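 * (Sketch added for clarity: like the gradient variant above, this routine builds
 *  the real solid harmonics on the fly with the upward recursion
 *      S(l,m) = a * (z/r) * S(l-1,m) - b * S(l-2,m),
 *      a = (2l-1) / sqrt((l-|m|)*(l+|m|)),
 *      b = sqrt((l-|m|-1)*(l+|m|-1) / ((l-|m|)*(l+|m|))),
 *  seeded from the diagonal terms built out of x/r and y/r; each S(l,m) is then
 *  multiplied by the radial polynomial of its (l,m) channel and accumulated.)
 *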
This is very similar with the * SolidHarmonics simple evaluation, but the results are multiplied with the * polynomial evaluations */ __device__ inline double Bubbles_evaluate_point( // x-coordinate relative to the center of the bubble const double &x, // y-coordinate relative to the center of the bubble const double &y, // z-coordinate relative to the center of the bubble const double &z, // relative distance from the center of the bubble const double &distance, // maximum quantum number 'l' const int &lmax, // number of cells const int &ncell, // number of lagrange integration polyniomials per // cell, i.e., the number of grid points per cell const int &nlip, // position inside the cell const double &r, // k value for the bubble const int &k, // the first address value in bubble for the selected cell const int &address, // constant pointer to a variable double array const double* __restrict__ cf ) { double result = 0.0; int lm_address = address, address2; // NOTE: Here the nlip is replaced by 8 because this gives advantages in loading the stuff // also *cf this should be done const int ncell_nlip = ncell * 8; int l, l2; double top, bottom, new_bottom, prev1, prev2, current, a, b, a2; const double one_per_r = 1.0 / distance;; l = 0; // set value for l=0, m=0 //printf("x: %f, y: %f, z: %f, nlip: %d, ncell: %d, l: 0, address: %d, cf: %ld, r: %f\n", x, y, z, nlip, ncell, 0, lm_address, cf, r); //printf("shared_memory address: %ld\n"); //printf("shared memory first value: %f", shared_memory[0]); result = evaluate_polynomials_shared<NLIP>(lm_address, cf, r); if (lmax >= 1) { // set value for l=1, m=-1 result += y * evaluate_polynomials_shared<NLIP>(address+ncell_nlip, cf, r) * one_per_r; // set value for l=1, m=0 result += z * evaluate_polynomials_shared<NLIP>(address+2*ncell_nlip, cf, r) * one_per_r; // set value for l=1, m=1 result += x * evaluate_polynomials_shared<NLIP>(address+3*ncell_nlip, cf, r) * one_per_r; // set all values where m=-1 prev2 = 0.0; prev1 = y * one_per_r; // the starting address has 1 item before from the l=0, 3 from l=1, and 1 from l=2 address2 = address + ncell_nlip * 5; l = threadIdx.x % 32; a = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l-1)*(l+1)) ); b = (l > 2) ? sqrt( (double)((l-2)*(l)) / (double)((l-1)*(l+1)) ) : 0.0; for (l = 2; l <= lmax; l++) { current = __shfl(a, l) * z*prev1 * one_per_r - __shfl(b, l) * prev2; result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r) ; prev2 = prev1; prev1 = current; // add the address2 to get to the next item with m=-1 address2 += ncell_nlip * (2*l+2); } // set all values where m=0 prev1 = z * one_per_r; prev2 = 1.0; // the starting address has 1 item before from the l=0, 3 from l=1, and 2 from l=2 address2 = address + ncell_nlip * 6; l = threadIdx.x % 32; a = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l)*(l)) ); b = sqrt( (double)((l-1)*(l-1)) / (double)((l)*(l)) ); for (l = 2; l <= lmax; l++) { current = __shfl(a, l) * z * prev1 * one_per_r - __shfl(b, l) * prev2; result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r); prev2 = prev1; prev1 = current; // add the address2 to get to the next item with m=0 address2 += ncell_nlip * (2*l+2); } // set all values where m=1 prev1 = x * one_per_r; // the starting address has 1 item before from the l=0, 3 from l=1, and 3 from l=2 address2 = address + ncell_nlip * 7; l = threadIdx.x % 32; a = ( 2.0*(double)l-1.0) * rsqrt( 1.0*(double)((l+1)*(l-1)) ); b = (l > 2) ? 
sqrt( (double)((l)*(l-2)) / (double)((l+1)*(l-1)) ) : 0.0; for (l = 2; l <= lmax; l++) { current = __shfl(a, l) * z*prev1 * one_per_r - __shfl(b, l) * prev2; result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r); prev2 = prev1; prev1 = current; // add the address2 to get to the next item with m=1 address2 += ncell_nlip * (2*l+2); } // go through the rest of the stuff bottom = y * one_per_r; // bottom refers to spherical harmonics value with l=l-1 and m=-(l-1) top = x * one_per_r; // top refers to spherical harmonics value with l=l-1 and m=l-1 lm_address += 4 * ncell_nlip; l = threadIdx.x % 32; a = sqrt((2.0*(double)l - 1.0) / (2.0*(double)l)); for (l=2; l <= lmax; l++) { new_bottom = __shfl(a, l) * one_per_r * ( y*top + x*bottom); top = __shfl(a, l) * one_per_r * ( x*top - y*bottom ); // store the new bottom: l=l, m=-l (we need the old bottom in calculation of top previously, so we // have to sacrifice one register temporarily) bottom = new_bottom; result += bottom * evaluate_polynomials_shared<NLIP>(lm_address, cf, r); // get value for l=l, m=l. The address is 2*l items away from l=l, m=-l result += top * evaluate_polynomials_shared<NLIP>(lm_address + 2*l * ncell_nlip, cf, r); // set all values where m=-l prev2 = 0.0; prev1 = bottom; address2 = lm_address + (2*l+2) * ncell_nlip; // set all values where m=l lm_address += 2*l * ncell_nlip; l2 = threadIdx.x % 32; a2 = ( 2.0*(double)l2-1.0) * rsqrt( 1.0*(double)((l2-l)*(l2+l)) ); b = (l2 > l+1) ? sqrt( (double)((l2-l-1)*(l2+l-1)) / (double)((l2-l)*(l2+l)) ) : 0.0; for (l2 = l+1; l2 <= lmax; l2++) { // evaluate spherical harmonics for l=l2, m=-l current = __shfl(a2, l2) * z*prev1 * one_per_r - __shfl(b, l2) * prev2; result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r); prev2 = prev1; prev1 = current; // add the address2 to get to the next item with m=-l address2 += ncell_nlip * (2*l2+2); } prev2 = 0.0; prev1 = top; address2 = lm_address + (2*l+2) * ncell_nlip; l2 = threadIdx.x % 32; a2 = ( 2.0*(double)l2-1.0) * rsqrt( 1.0*(double)((l2+l)*(l2-l)) ) ; b = (l2 > l+1) ? 
sqrt( (double)((l2+l-1)*(l2-l-1)) / (double)((l2+l)*(l2-l)) ) : 0.0;
            for (l2 = l+1; l2 <= lmax; l2++) {
                // evaluate spherical harmonics for l=l2, m=l
                current = __shfl(a2, l2) * z*prev1 * one_per_r
                          - __shfl(b, l2) * prev2; // the latter term goes to zero for l2 == l+1, where b is 0.0
                result += current * evaluate_polynomials_shared<NLIP>(address2, cf, r);
                prev2 = prev1;
                prev1 = current;

                // advance address2 to the next item with m=l
                address2 += ncell_nlip * (2*l2+2);
            }

            // get next address
            lm_address += ncell_nlip;
        }
    }

    // multiply the result with r^k, if k is not 0
    // (the distance is not too close to 0.0 as this is checked
    // earlier in this function; NOTE: should never happen, thus
    // commented away)
    //if (k != 0 && distance > 1e-12) {
        if (distance < 1e-14) {
            result = 1.0 * evaluate_polynomials_shared<NLIP>(address, cf, r);
        }
        for (int i = 0; i < k; i ++) {
            result *= distance;
        }
        for (int i = 0; i < -k; i ++) {
            result *= one_per_r;
        }
    //}
    return result;
}

__device__ int getGlobalIdx_1D_1D() {
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    return id;
}

__device__ int getGlobalIdx_3D_3D() {
    int blockId  = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
    int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z)
                   + (threadIdx.z * (blockDim.x * blockDim.y))
                   + (threadIdx.y * blockDim.x) + threadIdx.x;
    return threadId;
}

/*
 * Order a pair of values so that *first <= *second. A value of -1 is treated as
 * 'unset' and is overwritten with the other value of the pair.
 */
__device__ inline void minmax(int *first, int *second) {
    int temp;
    if (*first == -1) {
        *first = *second;
    }
    if (*second == -1) {
        *second = *first;
    }
    if (*second < *first) {
        temp = *second;
        *second = *first;
        *first = temp;
    }
}

/*
 * Find the minimum and maximum in an array that is as large as a block, and store them as the first
 * and last value of the input array. NOTE: The arrayLength must be a power of 2.
 */
__device__ void calculateMinimumMaximum(int *array, int blockThreadId, int arrayLength) {
    int division = arrayLength / 2;

    // order so that the larger values of pairs are at the second part of the array
    // and the smaller are at the end of the array
    if (blockThreadId < division) {
        // rearrange the values so that the larger is in the &array[blockThreadId + division]
        // and smaller is in &array[blockThreadId]
        minmax(&array[blockThreadId], &array[blockThreadId + division]);
    }
    __syncthreads();

    division = arrayLength / 4;
    // if the block
    while (division >= 1) {
        if (blockThreadId < division) {
            minmax(&array[blockThreadId], &array[blockThreadId + division]);
        }
        else if (blockThreadId > arrayLength - division) {
            minmax(&array[blockThreadId - division], &array[blockThreadId]);
        }
        division /= 2;
        __syncthreads();
    }
}

/*
 * Evaluate Bubbles on a grid
 *
 */
template <bool lmin_zero, bool evaluate_value, bool evaluate_gradients_x,
          bool evaluate_gradients_y, bool evaluate_gradients_z >
__device__ inline void Bubbles_evaluate_grid(const Bubble* __restrict__ bubble,
                          double* __restrict__ cube,
                          double* __restrict__ gradient_cube_x,
                          double* __restrict__ gradient_cube_y,
                          double* __restrict__ gradient_cube_z,
                          const double* __restrict__ grid_points_x,
                          const double* __restrict__ grid_points_y,
                          const double* __restrict__ grid_points_z,
                          const int shape_x, const int shape_y, const int shape_z,
                          const double zero_point_x, const double zero_point_y, const double zero_point_z,
                          const int k, const int slice_offset, const size_t pitch,
                          const int memory_y_shape, const int slice_count,
                          const int lmin, const double multiplier) {
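    // (Added illustrative sketch, not original code: each launch of this function handles a
    //  slab of `slice_count` z-planes starting at z = `slice_offset`, so a host-side driver
    //  is presumably expected to loop over slabs roughly like
    //
    //      for (int offset = 0; offset < shape_z; offset += slices_per_launch)   // hypothetical names
    //          Bubbles_evaluate_grid_pitched<<<grid, block, 0, stream>>>(
    //              ..., /* slice_offset = */ offset, ...,
    //              /* slice_count = */ min(slices_per_launch, shape_z - offset), ...);
    //
    //  the real launcher lives in the host-side code and is not shown here.)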
// This means that the x index will be the fastest to change. int x, y, z; getXYZ(&x, &y, &z); // get the offset from the input cube pointer const int id = getCubeOffset3D(x, y, z, pitch, memory_y_shape); double value, gradient[3]; double in_cell_position = 0.0; const int ncell = bubble->grid->ncell, nlip = bubble->grid->nlip; int icell; double relative_position_x, relative_position_y, relative_position_z, distance; //printf("X: %f, cell_spacing: %f, ncell: %d", distance, bubble->cell_spacing, ncell); // Check that the point is within the block if (x < shape_x && y < shape_y && z+slice_offset < shape_z && z < slice_count) { // calculate relative position to the zero-point and distance to it calculate_distance(relative_position_x, relative_position_y, relative_position_z, distance, zero_point_x, zero_point_y, zero_point_z, grid_points_x[x], ldg<double>(&grid_points_y[y]), ldg<double>(&grid_points_z[z+slice_offset])); // get the order number of cell the point resides in //icell = calculate_icell(distance, bubble->d, bubble->ncell); calculate_icell_radial(distance, bubble->charge, bubble->grid->r_max, ncell, nlip, &icell, &in_cell_position); //printf("x: %d, y: %d, z:%d, id:%d, vector_id: %d, vector_offset:%d, blockId: %d, blocks_per_vector: %d, %f, %f, %f, %d\n", x, y, z, id, vector_id, vector_offset, blockIdx.x, blocks_per_vector, grid_points_x[x], ldg(&grid_points_y[y]), ldg(&grid_points_z[z]), icell); } else { icell = 1; distance = 0.1; } if (lmin_zero) { // calculate the bubble value for the point with lmin = 0 if (evaluate_value) { value = Bubbles_evaluate_point( relative_position_x, relative_position_y, relative_position_z, distance, bubble->lmax, ncell, nlip, in_cell_position, k, icell * 8, bubble->cf); } // evaluate gradients if we are evaluating any if (evaluate_gradients_x || evaluate_gradients_y || evaluate_gradients_z) { Bubbles_evaluate_gradient_point <evaluate_gradients_x, evaluate_gradients_y, evaluate_gradients_z> (relative_position_x, relative_position_y, relative_position_z, distance, bubble->lmax, ncell, nlip, in_cell_position, k, icell * 8, bubble->cf, bubble->df, false, gradient ); } } else { if (evaluate_value) { // calculate the bubble value for the point with lmin > 0 value = Bubbles_evaluate_point_lmin( relative_position_x, relative_position_y, relative_position_z, distance, lmin, bubble->lmax, ncell, nlip, in_cell_position, k, icell * 8, bubble->cf ); } } if (x < shape_x && y < shape_y && z+slice_offset < shape_z && z < slice_count && icell < ncell) { /*if (x == 0 && y == 0) { printf("%d: [x, y, z], id : [%d, %d, %d], %d, icell: %d, in_cell_position:%f, first_bubble-value:%e, distance:%f, coord: [%f, %f, %f] old-value: %e, value: %e, multiplier: %f\n", slice_offset, x, y, z+slice_offset, id, icell, in_cell_position, bubble->cf[icell*8], distance, relative_position_x, relative_position_y, relative_position_z, cube[id], value, multiplier); }*/ if (evaluate_value) cube[id] += multiplier * value; if (evaluate_gradients_x) gradient_cube_x[id] += multiplier * gradient[X_]; if (evaluate_gradients_y) gradient_cube_y[id] += multiplier * gradient[Y_]; if (evaluate_gradients_z) gradient_cube_z[id] += multiplier * gradient[Z_]; } return; } /* * Evaluate Bubbles on a grid */ __global__ void #if (__CUDA_ARCH__ <= 350) __launch_bounds__(128, 6) #else __launch_bounds__(256) #endif Bubbles_evaluate_grid_lmin(const Bubble* __restrict__ bubble, double* __restrict__ cube, const double* __restrict__ grid_points_x, const double* __restrict__ grid_points_y, const double* __restrict__ 
grid_points_z, const int shape_x, const int shape_y, const int shape_z, const double zero_point_x, const double zero_point_y, const double zero_point_z, const int k, const int slice_offset, const size_t pitch, const int memory_y_shape, const int slice_count, const int lmin, const double multiplier) { Bubbles_evaluate_grid <false, true, false, false, false> ( bubble, cube, /*gradient_cube_x = */NULL, /*gradient_cube_y = */NULL, /*gradient_cube_z = */NULL, grid_points_x, grid_points_y, grid_points_z, shape_x, shape_y, shape_z, zero_point_x, zero_point_y, zero_point_z, k, slice_offset, pitch, memory_y_shape, slice_count, lmin, multiplier); } __global__ void #if (__CUDA_ARCH__ > 350) __launch_bounds__(256) #else __launch_bounds__(128, 8) #endif Bubbles_evaluate_grid_pitched(const Bubble* __restrict__ bubble, double* __restrict__ cube, const double* __restrict__ grid_points_x, const double* __restrict__ grid_points_y, const double* __restrict__ grid_points_z, const int shape_x, const int shape_y, const int shape_z, const double zero_point_x, const double zero_point_y, const double zero_point_z, const int k, const int slice_offset, const size_t pitch, const int memory_y_shape, const int slice_count, const double multiplier) { Bubbles_evaluate_grid <true, true, false, false, false> ( bubble, cube, /*gradient_cube_x = */NULL, /*gradient_cube_y = */NULL, /*gradient_cube_z = */NULL, grid_points_x, grid_points_y, grid_points_z, shape_x, shape_y, shape_z, zero_point_x, zero_point_y, zero_point_z, k, slice_offset, pitch, memory_y_shape, slice_count, /*lmin = */0, multiplier); } template <bool lmin_zero, bool evaluate_value, bool evaluate_gradients_x, bool evaluate_gradients_y, bool evaluate_gradients_z > __global__ void #if (__CUDA_ARCH__ > 350) __launch_bounds__(256) #else __launch_bounds__(128, 5) #endif Bubbles_evaluate_grid_gradients(const Bubble* __restrict__ bubble, double* __restrict__ cube, double* __restrict__ gradient_cube_x, double* __restrict__ gradient_cube_y, double* __restrict__ gradient_cube_z, const double* __restrict__ grid_points_x, const double* __restrict__ grid_points_y, const double* __restrict__ grid_points_z, const int shape_x, const int shape_y, const int shape_z, const double zero_point_x, const double zero_point_y, const double zero_point_z, const int k, const int slice_offset, const size_t pitch, const int memory_y_shape, const int slice_count, const double multiplier) { Bubbles_evaluate_grid <lmin_zero, evaluate_value, evaluate_gradients_x, evaluate_gradients_y, evaluate_gradients_z> ( bubble, cube, gradient_cube_x, gradient_cube_y, gradient_cube_z, grid_points_x, grid_points_y, grid_points_z, shape_x, shape_y, shape_z, zero_point_x, zero_point_y, zero_point_z, k, slice_offset, pitch, memory_y_shape, slice_count, /*lmin = */0, multiplier); } /* * Evaluate Bubbles at points */ template <bool lmin_zero, bool evaluate_value, bool evaluate_gradients_x, bool evaluate_gradients_y, bool evaluate_gradients_z> __device__ inline void Bubbles_evaluate_points(const Bubble* __restrict__ bubble, double* __restrict__ result_array, double* __restrict__ device_gradients_x, double* __restrict__ device_gradients_y, double* __restrict__ device_gradients_z, // a 3d array, where the x coordinates are first, // then y coordinates, and finally the z coordinates. 
This ordering // is selected to get coalesced memory reads const double* __restrict__ points, // total number of points evaluated by this device const int device_number_of_points, // the zero point x-coordinate of bubbles const double zero_point_x, // the zero point y-coordinate of bubbles const double zero_point_y, // the zero point z-coordinate of bubbles const double zero_point_z, // the k value of the bubbles const int k, // the lmin value evaluated const int lmin, // number of points in this kernel call const int point_count, // device_point_offset const int device_point_offset, const double multiplier ) { // Get the point order number within this kernel call int id = blockIdx.x * blockDim.x + threadIdx.x; double value, gradient[3]; double in_cell_position = 0.0; const int ncell = bubble->grid->ncell, nlip = bubble->grid->nlip; int icell = -1; double relative_position_x, relative_position_y, relative_position_z, distance, r_max = bubble->grid->r_max; //printf("X: %f, cell_spacing: %f, ncell: %d", distance, bubble->cell_spacing, ncell); // Check that the point is within the block if (id + device_point_offset < device_number_of_points && id < point_count ) { // calculate relative position to the zero-point and distance to it calculate_distance(relative_position_x, relative_position_y, relative_position_z, distance, zero_point_x, zero_point_y, zero_point_z, points[id + device_point_offset], points[id + device_point_offset + device_number_of_points], points[id + device_point_offset + device_number_of_points*2]); // get the order number of cell the point resides in calculate_icell_radial(distance, bubble->charge, bubble->grid->r_max, ncell, nlip, &icell, &in_cell_position); } else { icell = 1; distance = 0.1; } // calculate the bubble value for the point if (!lmin_zero) { if (evaluate_value) { value = Bubbles_evaluate_point_lmin( relative_position_x, relative_position_y, relative_position_z, distance, lmin, bubble->lmax, ncell, nlip, in_cell_position, k, icell * 8, bubble->cf ); } } else { if (evaluate_gradients_x || evaluate_gradients_y || evaluate_gradients_z) { Bubbles_evaluate_gradient_point <evaluate_gradients_x, evaluate_gradients_y, evaluate_gradients_z> (relative_position_x, relative_position_y, relative_position_z, distance, bubble->lmax, ncell, nlip, in_cell_position, k, icell * 8, bubble->cf, bubble->df, false, //(evaluate_gradients_x != evaluate_gradients_y || evaluate_gradients_x != evaluate_gradients_z) && icell == 0, //evaluate_gradients_x != evaluate_gradients_y || evaluate_gradients_x != evaluate_gradients_z, gradient ); } if (evaluate_value) { value = Bubbles_evaluate_point( relative_position_x, relative_position_y, relative_position_z, distance, bubble->lmax, ncell, nlip, in_cell_position, k, icell * 8, bubble->cf ); } } // store the result to the result array if (id + device_point_offset < device_number_of_points && id < point_count && distance < r_max && icell < ncell ) { if (evaluate_value) result_array[id+device_point_offset] += multiplier * value; //if ((evaluate_gradients_x) && (id + device_point_offset <= 7)) printf("¤%¤%¤%%#¤%#¤ X: %d: pos: %f, %f, %f, val: %e, remainder: %e\n", id + device_point_offset, relative_position_x, relative_position_y, relative_position_z, gradient[X_], device_gradients_x[id+device_point_offset]); //if ((evaluate_gradients_y) && (id + device_point_offset <= 7)) printf("¤%¤%¤%%#¤%#¤ Y: %d: pos: %f, %f, %f, val: %e, remainder: %e\n", id + device_point_offset, relative_position_x, relative_position_y, relative_position_z, gradient[Y_], 
// device_gradients_y[id+device_point_offset]);
        //if ((evaluate_gradients_z) && (id + device_point_offset <= 7)) printf("¤%¤%¤%%#¤%#¤ Z: %d: pos: %f, %f, %f, val: %e, remainder: %e\n", id + device_point_offset, relative_position_x, relative_position_y, relative_position_z, gradient[Z_], device_gradients_z[id+device_point_offset]);

        // also add the gradient values, if we are evaluating them
        if (evaluate_gradients_x) device_gradients_x[id+device_point_offset] += multiplier * gradient[X_];
        if (evaluate_gradients_y) device_gradients_y[id+device_point_offset] += multiplier * gradient[Y_];
        if (evaluate_gradients_z) device_gradients_z[id+device_point_offset] += multiplier * gradient[Z_];
    }
    return;
}

__device__ inline double get_damping_factor(double r) {
    double result;
    // erfc: the complementary error function
    if (r > 1e-12) {
        result = 0.5*erfc(r-2.0/r);
    }
    else {
        result = 1.0;
    }
    return result;
}

template <bool lmin_zero, bool evaluate_value, bool evaluate_gradients_x,
          bool evaluate_gradients_y, bool evaluate_gradients_z >
#if (__CUDA_ARCH__ <= 350)
__launch_bounds__(128, 4)
#else
__launch_bounds__(256)
#endif
__global__ void Bubbles_evaluate_gradient_points(
        const Bubble* __restrict__ bubble,
        double* __restrict__ result_array,
        double* __restrict__ device_gradients_x,
        double* __restrict__ device_gradients_y,
        double* __restrict__ device_gradients_z,
        // a 3d array, where the x coordinates are first,
        // then y coordinates, and finally the z coordinates. This ordering
        // is selected to get coalesced memory reads
        const double* __restrict__ points,
        // total number of points evaluated by this device
        const int device_number_of_points,
        // the zero point x-coordinate of bubbles
        const double zero_point_x,
        // the zero point y-coordinate of bubbles
        const double zero_point_y,
        // the zero point z-coordinate of bubbles
        const double zero_point_z,
        // the k value of the bubbles
        const int k,
        // number of points in this kernel call
        const int point_count,
        // device_point_offset
        const int device_point_offset,
        const double multiplier
    ) {
    Bubbles_evaluate_points<lmin_zero, evaluate_value, evaluate_gradients_x,
                            evaluate_gradients_y, evaluate_gradients_z>(
        bubble, result_array, device_gradients_x, device_gradients_y, device_gradients_z,
        points, device_number_of_points, zero_point_x, zero_point_y, zero_point_z,
        k, 0, point_count, device_point_offset, multiplier );
}

#if (__CUDA_ARCH__ <= 350)
__launch_bounds__(128, 7)
#else
__launch_bounds__(256)
#endif
__global__ void Bubbles_evaluate_points_simple(
        const Bubble* __restrict__ bubble,
        double* __restrict__ result_array,
        // a 3d array, where the x coordinates are first,
        // then y coordinates, and finally the z coordinates.
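        // (added note) i.e. with N = device_number_of_points, point i is read as
        //     x = points[i], y = points[i + N], z = points[i + 2*N],
        // which is the layout Bubbles_evaluate_points above assumes when calling calculate_distance.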
This ordering // is selected to get coalesced memory reads const double* __restrict__ points, // total number of points evaluated by this device const int device_number_of_points, // the zero point x-coordinate of bubbles const double zero_point_x, // the zero point y-coordinate of bubbles const double zero_point_y, // the zero point z-coordinate of bubbles const double zero_point_z, // the k value of the bubbles const int k, // number of points in this kernel call const int point_count, // device_point_offset const int device_point_offset, const double multiplier ) { Bubbles_evaluate_points<true, true, false, false, false>( bubble, result_array, /*device_gradients_x*/NULL, /*device_gradients_y*/NULL, /*device_gradients_z*/NULL, points, device_number_of_points, zero_point_x, zero_point_y, zero_point_z, k, 0, point_count, device_point_offset, multiplier ); } /*__global__ void Bubble_make_taylor_kernel(Bubble_t *result_bubble, int maximum_taylor_order, double *contaminants, double *c2s_coefficients, int *c2s_lm_ids, int *c2s_term_starts, int offset) { const int index=threadIdx.x + blockIdx.x * blockDim.x + offset; extern __shared__ double shared_memory[]; double *one_per_kappa_factorial = &shared_memory[0]; double *shared_contaminants = &shared_memory[maximum_taylor_order]; int contaminants_size = (maximum_taylor_order+1)*(maximum_taylor_order+2)*(maximum_taylor_order+3)/6; // calculate the 1/kappa! terms to the shared memory if (threadIdx.x < maximum_taylor_order) { int kappa = 1; for (int i = 1; i <= threadIdx.x; i++) { kappa *= i+1; } one_per_kappa_factorial[threadIdx.x] = 1.0 / ((double) kappa); } // load the contaminats to the shared memory if (threadIdx.x < contaminants_size) { int id = threadIdx.x; while (id < contaminants_size) { shared_contaminants[id] = contaminants[id]; id += blockDim.x; } } __syncthreads(); // do the actual calculation double r = result_bubble->gridpoints[index]; double prefactor; double damping_factor = get_damping_factor(r); int k = result_bubble->k, ncell= result_bubble->ncell, nlip = result_bubble->nlip; int result_index = 0, counter = 0, term_counter = 0; for (int x = 0; x <= maximum_taylor_order; x++) { for (int y = 0; y <= maximum_taylor_order - x; y++) { for (int z = 0; z <= maximum_taylor_order - x - y; z++) { prefactor = one_per_kappa_factorial[x+y+z]// 1/[x+y+z] * pow(r, (double)(x+y+z - k)) // r^x+y+z-k * shared_contaminants[counter] // c * damping_factor; // go through all l,m terms which get contribution from x,y,z -term while (term_counter < c2s_term_starts[counter+1]) { // get the index in the result array, note: the -1 is because the indices are in // fortran format, starting from 1 result_index = (c2s_lm_ids[term_counter]-1) * (ncell * (nlip-1) +1) + index; // add the prefactor times the coefficient from cartesion to spherical conversion result_bubble->f[result_index] += c2s_coefficients[term_counter] * prefactor; // add the counter value used to follow the c2s conversion term_counter++; } // add the conter value used to follow cartesian terms counter ++; } } } } */ /* * Kernel that sums the f-values of two bubble objects together. The summation happens * pointwise so that each thread calculates all l,m values for each point. The result * is stored to the bubble_f. 
 */
__global__ void Bubble_sum_kernel(double* __restrict__ bubble_f,
                                  const double* __restrict__ bubble1_f,
                                  const int lmax,
                                  const int max_id,
                                  const size_t device_f_pitch) {
    const int id = threadIdx.x + blockIdx.x * blockDim.x;
    if (id < max_id) {
        // go through all l, m values of input bubble 'bubble'
        for (int ilm = 0; ilm < (lmax+1)*(lmax+1); ilm++) {
            bubble_f[ilm * device_f_pitch / sizeof(double) + id] +=
                bubble1_f[ilm * device_f_pitch / sizeof(double) + id];
        }
    }
}

/*
 * Decreases the k-value of a bubble by k_decrease. The operation happens
 * pointwise so that each thread calculates all l,m values for each point. The result
 * is stored to the bubble_f.
 *
 * k_decrease is the amount by which the k value is decreased
 */
__global__ void Bubble_decrease_k_kernel(double* __restrict__ bubble_f,
                                         const double* __restrict__ r,
                                         const int k_decrease,
                                         const int lmax,
                                         const int max_id,
                                         const size_t device_f_pitch) {
    const int id = threadIdx.x + blockIdx.x * blockDim.x;
    if (id < max_id) {
        const double rpow = pow(r[id], (double) k_decrease);
        // go through all l, m values of input bubble 'bubble'
        for (int ilm = 0; ilm < (lmax+1)*(lmax+1); ilm++) {
            bubble_f[ilm * device_f_pitch / sizeof(double) + id] *= rpow;
        }
    }
}

/*
 * Multiply cubes 1 and 2 and store the result to cube1
 */
__global__ void multiply_cubes(double *cube1, double *cube2, const int cube_size, const int offset) {
    // get the id of the point (only the first (x) dimension of the launch is used)
    const int index = threadIdx.x + blockIdx.x * blockDim.x + offset;
    if (index < cube_size) {
        cube1[index] *= cube2[index];
    }
}

/**************************************************************
 *                   Bubble-implementation                    *
 **************************************************************/

/*
 * Evaluate the cf at ALL devices. This is a crucial preparation function for injection.
 * For correct results, the Bubble must have all f-values present when this is called.
 *
 * NOTE: the streaming in this function is split over the number of l,m-pairs, like in uploadAll.
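 *
 * (Illustration, added: the l,m-pairs are divided over the streams of a device as
 *      ilm_per_stream = ilmmax / nStreams + ((ilmmax % nStreams) > stream),
 *  where nStreams is shorthand for getStreamsPerDevice(). For example, lmax = 2
 *  gives ilmmax = 9, so with 4 streams per device the streams handle 3, 2, 2 and 2
 *  pairs, i.e. 3*ncell, 2*ncell, 2*ncell and 2*ncell cells of cf each.)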
*/ void Bubble::calculateCf() { // calculate the cf int ilmmax = (this->lmax+1)*(this->lmax+1); int block_size = 64; int grid_size; int offset; check_errors(__FILE__, __LINE__); for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { this->streamContainer->setDevice(device); offset = 0; for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) { int ilm_per_stream = ilmmax / this->streamContainer->getStreamsPerDevice() + (( ilmmax % this->streamContainer->getStreamsPerDevice()) > stream); int number_of_points = ilm_per_stream * this->grid->ncell; // verify that there is something to calculate the cf for (for instance if ilmmax is 1, some streams // can be left without any points, resulting to a cuda error) if (number_of_points > 0) { grid_size = (number_of_points + block_size - 1) / block_size; calc_cf <<< grid_size, block_size, 0, *this->streamContainer->getStream(device, stream) >>> (this->device_copies[device], offset, number_of_points, this->device_f_pitch[device]); offset += number_of_points; } check_errors(__FILE__, __LINE__); } } } void Bubble::initDeviceMemory(int ibub, Grid1D *grid, double center[3], int lmax, int k, double charge, StreamContainer *streamContainer) { //cudaHostRegister(this, sizeof(Bubble), cudaHostRegisterPortable); //check_errors(__FILE__, __LINE__); this->ibub = ibub; this->lmax = lmax; this->device_memory_lmax = lmax; this->k = k; this->charge = charge; this->streamContainer = streamContainer; this->crd[X_] = center[X_]; this->crd[Y_] = center[Y_]; this->crd[Z_] = center[Z_]; this->integrator = NULL; this->uploaded_events = new cudaEvent_t*[this->streamContainer->getNumberOfDevices()]; this->device_copies = new Bubble * [this->streamContainer->getNumberOfDevices()]; this->device_f = new double *[this->streamContainer->getNumberOfDevices()]; this->device_f_pitch = new size_t [this->streamContainer->getNumberOfDevices()]; this->device_cf = new double * [this->streamContainer->getNumberOfDevices()]; this->device_df = new double * [this->streamContainer->getNumberOfDevices()]; for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { this->streamContainer->setDevice(device); size_t sz=sizeof(double)*(grid->ncell*(grid->nlip-1)+1); cudaMallocPitch((void**)&device_f[device], &device_f_pitch[device], sz, (lmax+1)*(lmax+1)); check_errors(__FILE__, __LINE__); cudaMemset(device_f[device], 0, device_f_pitch[device]*(lmax+1)*(lmax+1)); check_errors(__FILE__, __LINE__); sz=sizeof(double)*grid->ncell*8*(lmax+1)*(lmax+1); cudaMalloc(&this->device_cf[device], sz); cudaMalloc(&this->device_df[device], sz); check_errors(__FILE__, __LINE__); // copy the bubble to the device, for which set the device pointers // to be the main-pointers this->f = this->device_f[device]; this->cf = this->device_cf[device]; this->df = this->device_df[device]; this->grid = grid->device_copies[device]; // allocate & copy the bubble to device cudaMalloc(&this->device_copies[device], sizeof(Bubble)); cudaMemcpy(this->device_copies[device], this, sizeof(Bubble), cudaMemcpyHostToDevice); check_errors(__FILE__, __LINE__); } this->grid = grid; } Bubble::Bubble(int ibub, Grid1D *grid, double center[3], int lmax, int k, double *bf, double charge, StreamContainer *streamContainer) { this->initDeviceMemory(ibub, grid, center, lmax, k, charge, streamContainer); // set the host variables and register them for faster data transfer this->f = bf; /*cudaHostRegister(this->f, 
sizeof(double)*(grid->ncell*(grid->nlip-1)+1)*(lmax+1)*(lmax+1), cudaHostRegisterPortable); check_errors(__FILE__, __LINE__);*/ } Bubble::Bubble(int ibub, Grid1D *grid, double center[3], int lmax, int k, double charge, StreamContainer *streamContainer) { this->initDeviceMemory(ibub, grid, center, lmax, k, charge, streamContainer); } Bubble::Bubble(Bubble *old_bubble, int lmax, int k) { this->initDeviceMemory(old_bubble->ibub, old_bubble->grid, old_bubble->crd, lmax, old_bubble->k, old_bubble->charge, old_bubble->streamContainer); } /* * Uploads all bubble data to all devices (gpus) on all nodes. This kind of approach * is needed when injecting bubbles to cuda. With bubble-multiplication - the upload * -method is preferred. */ void Bubble::uploadAll(double *f, int lmax) { // set the host variables and register them for faster data transfer this->f = f; this->lmax = lmax; size_t host_pitch = (this->grid->ncell * (this->grid->nlip - 1) + 1) * sizeof(double); int ilmmax = (lmax+1)*(lmax+1); check_errors(__FILE__, __LINE__); Grid1D* host_grid = this->grid; // register the host array array //cudaHostRegister(this->f, host_pitch * ilmmax, cudaHostRegisterPortable); check_errors(__FILE__, __LINE__); double *device_f, *host_f; for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { this->streamContainer->setDevice(device); // get the preallocated device pointers device_f = this->device_f[device]; // NOTE: for all devices the first pointer points to the first value of each array host_f = this->f; for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream++) { int ilm_per_stream = ilmmax / this->streamContainer->getStreamsPerDevice() + (( ilmmax % this->streamContainer->getStreamsPerDevice()) > stream); // upload the stream data to device cudaMemcpy2DAsync((void *) device_f, this->device_f_pitch[device], (void *) host_f, host_pitch, host_pitch, ilm_per_stream, cudaMemcpyHostToDevice, *this->streamContainer->getStream(device, stream) ); check_errors(__FILE__, __LINE__); // add to the pointers device_f += ilm_per_stream * this->device_f_pitch[device] / sizeof(double); host_f += ilm_per_stream * host_pitch / sizeof(double); } // copy the bubble to the device, for which set the device pointers // to be the main-pointers this->f = this->device_f[device]; this->cf = this->device_cf[device]; this->df = this->device_df[device]; this->grid = host_grid->device_copies[device]; this->lmax = lmax; // copy the bubble to device cudaMemcpyAsync(this->device_copies[device], this, sizeof(Bubble), cudaMemcpyHostToDevice, *this->streamContainer->getStream(device, 0)); check_errors(__FILE__, __LINE__); this->f = f; this->grid = host_grid; } check_errors(__FILE__, __LINE__); this->streamContainer->synchronizeAllDevices(); // calculate the cf this->calculateCf(); // and synchronize the host with the device for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { this->uploaded_events[device] = this->streamContainer->recordDeviceEvent(device); } // we are not in any case downloading the data back, so we can unregister the array //cudaHostUnregister(this->f); check_errors(__FILE__, __LINE__); } /* * Uploads part of a bubble to the device * * NOTE: in order to use this, the bubble uploaded (i.e., the f-array given as input) * must have the same lmax value as the Bubble-object we are uploading to. 
* NOTE: registers the input array but does not unregister it, thus after calling this * the user must unregister the f elsewhere, for instance by calling the unregister function. * NOTE: this function is designed to function together with the bubble multiplication */ void Bubble::upload(double *f, int lmax, bool register_host) { // set the host variables and register them for faster data transfer this->f = f; check_errors(__FILE__, __LINE__); this->lmax = lmax; int ilmmax = (lmax + 1) * (lmax + 1); // calculate the total number of points in the bubbles each l,m -pair, int total_point_count = this->grid->ncell * (this->grid->nlip - 1) +1; Grid1D* host_grid = this->grid; // register the host array, if not explicitly telling not to /*if (register_host) { cudaHostRegister(this->f, sizeof(double)*ilmmax*total_point_count, cudaHostRegisterPortable); check_errors(__FILE__, __LINE__); }*/ // store the processor variables to be used at downloading time this->processor_order_number = processor_order_number; this->number_of_processors = number_of_processors; size_t host_pitch = total_point_count * sizeof(double); // determine how many of the points belong to the current mpi-node int processor_point_count = total_point_count / this->number_of_processors + ((total_point_count % number_of_processors) > processor_order_number); // get the offset to the f-array caused by other processors int remainder = total_point_count % this->number_of_processors; int offset = processor_order_number * total_point_count / number_of_processors + ((remainder < processor_order_number) ? remainder : processor_order_number); double *device_f; double *host_f = &this->f[offset]; for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { this->streamContainer->setDevice(device); // get the preallocated device pointers, // NOTE: The memory of bubble is allocated for its entire // length, thus we have to go to the part we want to upload device_f = this->device_f[device]; device_f = &device_f[offset]; // detemine how many of the mpi-nodes points belong to this device (gpu) int device_point_count = processor_point_count / this->streamContainer->getNumberOfDevices() + ((processor_point_count % this->streamContainer->getNumberOfDevices()) > device); for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) { // detemine the number of the points handled by this stream int stream_point_count = device_point_count / this->streamContainer->getStreamsPerDevice() + ((device_point_count % this->streamContainer->getStreamsPerDevice()) > stream); // upload the data to device, copy all ilmmax-rows for stream_point_count columns cudaMemcpy2DAsync((void *) device_f, this->device_f_pitch[device], (void *) host_f, host_pitch, stream_point_count * sizeof(double), ilmmax, cudaMemcpyHostToDevice, *this->streamContainer->getStream(device, stream) ); check_errors(__FILE__, __LINE__); offset += stream_point_count; device_f += stream_point_count; host_f += stream_point_count; } // copy the bubble to the device, for which set the device pointers // to be the main-pointers this->f = this->device_f[device]; this->cf = this->device_cf[device]; this->df = this->device_df[device]; this->grid = host_grid->device_copies[device]; this->lmax = lmax; // copy the bubble to device cudaMemcpyAsync(this->device_copies[device], this, sizeof(Bubble), cudaMemcpyHostToDevice, *this->streamContainer->getStream(device, 0)); check_errors(__FILE__, __LINE__); this->f = f; this->grid = host_grid; } // and synchronize the 
host with the device for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { this->uploaded_events[device] = this->streamContainer->recordDeviceEvent(device); } } void Bubble::waitBubbleUploaded(int device, cudaStream_t *stream) { cudaStreamWaitEvent(*stream, *this->uploaded_events[device], 0); } void Bubble::waitBubbleUploaded(int device) { cudaStreamWaitEvent(0, *this->uploaded_events[device], 0); } /* * Sets bubble values to zero * * NOTE: in order to use this, the bubble uploaded (i.e., the f-array given as input) * must have the same lmax value as the Bubble-object we are uploading to. * NOTE: registers the input array but does not unregister it, thus after calling this * the user must unregister the f elsewhere, for instance by calling the unregister function. * NOTE: this function is designed to function together with the bubble multiplication */ void Bubble::setToZero() { // set the host variables and register them for faster data transfer this->f = f; check_errors(__FILE__, __LINE__); int ilmmax = (this->device_memory_lmax + 1) * (this->device_memory_lmax + 1); // calculate the total number of points in the bubbles each l,m -pair, int total_point_count = this->grid->ncell * (this->grid->nlip - 1) +1; // determine how many of the points belong to the current mpi-node int processor_point_count = total_point_count / this->number_of_processors + ((total_point_count % this->number_of_processors) > this->processor_order_number); // get the offset to the f-array caused by other processors int remainder = total_point_count % this->number_of_processors; int offset = this->processor_order_number * total_point_count / this->number_of_processors + ((remainder < this->processor_order_number) ? remainder : this->processor_order_number); double *device_f; for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { this->streamContainer->setDevice(device); // get the preallocated device pointers, // NOTE: The memory of bubble is allocated for its entire // length, thus we have to go to the part we want to upload device_f = this->device_f[device]; device_f = &device_f[offset]; // detemine how many of the mpi-nodes points belong to this device (gpu) int device_point_count = processor_point_count / this->streamContainer->getNumberOfDevices() + ((processor_point_count % this->streamContainer->getNumberOfDevices()) > device); for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) { // detemine the number of the points handled by this stream int stream_point_count = device_point_count / this->streamContainer->getStreamsPerDevice() + ((device_point_count % this->streamContainer->getStreamsPerDevice()) > stream); // upload the data to device, copy all ilmmax-rows for stream_point_count columns cudaMemset2DAsync((void *) device_f, this->device_f_pitch[device], 0, stream_point_count * sizeof(double), ilmmax, *this->streamContainer->getStream(device, stream) ); check_errors(__FILE__, __LINE__); offset += stream_point_count; device_f += stream_point_count; } } } /* * Downloads part of a bubble from the device. Downloads to host exactly the same * part as the upload function above uploads to device. 
* * NOTE: this function is designed to function together with the bubble multiplication & * summation */ void Bubble::download(int lmax) { // calculate the total number of points in the bubbles each l,m -pair, int total_point_count = this->grid->ncell * (this->grid->nlip - 1) +1; size_t host_pitch = total_point_count * sizeof(double); int ilmmax = (lmax + 1) * (lmax + 1); // determine how many of the points belong to the current mpi-node int processor_point_count = total_point_count / this->number_of_processors + ((total_point_count % this->number_of_processors) > this->processor_order_number); // get the offset to the f-array caused by other processors int remainder = total_point_count % this->number_of_processors; int offset = this->processor_order_number * total_point_count / this->number_of_processors + ((remainder < this->processor_order_number) ? remainder : this->processor_order_number); double *device_f; double *host_f = &this->f[offset]; check_errors(__FILE__, __LINE__); for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { this->streamContainer->setDevice(device); // get the preallocated device pointers, // NOTE: The memory of bubble is allocated for its entire // length, thus we have to go to the part we want to upload device_f = this->device_f[device]; device_f = &device_f[offset]; // detemine how many of the mpi-nodes points belong to this device (gpu) int device_point_count = processor_point_count / this->streamContainer->getNumberOfDevices() + ((processor_point_count % this->streamContainer->getNumberOfDevices()) > device); for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) { // detemine the number of the points handled by this stream int stream_point_count = device_point_count / this->streamContainer->getStreamsPerDevice() + ((device_point_count % this->streamContainer->getStreamsPerDevice()) > stream); // upload the data to device, copy all ilmmax-rows for stream_point_count columns cudaMemcpy2DAsync((void *) host_f, host_pitch, (void *) device_f, this->device_f_pitch[device], stream_point_count * sizeof(double), ilmmax, cudaMemcpyDeviceToHost, *this->streamContainer->getStream(device, stream) ); check_errors(__FILE__, __LINE__); offset += stream_point_count; device_f += stream_point_count; host_f += stream_point_count; check_errors(__FILE__, __LINE__); } } } /* * Adds together the f-values of 'this' and input bubble 'bubble' * * NOTE: this function is designed to function together with the bubble multiplication * NOTE: this function assumes that the bubbles have identical grids and with that, * identical f_pitches */ void Bubble::add(Bubble *bubble) { // make sure that the k-values of the input functions are the same // this is done by decreasing the larger k-value to be equal // with the smaller check_errors(__FILE__, __LINE__); if (this->k > bubble->k) { this->decreaseK(this->k - bubble->k); } else if (this->k < bubble->k) { bubble->decreaseK(bubble->k - this->k); } check_errors(__FILE__, __LINE__); // calculate the total number of points in the bubbles each l,m -pair, int total_point_count = this->grid->ncell * (this->grid->nlip - 1) +1; int smaller_lmax = min(this->lmax, bubble->lmax); // determine how many of the points belong to the current mpi-node int processor_point_count = total_point_count / this->number_of_processors + ((total_point_count % this->number_of_processors) > this->processor_order_number); // get the offset to the f-array caused by other processors int remainder = 
total_point_count % this->number_of_processors; int offset = this->processor_order_number * total_point_count / this->number_of_processors + ((remainder < this->processor_order_number) ? remainder : this->processor_order_number); double *device_f; double *device_f1; int block_size = 256; for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { this->streamContainer->setDevice(device); this->waitBubbleUploaded(device); bubble->waitBubbleUploaded(device); // get the preallocated device pointers, // NOTE: The memory of bubble is allocated for its entire // length, thus we have to go to the part we want to upload device_f = this->device_f[device]; device_f = &device_f[offset]; device_f1 = bubble->device_f[device]; device_f1 = &device_f1[offset]; // detemine how many of the mpi-nodes points belong to this device (gpu) int device_point_count = processor_point_count / this->streamContainer->getNumberOfDevices() + ((processor_point_count % this->streamContainer->getNumberOfDevices()) > device); for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) { // detemine the number of the points handled by this stream int stream_point_count = device_point_count / this->streamContainer->getStreamsPerDevice() + ((device_point_count % this->streamContainer->getStreamsPerDevice()) > stream); int grid_size = (stream_point_count + block_size - 1) / block_size; // call the kernel Bubble_sum_kernel <<<grid_size, block_size, 0, *this->streamContainer->getStream(device, stream)>>> (device_f, device_f1, smaller_lmax, stream_point_count, this->device_f_pitch[device]); check_errors(__FILE__, __LINE__); // add the device pointers and the offset offset += stream_point_count; device_f += stream_point_count; device_f1 += stream_point_count; } } } /* * Decreases the k-value of a bubble by k_decrease * * NOTE: this function is designed to function together with the bubble multiplication * NOTE: this function assumes that the bubbles have identical grids and with that, * identical f_pitches */ void Bubble::decreaseK(int k_decrease) { // calculate the total number of points in the bubbles each l,m -pair, int total_point_count = this->grid->ncell * (this->grid->nlip - 1) +1; // determine how many of the points belong to the current mpi-node int processor_point_count = total_point_count / this->number_of_processors + ((total_point_count % this->number_of_processors) > this->processor_order_number); // get the offset to the f-array caused by other processors int remainder = total_point_count % this->number_of_processors; int offset = this->processor_order_number * total_point_count / this->number_of_processors + ((remainder < this->processor_order_number) ? 
remainder : this->processor_order_number); double *device_f; double *device_r; int block_size = 256; for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { this->streamContainer->setDevice(device); this->waitBubbleUploaded(device); // get the preallocated device pointers, // NOTE: The memory of bubble is allocated for its entire // length, thus we have to go to the part we want to upload device_f = this->device_f[device]; device_f = &device_f[offset]; device_r = this->grid->device_gridpoints[device]; device_r = &device_r[offset]; // detemine how many of the mpi-nodes points belong to this device (gpu) int device_point_count = processor_point_count / this->streamContainer->getNumberOfDevices() + ((processor_point_count % this->streamContainer->getNumberOfDevices()) > device); for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) { // detemine the number of the points handled by this stream int stream_point_count = device_point_count / this->streamContainer->getStreamsPerDevice() + ((device_point_count % this->streamContainer->getStreamsPerDevice()) > stream); int grid_size = (stream_point_count + block_size - 1) / block_size; // call the kernel Bubble_decrease_k_kernel <<<grid_size, block_size, 0, *this->streamContainer->getStream(device, stream)>>> (device_f, device_r, k_decrease, this->lmax, stream_point_count, this->device_f_pitch[device]); check_errors(__FILE__, __LINE__); // add the device pointers and the offset offset += stream_point_count; device_f += stream_point_count; device_r += stream_point_count; } } } /* * Integrates over the bubble. We only need to integrate over the s-bubble. */ double Bubble::integrate() { // calculate the total number of points in the bubbles each l,m -pair, int total_point_count = this->grid->getShape(); // check if the integrator has been inited, if not, init it if (!this->integrator) { this->integrator = new Integrator1D(this->streamContainer, this->grid, this->processor_order_number, this->number_of_processors); } // upload the l,m=0 radial function f to the integrator this->integrator->upload(this->f); check_errors(__FILE__, __LINE__); // determine how many of the points belong to the current mpi-node int processor_point_count = total_point_count / this->number_of_processors + ((total_point_count % this->number_of_processors) > this->processor_order_number); // get the offset to the f-array caused by other processors int remainder = total_point_count % this->number_of_processors; int offset = this->processor_order_number * total_point_count / this->number_of_processors + ((remainder < this->processor_order_number) ? 
remainder : this->processor_order_number); // get the partial s-bubble device vectors residing now in the integrators device memory double **device_vectors = this->integrator->getDeviceVectors(); double *device_vector; double *device_r; // multiply the integration vector with r^(2+this->k) // get the times we have to multiply the vector with r, i.e., 2+this->k // NOTE: this must be larger or equal to zero int k_change = 2 + this->k; if (k_change > 0) { int block_size = 256; for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { this->streamContainer->setDevice(device); // get the preallocated device pointers, // NOTE: The memory of gridpoints is allocated for its entire // length, thus we have to go to the part we want to upload // however, the integrator only has the memory it needs, the we don't need to // offset the device_vector device_vector = device_vectors[device]; device_r = this->grid->device_gridpoints[device]; device_r = &device_r[offset]; // detemine how many of the mpi-nodes points belong to this device (gpu) int device_point_count = processor_point_count / this->streamContainer->getNumberOfDevices() + ((processor_point_count % this->streamContainer->getNumberOfDevices()) > device); for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) { // detemine the number of the points handled by this stream int stream_point_count = device_point_count / this->streamContainer->getStreamsPerDevice() + ((device_point_count % this->streamContainer->getStreamsPerDevice()) > stream); int grid_size = (stream_point_count + block_size - 1) / block_size; // call the decrease_k- kernel by using lmax = 0 Bubble_decrease_k_kernel <<<grid_size, block_size, 0, *this->streamContainer->getStream(device, stream)>>> (device_vector, device_r, k_change, 0, stream_point_count, 0); check_errors(__FILE__, __LINE__); // add the device pointers and the offset offset += stream_point_count; device_vector += stream_point_count; device_r += stream_point_count; check_errors(__FILE__, __LINE__); } } } else if (k_change < 0) { printf("Invalid k-value (%d) at bubble-integrate, must be larger or equal with -2. 
At file '%s', line number %d", this->k, __FILE__, __LINE__); exit(-1); } return FOURPI_ * this->integrator->integrate(); // } void Bubble::registerHost(double *f) { check_errors(__FILE__, __LINE__); this->f = f; /*int ilmmax = (this->lmax + 1) * (this->lmax + 1); // calculate the total number of points in the bubbles each l,m -pair, int total_point_count = this->grid->ncell * (this->grid->nlip - 1) +1; cudaHostRegister(this->f, sizeof(double)*ilmmax*total_point_count, cudaHostRegisterPortable);*/ check_errors(__FILE__, __LINE__); } void Bubble::destroy() { //this->grid->destroy(); //check_errors(__FILE__, __LINE__); //delete this->grid; this->grid = NULL; for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device++) { this->streamContainer->setDevice(device); cudaFree(this->device_f[device]); check_errors(__FILE__, __LINE__); cudaFree(this->device_cf[device]); check_errors(__FILE__, __LINE__); cudaFree(this->device_df[device]); check_errors(__FILE__, __LINE__); cudaFree(this->device_copies[device]); check_errors(__FILE__, __LINE__); } delete[] this->device_copies; delete[] this->device_f; delete[] this->device_df; delete[] this->device_f_pitch; delete[] this->device_cf; delete[] this->uploaded_events; // check if integrator is null pointer, if not // delete the integrator if (this->integrator) { this->integrator->destroy(); delete this->integrator; this->integrator = NULL; } check_errors(__FILE__, __LINE__); //cudaHostUnregister(this); } /* * Set MPI-configuration used by the bubble object. */ void Bubble::setProcessorConfiguration( int processor_order_number, int number_of_processors) { this->number_of_processors = number_of_processors; this->processor_order_number = processor_order_number; } /************************************************************** * Bubbles-implementation * **************************************************************/ int Bubbles::getBubbleCount() { return this->nbub; } Bubbles::Bubbles(int nbub) { this->nbub = nbub; this->bubbles = new Bubble*[nbub]; this->is_sub_bubbles = false; } /* * Init new Bubbles by making a copy of the old. * * NOTE: This makes a deep copy of the old bubbles, meaning that * new memory places are allocated for the underlying Bubble objects. */ Bubbles::Bubbles(Bubbles *old_bubbles, int lmax, int k) { this->is_sub_bubbles = false; this->nbub = old_bubbles->nbub; this->bubbles = new Bubble*[nbub]; for (int i = 0; i < old_bubbles->getBubbleCount(); i++) { this->bubbles[i] = new Bubble(old_bubbles->bubbles[i], lmax, k); } } /* * Get new bubbles object containing some of the original bubbles. * The bubbles selected in the new objects are the ones with * the ibub values matching to those in input parameter 'ibubs'. * NOTE: this function makes a shallow copy of the input bubbles 'this', * i.e., the underlying Bubble objects are copied as references only */ Bubbles *Bubbles::getSubBubbles(int *ibubs, int nbub) { Bubbles *new_bubbles = new Bubbles(nbub); new_bubbles->is_sub_bubbles = true; // copy the references to the wanted Bubble-objects specified // in ibubs for (int i = 0; i < new_bubbles->getBubbleCount(); i++) { new_bubbles->bubbles[i] = this->getBubble(ibubs[i]); } return new_bubbles; } /* * Get the pointer to the Bubble with local order number 'i' equal to * input parameter 'i'. If not found NULL is returned. 
* * @param i - The local order number of the bubble */ Bubble *Bubbles::getBubbleWithLocalOrderNumber(int i) { if (i < this->nbub) { return this->bubbles[i]; } return NULL; } /* * Get the pointer to the Bubble with global order number 'ibub' equal to * input parameter 'ibub'. If not found NULL is returned. * * @param ibub - The global order number of the bubble */ Bubble *Bubbles::getBubble(int ibub) { for (int i = 0; i < this->getBubbleCount(); i ++) { if (this->bubbles[i]->ibub == ibub) { return this->bubbles[i]; } } return NULL; } /* * Check if the Bubbles contains a Bubble with global order number 'ibub'. * * @param ibub - The global order number of the bubble */ bool Bubbles::containsBubble(int ibub) { Bubble *bubble = this->getBubble(ibub); return (bubble != NULL); } /* * Init a bubble with global order number 'ibub' to the 'i':th slot in the * internal bubbles array. Contains also the values for the bubble. * * @param grid - The grid used in the bubble * @param i - The internal order number of the bubble * @param ibub - The global order number of the bubble * @param center - The global center point of the bubble * @param lmax - The maximum value of quantum number 'l' for the bubble * @param k - The parameter k for the r^k multiplier of the values * @param bf - The values of the bubble * @param charge - The charge of the atom at the center of the bubble * @param streaContainer - The container holding the streams used in cuda evaluation of anything * related to this object */ void Bubbles::initBubble(Grid1D *grid, int i, int ibub, double center[3], int lmax, int k, double *bf, double charge, StreamContainer *streamContainer) { this->bubbles[i] = new Bubble(ibub, grid, center, lmax, k, bf, charge, streamContainer); } /* * Init a bubble with global order number 'ibub' to the 'i':th slot in the * internal bubbles array. Contains also the values for the bubble. * * @param grid - The grid used in the bubble * @param i - The internal order number of the bubble * @param ibub - The global order number of the bubble * @param center - The global center point of the bubble * @param lmax - The maximum value of quantum number 'l' for the bubble * @param k - The parameter k for the r^k multiplier of the values * @param charge - The charge of the atom at the center of the bubble * @param streaContainer - The container holding the streams used in cuda evaluation of anything * related to this object */ void Bubbles::initBubble(Grid1D *grid, int i, int ibub, double center[3], int lmax, int k, double charge, StreamContainer *streamContainer) { check_errors(__FILE__, __LINE__); this->bubbles[i] = new Bubble(ibub, grid, center, lmax, k, charge, streamContainer); } void Bubbles::unregister() { /*for (int ibub = 0; ibub < this->getBubbleCount(); ibub ++) { cudaHostUnregister(this->getBubble(ibub)->f); check_errors(__FILE__, __LINE__); }*/ } void Bubbles::waitBubblesUploaded(int device) { for (int i = 0; i < this->getBubbleCount(); i ++) { this->bubbles[i]->waitBubbleUploaded(device); } } /* * Set MPI-configuration used by the bubble object. 
*/ void Bubbles::setProcessorConfiguration( int processor_order_number, int number_of_processors) { for (int i = 0; i < this->getBubbleCount(); i ++) { this->bubbles[i]->setProcessorConfiguration(processor_order_number, number_of_processors); } } double Bubbles::integrate() { double result = 0.0; for (int i = 0; i < this->getBubbleCount(); i ++) { result += this->getBubbleWithLocalOrderNumber(i)->integrate(); } return result; } void Bubbles::download() { for (int i = 0; i < this->getBubbleCount(); i ++) { this->bubbles[i]->download(this->bubbles[i]->lmax); } } void Bubbles::add(Bubbles *bubbles) { // go through all the Bubble-objects present in this for (int i = 0; i < bubbles->getBubbleCount(); i ++) { // get the matching bubble in the added bubbles Bubble * bubble = bubbles->getBubble(this->bubbles[i]->ibub); // if the corresponding Bubble exists in both the Bubbles, do the add if (bubble) { this->bubbles[i]->add(bubble); } } check_errors(__FILE__, __LINE__); } void Bubbles::destroy() { if (!this->is_sub_bubbles) { for (int ibub = 0; ibub < this->getBubbleCount(); ibub ++) { this->bubbles[ibub]->destroy(); delete this->bubbles[ibub]; } } delete[] this->bubbles; } void Bubbles::inject(Grid3D *grid3d, CudaCube *cube, int lmin, CudaCube *gradients_cube_x, CudaCube *gradients_cube_y, CudaCube *gradients_cube_z, bool evaluate_value, bool evaluate_gradients_x, bool evaluate_gradients_y, bool evaluate_gradients_z) { check_errors(__FILE__, __LINE__); int total_slice_count = cube->getShape(Z_); // the minimum l is 0 always in the multiplication int device_slice_count; // get the pointer arrays from the cubes double **device_cubes = cube->getDeviceCubes(); double **device_gradients_x, **device_gradients_y, **device_gradients_z; // get the device gradient result pointers if (evaluate_gradients_x) device_gradients_x = gradients_cube_x->getDeviceCubes(); if (evaluate_gradients_y) device_gradients_y = gradients_cube_y->getDeviceCubes(); if (evaluate_gradients_z) device_gradients_z = gradients_cube_z->getDeviceCubes(); size_t *device_pitches = cube->getDevicePitches(); int *device_memory_shape = cube->getDeviceMemoryShape(); int slice_offset = 0; Bubble *bubble; StreamContainer *streamContainer = cube->getStreamContainer(); // copy the cubes to the device & execute the kernels for (int device = 0; device < streamContainer->getNumberOfDevices(); device ++) { // set the used device (gpu) streamContainer->setDevice(device); double *dev_cube = device_cubes[device]; double *dev_gradient_x, *dev_gradient_y, *dev_gradient_z; // get the gradient addresses for the device if (evaluate_gradients_x) dev_gradient_x = device_gradients_x[device]; if (evaluate_gradients_y) dev_gradient_y = device_gradients_y[device]; if (evaluate_gradients_z) dev_gradient_z = device_gradients_z[device]; // calculate the number of vectors this device handles device_slice_count = total_slice_count / streamContainer->getNumberOfDevices() + ((total_slice_count % streamContainer->getNumberOfDevices()) > device); for (int stream = 0; stream < streamContainer->getStreamsPerDevice(); stream++) { // determine the count of vectors handled by this stream int slice_count = device_slice_count / streamContainer->getStreamsPerDevice() + ((device_slice_count % streamContainer->getStreamsPerDevice()) > stream); check_errors(__FILE__, __LINE__); // get the launch configuration for the f1-inject dim3 block, grid; cube->getLaunchConfiguration(&grid, &block, slice_count, INJECT_BLOCK_SIZE); if (slice_count > 0) { // inject bubbles to the cube for (int i = 
0; i < this->getBubbleCount(); i++) { bubble = this->getBubbleWithLocalOrderNumber(i); // wait that the bubble is uploaded to the device before starting if (stream == 0) bubble->waitBubbleUploaded(device); // call the kernel if (lmin == 0) { if (evaluate_gradients_x && evaluate_gradients_y && evaluate_gradients_z) { if (evaluate_value) { Bubbles_evaluate_grid_gradients < true, true, true, true, true> <<< grid, block, INJECT_BLOCK_SIZE * sizeof(double) * 8, *streamContainer->getStream(device, stream) >>> (bubble->device_copies[device], dev_cube, dev_gradient_x, dev_gradient_y, dev_gradient_z, grid3d->axis[X_]->device_gridpoints[device], grid3d->axis[Y_]->device_gridpoints[device], grid3d->axis[Z_]->device_gridpoints[device], grid3d->shape[X_], grid3d->shape[Y_], grid3d->shape[Z_], bubble->crd[X_], bubble->crd[Y_], bubble->crd[Z_], bubble->k, slice_offset, device_pitches[device], device_memory_shape[Y_], slice_count, 1.0); } else { Bubbles_evaluate_grid_gradients < true, false, true, true, true> <<< grid, block, INJECT_BLOCK_SIZE * sizeof(double) * 8, *streamContainer->getStream(device, stream) >>> (bubble->device_copies[device], dev_cube, dev_gradient_x, dev_gradient_y, dev_gradient_z, grid3d->axis[X_]->device_gridpoints[device], grid3d->axis[Y_]->device_gridpoints[device], grid3d->axis[Z_]->device_gridpoints[device], grid3d->shape[X_], grid3d->shape[Y_], grid3d->shape[Z_], bubble->crd[X_], bubble->crd[Y_], bubble->crd[Z_], bubble->k, slice_offset, device_pitches[device], device_memory_shape[Y_], slice_count, 1.0); } } else if (evaluate_gradients_x) { Bubbles_evaluate_grid_gradients < true, false, true, false, false> <<< grid, block, INJECT_BLOCK_SIZE * sizeof(double) * 8, *streamContainer->getStream(device, stream) >>> (bubble->device_copies[device], dev_cube, dev_gradient_x, dev_gradient_y, dev_gradient_z, grid3d->axis[X_]->device_gridpoints[device], grid3d->axis[Y_]->device_gridpoints[device], grid3d->axis[Z_]->device_gridpoints[device], grid3d->shape[X_], grid3d->shape[Y_], grid3d->shape[Z_], bubble->crd[X_], bubble->crd[Y_], bubble->crd[Z_], bubble->k, slice_offset, device_pitches[device], device_memory_shape[Y_], slice_count, 1.0); } else if (evaluate_gradients_y) { Bubbles_evaluate_grid_gradients < true, false, false, true, false> <<< grid, block, INJECT_BLOCK_SIZE * sizeof(double) * 8, *streamContainer->getStream(device, stream) >>> (bubble->device_copies[device], dev_cube, dev_gradient_x, dev_gradient_y, dev_gradient_z, grid3d->axis[X_]->device_gridpoints[device], grid3d->axis[Y_]->device_gridpoints[device], grid3d->axis[Z_]->device_gridpoints[device], grid3d->shape[X_], grid3d->shape[Y_], grid3d->shape[Z_], bubble->crd[X_], bubble->crd[Y_], bubble->crd[Z_], bubble->k, slice_offset, device_pitches[device], device_memory_shape[Y_], slice_count, 1.0); } else if (evaluate_gradients_z) { Bubbles_evaluate_grid_gradients < true, false, false, false, true> <<< grid, block, INJECT_BLOCK_SIZE * sizeof(double) * 8, *streamContainer->getStream(device, stream) >>> (bubble->device_copies[device], dev_cube, dev_gradient_x, dev_gradient_y, dev_gradient_z, grid3d->axis[X_]->device_gridpoints[device], grid3d->axis[Y_]->device_gridpoints[device], grid3d->axis[Z_]->device_gridpoints[device], grid3d->shape[X_], grid3d->shape[Y_], grid3d->shape[Z_], bubble->crd[X_], bubble->crd[Y_], bubble->crd[Z_], bubble->k, slice_offset, device_pitches[device], device_memory_shape[Y_], slice_count, 1.0); } else if (evaluate_value) { Bubbles_evaluate_grid_pitched <<< grid, block, INJECT_BLOCK_SIZE * sizeof(double) * 
8, *streamContainer->getStream(device, stream) >>> (bubble->device_copies[device], dev_cube, grid3d->axis[X_]->device_gridpoints[device], grid3d->axis[Y_]->device_gridpoints[device], grid3d->axis[Z_]->device_gridpoints[device], grid3d->shape[X_], grid3d->shape[Y_], grid3d->shape[Z_], bubble->crd[X_], bubble->crd[Y_], bubble->crd[Z_], bubble->k, slice_offset, device_pitches[device], device_memory_shape[Y_], slice_count, 1.0); } } else if (evaluate_value) { Bubbles_evaluate_grid_lmin <<< grid, block, INJECT_BLOCK_SIZE * sizeof(double) * 8, *streamContainer->getStream(device, stream) >>> (bubble->device_copies[device], dev_cube, grid3d->axis[X_]->device_gridpoints[device], grid3d->axis[Y_]->device_gridpoints[device], grid3d->axis[Z_]->device_gridpoints[device], grid3d->shape[X_], grid3d->shape[Y_], grid3d->shape[Z_], bubble->crd[X_], bubble->crd[Y_], bubble->crd[Z_], bubble->k, slice_offset, device_pitches[device], device_memory_shape[Y_], slice_count, lmin, 1.0); } check_errors(__FILE__, __LINE__); } } // increase the address by the number of vectors in this array if (evaluate_value) dev_cube += slice_count * device_pitches[device] / sizeof(double) * device_memory_shape[Y_]; if (evaluate_gradients_x) dev_gradient_x += slice_count * device_pitches[device] / sizeof(double) * device_memory_shape[Y_]; if (evaluate_gradients_y) dev_gradient_y += slice_count * device_pitches[device] / sizeof(double) * device_memory_shape[Y_]; if (evaluate_gradients_z) dev_gradient_z += slice_count * device_pitches[device] / sizeof(double) * device_memory_shape[Y_]; slice_offset += slice_count; } } } /************************************************************** * BubblesEvaluator function implementations * **************************************************************/ /* * Evaluate the bubbles at preset points. The results are stored in the device memory. 
* * @param gradient_direction - possible values X_ = 0, Y_ = 1, Z_ = 2, (X_, Y_, Z_) = 3 && this->evaluateGradients * anything else: no gradients */ void BubblesEvaluator::evaluatePoints(Points *result_points, Points *gradient_points_x, Points *gradient_points_y, Points *gradient_points_z, int gradient_direction) { int warp_size = 32; int total_warp_count = result_points->point_coordinates->number_of_points / warp_size + ((result_points->point_coordinates->number_of_points % warp_size) > 0); int point_offset = 0; for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { this->streamContainer->setDevice(device); // allocate space for device results and device points int device_warp_count = total_warp_count / this->streamContainer->getNumberOfDevices() + ((total_warp_count % this->streamContainer->getNumberOfDevices()) > device); int device_point_count = device_warp_count * warp_size; int device_point_offset = 0; check_errors(__FILE__, __LINE__); // get the pointers to the device points & results double *device_points_ptr = result_points->point_coordinates->device_coordinates[device]; double *device_results_ptr = result_points->device_values[device]; double *device_gradients_x_ptr = NULL; double *device_gradients_y_ptr = NULL; double *device_gradients_z_ptr = NULL; if (gradient_direction == 3) { device_gradients_x_ptr = gradient_points_x->device_values[device]; device_gradients_y_ptr = gradient_points_y->device_values[device]; device_gradients_z_ptr = gradient_points_z->device_values[device]; } else if (gradient_direction < 3 && gradient_direction >= 0) { device_gradients_x_ptr = result_points->device_values[device]; device_gradients_y_ptr = result_points->device_values[device]; device_gradients_z_ptr = result_points->device_values[device]; } for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) { // get the number of points that are in the responsibility of this stream int stream_warp_count = device_warp_count / this->streamContainer->getStreamsPerDevice() + ((device_warp_count % streamContainer->getStreamsPerDevice()) > stream); int stream_point_count = stream_warp_count * warp_size; // make sure that the last stream does not go over board if (stream_point_count + point_offset > result_points->point_coordinates->number_of_points) { stream_point_count = result_points->point_coordinates->number_of_points - point_offset; } check_errors(__FILE__, __LINE__); if (stream_point_count > 0) { for (int i = 0; i < this->bubbles->getBubbleCount(); i++) { Bubble *bubble = this->bubbles->getBubbleWithLocalOrderNumber(i); // wait that the bubble is uploaded before calling the kernel if (stream == 0) bubble->waitBubbleUploaded(device); int grid_size = (stream_point_count + INJECT_BLOCK_SIZE - 1) / INJECT_BLOCK_SIZE; //printf("ibub: %d, device: %d, stream: %d, grid_size: %d, block_size: %d, stream_point_count: %d, device_point_offset: %d, device_point_count: %d, point_count: %d\n", // ibub, device, stream, grid_size, INJECT_BLOCK_SIZE, stream_point_count, device_point_offset, device_point_count, this->point_count); if (gradient_direction == X_) { Bubbles_evaluate_gradient_points <true, false, true, false, false> <<< grid_size, INJECT_BLOCK_SIZE, INJECT_BLOCK_SIZE * sizeof(double) * 7, *this->streamContainer->getStream(device, stream) >>> (bubble->device_copies[device], device_results_ptr, device_gradients_x_ptr, device_gradients_y_ptr, device_gradients_z_ptr, device_points_ptr, device_point_count, bubble->crd[X_], bubble->crd[Y_], 
bubble->crd[Z_], bubble->k, stream_point_count, device_point_offset, 1.0 ); } else if (gradient_direction == Y_) { Bubbles_evaluate_gradient_points <true, false, false, true, false> <<< grid_size, INJECT_BLOCK_SIZE, INJECT_BLOCK_SIZE * sizeof(double) * 7, *this->streamContainer->getStream(device, stream) >>> (bubble->device_copies[device], device_results_ptr, device_gradients_x_ptr, device_gradients_y_ptr, device_gradients_z_ptr, device_points_ptr, device_point_count, bubble->crd[X_], bubble->crd[Y_], bubble->crd[Z_], bubble->k, stream_point_count, device_point_offset, 1.0 ); } else if (gradient_direction == Z_) { Bubbles_evaluate_gradient_points <true, false, false, false, true> <<< grid_size, INJECT_BLOCK_SIZE, INJECT_BLOCK_SIZE * sizeof(double) * 7, *this->streamContainer->getStream(device, stream) >>> (bubble->device_copies[device], device_results_ptr, device_gradients_x_ptr, device_gradients_y_ptr, device_gradients_z_ptr, device_points_ptr, device_point_count, bubble->crd[X_], bubble->crd[Y_], bubble->crd[Z_], bubble->k, stream_point_count, device_point_offset, 1.0 ); } else if (gradient_direction == 3) { Bubbles_evaluate_gradient_points <true, true, true, true, true> <<< grid_size, INJECT_BLOCK_SIZE, INJECT_BLOCK_SIZE * sizeof(double) * 7, *this->streamContainer->getStream(device, stream) >>> (bubble->device_copies[device], device_results_ptr, device_gradients_x_ptr, device_gradients_y_ptr, device_gradients_z_ptr, device_points_ptr, device_point_count, bubble->crd[X_], bubble->crd[Y_], bubble->crd[Z_], bubble->k, stream_point_count, device_point_offset, 1.0 ); } else { Bubbles_evaluate_points_simple <<< grid_size, INJECT_BLOCK_SIZE, INJECT_BLOCK_SIZE * sizeof(double) * 7, *this->streamContainer->getStream(device, stream) >>> (bubble->device_copies[device], device_results_ptr, device_points_ptr, device_point_count, bubble->crd[X_], bubble->crd[Y_], bubble->crd[Z_], bubble->k, stream_point_count, device_point_offset, 1.0 ); } check_errors(__FILE__, __LINE__); } } // add the pointers point_offset += stream_point_count; device_point_offset += stream_point_count; } check_errors(__FILE__, __LINE__); } } /************************************************************** * Function3DMultiplier-implementation * **************************************************************/ /* * Injects the f1_bubbles to this->cube1 and f2_bubbles to this->cube2, * multiplies this->cube1 with this->cube2 and de-injects the 'result_bubbles' * from 'this->cube1' * * @param f1_bubbles * @param f2_bubbles * @param result_bubbles */ void Function3DMultiplier::multiply(Bubbles *f1_bubbles, Bubbles *f2_bubbles, Bubbles *result_bubbles) { int total_slice_count = this->cube1->getShape(Z_); // the minimum l is 0 always in the multiplication int device_slice_count; // get the pointer arrays from the cubes double **f1_device_cubes = this->cube1->getDeviceCubes(); size_t *f1_device_pitches = this->cube1->getDevicePitches(); double **f2_device_cubes = this->cube2->getDeviceCubes(); size_t *f2_device_pitches = this->cube2->getDevicePitches(); int *f1_device_memory_shape = this->cube1->getDeviceMemoryShape(); int *f2_device_memory_shape = this->cube2->getDeviceMemoryShape(); int f1_shape[3]; f1_shape[X_] = this->cube1->getShape(X_); f1_shape[Y_] = this->cube1->getShape(Y_); f1_shape[Z_] = this->cube1->getShape(Z_); int f2_shape[3]; f2_shape[X_] = this->cube2->getShape(X_); f2_shape[Y_] = this->cube2->getShape(Y_); f2_shape[Z_] = this->cube2->getShape(Z_); int slice_offset = 0; Bubble *bubble; // copy the cubes to the device & 
execute the kernels for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { // set the used device (gpu) this->streamContainer->setDevice(device); //cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte); //int first_block = 0; double *dev_f1_cube = f1_device_cubes[device]; double *dev_f2_cube = f2_device_cubes[device]; // calculate the number of vectors this device handles device_slice_count = total_slice_count / this->streamContainer->getNumberOfDevices() + ((total_slice_count % this->streamContainer->getNumberOfDevices()) > device); for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream++) { // determine the count of vectors handled by this stream int slice_count = device_slice_count / this->streamContainer->getStreamsPerDevice() + ((device_slice_count % this->streamContainer->getStreamsPerDevice()) > stream); if (slice_count > 0) { // get the launch configuration for the f1-inject dim3 block, grid; this->cube1->getLaunchConfiguration(&grid, &block, slice_count, INJECT_BLOCK_SIZE); check_errors(__FILE__, __LINE__); // inject the f1 bubbles to the f1_cube (and sum) for (int i = 0; i < f1_bubbles->getBubbleCount(); i++) { bubble = f1_bubbles->getBubbleWithLocalOrderNumber(i); // wait that the bubble is uploaded to the device before starting if (stream == 0) bubble->waitBubbleUploaded(device); Bubbles_evaluate_grid_pitched <<< grid, block, INJECT_BLOCK_SIZE * sizeof(double) * 7, *this->streamContainer->getStream(device, stream) >>> (bubble->device_copies[device], dev_f1_cube, this->grid->axis[X_]->device_gridpoints[device], this->grid->axis[Y_]->device_gridpoints[device], this->grid->axis[Z_]->device_gridpoints[device], f1_shape[X_], f1_shape[Y_], f1_shape[Z_], bubble->crd[X_], bubble->crd[Y_], bubble->crd[Z_], bubble->k, slice_offset, f1_device_pitches[device], f1_device_memory_shape[Y_], slice_count, 1.0); check_errors(__FILE__, __LINE__); } check_errors(__FILE__, __LINE__); // get the launch configuration for the f2-inject this->cube2->getLaunchConfiguration(&grid, &block, slice_count, INJECT_BLOCK_SIZE); // inject the f2 bubbles to the f1_cube (and sum) for (int i = 0; i < f2_bubbles->getBubbleCount(); i++) { bubble = f2_bubbles->getBubbleWithLocalOrderNumber(i); // wait that the bubble is uploaded to the device before starting if (stream == 0) bubble->waitBubbleUploaded(device); // call the kernel Bubbles_evaluate_grid_pitched <<< grid, block, INJECT_BLOCK_SIZE * sizeof(double) * 7, *this->streamContainer->getStream(device, stream) >>> (bubble->device_copies[device], dev_f2_cube, this->grid->axis[X_]->device_gridpoints[device], this->grid->axis[Y_]->device_gridpoints[device], this->grid->axis[Z_]->device_gridpoints[device], f2_shape[X_], f2_shape[Y_], f2_shape[Z_], bubble->crd[X_], bubble->crd[Y_], bubble->crd[Z_], bubble->k, slice_offset, f2_device_pitches[device], f2_device_memory_shape[Y_], slice_count, 1.0); check_errors(__FILE__, __LINE__); } // get the launch configuration for the multiplication and result-inject this->cube2->getLaunchConfiguration(&grid, &block, slice_count, INJECT_BLOCK_SIZE); // multiply dev_f1_cube with dev_f2_cube and store the result to dev_f1_cube multiply_3d_cubes(dev_f1_cube, f1_shape[X_], f1_shape[Y_], f1_device_memory_shape[Y_], f1_device_pitches[device], dev_f2_cube, f2_shape[X_], f2_shape[Y_], f2_device_memory_shape[Y_], f2_device_pitches[device], slice_count, &grid, &block, this->streamContainer->getStream(device, stream)); check_errors(__FILE__, __LINE__); // de-inject (deduct) the 
result bubbles from the dev_f1_cube for (int i = 0; i < result_bubbles->getBubbleCount(); i++) { bubble = result_bubbles->getBubbleWithLocalOrderNumber(i); // wait that the bubble is uploaded to the device before starting if (stream == 0) bubble->waitBubbleUploaded(device); // call the kernel Bubbles_evaluate_grid_pitched <<< grid, block, INJECT_BLOCK_SIZE * sizeof(double) * 7, *this->streamContainer->getStream(device, stream) >>> (bubble->device_copies[device], dev_f1_cube, this->grid->axis[X_]->device_gridpoints[device], this->grid->axis[Y_]->device_gridpoints[device], this->grid->axis[Z_]->device_gridpoints[device], f1_shape[X_], f1_shape[Y_], f1_shape[Z_], bubble->crd[X_], bubble->crd[Y_], bubble->crd[Z_], bubble->k, slice_offset, f1_device_pitches[device], f1_device_memory_shape[Y_], slice_count, -1.0); check_errors(__FILE__, __LINE__); } // increase the address by the number of vectors in this array // something else dev_f1_cube += slice_count * f1_device_pitches[device] / sizeof(double) * f1_device_memory_shape[Y_]; dev_f2_cube += slice_count * f2_device_pitches[device] / sizeof(double) * f2_device_memory_shape[Y_]; slice_offset += slice_count; } } } } /******************************************** * Fortran interfaces * ********************************************/ extern "C" void bubbles_add_cuda(Bubbles *bubbles, Bubbles *bubbles1) { bubbles->add(bubbles1); } extern "C" Bubbles* bubbles_get_sub_bubbles_cuda(Bubbles *bubbles, int *ibubs, int nbub) { return bubbles->getSubBubbles(ibubs, nbub); } extern "C" Bubbles *bubbles_init_cuda(int nbub) { Bubbles *new_bubbles = new Bubbles(nbub); check_errors(__FILE__, __LINE__); return new_bubbles; } /* * * @param id - local index of the bubble inited in Fortran format: first index is 1. */ extern "C" void bubble_init_cuda(Bubbles *bubbles, Grid1D *grid, int i, int ibub, double center[3], int lmax, int k, double charge, StreamContainer *streamContainer) { bubbles->initBubble(grid, i-1, ibub, center, lmax, k, charge, streamContainer); check_errors(__FILE__, __LINE__); } /* * Upload the content ('bf') of the Bubble with global order number 'ibub' to the device. 
* * @param ibub - tHe global order number of the bubble */ extern "C" void bubble_upload_all_cuda(Bubbles *bubbles, int ibub, int lmax, int k, double *bf) { if (bubbles->containsBubble(ibub)) { bubbles->getBubble(ibub)->k = k; bubbles->getBubble(ibub)->uploadAll(bf, lmax); check_errors(__FILE__, __LINE__); } } extern "C" void bubble_upload_cuda(Bubbles *bubbles, int ibub, int lmax, double *bf) { if (bubbles->containsBubble(ibub)) { bubbles->getBubble(ibub)->upload(bf, lmax); check_errors(__FILE__, __LINE__); } } extern "C" void bubble_add_cuda(Bubbles *bubbles, Bubbles *bubbles1, int ibub) { bubbles->getBubble(ibub)->add(bubbles1->getBubble(ibub)); check_errors(__FILE__, __LINE__); } extern "C" void bubbles_destroy_cuda(Bubbles* bubbles){ if (bubbles) { bubbles->destroy(); delete bubbles; check_errors(__FILE__, __LINE__); } } extern "C" double bubbles_integrate_cuda(Bubbles *bubbles) { return bubbles->integrate(); } extern "C" void bubbles_set_processor_configuration_cuda(Bubbles *bubbles, int processor_order_number, int number_of_processors) { bubbles->setProcessorConfiguration(processor_order_number, number_of_processors); } extern "C" void bubbles_inject_cuda(Bubbles *bubbles, Grid3D *grid, int lmin, CudaCube *cube) { bubbles->inject(grid, cube, lmin); } extern "C" void bubbles_inject_to_cuda(Bubbles *bubbles, Grid3D *grid, int lmin, CudaCube *cudaCube, double *cube, int offset, int cube_host_shape[3]) { cudaCube->initHost(&cube[offset], cube_host_shape, true); cudaCube->upload(); bubbles->inject(grid, cudaCube, lmin); } extern "C" double *bubbles_init_page_locked_f_cuda(int lmax, int shape){ //allocated += 1; double * result_f; check_errors(__FILE__, __LINE__); cudaHostAlloc((void **)&result_f, sizeof(double) * (lmax+1) * (lmax+1) * shape, cudaHostAllocPortable); check_errors(__FILE__, __LINE__); //printf("Allocated 1, Now allocated %d, address: %ld\n", allocated, result_f); return result_f; } extern "C" void bubbles_destroy_page_locked_f_cuda(double * f){ //allocated -= 1; //printf("Deallocated 1, Now allocated %d, address: %ld\n", allocated, f); cudaFreeHost(f); check_errors(__FILE__, __LINE__); }
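The Bubble methods above (download, add, decreaseK, integrate) all split total_point_count the same way: first across MPI ranks, then across the GPUs of the StreamContainer, then across their streams, with the leading chunks absorbing the remainder and a running offset advanced after each stream. A minimal stand-alone sketch of that arithmetic; the helper names and example sizes are illustrative, not part of the library:

#include <cstdio>

// Chunk `idx` out of `num_chunks` gets one extra item while the remainder lasts,
// mirroring the repeated "count / n + ((count % n) > idx)" expressions above.
static int chunk_size(int total, int num_chunks, int idx) {
    return total / num_chunks + ((total % num_chunks) > idx);
}

int main() {
    const int total_point_count = 1001;            // assumed example size
    const int devices = 3, streams_per_device = 4;
    int offset = 0;
    for (int d = 0; d < devices; ++d) {
        int device_points = chunk_size(total_point_count, devices, d);
        for (int s = 0; s < streams_per_device; ++s) {
            int stream_points = chunk_size(device_points, streams_per_device, s);
            printf("device %d stream %d: offset %d count %d\n", d, s, offset, stream_points);
            offset += stream_points;               // advanced exactly as the nested loops above do
        }
    }
    return 0;
}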
a03d694182f8125b5c362685b131a6f0b465f240.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <sys/resource.h> #include <math.h> double dwalltime(){ double sec; struct timeval tv; gettimeofday(&tv,NULL); sec = tv.tv_sec + tv.tv_usec/1000000.0; return sec; } __global__ void vecMult(double *d_vecA,unsigned long n, unsigned long blockSize){ __shared__ double sdata[sizeof(double)*128]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize*2) + tid; unsigned int gridSize = blockSize*2*gridDim.x; sdata[tid] = 0; __syncthreads(); while (i < n) { sdata[tid] += d_vecA[i] + d_vecA[i+blockSize]; i += gridSize; } __syncthreads(); if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); } if (tid < 32) { if (blockSize >= 64) sdata[tid] += sdata[tid + 32]; if (blockSize >= 32) sdata[tid] += sdata[tid + 16]; if (blockSize >= 16) sdata[tid] += sdata[tid + 8]; if (blockSize >= 8) sdata[tid] += sdata[tid + 4]; if (blockSize >= 4) sdata[tid] += sdata[tid + 2]; if (blockSize >= 2) sdata[tid] += sdata[tid + 1]; } if (tid == 0) d_vecA[blockIdx.x] = sdata[0]; } int main(int argc, char *argv[]){ if (argc != 2){ printf("Falta argumento: N\n"); return 0; } //declaracion de variables hipError_t error; unsigned long N = atoi (argv[1]); unsigned long CUDA_BLK = 128,GRID_BLK; unsigned long numBytes = sizeof(double)*N; double *vecA,result,*d_vecA,timetick; unsigned long i; vecA = (double *)malloc(numBytes); result = 1; for (i = 0; i < N; i++){ vecA[i] = 2; } //comment hipMalloc((void **) &d_vecA, numBytes); // Bloque unidimencional de hilos (*cb* hilos) dim3 dimBlock(CUDA_BLK); //promedio timetick = dwalltime(); hipMemcpy(d_vecA, vecA, numBytes, hipMemcpyHostToDevice); // CPU -> GPU for(i = N ; i > 1; i /= CUDA_BLK){ printf("%lu %lu\n\n",i,CUDA_BLK); GRID_BLK = i / CUDA_BLK ; dim3 dimGrid(GRID_BLK); hipLaunchKernelGGL(( vecMult), dim3(dimGrid), dim3(dimBlock), 0, 0, d_vecA,i,CUDA_BLK); hipDeviceSynchronize(); } hipMemcpy(vecA, d_vecA, sizeof(double)*GRID_BLK, hipMemcpyDeviceToHost); // GPU -> CPU for (i = 0; i < GRID_BLK; i++){ result *= vecA[i]; } printf("Tiempo para la GPU: %f\n",dwalltime() - timetick); error = hipGetLastError(); printf("error: %d\n\n",error); /* for (i = 0; i < GRID_BLK; i++){ printf("%f|",vecA[i]); } printf("\n\n");*/ printf("%f|",result); printf("\n\n"); hipFree(d_vecA); free(vecA); return 0; }
a03d694182f8125b5c362685b131a6f0b465f240.cu
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <sys/resource.h> #include <math.h> double dwalltime(){ double sec; struct timeval tv; gettimeofday(&tv,NULL); sec = tv.tv_sec + tv.tv_usec/1000000.0; return sec; } __global__ void vecMult(double *d_vecA,unsigned long n, unsigned long blockSize){ __shared__ double sdata[sizeof(double)*128]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize*2) + tid; unsigned int gridSize = blockSize*2*gridDim.x; sdata[tid] = 0; __syncthreads(); while (i < n) { sdata[tid] += d_vecA[i] + d_vecA[i+blockSize]; i += gridSize; } __syncthreads(); if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); } if (tid < 32) { if (blockSize >= 64) sdata[tid] += sdata[tid + 32]; if (blockSize >= 32) sdata[tid] += sdata[tid + 16]; if (blockSize >= 16) sdata[tid] += sdata[tid + 8]; if (blockSize >= 8) sdata[tid] += sdata[tid + 4]; if (blockSize >= 4) sdata[tid] += sdata[tid + 2]; if (blockSize >= 2) sdata[tid] += sdata[tid + 1]; } if (tid == 0) d_vecA[blockIdx.x] = sdata[0]; } int main(int argc, char *argv[]){ if (argc != 2){ printf("Falta argumento: N\n"); return 0; } //declaracion de variables cudaError_t error; unsigned long N = atoi (argv[1]); unsigned long CUDA_BLK = 128,GRID_BLK; unsigned long numBytes = sizeof(double)*N; double *vecA,result,*d_vecA,timetick; unsigned long i; vecA = (double *)malloc(numBytes); result = 1; for (i = 0; i < N; i++){ vecA[i] = 2; } //comment cudaMalloc((void **) &d_vecA, numBytes); // Bloque unidimencional de hilos (*cb* hilos) dim3 dimBlock(CUDA_BLK); //promedio timetick = dwalltime(); cudaMemcpy(d_vecA, vecA, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU for(i = N ; i > 1; i /= CUDA_BLK){ printf("%lu %lu\n\n",i,CUDA_BLK); GRID_BLK = i / CUDA_BLK ; dim3 dimGrid(GRID_BLK); vecMult<<<dimGrid, dimBlock>>>(d_vecA,i,CUDA_BLK); cudaThreadSynchronize(); } cudaMemcpy(vecA, d_vecA, sizeof(double)*GRID_BLK, cudaMemcpyDeviceToHost); // GPU -> CPU for (i = 0; i < GRID_BLK; i++){ result *= vecA[i]; } printf("Tiempo para la GPU: %f\n",dwalltime() - timetick); error = cudaGetLastError(); printf("error: %d\n\n",error); /* for (i = 0; i < GRID_BLK; i++){ printf("%f|",vecA[i]); } printf("\n\n");*/ printf("%f|",result); printf("\n\n"); cudaFree(d_vecA); free(vecA); return 0; }
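Besides the API renames visible in the pair above (cudaMalloc → hipMalloc, cudaMemcpy → hipMemcpy, cudaGetLastError → hipGetLastError, cudaFree → hipFree), the conversion shows the two launch-related rewrites hipify applies: the triple-chevron launch becomes hipLaunchKernelGGL(kernel, grid, block, sharedMemBytes, stream, args...), and the deprecated cudaThreadSynchronize() becomes hipDeviceSynchronize(). A small self-contained CUDA sketch with the corresponding HIP forms noted in comments; the kernel and variable names are illustrative:

#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale(double *v, double a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) v[i] *= a;
}

int main() {
    const int n = 1024;
    double *d_v;
    cudaMalloc(&d_v, n * sizeof(double));          // HIP: hipMalloc
    cudaMemset(d_v, 0, n * sizeof(double));        // HIP: hipMemset
    scale<<<(n + 127) / 128, 128>>>(d_v, 2.0, n);  // HIP: hipLaunchKernelGGL(scale, dim3((n + 127) / 128), dim3(128), 0, 0, d_v, 2.0, n);
    cudaDeviceSynchronize();                       // the cudaThreadSynchronize() used above maps to hipDeviceSynchronize()
    cudaFree(d_v);                                 // HIP: hipFree
    printf("done: %s\n", cudaGetErrorString(cudaGetLastError()));  // HIP: hipGetLastError / hipGetErrorString
    return 0;
}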
4644e129f5b2c392fce9a78dbf9cb3f1fa11b9cb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "hip/device_functions.h" #include "hip/hip_runtime.h" #include "DS_timer.h" #include <iostream> #include <stdio.h> #include <omp.h> #include <math.h> #ifdef __INTELLISENSE__ void __syncthreads(); #endif #define M_SIZE 2048 #define N_SIZE 2048 #define K_SIZE 2048 DS_timer *timer = new DS_timer(7); float* matrixGen(int m, int n); float* matrixMultiCPU(float *matrixA, float *matrixB, int row_size, int k_size, int col_size); float* matrixMultiGPU(float *matrixA, float *matrixB, int row_size, int k_size, int col_size); bool matrixEqual(float *matrixA, float *matrixB, int row_size, int col_size); __global__ void matMul_kernel(float *_A, float *_B, float *_C, int row_size, int k_size, int col_size) { int row = threadIdx.y + blockDim.y * blockIdx.y; int col = threadIdx.x + blockDim.x * blockIdx.x; int index = row * col_size + col; _C[index] = 0; if (row >= row_size || col >= col_size) { } else for (int k = 0; k < k_size; k++) _C[index] += _A[row*k_size + k] * _B[k * col_size + col]; } __global__ void matMul_kernel_previous_lab(float *_A, float *_B, float *_C, int row_size, int k_size, int col_size) { int row = threadIdx.y + blockDim.y * blockIdx.y; int col = threadIdx.x + blockDim.x * blockIdx.x; int index = row * col_size + col; __shared__ float sA[4][K_SIZE]; //8kb(2048*4) * 4 = 32 __shared__ float sB[K_SIZE][2]; //8kb * 2 = 16 for (int k = 0; k < K_SIZE; k++) { sA[threadIdx.y][k] = _A[row*K_SIZE + k]; sB[k][threadIdx.x] = _B[col + col_size * k]; } __syncthreads(); _C[index] = 0; if (row >= row_size || col >= col_size) { } else for (int k = 0; k < k_size; k++) _C[index] += sA[threadIdx.y][k] * sB[k][threadIdx.x]; } __global__ void matMul_kernel_shared(float *_A, float *_B, float *_C, int row_size, int k_size, int col_size) { int row = threadIdx.y + blockDim.y * blockIdx.y; int col = threadIdx.x + blockDim.x * blockIdx.x; int index = row * col_size + col; __shared__ float sA[16][16]; __shared__ float sB[16][16]; int localRow = threadIdx.y; int localCol = threadIdx.x; float val = 0; for (int bID = 0; bID < ceil((float)k_size / 16); bID++) { int offset = bID * 16; if (row >= row_size || offset + localCol >= k_size) sA[localRow][localCol] = 0; else sA[localRow][localCol] = _A[row*K_SIZE + offset + localCol]; if (col >= col_size || offset + localRow >= k_size) sB[localRow][localCol] = 0; else sB[localRow][localCol] = _B[(offset + localRow)*col_size + col]; __syncthreads(); for (int k = 0; k < 16; k++) { val += __fmul_rn(sA[localRow][k], sB[k][localCol]); } __syncthreads(); } if (row >= row_size || col >= col_size) { } _C[index] = val; } __global__ void matMul_kernel_shared_ver2(float *_A, float *_B, float *_C, int row_size, int k_size, int col_size) { int row = threadIdx.y + blockDim.y * blockIdx.y; int col = threadIdx.x + blockDim.x * blockIdx.x; int index = row * col_size + col; __shared__ float sA[16][16]; __shared__ float sB[16][16]; int localRow = threadIdx.y; int localCol = threadIdx.x; float val = 0; for (int bID = 0; bID < ceil((float)k_size / 16); bID++) { int offset = bID * 16; if (row >= row_size || offset + localCol >= k_size) sA[localCol][localRow] = 0; else sA[localCol][localRow] = _A[row*K_SIZE + offset + localCol]; if (col >= col_size || offset + localRow >= k_size) sB[localRow][localCol] = 0; else sB[localRow][localCol] = _B[(offset + localRow)*col_size + col]; __syncthreads(); for (int k = 0; k < 16; k++) { val += __fmul_rn(sA[k][localRow], 
sB[k][localCol]); } __syncthreads(); } if (row >= row_size || col >= col_size) { } _C[index] = val; } int main() { timer->initTimers(); timer->setTimerName(0, "host calc matrix "); timer->setTimerName(1, "gpu calc matrix "); timer->setTimerName(2, "gpu calc matrix (shared memory previous model)"); timer->setTimerName(3, "gpu calc matrix (shared memory)"); timer->setTimerName(4, "gpu calc matrix (shared memory ver2)"); timer->setTimerName(5, "memcpy host to device "); timer->setTimerName(6, "memcpy device to host "); float *matrixA = matrixGen(M_SIZE, K_SIZE); float *matrixB = matrixGen(K_SIZE, N_SIZE); std::cout << "gen" << std::endl; timer->onTimer(0); float *hostMatrix = matrixMultiCPU(matrixA, matrixB, M_SIZE, K_SIZE, N_SIZE); timer->offTimer(0); std::cout << "host calc is end!" << std::endl; float *gpuMatrix = matrixMultiGPU(matrixA, matrixB, M_SIZE, K_SIZE, N_SIZE); std::cout << "gpu calc is end!" << std::endl; if (matrixEqual(hostMatrix, gpuMatrix, M_SIZE, N_SIZE)) std::cout << "Two Matrix is equal!" << std::endl; else { std::cout << "Two Matrix is not equal!" << std::endl; } timer->printTimer(); delete[] matrixA; delete[] matrixB; return 0; } float* matrixGen(int m, int n) { float *newMatrix = new float[m*n]; #pragma omp parallel for for (int i = 0; i < m; i++) { for (int j = 0; j < n; j++) { newMatrix[i * n + j] = rand() /RAND_MAX * 10; } } return newMatrix; } float* matrixMultiCPU(float *matrixA, float *matrixB, int row_size, int k_size, int col_size) { float *newMatrix = new float[row_size*col_size]; #pragma omp parallel for for (int i = 0; i < row_size; i++) { for (int j = 0; j < col_size; j++) { newMatrix[i * col_size + j] = 0.0; for (int k = 0; k < k_size; k++) { newMatrix[i * col_size + j] += matrixA[i * k_size + k] * matrixB[k * col_size + j]; } } } return newMatrix; } bool matrixEqual(float *matrixA, float *matrixB, int row_size, int col_size) { int size = row_size * col_size; for (int i = 0; i < size; i++) { float diff = matrixA[i] - matrixB[i]; if (fabs(diff) > 0.00001) { std::cout << diff << std::endl; std::cout << "matrixA[" << i << "] : " << matrixA[i] << " != matrixB[" << i << "] : " << matrixB[i] << std::endl; return false; } } return true; } float* matrixMultiGPU(float *matrixA, float *matrixB, int row_size, int k_size, int col_size) { float *newMatrix = new float[row_size*col_size]; float *dA = NULL, *dB = NULL, *dC = NULL; //device memoryAlloc hipMalloc(&dA, sizeof(float)*row_size*k_size); hipMalloc(&dB, sizeof(float)*col_size*k_size); hipMalloc(&dC, sizeof(float)*row_size*col_size); timer->onTimer(5); //cpy matrix data h to d hipMemcpy(dA, matrixA, sizeof(float)*row_size*k_size, hipMemcpyHostToDevice); hipMemcpy(dB, matrixB, sizeof(float)*k_size*col_size, hipMemcpyHostToDevice); timer->offTimer(5); dim3 blockDim(16, 16); //256 threads per block dim3 gridDim(ceil((float)col_size / 16.0), ceil((float)row_size / 16.0)); timer->onTimer(1); matMul_kernel << <gridDim, blockDim >> > (dA, dB, dC, row_size, k_size, col_size); hipDeviceSynchronize(); timer->offTimer(1); dim3 blockDim2(2, 4); //8 threads per block dim3 gridDim2(ceil((float)col_size / 2.0), ceil((float)row_size / 4.0)); timer->onTimer(2); matMul_kernel_previous_lab << <gridDim2, blockDim2 >> > (dA, dB, dC, row_size, k_size, col_size); hipDeviceSynchronize(); timer->offTimer(2); timer->onTimer(3); matMul_kernel_shared << <gridDim, blockDim >> > (dA, dB, dC, row_size, k_size, col_size); hipDeviceSynchronize(); timer->offTimer(3); timer->onTimer(4); matMul_kernel_shared_ver2 << <gridDim, blockDim >> > (dA, dB, 
dC, row_size, k_size, col_size); hipDeviceSynchronize(); timer->offTimer(4); timer->onTimer(6); hipMemcpy(newMatrix, dC, sizeof(float)*row_size*col_size, hipMemcpyDeviceToHost); timer->offTimer(6); hipFree(&dA); hipFree(&dB); hipFree(&dC); return newMatrix; }
4644e129f5b2c392fce9a78dbf9cb3f1fa11b9cb.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "device_functions.h" #include "cuda.h" #include "DS_timer.h" #include <iostream> #include <stdio.h> #include <omp.h> #include <math.h> #ifdef __INTELLISENSE__ void __syncthreads(); #endif #define M_SIZE 2048 #define N_SIZE 2048 #define K_SIZE 2048 DS_timer *timer = new DS_timer(7); float* matrixGen(int m, int n); float* matrixMultiCPU(float *matrixA, float *matrixB, int row_size, int k_size, int col_size); float* matrixMultiGPU(float *matrixA, float *matrixB, int row_size, int k_size, int col_size); bool matrixEqual(float *matrixA, float *matrixB, int row_size, int col_size); __global__ void matMul_kernel(float *_A, float *_B, float *_C, int row_size, int k_size, int col_size) { int row = threadIdx.y + blockDim.y * blockIdx.y; int col = threadIdx.x + blockDim.x * blockIdx.x; int index = row * col_size + col; _C[index] = 0; if (row >= row_size || col >= col_size) { } else for (int k = 0; k < k_size; k++) _C[index] += _A[row*k_size + k] * _B[k * col_size + col]; } __global__ void matMul_kernel_previous_lab(float *_A, float *_B, float *_C, int row_size, int k_size, int col_size) { int row = threadIdx.y + blockDim.y * blockIdx.y; int col = threadIdx.x + blockDim.x * blockIdx.x; int index = row * col_size + col; __shared__ float sA[4][K_SIZE]; //8kb(2048*4) * 4 = 32 __shared__ float sB[K_SIZE][2]; //8kb * 2 = 16 for (int k = 0; k < K_SIZE; k++) { sA[threadIdx.y][k] = _A[row*K_SIZE + k]; sB[k][threadIdx.x] = _B[col + col_size * k]; } __syncthreads(); _C[index] = 0; if (row >= row_size || col >= col_size) { } else for (int k = 0; k < k_size; k++) _C[index] += sA[threadIdx.y][k] * sB[k][threadIdx.x]; } __global__ void matMul_kernel_shared(float *_A, float *_B, float *_C, int row_size, int k_size, int col_size) { int row = threadIdx.y + blockDim.y * blockIdx.y; int col = threadIdx.x + blockDim.x * blockIdx.x; int index = row * col_size + col; __shared__ float sA[16][16]; __shared__ float sB[16][16]; int localRow = threadIdx.y; int localCol = threadIdx.x; float val = 0; for (int bID = 0; bID < ceil((float)k_size / 16); bID++) { int offset = bID * 16; if (row >= row_size || offset + localCol >= k_size) sA[localRow][localCol] = 0; else sA[localRow][localCol] = _A[row*K_SIZE + offset + localCol]; if (col >= col_size || offset + localRow >= k_size) sB[localRow][localCol] = 0; else sB[localRow][localCol] = _B[(offset + localRow)*col_size + col]; __syncthreads(); for (int k = 0; k < 16; k++) { val += __fmul_rn(sA[localRow][k], sB[k][localCol]); } __syncthreads(); } if (row >= row_size || col >= col_size) { } _C[index] = val; } __global__ void matMul_kernel_shared_ver2(float *_A, float *_B, float *_C, int row_size, int k_size, int col_size) { int row = threadIdx.y + blockDim.y * blockIdx.y; int col = threadIdx.x + blockDim.x * blockIdx.x; int index = row * col_size + col; __shared__ float sA[16][16]; __shared__ float sB[16][16]; int localRow = threadIdx.y; int localCol = threadIdx.x; float val = 0; for (int bID = 0; bID < ceil((float)k_size / 16); bID++) { int offset = bID * 16; if (row >= row_size || offset + localCol >= k_size) sA[localCol][localRow] = 0; else sA[localCol][localRow] = _A[row*K_SIZE + offset + localCol]; if (col >= col_size || offset + localRow >= k_size) sB[localRow][localCol] = 0; else sB[localRow][localCol] = _B[(offset + localRow)*col_size + col]; __syncthreads(); for (int k = 0; k < 16; k++) { val += __fmul_rn(sA[k][localRow], sB[k][localCol]); } __syncthreads(); } if (row >= row_size || col >= col_size) { } 
_C[index] = val; } int main() { timer->initTimers(); timer->setTimerName(0, "host calc matrix "); timer->setTimerName(1, "gpu calc matrix "); timer->setTimerName(2, "gpu calc matrix (shared memory previous model)"); timer->setTimerName(3, "gpu calc matrix (shared memory)"); timer->setTimerName(4, "gpu calc matrix (shared memory ver2)"); timer->setTimerName(5, "memcpy host to device "); timer->setTimerName(6, "memcpy device to host "); float *matrixA = matrixGen(M_SIZE, K_SIZE); float *matrixB = matrixGen(K_SIZE, N_SIZE); std::cout << "gen" << std::endl; timer->onTimer(0); float *hostMatrix = matrixMultiCPU(matrixA, matrixB, M_SIZE, K_SIZE, N_SIZE); timer->offTimer(0); std::cout << "host calc is end!" << std::endl; float *gpuMatrix = matrixMultiGPU(matrixA, matrixB, M_SIZE, K_SIZE, N_SIZE); std::cout << "gpu calc is end!" << std::endl; if (matrixEqual(hostMatrix, gpuMatrix, M_SIZE, N_SIZE)) std::cout << "Two Matrix is equal!" << std::endl; else { std::cout << "Two Matrix is not equal!" << std::endl; } timer->printTimer(); delete[] matrixA; delete[] matrixB; return 0; } float* matrixGen(int m, int n) { float *newMatrix = new float[m*n]; #pragma omp parallel for for (int i = 0; i < m; i++) { for (int j = 0; j < n; j++) { newMatrix[i * n + j] = rand() /RAND_MAX * 10; } } return newMatrix; } float* matrixMultiCPU(float *matrixA, float *matrixB, int row_size, int k_size, int col_size) { float *newMatrix = new float[row_size*col_size]; #pragma omp parallel for for (int i = 0; i < row_size; i++) { for (int j = 0; j < col_size; j++) { newMatrix[i * col_size + j] = 0.0; for (int k = 0; k < k_size; k++) { newMatrix[i * col_size + j] += matrixA[i * k_size + k] * matrixB[k * col_size + j]; } } } return newMatrix; } bool matrixEqual(float *matrixA, float *matrixB, int row_size, int col_size) { int size = row_size * col_size; for (int i = 0; i < size; i++) { float diff = matrixA[i] - matrixB[i]; if (fabs(diff) > 0.00001) { std::cout << diff << std::endl; std::cout << "matrixA[" << i << "] : " << matrixA[i] << " != matrixB[" << i << "] : " << matrixB[i] << std::endl; return false; } } return true; } float* matrixMultiGPU(float *matrixA, float *matrixB, int row_size, int k_size, int col_size) { float *newMatrix = new float[row_size*col_size]; float *dA = NULL, *dB = NULL, *dC = NULL; //device memoryAlloc cudaMalloc(&dA, sizeof(float)*row_size*k_size); cudaMalloc(&dB, sizeof(float)*col_size*k_size); cudaMalloc(&dC, sizeof(float)*row_size*col_size); timer->onTimer(5); //cpy matrix data h to d cudaMemcpy(dA, matrixA, sizeof(float)*row_size*k_size, cudaMemcpyHostToDevice); cudaMemcpy(dB, matrixB, sizeof(float)*k_size*col_size, cudaMemcpyHostToDevice); timer->offTimer(5); dim3 blockDim(16, 16); //256 threads per block dim3 gridDim(ceil((float)col_size / 16.0), ceil((float)row_size / 16.0)); timer->onTimer(1); matMul_kernel << <gridDim, blockDim >> > (dA, dB, dC, row_size, k_size, col_size); cudaThreadSynchronize(); timer->offTimer(1); dim3 blockDim2(2, 4); //8 threads per block dim3 gridDim2(ceil((float)col_size / 2.0), ceil((float)row_size / 4.0)); timer->onTimer(2); matMul_kernel_previous_lab << <gridDim2, blockDim2 >> > (dA, dB, dC, row_size, k_size, col_size); cudaThreadSynchronize(); timer->offTimer(2); timer->onTimer(3); matMul_kernel_shared << <gridDim, blockDim >> > (dA, dB, dC, row_size, k_size, col_size); cudaThreadSynchronize(); timer->offTimer(3); timer->onTimer(4); matMul_kernel_shared_ver2 << <gridDim, blockDim >> > (dA, dB, dC, row_size, k_size, col_size); cudaThreadSynchronize(); 
timer->offTimer(4); timer->onTimer(6); cudaMemcpy(newMatrix, dC, sizeof(float)*row_size*col_size, cudaMemcpyDeviceToHost); timer->offTimer(6); cudaFree(&dA); cudaFree(&dB); cudaFree(&dC); return newMatrix; }
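The matMul_kernel_shared variants above stage 16 x 16 tiles of A and B in shared memory so each global element is read once per tile pass rather than once per output element; ver2 additionally stores the A tile transposed (sA[localCol][localRow]). The launch math those kernels assume, namely a TILE x TILE block, a grid rounded up to cover an M x N result, and ceil(K / TILE) passes of the bID loop, in a small stand-alone sketch (helper names are mine):

#include <cstdio>
#include <cuda_runtime.h>

constexpr int TILE = 16;   // matches the 16x16 sA/sB tiles in the kernels above

static dim3 matmul_grid(int rows, int cols) {
    return dim3((cols + TILE - 1) / TILE, (rows + TILE - 1) / TILE);
}

int main() {
    const int M = 2048, N = 2048, K = 2048;   // sizes used above
    dim3 block(TILE, TILE);                   // 256 threads per block
    dim3 grid = matmul_grid(M, N);            // 128 x 128 blocks
    int k_tiles = (K + TILE - 1) / TILE;      // 128 passes of the bID loop per block
    printf("grid (%u, %u), block (%u, %u), %d K-tiles per block\n",
           grid.x, grid.y, block.x, block.y, k_tiles);
    return 0;
}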
27a1af99185210fde16b4c77d1edf2444ab6b070.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <rocblas.h> #define M 16 #define N 128 #define K 512 int main(){ float *d_a, *d_b, *d_c; const float alpha = 1.0f; const float beta = 0.0f; hipMalloc(&d_b, K*M*sizeof(float)); hipMalloc(&d_c, N*M*sizeof(float)); hipMalloc(&d_a, N*K*sizeof(float)); hipblasHandle_t my_handle; hipblasStatus_t my_status = hipblasCreate(&my_handle); if (my_status != HIPBLAS_STATUS_SUCCESS) {printf("handle failure %d\n", (int)my_status); return 1;} hipMemset(d_a, 0, N*K*sizeof(float)); hipMemset(d_b, 0, K*M*sizeof(float)); my_status = hipblasSgemm(my_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, M, N, K, &alpha, d_b, M, d_a, K, &beta, d_c, M); if (my_status != HIPBLAS_STATUS_SUCCESS) {printf("Sgemm failure %d\n", (int)my_status); return 1;} printf("Success\n"); return 0; }
27a1af99185210fde16b4c77d1edf2444ab6b070.cu
#include <stdio.h> #include <cublas_v2.h> #define M 16 #define N 128 #define K 512 int main(){ float *d_a, *d_b, *d_c; const float alpha = 1.0f; const float beta = 0.0f; cudaMalloc(&d_b, K*M*sizeof(float)); cudaMalloc(&d_c, N*M*sizeof(float)); cudaMalloc(&d_a, N*K*sizeof(float)); cublasHandle_t my_handle; cublasStatus_t my_status = cublasCreate(&my_handle); if (my_status != CUBLAS_STATUS_SUCCESS) {printf("handle failure %d\n", (int)my_status); return 1;} cudaMemset(d_a, 0, N*K*sizeof(float)); cudaMemset(d_b, 0, K*M*sizeof(float)); my_status = cublasSgemm(my_handle, CUBLAS_OP_N, CUBLAS_OP_N, M, N, K, &alpha, d_b, M, d_a, K, &beta, d_c, M); if (my_status != CUBLAS_STATUS_SUCCESS) {printf("Sgemm failure %d\n", (int)my_status); return 1;} printf("Success\n"); return 0; }
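The pair above is only a smoke test of the cublasSgemm → hipblasSgemm mapping: zero-filled operands, no result check, and the handle and buffers are never released. A slightly fuller stand-alone sketch in the CUDA form, multiplying a known column-major 2 x 2 matrix by the identity and cleaning up afterwards; matrix values and names are illustrative:

#include <cstdio>
#include <cuda_runtime.h>
#include <cublas_v2.h>

int main() {
    const int n = 2;
    const float alpha = 1.0f, beta = 0.0f;
    float hA[n * n] = {1, 2, 3, 4};    // column-major A
    float hB[n * n] = {1, 0, 0, 1};    // identity
    float hC[n * n] = {0};
    float *dA, *dB, *dC;
    cudaMalloc(&dA, sizeof(hA)); cudaMalloc(&dB, sizeof(hB)); cudaMalloc(&dC, sizeof(hC));
    cudaMemcpy(dA, hA, sizeof(hA), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB, sizeof(hB), cudaMemcpyHostToDevice);

    cublasHandle_t handle;
    if (cublasCreate(&handle) != CUBLAS_STATUS_SUCCESS) { printf("handle failure\n"); return 1; }
    // C = A * I, so C should come back equal to A
    cublasStatus_t st = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                                    n, n, n, &alpha, dA, n, dB, n, &beta, dC, n);
    if (st != CUBLAS_STATUS_SUCCESS) { printf("Sgemm failure %d\n", (int)st); return 1; }
    cudaMemcpy(hC, dC, sizeof(hC), cudaMemcpyDeviceToHost);
    printf("C = [%g %g; %g %g]\n", hC[0], hC[2], hC[1], hC[3]);   // expect [1 3; 2 4]

    cublasDestroy(handle);
    cudaFree(dA); cudaFree(dB); cudaFree(dC);
    return 0;
}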
b39e4468d6c295f1e35982ac0f09c8166171e565.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cfloat> #include <vector> #include "caffe/layers/eltwise_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a, const Dtype* bottom_data_b, const int blob_idx, Dtype* top_data, int* mask) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype maxval = -FLT_MAX; int maxidx = -1; if (bottom_data_a[index] > bottom_data_b[index]) { // only update for very first bottom_data blob (blob_idx == 0) if (blob_idx == 0) { maxval = bottom_data_a[index]; top_data[index] = maxval; maxidx = blob_idx; mask[index] = maxidx; } } else { maxval = bottom_data_b[index]; top_data[index] = maxval; maxidx = blob_idx + 1; mask[index] = maxidx; } } } template <typename Dtype> void EltwiseLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int* mask = NULL; const int count = top[0]->count(); Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* mutable_coeff = NULL; switch (op_) { case EltwiseParameter_EltwiseOp_PROD: caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), top_data); for (int i = 2; i < bottom.size(); ++i) { caffe_gpu_mul(count, top_data, bottom[i]->gpu_data(), top_data); } break; case EltwiseParameter_EltwiseOp_SUM: caffe_gpu_set(count, Dtype(0.), top_data); // TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1? for (int i = 0; i < bottom.size(); ++i) { caffe_gpu_axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data); } break; case EltwiseParameter_EltwiseOp_MAX: mask = max_idx_.mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxForward<Dtype>) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data, mask); for (int i = 2; i < bottom.size(); ++i) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_data, bottom[i]->gpu_data(), i-1, top_data, mask); } break; case EltwiseParameter_EltwiseOp_SORT: caffe_gpu_set(count, Dtype(1), top_data); for (int i = 0; i < bottom.size(); ++i) { caffe_copy(count, bottom[i]->gpu_data(), sort_temp_.mutable_gpu_data()); caffe_gpu_add_scalar(count, Dtype(1), sort_temp_.mutable_gpu_data()); caffe_gpu_mul(count, top_data, sort_temp_.gpu_data(), top_data); } caffe_gpu_add_scalar(count, Dtype(-1), top_data); break; case EltwiseParameter_EltwiseOp_WEIGHTEDSUM: mutable_coeff = this->blobs_[0]->gpu_data(); caffe_gpu_set(count, Dtype(0.), top_data); // TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1? 
for (int i = 0; i < bottom.size(); ++i) { caffe_gpu_axpy(count, mutable_coeff[i], bottom[i]->gpu_data(), top_data); } break; default: LOG(FATAL) << "Unknown elementwise operation."; } } template <typename Dtype> __global__ void MaxBackward(const int nthreads, const Dtype* top_diff, const int blob_idx, const int* mask, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype gradient = 0; if (mask[index] == blob_idx) { gradient += top_diff[index]; } bottom_diff[index] = gradient; } } template <typename Dtype> void EltwiseLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const int* mask = NULL; Dtype* mutable_coeff_diff = NULL; const Dtype* mutable_coeff_data = NULL; const int count = top[0]->count(); const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); for (int i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: if (stable_prod_grad_) { bool initialized = false; for (int j = 0; j < bottom.size(); ++j) { if (i == j) { continue; } if (!initialized) { caffe_copy(count, bottom[j]->gpu_data(), bottom_diff); initialized = true; } else { caffe_gpu_mul(count, bottom[j]->gpu_data(), bottom_diff, bottom_diff); } } } else { caffe_gpu_div(count, top_data, bottom_data, bottom_diff); } caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); break; case EltwiseParameter_EltwiseOp_SUM: if (coeffs_[i] == Dtype(1.)) { caffe_copy(count, top_diff, bottom_diff); } else { caffe_gpu_scale(count, coeffs_[i], top_diff, bottom_diff); } break; case EltwiseParameter_EltwiseOp_MAX: mask = max_idx_.gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, i, mask, bottom_diff); break; case EltwiseParameter_EltwiseOp_SORT: if (stable_prod_grad_) { bool initialized = false; for (int j = 0; j < bottom.size(); ++j) { if (i == j) { continue; } if (!initialized) { caffe_copy(count, bottom[j]->gpu_data(), bottom_diff); caffe_gpu_add_scalar(count, Dtype(1), bottom_diff); initialized = true; } else { caffe_copy(count, bottom[j]->gpu_data(), sort_temp_.mutable_gpu_data()); caffe_gpu_add_scalar(count, Dtype(1), sort_temp_.mutable_gpu_data()); caffe_gpu_mul(count, sort_temp_.gpu_data(), bottom_diff, bottom_diff); } } } else { /*caffe_copy(count, bottom_data, sort_temp_.mutable_gpu_data()); caffe_gpu_add_scalar(count, Dtype(1), sort_temp_.mutable_gpu_data()); caffe_gpu_div(count, top_data, sort_temp_.gpu_data(), bottom_diff);*/ NOT_IMPLEMENTED; } caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); break; case EltwiseParameter_EltwiseOp_WEIGHTEDSUM: mutable_coeff_diff = this->blobs_[0]->mutable_gpu_diff(); mutable_coeff_data = this->blobs_[0]->gpu_data(); caffe_gpu_scale(count, mutable_coeff_data[i], top_diff, bottom_diff); caffe_gpu_dot(count, top_diff, bottom_data, &mutable_coeff_diff[i]); mutable_coeff_diff[i] /= Dtype(bottom[0]->num()); break; default: LOG(FATAL) << "Unknown elementwise operation."; } } } } INSTANTIATE_LAYER_GPU_FUNCS(EltwiseLayer); } // namespace caffe
b39e4468d6c295f1e35982ac0f09c8166171e565.cu
#include <cfloat> #include <vector> #include "caffe/layers/eltwise_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a, const Dtype* bottom_data_b, const int blob_idx, Dtype* top_data, int* mask) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype maxval = -FLT_MAX; int maxidx = -1; if (bottom_data_a[index] > bottom_data_b[index]) { // only update for very first bottom_data blob (blob_idx == 0) if (blob_idx == 0) { maxval = bottom_data_a[index]; top_data[index] = maxval; maxidx = blob_idx; mask[index] = maxidx; } } else { maxval = bottom_data_b[index]; top_data[index] = maxval; maxidx = blob_idx + 1; mask[index] = maxidx; } } } template <typename Dtype> void EltwiseLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int* mask = NULL; const int count = top[0]->count(); Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* mutable_coeff = NULL; switch (op_) { case EltwiseParameter_EltwiseOp_PROD: caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), top_data); for (int i = 2; i < bottom.size(); ++i) { caffe_gpu_mul(count, top_data, bottom[i]->gpu_data(), top_data); } break; case EltwiseParameter_EltwiseOp_SUM: caffe_gpu_set(count, Dtype(0.), top_data); // TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1? for (int i = 0; i < bottom.size(); ++i) { caffe_gpu_axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data); } break; case EltwiseParameter_EltwiseOp_MAX: mask = max_idx_.mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) MaxForward<Dtype> <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data, mask); for (int i = 2; i < bottom.size(); ++i) { // NOLINT_NEXT_LINE(whitespace/operators) MaxForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_data, bottom[i]->gpu_data(), i-1, top_data, mask); } break; case EltwiseParameter_EltwiseOp_SORT: caffe_gpu_set(count, Dtype(1), top_data); for (int i = 0; i < bottom.size(); ++i) { caffe_copy(count, bottom[i]->gpu_data(), sort_temp_.mutable_gpu_data()); caffe_gpu_add_scalar(count, Dtype(1), sort_temp_.mutable_gpu_data()); caffe_gpu_mul(count, top_data, sort_temp_.gpu_data(), top_data); } caffe_gpu_add_scalar(count, Dtype(-1), top_data); break; case EltwiseParameter_EltwiseOp_WEIGHTEDSUM: mutable_coeff = this->blobs_[0]->gpu_data(); caffe_gpu_set(count, Dtype(0.), top_data); // TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1? 
for (int i = 0; i < bottom.size(); ++i) { caffe_gpu_axpy(count, mutable_coeff[i], bottom[i]->gpu_data(), top_data); } break; default: LOG(FATAL) << "Unknown elementwise operation."; } } template <typename Dtype> __global__ void MaxBackward(const int nthreads, const Dtype* top_diff, const int blob_idx, const int* mask, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype gradient = 0; if (mask[index] == blob_idx) { gradient += top_diff[index]; } bottom_diff[index] = gradient; } } template <typename Dtype> void EltwiseLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const int* mask = NULL; Dtype* mutable_coeff_diff = NULL; const Dtype* mutable_coeff_data = NULL; const int count = top[0]->count(); const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); for (int i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: if (stable_prod_grad_) { bool initialized = false; for (int j = 0; j < bottom.size(); ++j) { if (i == j) { continue; } if (!initialized) { caffe_copy(count, bottom[j]->gpu_data(), bottom_diff); initialized = true; } else { caffe_gpu_mul(count, bottom[j]->gpu_data(), bottom_diff, bottom_diff); } } } else { caffe_gpu_div(count, top_data, bottom_data, bottom_diff); } caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); break; case EltwiseParameter_EltwiseOp_SUM: if (coeffs_[i] == Dtype(1.)) { caffe_copy(count, top_diff, bottom_diff); } else { caffe_gpu_scale(count, coeffs_[i], top_diff, bottom_diff); } break; case EltwiseParameter_EltwiseOp_MAX: mask = max_idx_.gpu_data(); MaxBackward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, i, mask, bottom_diff); break; case EltwiseParameter_EltwiseOp_SORT: if (stable_prod_grad_) { bool initialized = false; for (int j = 0; j < bottom.size(); ++j) { if (i == j) { continue; } if (!initialized) { caffe_copy(count, bottom[j]->gpu_data(), bottom_diff); caffe_gpu_add_scalar(count, Dtype(1), bottom_diff); initialized = true; } else { caffe_copy(count, bottom[j]->gpu_data(), sort_temp_.mutable_gpu_data()); caffe_gpu_add_scalar(count, Dtype(1), sort_temp_.mutable_gpu_data()); caffe_gpu_mul(count, sort_temp_.gpu_data(), bottom_diff, bottom_diff); } } } else { /*caffe_copy(count, bottom_data, sort_temp_.mutable_gpu_data()); caffe_gpu_add_scalar(count, Dtype(1), sort_temp_.mutable_gpu_data()); caffe_gpu_div(count, top_data, sort_temp_.gpu_data(), bottom_diff);*/ NOT_IMPLEMENTED; } caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); break; case EltwiseParameter_EltwiseOp_WEIGHTEDSUM: mutable_coeff_diff = this->blobs_[0]->mutable_gpu_diff(); mutable_coeff_data = this->blobs_[0]->gpu_data(); caffe_gpu_scale(count, mutable_coeff_data[i], top_diff, bottom_diff); caffe_gpu_dot(count, top_diff, bottom_data, &mutable_coeff_diff[i]); mutable_coeff_diff[i] /= Dtype(bottom[0]->num()); break; default: LOG(FATAL) << "Unknown elementwise operation."; } } } } INSTANTIATE_LAYER_GPU_FUNCS(EltwiseLayer); } // namespace caffe
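Both versions of this layer rely on Caffe's launch helpers rather than raw grid arithmetic. For readability only, here is an approximate sketch of what those helpers expand to; the authoritative definitions live in caffe/util/device_alternate.hpp, and the 512-thread constant varies between Caffe versions, so treat the numbers as an assumption.

// Approximate Caffe launch helpers, reproduced from memory for context only.
const int CAFFE_CUDA_NUM_THREADS = 512;          // threads per block (version dependent)

inline int CAFFE_GET_BLOCKS(const int n) {       // ceil(n / CAFFE_CUDA_NUM_THREADS)
  return (n + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS;
}

// Grid-stride loop: each thread covers index i, then i + blockDim*gridDim, ...
#define CUDA_KERNEL_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
       i < (n); \
       i += blockDim.x * gridDim.x)

This is why MaxForward and MaxBackward can be launched with a one-dimensional grid and still cover an arbitrary element count.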
eabe8326bbb953cec35ca56012853beafcf925a2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gpu_utils.cuh" #include "kernel_utils.cuh" #include "math_utils.cuh" #include "verlet_integrator.hpp" #include "kernels/k_integrator.cuh" namespace timemachine { VelocityVerletIntegrator::VelocityVerletIntegrator(int N, double dt, const double *h_cbs) : N_(N), dt_(dt), initialized_(false), runner_() { d_cbs_ = gpuErrchkCudaMallocAndCopy(h_cbs, N); cudaSafeMalloc(&d_du_dx_, N * 3 * sizeof(*d_du_dx_)); } VelocityVerletIntegrator::~VelocityVerletIntegrator() { gpuErrchk(hipFree(d_cbs_)); gpuErrchk(hipFree(d_du_dx_)); } void VelocityVerletIntegrator::step_fwd( std::vector<std::shared_ptr<BoundPotential>> &bps, double *d_x_t, double *d_v_t, double *d_box_t, unsigned int *d_idxs, hipStream_t stream) { gpuErrchk(hipMemsetAsync(d_du_dx_, 0, N_ * 3 * sizeof(*d_du_dx_), stream)); const int D = 3; size_t tpb = DEFAULT_THREADS_PER_BLOCK; size_t n_blocks = ceil_divide(N_, tpb); dim3 dimGrid_dx(n_blocks, D); for (int i = 0; i < bps.size(); i++) { bps[i]->execute_device(N_, d_x_t, d_box_t, d_du_dx_, nullptr, nullptr, stream); } hipLaunchKernelGGL(( update_forward_velocity_verlet<double>) , dim3(dimGrid_dx), dim3(tpb), 0, stream, N_, D, d_idxs, d_cbs_, d_x_t, d_v_t, d_du_dx_, dt_); gpuErrchk(hipPeekAtLastError()); } void VelocityVerletIntegrator::initialize( std::vector<std::shared_ptr<BoundPotential>> &bps, double *d_x_t, double *d_v_t, double *d_box_t, unsigned int *d_idxs, hipStream_t stream) { if (initialized_) { throw std::runtime_error("initialized twice"); } gpuErrchk(hipMemsetAsync(d_du_dx_, 0, N_ * 3 * sizeof(*d_du_dx_), stream)); const int D = 3; size_t tpb = DEFAULT_THREADS_PER_BLOCK; size_t n_blocks = ceil_divide(N_, tpb); dim3 dimGrid_dx(n_blocks, D); runner_.execute_potentials( bps, N_, d_x_t, d_box_t, d_du_dx_, // we only need the forces nullptr, nullptr, stream); hipLaunchKernelGGL(( half_step_velocity_verlet<double, true>) , dim3(dimGrid_dx), dim3(tpb), 0, stream, N_, D, d_idxs, d_cbs_, d_x_t, d_v_t, d_du_dx_, dt_); gpuErrchk(hipPeekAtLastError()); initialized_ = true; }; void VelocityVerletIntegrator::finalize( std::vector<std::shared_ptr<BoundPotential>> &bps, double *d_x_t, double *d_v_t, double *d_box_t, unsigned int *d_idxs, hipStream_t stream) { if (!initialized_) { throw std::runtime_error("not initialized"); } gpuErrchk(hipMemsetAsync(d_du_dx_, 0, N_ * 3 * sizeof(*d_du_dx_), stream)); const int D = 3; size_t tpb = DEFAULT_THREADS_PER_BLOCK; size_t n_blocks = ceil_divide(N_, tpb); dim3 dimGrid_dx(n_blocks, D); for (int i = 0; i < bps.size(); i++) { bps[i]->execute_device( N_, d_x_t, d_box_t, d_du_dx_, // we only need the forces nullptr, nullptr, stream); } hipLaunchKernelGGL(( half_step_velocity_verlet<double, false>) , dim3(dimGrid_dx), dim3(tpb), 0, stream, N_, D, d_idxs, d_cbs_, d_x_t, d_v_t, d_du_dx_, dt_); gpuErrchk(hipPeekAtLastError()); initialized_ = false; }; } // end namespace timemachine
eabe8326bbb953cec35ca56012853beafcf925a2.cu
#include "gpu_utils.cuh" #include "kernel_utils.cuh" #include "math_utils.cuh" #include "verlet_integrator.hpp" #include "kernels/k_integrator.cuh" namespace timemachine { VelocityVerletIntegrator::VelocityVerletIntegrator(int N, double dt, const double *h_cbs) : N_(N), dt_(dt), initialized_(false), runner_() { d_cbs_ = gpuErrchkCudaMallocAndCopy(h_cbs, N); cudaSafeMalloc(&d_du_dx_, N * 3 * sizeof(*d_du_dx_)); } VelocityVerletIntegrator::~VelocityVerletIntegrator() { gpuErrchk(cudaFree(d_cbs_)); gpuErrchk(cudaFree(d_du_dx_)); } void VelocityVerletIntegrator::step_fwd( std::vector<std::shared_ptr<BoundPotential>> &bps, double *d_x_t, double *d_v_t, double *d_box_t, unsigned int *d_idxs, cudaStream_t stream) { gpuErrchk(cudaMemsetAsync(d_du_dx_, 0, N_ * 3 * sizeof(*d_du_dx_), stream)); const int D = 3; size_t tpb = DEFAULT_THREADS_PER_BLOCK; size_t n_blocks = ceil_divide(N_, tpb); dim3 dimGrid_dx(n_blocks, D); for (int i = 0; i < bps.size(); i++) { bps[i]->execute_device(N_, d_x_t, d_box_t, d_du_dx_, nullptr, nullptr, stream); } update_forward_velocity_verlet<double> <<<dimGrid_dx, tpb, 0, stream>>>(N_, D, d_idxs, d_cbs_, d_x_t, d_v_t, d_du_dx_, dt_); gpuErrchk(cudaPeekAtLastError()); } void VelocityVerletIntegrator::initialize( std::vector<std::shared_ptr<BoundPotential>> &bps, double *d_x_t, double *d_v_t, double *d_box_t, unsigned int *d_idxs, cudaStream_t stream) { if (initialized_) { throw std::runtime_error("initialized twice"); } gpuErrchk(cudaMemsetAsync(d_du_dx_, 0, N_ * 3 * sizeof(*d_du_dx_), stream)); const int D = 3; size_t tpb = DEFAULT_THREADS_PER_BLOCK; size_t n_blocks = ceil_divide(N_, tpb); dim3 dimGrid_dx(n_blocks, D); runner_.execute_potentials( bps, N_, d_x_t, d_box_t, d_du_dx_, // we only need the forces nullptr, nullptr, stream); half_step_velocity_verlet<double, true> <<<dimGrid_dx, tpb, 0, stream>>>(N_, D, d_idxs, d_cbs_, d_x_t, d_v_t, d_du_dx_, dt_); gpuErrchk(cudaPeekAtLastError()); initialized_ = true; }; void VelocityVerletIntegrator::finalize( std::vector<std::shared_ptr<BoundPotential>> &bps, double *d_x_t, double *d_v_t, double *d_box_t, unsigned int *d_idxs, cudaStream_t stream) { if (!initialized_) { throw std::runtime_error("not initialized"); } gpuErrchk(cudaMemsetAsync(d_du_dx_, 0, N_ * 3 * sizeof(*d_du_dx_), stream)); const int D = 3; size_t tpb = DEFAULT_THREADS_PER_BLOCK; size_t n_blocks = ceil_divide(N_, tpb); dim3 dimGrid_dx(n_blocks, D); for (int i = 0; i < bps.size(); i++) { bps[i]->execute_device( N_, d_x_t, d_box_t, d_du_dx_, // we only need the forces nullptr, nullptr, stream); } half_step_velocity_verlet<double, false> <<<dimGrid_dx, tpb, 0, stream>>>(N_, D, d_idxs, d_cbs_, d_x_t, d_v_t, d_du_dx_, dt_); gpuErrchk(cudaPeekAtLastError()); initialized_ = false; }; } // end namespace timemachine
9304a84ae586b88c3799d3b63e10065015498bc0.hip
// !!! This is a file automatically generated by hipify!!! #include <benchmark/benchmark.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "graphic/kernels/trace.h" #include "graphic/render/shading.h" #include "management/window.h" #include "management/world.h" #include <chrono> #include <thread> void raytrace_many_cuda(hipSurfaceObject_t& Surface, const camera& c, const triangle* Triangles, std::size_t TriangleCount) { dim3 dimBlock(32, 32); dim3 dimGrid((c.width() + dimBlock.x) / dimBlock.x, (c.height() + dimBlock.y) / dimBlock.y); hipLaunchKernelGGL(( trace_many_triangles_with_camera), dim3(dimGrid), dim3(dimBlock), 0, 0, Surface, c, {Triangles, TriangleCount}, c.width(), c.height()); hipDeviceSynchronize(); } static void BM_SceneRender(benchmark::State& state) { window win(800, 600, "Material Scene"); auto w = win.getWindow(); glfwMakeContextCurrent(w); camera c(win.getWidth(), win.getHeight(), {0.0f, 0.5f, 2.5f}, {0.01f, 0.f, -1.f}); surface_raii render_surface(win.getWidth(), win.getHeight()); world_geometry scene("material_scene.obj"); state.counters["vertices"] = scene.vertex_count(); state.counters["normals"] = scene.normal_count(); state.counters["triangles"] = scene.triangle_count(); state.counters["lights"] = 4; // Light Setup similar to blender (position and stuff taken from there) float spec[3] = {0.8f, 0.8f, 0.8f}; float diff[3] = {0.8f, 0.8f, 0.8f}; scene.add_light(phong_light(spec, diff), {0.8f, 0.9f, 1.5f}); scene.add_light(phong_light(spec, diff), {1.7f, -1.1f, -0.3f}); scene.add_light(phong_light(spec, diff), {-1.3f, 0.8f, 2.0f}); scene.add_light(phong_light(spec, diff), {-1.7f, -1.7f, 0.8f}); const auto& triangles = scene.triangles(); while (state.KeepRunning()) { render_flat<no_shadow_tag>(render_surface.getSurface(), c, scene.handle()); } render_surface.render_gl_texture(); render_surface.save_as_png("material_scene.png"); } static void BM_SceneDepth(benchmark::State& state) { window win(800, 600, "Material Scene"); auto w = win.getWindow(); glfwMakeContextCurrent(w); camera c(win.getWidth(), win.getHeight(), {0.0f, 0.5f, 2.5f}, {0.01f, 0.f, -1.f}); surface_raii render_surface(win.getWidth(), win.getHeight()); world_geometry scene("material_scene.obj"); state.counters["vertices"] = scene.vertex_count(); state.counters["normals"] = scene.normal_count(); state.counters["triangles"] = scene.triangle_count(); const auto& triangles = scene.triangles(); while (state.KeepRunning()) { raytrace_many_cuda(render_surface.getSurface(), c, triangles.data().get(), triangles.size()); } render_surface.render_gl_texture(); render_surface.save_as_png("material_depth.png"); } BENCHMARK(BM_SceneRender)->Unit(benchmark::kMicrosecond)->MinTime(1.0); BENCHMARK(BM_SceneDepth)->Unit(benchmark::kMicrosecond)->MinTime(1.0); BENCHMARK_MAIN()
9304a84ae586b88c3799d3b63e10065015498bc0.cu
#include <benchmark/benchmark.h> #include <cuda.h> #include <cuda_runtime.h> #include "graphic/kernels/trace.h" #include "graphic/render/shading.h" #include "management/window.h" #include "management/world.h" #include <chrono> #include <thread> void raytrace_many_cuda(cudaSurfaceObject_t& Surface, const camera& c, const triangle* Triangles, std::size_t TriangleCount) { dim3 dimBlock(32, 32); dim3 dimGrid((c.width() + dimBlock.x) / dimBlock.x, (c.height() + dimBlock.y) / dimBlock.y); trace_many_triangles_with_camera<<<dimGrid, dimBlock>>>( Surface, c, {Triangles, TriangleCount}, c.width(), c.height()); cudaDeviceSynchronize(); } static void BM_SceneRender(benchmark::State& state) { window win(800, 600, "Material Scene"); auto w = win.getWindow(); glfwMakeContextCurrent(w); camera c(win.getWidth(), win.getHeight(), {0.0f, 0.5f, 2.5f}, {0.01f, 0.f, -1.f}); surface_raii render_surface(win.getWidth(), win.getHeight()); world_geometry scene("material_scene.obj"); state.counters["vertices"] = scene.vertex_count(); state.counters["normals"] = scene.normal_count(); state.counters["triangles"] = scene.triangle_count(); state.counters["lights"] = 4; // Light Setup similar to blender (position and stuff taken from there) float spec[3] = {0.8f, 0.8f, 0.8f}; float diff[3] = {0.8f, 0.8f, 0.8f}; scene.add_light(phong_light(spec, diff), {0.8f, 0.9f, 1.5f}); scene.add_light(phong_light(spec, diff), {1.7f, -1.1f, -0.3f}); scene.add_light(phong_light(spec, diff), {-1.3f, 0.8f, 2.0f}); scene.add_light(phong_light(spec, diff), {-1.7f, -1.7f, 0.8f}); const auto& triangles = scene.triangles(); while (state.KeepRunning()) { render_flat<no_shadow_tag>(render_surface.getSurface(), c, scene.handle()); } render_surface.render_gl_texture(); render_surface.save_as_png("material_scene.png"); } static void BM_SceneDepth(benchmark::State& state) { window win(800, 600, "Material Scene"); auto w = win.getWindow(); glfwMakeContextCurrent(w); camera c(win.getWidth(), win.getHeight(), {0.0f, 0.5f, 2.5f}, {0.01f, 0.f, -1.f}); surface_raii render_surface(win.getWidth(), win.getHeight()); world_geometry scene("material_scene.obj"); state.counters["vertices"] = scene.vertex_count(); state.counters["normals"] = scene.normal_count(); state.counters["triangles"] = scene.triangle_count(); const auto& triangles = scene.triangles(); while (state.KeepRunning()) { raytrace_many_cuda(render_surface.getSurface(), c, triangles.data().get(), triangles.size()); } render_surface.render_gl_texture(); render_surface.save_as_png("material_depth.png"); } BENCHMARK(BM_SceneRender)->Unit(benchmark::kMicrosecond)->MinTime(1.0); BENCHMARK(BM_SceneDepth)->Unit(benchmark::kMicrosecond)->MinTime(1.0); BENCHMARK_MAIN()
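raytrace_many_cuda sizes its grid with (width + block) / block, which always covers the image but adds one redundant block whenever a dimension is already a multiple of the block size. The usual ceil-divide idiom is sketched below; the helper is written here for illustration and assumes the kernel still bounds-checks x and y against the image size, as the launch above requires.

// Illustrative helper, not part of the original file (needs the CUDA/HIP
// runtime header for dim3, which this file already includes).
inline dim3 cover_image(unsigned int width, unsigned int height, dim3 block) {
  return dim3((width + block.x - 1) / block.x,
              (height + block.y - 1) / block.y);
}
// Usage sketch:
//   dim3 dimBlock(32, 32);
//   dim3 dimGrid = cover_image(c.width(), c.height(), dimBlock);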
e29b80cedff24aa73469c3053b9c20667ee5f05c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. 
// You should wrap your allocation and copying statements like we've done in the // code we're supplying you. Here is an example of the unsafe way to allocate // memory on the GPU: // // hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "reference_calc.cpp" #include "utils.h" __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // TODO // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // TODO // // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. 
__global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //TODO: //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with hipMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth)); //TODO: //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice); //Remember to use checkCudaErrors! } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { //TODO: Set reasonable block size (i.e., number of threads per block) const dim3 blockSize(numRows/16+1, numCols/16+1, 1); //TODO: //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. const dim3 gridSize( 16, 16, 1); //TODO: Launch a kernel for separating the RGBA image into different color channels hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, h_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //TODO: Call your convolution kernel here 3 times, once for each color channel. hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth) // Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. 
// // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(hipFree(d_red)); checkCudaErrors(hipFree(d_green)); checkCudaErrors(hipFree(d_blue)); }
e29b80cedff24aa73469c3053b9c20667ee5f05c.cu
// Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. // You should wrap your allocation and copying statements like we've done in the // code we're supplying you. 
Here is an example of the unsafe way to allocate // memory on the GPU: // // cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "reference_calc.cpp" #include "utils.h" __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // TODO // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // TODO // // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. 
__global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //TODO: //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with cudaMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth)); //TODO: //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice); //Remember to use checkCudaErrors! } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { //TODO: Set reasonable block size (i.e., number of threads per block) const dim3 blockSize(numRows/16+1, numCols/16+1, 1); //TODO: //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. const dim3 gridSize( 16, 16, 1); //TODO: Launch a kernel for separating the RGBA image into different color channels separateChannels<<<gridSize, blockSize>>>(h_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); //TODO: Call your convolution kernel here 3 times, once for each color channel. gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth) // Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. 
// // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. recombineChannels<<<gridSize, blockSize>>>(d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(cudaFree(d_red)); checkCudaErrors(cudaFree(d_green)); checkCudaErrors(cudaFree(d_blue)); }
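The TODO comments in both versions spell out the required indexing: map each thread to a pixel, bounds-check against numRows/numCols, then read the interleaved uchar4 and scatter its channels. Below is a sketch of one possible separateChannels body that follows those comments and mirrors the indexing already used by recombineChannels; it is an illustration, not the course's reference solution, and the _sketch suffix is added to avoid suggesting it is the graded code.

__global__ void separateChannels_sketch(const uchar4* const inputImageRGBA,
                                        int numRows, int numCols,
                                        unsigned char* const redChannel,
                                        unsigned char* const greenChannel,
                                        unsigned char* const blueChannel)
{
  const int x = blockIdx.x * blockDim.x + threadIdx.x;
  const int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x >= numCols || y >= numRows)   // stay inside the image
    return;
  const int idx = y * numCols + x;
  const uchar4 px = inputImageRGBA[idx];
  redChannel[idx]   = px.x;           // R
  greenChannel[idx] = px.y;           // G
  blueChannel[idx]  = px.z;           // B (alpha px.w is dropped)
}

Note that the launcher above appears to pass the host pointer h_inputImageRGBA to separateChannels; any working fill-in would pass d_inputImageRGBA instead.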
4aeb2f29edaeefb1bf2aba7610f7196a4a14f3fd.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "copyAndRemove.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *errosArray = NULL; hipMalloc(&errosArray, XSIZE*YSIZE); unsigned int *_encodedPosition_d = NULL; hipMalloc(&_encodedPosition_d, XSIZE*YSIZE); float *_mismatch_d = NULL; hipMalloc(&_mismatch_d, XSIZE*YSIZE); const unsigned int i = 1; const float val = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( copyAndRemove), dim3(gridBlock),dim3(threadBlock), 0, 0, errosArray,_encodedPosition_d,_mismatch_d,i,val); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( copyAndRemove), dim3(gridBlock),dim3(threadBlock), 0, 0, errosArray,_encodedPosition_d,_mismatch_d,i,val); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( copyAndRemove), dim3(gridBlock),dim3(threadBlock), 0, 0, errosArray,_encodedPosition_d,_mismatch_d,i,val); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
4aeb2f29edaeefb1bf2aba7610f7196a4a14f3fd.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "copyAndRemove.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *errosArray = NULL; cudaMalloc(&errosArray, XSIZE*YSIZE); unsigned int *_encodedPosition_d = NULL; cudaMalloc(&_encodedPosition_d, XSIZE*YSIZE); float *_mismatch_d = NULL; cudaMalloc(&_mismatch_d, XSIZE*YSIZE); const unsigned int i = 1; const float val = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); copyAndRemove<<<gridBlock,threadBlock>>>(errosArray,_encodedPosition_d,_mismatch_d,i,val); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { copyAndRemove<<<gridBlock,threadBlock>>>(errosArray,_encodedPosition_d,_mismatch_d,i,val); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { copyAndRemove<<<gridBlock,threadBlock>>>(errosArray,_encodedPosition_d,_mismatch_d,i,val); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
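Both versions time 1000 launches with steady_clock but never synchronize after the timed loop, so the host clock can stop while kernels are still queued. A sketch of event-based timing for the same loop is below; it reuses gridBlock, threadBlock, the device buffers and the i/val constants declared in the file above, which is an assumption about where the fragment would be pasted.

// Event-based timing sketch for the benchmark loop above.
cudaEvent_t t0, t1;
cudaEventCreate(&t0);
cudaEventCreate(&t1);
cudaEventRecord(t0);
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
  copyAndRemove<<<gridBlock, threadBlock>>>(errosArray, _encodedPosition_d, _mismatch_d, i, val);
}
cudaEventRecord(t1);
cudaEventSynchronize(t1);             // wait for every queued launch to finish
float ms = 0.0f;
cudaEventElapsedTime(&ms, t0, t1);    // GPU time for all 1000 launches, in ms
cudaEventDestroy(t0);
cudaEventDestroy(t1);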
13b81c56038fdcd842be50ab02b43d7fcc98128e.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2009-2018 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. /*! \file EwaldDriverPotentialPairGPU.cu \brief Defines the driver functions for computing all types of pair forces on the GPU */ #include "EvaluatorPairEwald.h" #include "AllDriverPotentialPairGPU.cuh" hipError_t gpu_compute_ewald_forces(const pair_args_t& pair_args, const Scalar2 *d_params) { return gpu_compute_pair_forces<EvaluatorPairEwald>(pair_args, d_params); }
13b81c56038fdcd842be50ab02b43d7fcc98128e.cu
// Copyright (c) 2009-2018 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. /*! \file EwaldDriverPotentialPairGPU.cu \brief Defines the driver functions for computing all types of pair forces on the GPU */ #include "EvaluatorPairEwald.h" #include "AllDriverPotentialPairGPU.cuh" cudaError_t gpu_compute_ewald_forces(const pair_args_t& pair_args, const Scalar2 *d_params) { return gpu_compute_pair_forces<EvaluatorPairEwald>(pair_args, d_params); }
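This driver pair changes nothing but the error type (cudaError_t becomes hipError_t); the templated gpu_compute_pair_forces call stays identical. For context only, a caller-side check of the returned status looks the same in both APIs; the fragment below uses the CUDA spelling and assumes pair_args and d_params come from the surrounding HOOMD-blue code.

// Caller-side status check (illustrative fragment, assumes <cstdio> is available).
cudaError_t err = gpu_compute_ewald_forces(pair_args, d_params);
if (err != cudaSuccess) {
  printf("ewald pair force launch failed: %s\n", cudaGetErrorString(err));
}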
8148c9430709a2f886a9de9de5b0bd2fc67c9e00.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> // So, a __global__ to tell NVCC this is a device function for calling from the host // and a pointer to some memory to write the result to __global__ void addTwoNumbers(int x, int y, int *result) { *result = x + y; } int main(int argc, char** args) { // This is in system memory int cpuVisibleResult; // This is just an unitialized pointer int *gpuVisibleResult; // This sets that pointer to point at a memory location on device memory hipMalloc( (void**)&gpuVisibleResult, sizeof(int) ); // Call the method, ignore 1,1 for now hipLaunchKernelGGL(( addTwoNumbers), dim3(1),dim3(1), 0, 0, 2,7, gpuVisibleResult); // Download the result from the device to the host hipMemcpy( &cpuVisibleResult, gpuVisibleResult, sizeof(int), hipMemcpyDeviceToHost ); // Print the results printf( " 2 + 7 = %d\n", cpuVisibleResult); // Free up that memory on the device hipFree( gpuVisibleResult ); return 1; }
8148c9430709a2f886a9de9de5b0bd2fc67c9e00.cu
#include <stdio.h> // So, a __global__ to tell NVCC this is a device function for calling from the host // and a pointer to some memory to write the result to __global__ void addTwoNumbers(int x, int y, int *result) { *result = x + y; } int main(int argc, char** args) { // This is in system memory int cpuVisibleResult; // This is just an unitialized pointer int *gpuVisibleResult; // This sets that pointer to point at a memory location on device memory cudaMalloc( (void**)&gpuVisibleResult, sizeof(int) ); // Call the method, ignore 1,1 for now addTwoNumbers<<<1,1>>>(2,7, gpuVisibleResult); // Download the result from the device to the host cudaMemcpy( &cpuVisibleResult, gpuVisibleResult, sizeof(int), cudaMemcpyDeviceToHost ); // Print the results printf( " 2 + 7 = %d\n", cpuVisibleResult); // Free up that memory on the device cudaFree( gpuVisibleResult ); return 1; }
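Neither version of this introductory example checks the return codes of cudaMalloc/cudaMemcpy (or their hip equivalents). A common checking macro is sketched below; CUDA_CHECK is an illustrative name chosen here, not an API symbol.

#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

// Wrap any runtime call and abort with a readable message on failure.
#define CUDA_CHECK(call)                                              \
  do {                                                                \
    cudaError_t err__ = (call);                                       \
    if (err__ != cudaSuccess) {                                       \
      fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,              \
              cudaGetErrorString(err__));                             \
      exit(EXIT_FAILURE);                                             \
    }                                                                 \
  } while (0)

// Usage sketch with the buffers from the example above:
//   CUDA_CHECK(cudaMalloc((void**)&gpuVisibleResult, sizeof(int)));
//   CUDA_CHECK(cudaMemcpy(&cpuVisibleResult, gpuVisibleResult, sizeof(int),
//                         cudaMemcpyDeviceToHost));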
a0154933a45051a05187c3f5e882e4bd6da02757.hip
// !!! This is a file automatically generated by hipify!!! //https://fisica.cab.cnea.gov.ar/gpgpu/images/charlas/partec%20-%20programacinbasicoii.pdf #include <hip/hip_runtime.h> #include <iostream> using namespace std; __global__ void SumaColMatrizKernel(int M, float* Md, float* Nd) { __shared__ float Nds[512]; float Pvalue = 0; int aux = blockIdx.y * M + threadIdx.x * (M / blockDim.x); int aux2 = aux + (M / blockDim.x); for (int k = aux; k < aux2; ++k) { Pvalue = Pvalue + Md[k]; } Nds[threadIdx.x] = Pvalue; __syncthreads(); if (threadIdx.x == 0) { for (int i = 1; i < blockDim.x; ++i) { Nds[0] = Nds[0] + Nds[i]; } Nd[blockIdx.y] = Nds[0]; } } int main() { int f = 1024, c = 512; int size = f * c * sizeof(float); int size2 = c * sizeof(float); float* Mh = (float*)malloc(size); float* Nh = (float*)malloc(size2); for (int i = 0; i < f * c; i++) Mh[i] = 1.0; float* Md, * Nd; hipMalloc(&Md, size); hipMalloc(&Nd, size2); hipMemcpy(Md, Mh, size, hipMemcpyHostToDevice); hipMemset(Nd, 0, size2); int M = f; int N = c; int chunk = 32; dim3 tamGrid(1, N); dim3 tamBlock(M / chunk, 1, 1); SumaColMatrizKernel << <tamGrid, tamBlock >> > (M, Md, Nd); hipMemcpy(Nh, Nd, size2, hipMemcpyDeviceToHost); hipFree(Md); hipFree(Nd); cout << "\nResultados: " << endl; for (int i = 0; i < c; i++) { cout << Nh[i] << " "; } return 0; }
a0154933a45051a05187c3f5e882e4bd6da02757.cu
//https://fisica.cab.cnea.gov.ar/gpgpu/images/charlas/partec%20-%20programacinbasicoii.pdf #include <cuda_runtime.h> #include <iostream> using namespace std; __global__ void SumaColMatrizKernel(int M, float* Md, float* Nd) { __shared__ float Nds[512]; float Pvalue = 0; int aux = blockIdx.y * M + threadIdx.x * (M / blockDim.x); int aux2 = aux + (M / blockDim.x); for (int k = aux; k < aux2; ++k) { Pvalue = Pvalue + Md[k]; } Nds[threadIdx.x] = Pvalue; __syncthreads(); if (threadIdx.x == 0) { for (int i = 1; i < blockDim.x; ++i) { Nds[0] = Nds[0] + Nds[i]; } Nd[blockIdx.y] = Nds[0]; } } int main() { int f = 1024, c = 512; int size = f * c * sizeof(float); int size2 = c * sizeof(float); float* Mh = (float*)malloc(size); float* Nh = (float*)malloc(size2); for (int i = 0; i < f * c; i++) Mh[i] = 1.0; float* Md, * Nd; cudaMalloc(&Md, size); cudaMalloc(&Nd, size2); cudaMemcpy(Md, Mh, size, cudaMemcpyHostToDevice); cudaMemset(Nd, 0, size2); int M = f; int N = c; int chunk = 32; dim3 tamGrid(1, N); dim3 tamBlock(M / chunk, 1, 1); SumaColMatrizKernel << <tamGrid, tamBlock >> > (M, Md, Nd); cudaMemcpy(Nh, Nd, size2, cudaMemcpyDeviceToHost); cudaFree(Md); cudaFree(Nd); cout << "\nResultados: " << endl; for (int i = 0; i < c; i++) { cout << Nh[i] << " "; } return 0; }
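In SumaColMatrizKernel every thread writes one partial sum into shared memory and then thread 0 adds all blockDim.x entries serially. A tree reduction is the usual replacement for that last step; the fragment below would substitute for the if (threadIdx.x == 0) block, keeps the __syncthreads() that already precedes it, and assumes blockDim.x is a power of two (it is 32 in the launch above).

// Tree reduction over the shared partial sums (illustrative fragment).
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
  if (threadIdx.x < stride) {
    Nds[threadIdx.x] += Nds[threadIdx.x + stride];
  }
  __syncthreads();                    // make each halving visible to all threads
}
if (threadIdx.x == 0) {
  Nd[blockIdx.y] = Nds[0];            // one block-wide total per output element
}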
4ccabb319a2993a0cc9bd398df605134999e5f33.hip
// !!! This is a file automatically generated by hipify!!!
/* GPCA - A Cellular Automata library powered by CUDA.
Copyright (C) 2011 Sam Gunaratne University of Plymouth

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.*/

#include "CellularAutomata_kernal.cu"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_vector_types.h>

template<typename CAFunction>
extern float CUDATimeStep(unsigned int* pFlatGrid, int DIM, CAFunction *func) {

    unsigned int *dev_pFlatGrid; //Pointers to device allocated memory
    int *dev_DIM;
    int *dev_born; //to bornNo
    int *dev_survive; //to surviveNo
    CAFunction *dev_func;
    int* tempBorn;
    int* tempSurv;

    hipEvent_t start,stop; //Events for timings

    //START: Record duration of GPGPU processing
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start,0);

    size_t noCells = DIM * DIM * sizeof(unsigned int);

    //Might need to flatten the 2d array or maybe try "int2" type
    //TODO fix this name
    size_t size = sizeof(CAFunction);

    //Allocate suitable size memory on device
    hipMalloc((void**) &dev_pFlatGrid, noCells);
    hipMalloc((void**) &dev_DIM, sizeof(int));
    hipMalloc((void**) &dev_func, sizeof(CAFunction));
    hipMalloc((void**) &dev_born, sizeof(int) * func->bornSize);
    hipMalloc((void**) &dev_survive, sizeof(int) * func->surviveSize);

    //Make our 2D grid of blocks & threads (DIM/No of threads)
    //One pixel is one thread.
    dim3 blocks (DIM/20,DIM/20);
    dim3 threads(20,20);

    //copy our two dynamic arrays
    hipMemcpy(dev_born, func->bornNo, sizeof(int) * func->bornSize, hipMemcpyHostToDevice);
    hipMemcpy(dev_survive, func->surviveNo, sizeof(int) * func->surviveSize, hipMemcpyHostToDevice);

    //We want to temporarily hold our pointers so we can reassign them after the object copy...
    tempBorn = func->bornNo;
    tempSurv = func->surviveNo;

    //reassign our pointers so we know where we put our dynamic arrays
    func->surviveNo = dev_survive;
    func->bornNo = dev_born;

    //Copy our memory from Host to Device
    hipMemcpy(dev_pFlatGrid, pFlatGrid, noCells, hipMemcpyHostToDevice);
    hipMemcpy(dev_DIM, &DIM, sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(dev_func, func, sizeof(CAFunction), hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( kernal), dim3(blocks),dim3(threads), 0, 0, dev_pFlatGrid, dev_DIM, dev_func);

    //Copy back to host
    hipMemcpy(pFlatGrid, dev_pFlatGrid, noCells, hipMemcpyDeviceToHost);

    //Reassign our dynamic array pointers
    func->surviveNo = tempSurv;
    func->bornNo = tempBorn;

    //STOP : processing done
    hipEventRecord(stop,0);
    hipEventSynchronize(stop);

    float elapsedTime = 0;
    hipEventElapsedTime(&elapsedTime, start, stop);

    hipEventDestroy(start);
    hipEventDestroy(stop);

    //fix up states - normalize
    for (int i = 0; i < DIM; ++i) {
        for (int j = 0; j < DIM; ++j) {
            pFlatGrid[i * DIM +j] = pFlatGrid[i * DIM +j] >> func->getNoBits();
        }
    }

    //Free memory on Device
    hipFree(dev_pFlatGrid);
    hipFree(dev_DIM);
    hipFree(dev_born);
    hipFree(dev_survive);
    hipFree(dev_func);

    return elapsedTime;
}

template<typename CAFunction>
extern float CUDATimeStep3D(unsigned int* pFlatGrid, int DIM, CAFunction *func) {

    unsigned int *dev_pFlatGrid; //Pointers to device allocated memory
    int *dev_DIM;
    int *dev_born; //to bornNo
    int *dev_survive; //to surviveNo
    unsigned int* dev_neighCount;
    CAFunction *dev_func;
    int* tempBorn;
    int* tempSurv;
    unsigned int* tempNeigh;

    hipEvent_t start,stop; //Events for timings

    //START: Record duration of GPGPU processing
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start,0);

    size_t noCells = DIM * DIM * DIM * sizeof(unsigned int);

    //Might need to flatten the 2d array or maybe try "int2" type
    //TODO fix this name
    size_t size = sizeof(CAFunction);

    //Allocate suitable size memory on device
    hipMalloc((void**) &dev_pFlatGrid, noCells);
    hipMalloc((void**) &dev_DIM, sizeof(int));
    hipMalloc((void**) &dev_func, sizeof(CAFunction));
    hipMalloc((void**) &dev_born, sizeof(int) * func->bornSize);
    hipMalloc((void**) &dev_survive, sizeof(int) * func->surviveSize);
    hipMalloc((void**) &dev_neighCount, noCells);

    //Make our 3D grid of blocks & threads (DIM/No of threads)
    //One pixel is one thread.
    /*dim3 blocks (1,1,1);
    dim3 threads(8,8,8);*/
    dim3 threads(16,16);
    dim3 blocks (DIM/threads.x + 1,(DIM/threads.y + 1) * DIM);

    //copy our two dynamic arrays
    hipMemcpy(dev_born, func->bornNo, sizeof(int) * func->bornSize, hipMemcpyHostToDevice);
    hipMemcpy(dev_survive, func->surviveNo, sizeof(int) * func->surviveSize, hipMemcpyHostToDevice);
    hipMemcpy(dev_neighCount, func->neighbourCount, noCells, hipMemcpyHostToDevice);

    //We want to temporarily hold our pointers so we can reassign them after the object copy...
    tempBorn = func->bornNo;
    tempSurv = func->surviveNo;
    tempNeigh = func->neighbourCount;

    //reassign our pointers so we know where we put our dynamic arrays
    func->surviveNo = dev_survive;
    func->bornNo = dev_born;
    func->neighbourCount = dev_neighCount;

    //Copy our memory from Host to Device
    hipMemcpy(dev_pFlatGrid, pFlatGrid, noCells, hipMemcpyHostToDevice);
    hipMemcpy(dev_DIM, &DIM, sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(dev_func, func, sizeof(CAFunction), hipMemcpyHostToDevice);

    //TODO memory leak?
    //delete[] pFlatGrid;

    hipLaunchKernelGGL(( kernal3DTest), dim3(blocks),dim3(threads), 0, 0, dev_pFlatGrid, dev_DIM, dev_func);

    //Copy back to host
    hipMemcpy(pFlatGrid, dev_pFlatGrid, noCells, hipMemcpyDeviceToHost);

    //Because of our func currently holding a device pointer, we need to use a
    //temp pointer.
    hipMemcpy(tempNeigh, dev_neighCount, noCells, hipMemcpyDeviceToHost);

    //Reassign our dynamic array pointers
    func->surviveNo = tempSurv;
    func->bornNo = tempBorn;
    func->neighbourCount = tempNeigh;

    //STOP : processing done
    hipEventRecord(stop,0);
    hipEventSynchronize(stop);

    float elapsedTime = 0;
    hipEventElapsedTime(&elapsedTime, start, stop);

    hipEventDestroy(start);
    hipEventDestroy(stop);

    //fix up states - normalize, this could be another kernel really..
    for (int i = 0; i < DIM * DIM; ++i) {
        for (int j = 0; j < DIM; ++j) {
            pFlatGrid[i * DIM +j] = pFlatGrid[i * DIM +j] >> func->getNoBits();
        }
    }

    //Free memory on Device
    hipFree(dev_pFlatGrid);
    hipFree(dev_DIM);
    hipFree(dev_born);
    hipFree(dev_survive);
    hipFree(dev_func);
    hipFree(dev_neighCount);

    return elapsedTime;
}
4ccabb319a2993a0cc9bd398df605134999e5f33.cu
/* GPCA - A Cellular Automata library powered by CUDA.
Copyright (C) 2011 Sam Gunaratne University of Plymouth

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.*/

#include "CellularAutomata_kernal.cu"
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <vector_types.h>

template<typename CAFunction>
extern float CUDATimeStep(unsigned int* pFlatGrid, int DIM, CAFunction *func) {

    unsigned int *dev_pFlatGrid; //Pointers to device allocated memory
    int *dev_DIM;
    int *dev_born; //to bornNo
    int *dev_survive; //to surviveNo
    CAFunction *dev_func;
    int* tempBorn;
    int* tempSurv;

    cudaEvent_t start,stop; //Events for timings

    //START: Record duration of GPGPU processing
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);

    size_t noCells = DIM * DIM * sizeof(unsigned int);

    //Might need to flatten the 2d array or maybe try "int2" type
    //TODO fix this name
    size_t size = sizeof(CAFunction);

    //Allocate suitable size memory on device
    cudaMalloc((void**) &dev_pFlatGrid, noCells);
    cudaMalloc((void**) &dev_DIM, sizeof(int));
    cudaMalloc((void**) &dev_func, sizeof(CAFunction));
    cudaMalloc((void**) &dev_born, sizeof(int) * func->bornSize);
    cudaMalloc((void**) &dev_survive, sizeof(int) * func->surviveSize);

    //Make our 2D grid of blocks & threads (DIM/No of threads)
    //One pixel is one thread.
    dim3 blocks (DIM/20,DIM/20);
    dim3 threads(20,20);

    //copy our two dynamic arrays
    cudaMemcpy(dev_born, func->bornNo, sizeof(int) * func->bornSize, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_survive, func->surviveNo, sizeof(int) * func->surviveSize, cudaMemcpyHostToDevice);

    //We want to temporarily hold our pointers so we can reassign them after the object copy...
    tempBorn = func->bornNo;
    tempSurv = func->surviveNo;

    //reassign our pointers so we know where we put our dynamic arrays
    func->surviveNo = dev_survive;
    func->bornNo = dev_born;

    //Copy our memory from Host to Device
    cudaMemcpy(dev_pFlatGrid, pFlatGrid, noCells, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_DIM, &DIM, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_func, func, sizeof(CAFunction), cudaMemcpyHostToDevice);

    kernal<<<blocks,threads>>>(dev_pFlatGrid, dev_DIM, dev_func);

    //Copy back to host
    cudaMemcpy(pFlatGrid, dev_pFlatGrid, noCells, cudaMemcpyDeviceToHost);

    //Reassign our dynamic array pointers
    func->surviveNo = tempSurv;
    func->bornNo = tempBorn;

    //STOP : processing done
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);

    float elapsedTime = 0;
    cudaEventElapsedTime(&elapsedTime, start, stop);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    //fix up states - normalize
    for (int i = 0; i < DIM; ++i) {
        for (int j = 0; j < DIM; ++j) {
            pFlatGrid[i * DIM +j] = pFlatGrid[i * DIM +j] >> func->getNoBits();
        }
    }

    //Free memory on Device
    cudaFree(dev_pFlatGrid);
    cudaFree(dev_DIM);
    cudaFree(dev_born);
    cudaFree(dev_survive);
    cudaFree(dev_func);

    return elapsedTime;
}

template<typename CAFunction>
extern float CUDATimeStep3D(unsigned int* pFlatGrid, int DIM, CAFunction *func) {

    unsigned int *dev_pFlatGrid; //Pointers to device allocated memory
    int *dev_DIM;
    int *dev_born; //to bornNo
    int *dev_survive; //to surviveNo
    unsigned int* dev_neighCount;
    CAFunction *dev_func;
    int* tempBorn;
    int* tempSurv;
    unsigned int* tempNeigh;

    cudaEvent_t start,stop; //Events for timings

    //START: Record duration of GPGPU processing
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);

    size_t noCells = DIM * DIM * DIM * sizeof(unsigned int);

    //Might need to flatten the 2d array or maybe try "int2" type
    //TODO fix this name
    size_t size = sizeof(CAFunction);

    //Allocate suitable size memory on device
    cudaMalloc((void**) &dev_pFlatGrid, noCells);
    cudaMalloc((void**) &dev_DIM, sizeof(int));
    cudaMalloc((void**) &dev_func, sizeof(CAFunction));
    cudaMalloc((void**) &dev_born, sizeof(int) * func->bornSize);
    cudaMalloc((void**) &dev_survive, sizeof(int) * func->surviveSize);
    cudaMalloc((void**) &dev_neighCount, noCells);

    //Make our 3D grid of blocks & threads (DIM/No of threads)
    //One pixel is one thread.
    /*dim3 blocks (1,1,1);
    dim3 threads(8,8,8);*/
    dim3 threads(16,16);
    dim3 blocks (DIM/threads.x + 1,(DIM/threads.y + 1) * DIM);

    //copy our two dynamic arrays
    cudaMemcpy(dev_born, func->bornNo, sizeof(int) * func->bornSize, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_survive, func->surviveNo, sizeof(int) * func->surviveSize, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_neighCount, func->neighbourCount, noCells, cudaMemcpyHostToDevice);

    //We want to temporarily hold our pointers so we can reassign them after the object copy...
    tempBorn = func->bornNo;
    tempSurv = func->surviveNo;
    tempNeigh = func->neighbourCount;

    //reassign our pointers so we know where we put our dynamic arrays
    func->surviveNo = dev_survive;
    func->bornNo = dev_born;
    func->neighbourCount = dev_neighCount;

    //Copy our memory from Host to Device
    cudaMemcpy(dev_pFlatGrid, pFlatGrid, noCells, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_DIM, &DIM, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_func, func, sizeof(CAFunction), cudaMemcpyHostToDevice);

    //TODO memory leak?
    //delete[] pFlatGrid;

    kernal3DTest<<<blocks,threads>>>(dev_pFlatGrid, dev_DIM, dev_func);

    //Copy back to host
    cudaMemcpy(pFlatGrid, dev_pFlatGrid, noCells, cudaMemcpyDeviceToHost);

    //Because of our func currently holding a device pointer, we need to use a
    //temp pointer.
    cudaMemcpy(tempNeigh, dev_neighCount, noCells, cudaMemcpyDeviceToHost);

    //Reassign our dynamic array pointers
    func->surviveNo = tempSurv;
    func->bornNo = tempBorn;
    func->neighbourCount = tempNeigh;

    //STOP : processing done
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);

    float elapsedTime = 0;
    cudaEventElapsedTime(&elapsedTime, start, stop);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    //fix up states - normalize, this could be another kernel really..
    for (int i = 0; i < DIM * DIM; ++i) {
        for (int j = 0; j < DIM; ++j) {
            pFlatGrid[i * DIM +j] = pFlatGrid[i * DIM +j] >> func->getNoBits();
        }
    }

    //Free memory on Device
    cudaFree(dev_pFlatGrid);
    cudaFree(dev_DIM);
    cudaFree(dev_born);
    cudaFree(dev_survive);
    cudaFree(dev_func);
    cudaFree(dev_neighCount);

    return elapsedTime;
}
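Note on this pair: hipify only changes the API surface, rewriting the cuda* runtime calls to their hip* counterparts and turning the kernal<<<blocks,threads>>>(...) launch into hipLaunchKernelGGL, while the host logic stays identical. Neither version checks API return codes or the launch itself. The sketch below shows the same event-timing pattern with error checking added; CUDA_CHECK, step_kernel, and timedLaunch are hypothetical names introduced here for illustration and are not part of the GPCA sources.

#include <cstdio>
#include <cuda_runtime.h>

// Hypothetical error-check helper, not part of the GPCA sources above.
#define CUDA_CHECK(call)                                                \
    do {                                                                \
        cudaError_t err_ = (call);                                      \
        if (err_ != cudaSuccess)                                        \
            fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,          \
                    cudaGetErrorString(err_));                          \
    } while (0)

// Trivial stand-in for the GPCA kernel; one thread per cell.
__global__ void step_kernel(unsigned int* grid, int nCells)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < nCells)
        grid[i] += 0u; // placeholder update
}

// Same event-timing pattern as CUDATimeStep, with launch/API errors reported.
float timedLaunch(unsigned int* dev_grid, int nCells)
{
    cudaEvent_t start, stop;
    CUDA_CHECK(cudaEventCreate(&start));
    CUDA_CHECK(cudaEventCreate(&stop));
    CUDA_CHECK(cudaEventRecord(start, 0));

    dim3 threads(256);
    dim3 blocks((nCells + threads.x - 1) / threads.x);
    step_kernel<<<blocks, threads>>>(dev_grid, nCells);
    // hipify would emit: hipLaunchKernelGGL(step_kernel, blocks, threads, 0, 0, dev_grid, nCells);
    CUDA_CHECK(cudaGetLastError()); // catches bad launch configurations

    CUDA_CHECK(cudaEventRecord(stop, 0));
    CUDA_CHECK(cudaEventSynchronize(stop));
    float ms = 0.0f;
    CUDA_CHECK(cudaEventElapsedTime(&ms, start, stop));
    CUDA_CHECK(cudaEventDestroy(start));
    CUDA_CHECK(cudaEventDestroy(stop));
    return ms;
}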
a0e6e1f6a3d58c86f252d7ccf28454edc703be92.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
    -- MAGMA (version 2.2.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date November 2016

       @generated from magmablas/zpotf2.cu, normal z -> d, Sun Nov 20 20:20:31 2016
*/
#include "magma_internal.h"

#define REAL

#define ddot_max_bs 512  // 512 is max threads for 1.x cards

void dpotf2_dscal( magma_int_t n, double *x, magma_int_t incx, magma_queue_t queue );
void dpotf2_ddot( magma_int_t n, double *x, magma_int_t incx, magma_queue_t queue );

#ifdef COMPLEX
void magmablas_dlacgv( magma_int_t n, double *x, magma_int_t incx, magma_queue_t queue );
#endif

// TODO: this function could be in .cpp file -- it has no CUDA code in it.
/***************************************************************************//**
    Purpose
    -------

    dpotf2 computes the Cholesky factorization of a real symmetric
    positive definite matrix A.

    The factorization has the form
        A = U**H * U,  if UPLO = MagmaUpper, or
        A = L  * L**H, if UPLO = MagmaLower,
    where U is an upper triangular matrix and L is lower triangular.

    This is the unblocked version of the algorithm, calling Level 2 BLAS.

    Arguments
    ---------

    @param[in]
    uplo    magma_uplo_t
            Specifies whether the upper or lower triangular part of the
            symmetric matrix A is stored.
      -     = MagmaUpper:  Upper triangular
      -     = MagmaLower:  Lower triangular

    @param[in]
    n       INTEGER
            The order of the matrix A.  N >= 0 and N <= 512.

    @param[in,out]
    dA      DOUBLE PRECISION array, dimension (LDDA,N)
            On entry, the symmetric matrix A.  If UPLO = MagmaUpper, the leading
            n by n upper triangular part of A contains the upper
            triangular part of the matrix A, and the strictly lower
            triangular part of A is not referenced.  If UPLO = MagmaLower, the
            leading n by n lower triangular part of A contains the lower
            triangular part of the matrix A, and the strictly upper
            triangular part of A is not referenced.
    \n
            On exit, if INFO = 0, the factor U or L from the Cholesky
            factorization A = U**H * U or A = L * L**H.

    @param[in]
    ldda    INTEGER
            The leading dimension of the array A.  LDDA >= max(1,N).

    @param[in]
    queue   magma_queue_t
            Queue to execute in.

    @param[out]
    info    INTEGER
      -     = 0: successful exit
      -     < 0: if INFO = -k, the k-th argument had an illegal value
      -     > 0: if INFO = k, the leading minor of order k is not
                 positive definite, and the factorization could not be
                 completed.

    @ingroup magma_potf2
*******************************************************************************/
extern "C" magma_int_t
magma_dpotf2_gpu(
    magma_uplo_t uplo, magma_int_t n,
    magmaDouble_ptr dA, magma_int_t ldda,
    magma_queue_t queue,
    magma_int_t *info )
{
#define dA(i_, j_)  (dA + (i_) + (j_)*ldda)

    magma_int_t j;

    *info = 0;
    if ( uplo != MagmaUpper && uplo != MagmaLower) {
        *info = -1;
    } else if (n < 0 || n > ddot_max_bs) {
        *info = -2;
    } else if (ldda < max(1,n)) {
        *info = -4;
    }

    if (*info != 0) {
        magma_xerbla( __func__, -(*info) );
        return *info;
    }

    // Quick return if possible
    if (n == 0) {
        return *info;
    }

    double alpha = MAGMA_D_NEG_ONE;
    double beta  = MAGMA_D_ONE;

    if (uplo == MagmaUpper) {
        for (j = 0; j < n; j++) {
            dpotf2_ddot( j, dA(0,j), 1, queue ); // including ddot product and update a(j,j)
            if (j < n) {
                #ifdef COMPLEX
                magmablas_dlacgv( j, dA(0, j), 1, queue );
                #endif
                magma_dgemv( MagmaTrans, j, n-j-1,
                             alpha, dA(0, j+1), ldda,
                                    dA(0, j),   1,
                             beta,  dA(j, j+1), ldda, queue );

                #ifdef COMPLEX
                magmablas_dlacgv( j, dA(0, j), 1, queue );
                #endif
                dpotf2_dscal( n-j, dA(j,j), ldda, queue );
            }
        }
    }
    else {
        for (j = 0; j < n; j++) {
            dpotf2_ddot( j, dA(j,0), ldda, queue ); // including ddot product and update a(j,j)
            if (j < n) {
                #ifdef COMPLEX
                magmablas_dlacgv( j, dA(j, 0), ldda, queue );
                #endif
                magma_dgemv( MagmaNoTrans, n-j-1, j,
                             alpha, dA(j+1, 0), ldda,
                                    dA(j,0),    ldda,
                             beta,  dA(j+1, j), 1, queue );

                #ifdef COMPLEX
                magmablas_dlacgv( j, dA(j, 0), ldda, queue );
                #endif
                dpotf2_dscal( n-j, dA(j,j), 1, queue );
            }
        }
    }

    return *info;
}

#define dscal_bs  32
#define ddot_bs  512
#define dlacgv_bs 512

// dynamically allocated shared memory, set to size number of threads when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ double shared_data[];

__global__ void kernel_ddot(int n, double *x, int incx, int threadSize)
{
    int tx = threadIdx.x;

    double *sdata = shared_data;

    double res = MAGMA_D_ZERO;

    if (tx < n) {
        res = x[tx*incx];
    }

    sdata[tx] = MAGMA_D_REAL(res * MAGMA_D_CONJ(res));

    __syncthreads();

    for (int s = blockDim.x/2; s > 32; s >>= 1 ) {
        if (tx < s) {
            sdata[tx] += sdata[tx+s];
        }
        __syncthreads();
    }

    if (tx < 32) {
        volatile double* smem = sdata;
        smem[tx] += smem[tx+32];
        smem[tx] += smem[tx+16];
        smem[tx] += smem[tx+8];
        smem[tx] += smem[tx+4];
        smem[tx] += smem[tx+2];
        smem[tx] += smem[tx+1];
    }

    if (tx == 0) {
        double xreal = MAGMA_D_REAL(x[n*incx]);
        x[n*incx] = MAGMA_D_MAKE( sqrt(xreal - sdata[0]), 0 );
    }
}

void dpotf2_ddot(
    magma_int_t n, double *x, magma_int_t incx,
    magma_queue_t queue )
{
    /*
    Specialized Ddot
    1) performs ddot sum = x[0:n-1]*conj(x[0:n-1])
    2) updates x[n] = sqrt(x[n]-sum);
    */
    if (n > ddot_max_bs) {
        fprintf( stderr, "n = %lld > %lld is not supported in dpotf2_ddot\n",
                 (long long) n, (long long) ddot_max_bs );
        return;
    }
    int threadSize;

    if (n <= 1024 && n > 512) {
        threadSize = 1024;
    }
    else if (n <= 512 && n > 256 ) {
        threadSize = 512;
    }
    else if (n <= 256 && n > 128) {
        threadSize = 256;
    }
    else if (n <= 128 && n > 64) {
        threadSize = 128;
    }
    else {
        threadSize = 64;
    }

    size_t shmem = threadSize * sizeof(double);
    hipLaunchKernelGGL(( kernel_ddot) , dim3(1), dim3(threadSize), shmem, queue->cuda_stream() , n, x, incx, threadSize);
}

__global__ void kernel_dscal(int n, double *x, int incx)
{
    int id = blockIdx.x * dscal_bs + threadIdx.x;

    __shared__ double factor;

    if (threadIdx.x == 0) {
        factor = MAGMA_D_MAKE(1.0/MAGMA_D_REAL(x[0]), 0.0);
    }

    __syncthreads();

    if ( id < n && id > 0) {
        x[id*incx] = x[id*incx] * factor;
    }
}

void dpotf2_dscal(
    magma_int_t n, double *x, magma_int_t incx,
    magma_queue_t queue )
{
    /* Specialized dscal perform x[1:n-1] / x[0] */
    dim3 threads(dscal_bs, 1, 1);
    int num_blocks = magma_ceildiv( n, dscal_bs );
    dim3 grid(num_blocks,1);
    hipLaunchKernelGGL(( kernel_dscal) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, x, incx);
}


#ifdef COMPLEX

__global__ void kernel_dlacgv(int n, double *x, int incx)
{
    int id = blockIdx.x * dlacgv_bs + threadIdx.x;

    if ( id < n ) {
        x[id*incx] = MAGMA_D_CONJ(x[id*incx]);
    }
}

/***************************************************************************//**
    Purpose
    -------

    DLACGV conjugates a real vector of length N.

    Arguments
    ---------

    @param[in]
    n       INTEGER
            The length of the vector X.  N >= 0.

    @param[in,out]
    x       DOUBLE PRECISION array, dimension (1+(N-1)*abs(INCX))
            On entry, the vector of length N to be conjugated.
            On exit, X is overwritten with conjg(X).

    @param[in]
    incx    INTEGER
            The spacing between successive elements of X.

    @param[in]
    queue   magma_queue_t
            Queue to execute in.

    @ingroup magma_lacgv
*******************************************************************************/
void magmablas_dlacgv(
    magma_int_t n, double *x, magma_int_t incx,
    magma_queue_t queue )
{
    dim3 threads(dlacgv_bs, 1, 1);
    int num_blocks = magma_ceildiv( n, dlacgv_bs );
    dim3 grid(num_blocks,1);
    hipLaunchKernelGGL(( kernel_dlacgv) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, x, incx);
}

#endif // COMPLEX
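The core of this file is kernel_ddot's single-block reduction: each thread squares one element into dynamically sized shared memory, a tree reduction halves the stride down to 32, and the last warp is unrolled through a volatile pointer. Below is a simplified sketch of that pattern as a standalone sum-of-squares kernel; sum_of_squares and sdata_sketch are hypothetical names introduced here, and like the original it assumes a single block whose size is a power of two of at least 64 and at least n.

#include <cuda_runtime.h>

// Dynamically sized shared buffer, one double per thread (same idiom as shared_data above).
extern __shared__ double sdata_sketch[];

// Block-wide sum of x[i]^2, mirroring kernel_ddot's reduction structure.
// Launch as: sum_of_squares<<<1, threadSize, threadSize*sizeof(double)>>>(x, n, result);
// with threadSize a power of two >= max(n, 64), as dpotf2_ddot chooses it.
__global__ void sum_of_squares(const double* x, int n, double* result)
{
    int tx = threadIdx.x;
    double v = (tx < n) ? x[tx] : 0.0;
    sdata_sketch[tx] = v * v;
    __syncthreads();

    // Tree reduction down to 64 partial sums.
    for (int s = blockDim.x / 2; s > 32; s >>= 1) {
        if (tx < s)
            sdata_sketch[tx] += sdata_sketch[tx + s];
        __syncthreads();
    }

    // Final warp reduced without __syncthreads(), relying on volatile loads/stores
    // (the pre-Volta idiom used by kernel_ddot; newer code would use __syncwarp or cub).
    if (tx < 32) {
        volatile double* smem = sdata_sketch;
        smem[tx] += smem[tx + 32];
        smem[tx] += smem[tx + 16];
        smem[tx] += smem[tx + 8];
        smem[tx] += smem[tx + 4];
        smem[tx] += smem[tx + 2];
        smem[tx] += smem[tx + 1];
    }

    if (tx == 0)
        *result = sdata_sketch[0];
}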
a0e6e1f6a3d58c86f252d7ccf28454edc703be92.cu
/*
    -- MAGMA (version 2.2.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date November 2016

       @generated from magmablas/zpotf2.cu, normal z -> d, Sun Nov 20 20:20:31 2016
*/
#include "magma_internal.h"

#define REAL

#define ddot_max_bs 512  // 512 is max threads for 1.x cards

void dpotf2_dscal( magma_int_t n, double *x, magma_int_t incx, magma_queue_t queue );
void dpotf2_ddot( magma_int_t n, double *x, magma_int_t incx, magma_queue_t queue );

#ifdef COMPLEX
void magmablas_dlacgv( magma_int_t n, double *x, magma_int_t incx, magma_queue_t queue );
#endif

// TODO: this function could be in .cpp file -- it has no CUDA code in it.
/***************************************************************************//**
    Purpose
    -------

    dpotf2 computes the Cholesky factorization of a real symmetric
    positive definite matrix A.

    The factorization has the form
        A = U**H * U,  if UPLO = MagmaUpper, or
        A = L  * L**H, if UPLO = MagmaLower,
    where U is an upper triangular matrix and L is lower triangular.

    This is the unblocked version of the algorithm, calling Level 2 BLAS.

    Arguments
    ---------

    @param[in]
    uplo    magma_uplo_t
            Specifies whether the upper or lower triangular part of the
            symmetric matrix A is stored.
      -     = MagmaUpper:  Upper triangular
      -     = MagmaLower:  Lower triangular

    @param[in]
    n       INTEGER
            The order of the matrix A.  N >= 0 and N <= 512.

    @param[in,out]
    dA      DOUBLE PRECISION array, dimension (LDDA,N)
            On entry, the symmetric matrix A.  If UPLO = MagmaUpper, the leading
            n by n upper triangular part of A contains the upper
            triangular part of the matrix A, and the strictly lower
            triangular part of A is not referenced.  If UPLO = MagmaLower, the
            leading n by n lower triangular part of A contains the lower
            triangular part of the matrix A, and the strictly upper
            triangular part of A is not referenced.
    \n
            On exit, if INFO = 0, the factor U or L from the Cholesky
            factorization A = U**H * U or A = L * L**H.

    @param[in]
    ldda    INTEGER
            The leading dimension of the array A.  LDDA >= max(1,N).

    @param[in]
    queue   magma_queue_t
            Queue to execute in.

    @param[out]
    info    INTEGER
      -     = 0: successful exit
      -     < 0: if INFO = -k, the k-th argument had an illegal value
      -     > 0: if INFO = k, the leading minor of order k is not
                 positive definite, and the factorization could not be
                 completed.

    @ingroup magma_potf2
*******************************************************************************/
extern "C" magma_int_t
magma_dpotf2_gpu(
    magma_uplo_t uplo, magma_int_t n,
    magmaDouble_ptr dA, magma_int_t ldda,
    magma_queue_t queue,
    magma_int_t *info )
{
#define dA(i_, j_)  (dA + (i_) + (j_)*ldda)

    magma_int_t j;

    *info = 0;
    if ( uplo != MagmaUpper && uplo != MagmaLower) {
        *info = -1;
    } else if (n < 0 || n > ddot_max_bs) {
        *info = -2;
    } else if (ldda < max(1,n)) {
        *info = -4;
    }

    if (*info != 0) {
        magma_xerbla( __func__, -(*info) );
        return *info;
    }

    // Quick return if possible
    if (n == 0) {
        return *info;
    }

    double alpha = MAGMA_D_NEG_ONE;
    double beta  = MAGMA_D_ONE;

    if (uplo == MagmaUpper) {
        for (j = 0; j < n; j++) {
            dpotf2_ddot( j, dA(0,j), 1, queue ); // including ddot product and update a(j,j)
            if (j < n) {
                #ifdef COMPLEX
                magmablas_dlacgv( j, dA(0, j), 1, queue );
                #endif
                magma_dgemv( MagmaTrans, j, n-j-1,
                             alpha, dA(0, j+1), ldda,
                                    dA(0, j),   1,
                             beta,  dA(j, j+1), ldda, queue );

                #ifdef COMPLEX
                magmablas_dlacgv( j, dA(0, j), 1, queue );
                #endif
                dpotf2_dscal( n-j, dA(j,j), ldda, queue );
            }
        }
    }
    else {
        for (j = 0; j < n; j++) {
            dpotf2_ddot( j, dA(j,0), ldda, queue ); // including ddot product and update a(j,j)
            if (j < n) {
                #ifdef COMPLEX
                magmablas_dlacgv( j, dA(j, 0), ldda, queue );
                #endif
                magma_dgemv( MagmaNoTrans, n-j-1, j,
                             alpha, dA(j+1, 0), ldda,
                                    dA(j,0),    ldda,
                             beta,  dA(j+1, j), 1, queue );

                #ifdef COMPLEX
                magmablas_dlacgv( j, dA(j, 0), ldda, queue );
                #endif
                dpotf2_dscal( n-j, dA(j,j), 1, queue );
            }
        }
    }

    return *info;
}

#define dscal_bs  32
#define ddot_bs  512
#define dlacgv_bs 512

// dynamically allocated shared memory, set to size number of threads when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ double shared_data[];

__global__ void kernel_ddot(int n, double *x, int incx, int threadSize)
{
    int tx = threadIdx.x;

    double *sdata = shared_data;

    double res = MAGMA_D_ZERO;

    if (tx < n) {
        res = x[tx*incx];
    }

    sdata[tx] = MAGMA_D_REAL(res * MAGMA_D_CONJ(res));

    __syncthreads();

    for (int s = blockDim.x/2; s > 32; s >>= 1 ) {
        if (tx < s) {
            sdata[tx] += sdata[tx+s];
        }
        __syncthreads();
    }

    if (tx < 32) {
        volatile double* smem = sdata;
        smem[tx] += smem[tx+32];
        smem[tx] += smem[tx+16];
        smem[tx] += smem[tx+8];
        smem[tx] += smem[tx+4];
        smem[tx] += smem[tx+2];
        smem[tx] += smem[tx+1];
    }

    if (tx == 0) {
        double xreal = MAGMA_D_REAL(x[n*incx]);
        x[n*incx] = MAGMA_D_MAKE( sqrt(xreal - sdata[0]), 0 );
    }
}

void dpotf2_ddot(
    magma_int_t n, double *x, magma_int_t incx,
    magma_queue_t queue )
{
    /*
    Specialized Ddot
    1) performs ddot sum = x[0:n-1]*conj(x[0:n-1])
    2) updates x[n] = sqrt(x[n]-sum);
    */
    if (n > ddot_max_bs) {
        fprintf( stderr, "n = %lld > %lld is not supported in dpotf2_ddot\n",
                 (long long) n, (long long) ddot_max_bs );
        return;
    }
    int threadSize;

    if (n <= 1024 && n > 512) {
        threadSize = 1024;
    }
    else if (n <= 512 && n > 256 ) {
        threadSize = 512;
    }
    else if (n <= 256 && n > 128) {
        threadSize = 256;
    }
    else if (n <= 128 && n > 64) {
        threadSize = 128;
    }
    else {
        threadSize = 64;
    }

    size_t shmem = threadSize * sizeof(double);
    kernel_ddot <<< 1, threadSize, shmem, queue->cuda_stream() >>> (n, x, incx, threadSize);
}

__global__ void kernel_dscal(int n, double *x, int incx)
{
    int id = blockIdx.x * dscal_bs + threadIdx.x;

    __shared__ double factor;

    if (threadIdx.x == 0) {
        factor = MAGMA_D_MAKE(1.0/MAGMA_D_REAL(x[0]), 0.0);
    }

    __syncthreads();

    if ( id < n && id > 0) {
        x[id*incx] = x[id*incx] * factor;
    }
}

void dpotf2_dscal(
    magma_int_t n, double *x, magma_int_t incx,
    magma_queue_t queue )
{
    /* Specialized dscal perform x[1:n-1] / x[0] */
    dim3 threads(dscal_bs, 1, 1);
    int num_blocks = magma_ceildiv( n, dscal_bs );
    dim3 grid(num_blocks,1);
    kernel_dscal <<< grid, threads, 0, queue->cuda_stream() >>> (n, x, incx);
}


#ifdef COMPLEX

__global__ void kernel_dlacgv(int n, double *x, int incx)
{
    int id = blockIdx.x * dlacgv_bs + threadIdx.x;

    if ( id < n ) {
        x[id*incx] = MAGMA_D_CONJ(x[id*incx]);
    }
}

/***************************************************************************//**
    Purpose
    -------

    DLACGV conjugates a real vector of length N.

    Arguments
    ---------

    @param[in]
    n       INTEGER
            The length of the vector X.  N >= 0.

    @param[in,out]
    x       DOUBLE PRECISION array, dimension (1+(N-1)*abs(INCX))
            On entry, the vector of length N to be conjugated.
            On exit, X is overwritten with conjg(X).

    @param[in]
    incx    INTEGER
            The spacing between successive elements of X.

    @param[in]
    queue   magma_queue_t
            Queue to execute in.

    @ingroup magma_lacgv
*******************************************************************************/
void magmablas_dlacgv(
    magma_int_t n, double *x, magma_int_t incx,
    magma_queue_t queue )
{
    dim3 threads(dlacgv_bs, 1, 1);
    int num_blocks = magma_ceildiv( n, dlacgv_bs );
    dim3 grid(num_blocks,1);
    kernel_dlacgv <<< grid, threads, 0, queue->cuda_stream() >>> (n, x, incx);
}

#endif // COMPLEX
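One detail worth noting in dpotf2_ddot above is the if/else ladder that picks threadSize: it rounds n up to a power of two between 64 and 1024 so that every element gets a thread and the tree reduction always sees a power-of-two block. The helper below is a hypothetical equivalent of that ladder for the supported range (n <= ddot_max_bs), added here only as a sketch.

// Hypothetical helper, equivalent to the threadSize ladder in dpotf2_ddot
// for the supported range n <= ddot_max_bs (512).
static int ddot_thread_count(int n)
{
    int t = 64;                 // minimum block size used by the reduction
    while (t < n && t < 1024)   // round up to the next power of two, capped at 1024
        t <<= 1;
    return t;
}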
0b29565673833ab510410ffa21eae73a740bcf6c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
    -- MAGMA (version 1.6.1) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date January 2015

       @precisions normal z -> s d c
       @author Adrien REMY
*/
#include "common_magma.h"

#define block_height  32
#define block_width  4
#define block_length 256
#define NB 64

/////////////////////////////////////////////////////////////////////////////////////////////////////////////
static __device__ void
magmablas_zelementary_multiplication_devfunc(
    magma_int_t n,
    magmaDoubleComplex *dA, magma_int_t ldda,
    magmaDoubleComplex *du,
    magmaDoubleComplex *dv)
{
    magma_int_t idx, idy;

    idx = blockIdx.x * blockDim.x + threadIdx.x;
    idy = blockIdx.y * blockDim.y + threadIdx.y;

    if ((idx < n/2)&&(idy < n/2)){
        dA += idx + idy * ldda;

        magmaDoubleComplex a00, a10, a01, a11, b1, b2, b3, b4;

        __shared__ magmaDoubleComplex u1[block_height], u2[block_height], v1[block_width], v2[block_width];

        du += idx;
        dv += idy;

        u1[threadIdx.x]=du[0];
        u2[threadIdx.x]=du[n/2];
        v1[threadIdx.y]=dv[0];
        v2[threadIdx.y]=dv[n/2];

        __syncthreads();

        a00 = dA[0];
        a01 = dA[ldda*n/2];
        a10 = dA[n/2];
        a11 = dA[ldda*n/2+n/2];

        b1 = a00 + a01;
        b2 = a10 + a11;
        b3 = a00 - a01;
        b4 = a10 - a11;

        dA[0] = u1[threadIdx.x] * v1[threadIdx.y] * (b1 + b2);
        dA[ldda*n/2] = u1[threadIdx.x] * v2[threadIdx.y] * (b3 + b4);
        dA[n/2] = u2[threadIdx.x] * v1[threadIdx.y] * (b1 - b2);
        dA[ldda*n/2+n/2] = u2[threadIdx.x] * v2[threadIdx.y] *(b3 - b4);
    }
}

/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_zelementary_multiplication_kernel(
    magma_int_t n,
    magmaDoubleComplex *dA, magma_int_t offsetA, magma_int_t ldda,
    magmaDoubleComplex *du, magma_int_t offsetu,
    magmaDoubleComplex *dv, magma_int_t offsetv)
{
    magmablas_zelementary_multiplication_devfunc( n, dA+offsetA, ldda, du+offsetu, dv+offsetv);
}

/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_zelementary_multiplication_kernel_batched(
    magma_int_t n,
    magmaDoubleComplex **dA_array, magma_int_t offsetA, magma_int_t ldda,
    magmaDoubleComplex *du, magma_int_t offsetu,
    magmaDoubleComplex *dv, magma_int_t offsetv)
{
    int batchid = blockIdx.z;
    magmablas_zelementary_multiplication_devfunc( n, dA_array[batchid]+offsetA, ldda, du+offsetu, dv+offsetv);
}

/////////////////////////////////////////////////////////////////////////////////////////////////////////////
static __device__ void
magmablas_zapply_vector_devfunc(
    magma_int_t n,
    magmaDoubleComplex *du, magmaDoubleComplex *db)
{
    magma_int_t idx;

    idx = blockIdx.x * blockDim.x + threadIdx.x;

    if (idx < n/2){
        du += idx;
        db += idx;

        magmaDoubleComplex a1,a2;

        a1 = du[0]*db[0];
        a2 = du[n/2]*db[n/2];

        db[0] = a1 + a2;
        db[n/2] = a1 -a2;
    }
}

/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_zapply_vector_kernel(
    magma_int_t n,
    magmaDoubleComplex *du, magma_int_t offsetu,
    magmaDoubleComplex *db, magma_int_t offsetb )
{
    magmablas_zapply_vector_devfunc(n, du+offsetu, db+offsetb);
}

/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_zapply_vector_kernel_batched(
    magma_int_t n,
    magmaDoubleComplex *du, magma_int_t offsetu,
    magmaDoubleComplex **db_array, magma_int_t offsetb )
{
    int batchid = blockIdx.y;
    magmablas_zapply_vector_devfunc(n, du+offsetu, db_array[batchid]+offsetb);
}

/////////////////////////////////////////////////////////////////////////////////////////////////////////////
static __device__ void
magmablas_zapply_transpose_vector_devfunc(
    magma_int_t n,
    magmaDoubleComplex *du,magmaDoubleComplex *db )
{
    magma_int_t idx;

    idx = blockIdx.x * blockDim.x + threadIdx.x;

    if (idx < n/2){
        du += idx;
        db += idx;

        magmaDoubleComplex a1,a2;

        a1 = db[0] + db[n/2];
        a2 = db[0] - db[n/2];

        db[0] = du[0]*a1;
        db[n/2] = du[n/2]*a2;
    }
}

/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_zapply_transpose_vector_kernel(
    magma_int_t n,
    magmaDoubleComplex *du, magma_int_t offsetu,
    magmaDoubleComplex *db, magma_int_t offsetb )
{
    magmablas_zapply_transpose_vector_devfunc(n, du+offsetu, db+offsetb);
}

/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_zapply_transpose_vector_kernel_batched(
    magma_int_t n,
    magmaDoubleComplex *du, magma_int_t offsetu,
    magmaDoubleComplex **db_array, magma_int_t offsetb )
{
    int batchid = blockIdx.y;
    magmablas_zapply_transpose_vector_devfunc(n, du+offsetu, db_array[batchid]+offsetb);
}

/////////////////////////////////////////////////////////////////////////////////////////////////////////////
0b29565673833ab510410ffa21eae73a740bcf6c.cu
/*
    -- MAGMA (version 1.6.1) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date January 2015

       @precisions normal z -> s d c
       @author Adrien REMY
*/
#include "common_magma.h"

#define block_height  32
#define block_width  4
#define block_length 256
#define NB 64

/////////////////////////////////////////////////////////////////////////////////////////////////////////////
static __device__ void
magmablas_zelementary_multiplication_devfunc(
    magma_int_t n,
    magmaDoubleComplex *dA, magma_int_t ldda,
    magmaDoubleComplex *du,
    magmaDoubleComplex *dv)
{
    magma_int_t idx, idy;

    idx = blockIdx.x * blockDim.x + threadIdx.x;
    idy = blockIdx.y * blockDim.y + threadIdx.y;

    if ((idx < n/2)&&(idy < n/2)){
        dA += idx + idy * ldda;

        magmaDoubleComplex a00, a10, a01, a11, b1, b2, b3, b4;

        __shared__ magmaDoubleComplex u1[block_height], u2[block_height], v1[block_width], v2[block_width];

        du += idx;
        dv += idy;

        u1[threadIdx.x]=du[0];
        u2[threadIdx.x]=du[n/2];
        v1[threadIdx.y]=dv[0];
        v2[threadIdx.y]=dv[n/2];

        __syncthreads();

        a00 = dA[0];
        a01 = dA[ldda*n/2];
        a10 = dA[n/2];
        a11 = dA[ldda*n/2+n/2];

        b1 = a00 + a01;
        b2 = a10 + a11;
        b3 = a00 - a01;
        b4 = a10 - a11;

        dA[0] = u1[threadIdx.x] * v1[threadIdx.y] * (b1 + b2);
        dA[ldda*n/2] = u1[threadIdx.x] * v2[threadIdx.y] * (b3 + b4);
        dA[n/2] = u2[threadIdx.x] * v1[threadIdx.y] * (b1 - b2);
        dA[ldda*n/2+n/2] = u2[threadIdx.x] * v2[threadIdx.y] *(b3 - b4);
    }
}

/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_zelementary_multiplication_kernel(
    magma_int_t n,
    magmaDoubleComplex *dA, magma_int_t offsetA, magma_int_t ldda,
    magmaDoubleComplex *du, magma_int_t offsetu,
    magmaDoubleComplex *dv, magma_int_t offsetv)
{
    magmablas_zelementary_multiplication_devfunc( n, dA+offsetA, ldda, du+offsetu, dv+offsetv);
}

/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_zelementary_multiplication_kernel_batched(
    magma_int_t n,
    magmaDoubleComplex **dA_array, magma_int_t offsetA, magma_int_t ldda,
    magmaDoubleComplex *du, magma_int_t offsetu,
    magmaDoubleComplex *dv, magma_int_t offsetv)
{
    int batchid = blockIdx.z;
    magmablas_zelementary_multiplication_devfunc( n, dA_array[batchid]+offsetA, ldda, du+offsetu, dv+offsetv);
}

/////////////////////////////////////////////////////////////////////////////////////////////////////////////
static __device__ void
magmablas_zapply_vector_devfunc(
    magma_int_t n,
    magmaDoubleComplex *du, magmaDoubleComplex *db)
{
    magma_int_t idx;

    idx = blockIdx.x * blockDim.x + threadIdx.x;

    if (idx < n/2){
        du += idx;
        db += idx;

        magmaDoubleComplex a1,a2;

        a1 = du[0]*db[0];
        a2 = du[n/2]*db[n/2];

        db[0] = a1 + a2;
        db[n/2] = a1 -a2;
    }
}

/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_zapply_vector_kernel(
    magma_int_t n,
    magmaDoubleComplex *du, magma_int_t offsetu,
    magmaDoubleComplex *db, magma_int_t offsetb )
{
    magmablas_zapply_vector_devfunc(n, du+offsetu, db+offsetb);
}

/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_zapply_vector_kernel_batched(
    magma_int_t n,
    magmaDoubleComplex *du, magma_int_t offsetu,
    magmaDoubleComplex **db_array, magma_int_t offsetb )
{
    int batchid = blockIdx.y;
    magmablas_zapply_vector_devfunc(n, du+offsetu, db_array[batchid]+offsetb);
}

/////////////////////////////////////////////////////////////////////////////////////////////////////////////
static __device__ void
magmablas_zapply_transpose_vector_devfunc(
    magma_int_t n,
    magmaDoubleComplex *du,magmaDoubleComplex *db )
{
    magma_int_t idx;

    idx = blockIdx.x * blockDim.x + threadIdx.x;

    if (idx < n/2){
        du += idx;
        db += idx;

        magmaDoubleComplex a1,a2;

        a1 = db[0] + db[n/2];
        a2 = db[0] - db[n/2];

        db[0] = du[0]*a1;
        db[n/2] = du[n/2]*a2;
    }
}

/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_zapply_transpose_vector_kernel(
    magma_int_t n,
    magmaDoubleComplex *du, magma_int_t offsetu,
    magmaDoubleComplex *db, magma_int_t offsetb )
{
    magmablas_zapply_transpose_vector_devfunc(n, du+offsetu, db+offsetb);
}

/////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
magmablas_zapply_transpose_vector_kernel_batched(
    magma_int_t n,
    magmaDoubleComplex *du, magma_int_t offsetu,
    magmaDoubleComplex **db_array, magma_int_t offsetb )
{
    int batchid = blockIdx.y;
    magmablas_zapply_transpose_vector_devfunc(n, du+offsetu, db_array[batchid]+offsetb);
}

/////////////////////////////////////////////////////////////////////////////////////////////////////////////
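The batched kernels in this pair read their batch index from the grid (blockIdx.z for the matrix kernel, blockIdx.y for the vector kernels), so a single launch covers the whole batch. The sketch below shows a plausible host-side launch for the batched elementary multiplication; the wrapper name, its parameter list, and the use of a raw cudaStream_t are assumptions for illustration (the real MAGMA wrappers live elsewhere), and the code presumes common_magma.h and the kernels above are in scope.

// Hypothetical launch wrapper; assumes common_magma.h and the kernels above are visible.
void launch_zelementary_multiplication_batched(
    magma_int_t n, magmaDoubleComplex **dA_array, magma_int_t ldda,
    magmaDoubleComplex *du, magmaDoubleComplex *dv,
    magma_int_t batchCount, cudaStream_t stream)
{
    // One thread per element of an n/2 x n/2 tile; blockIdx.z selects the batch entry,
    // matching how magmablas_zelementary_multiplication_kernel_batched reads batchid.
    dim3 threads(block_height, block_width);
    dim3 grid( magma_ceildiv( n/2, block_height ),
               magma_ceildiv( n/2, block_width ),
               batchCount );
    magmablas_zelementary_multiplication_kernel_batched
        <<< grid, threads, 0, stream >>>(n, dA_array, 0, ldda, du, 0, dv, 0);
}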