Dataset columns:
    hip_filename    string, length 5 to 84
    hip_content     string, length 79 to 9.69M
    cuda_filename   string, length 4 to 83
    cuda_content    string, length 19 to 9.69M
3b38cd02e724223a19b546d3101d392425b33780.hip
// !!! This is a file automatically generated by hipify!!! #include "kernel.h" #include <hip/hip_runtime.h> #include <list> #include <string> #include <assert.h> #include <omp.h> using namespace::std; typedef struct KernelVectorAddCBTrigILP2Params { public: dim3 m_bs; dim3 m_gs; int m_NumberOfElements; KernelVectorAddCBTrigILP2Params(int bsx, int bsy, int bsz, int gsx, int gsy, int gsz, int numele) : m_bs(bsx, bsy, bsz), m_gs(gsx, gsy, gsz), m_NumberOfElements(numele) { if (bsx < 1) { printf("\n***Error bsx < 1\n"); exit(EXIT_FAILURE); } if (bsx > 1024) { printf("\n***Error bsx > 1024\n"); exit(EXIT_FAILURE); } if (bsy != 1) { printf("\n***Error bsy != 1\n"); exit(EXIT_FAILURE); } if (bsz != 1) { printf("\n***Error bsz != 1\n"); exit(EXIT_FAILURE); } if (gsx < 1) { printf("\n***Error gsx < 1\n"); exit(EXIT_FAILURE); } if (gsy != 1) { printf("\n***Error gsy != 1\n"); exit(EXIT_FAILURE); } if (gsz != 1) { printf("\n***Error gsz != 1\n"); exit(EXIT_FAILURE); } if (numele < 1) { printf("\n***Error numele < 1\n"); exit(EXIT_FAILURE); } } } KernelVectorAddCBTrigILP2Params_t; void QueryKernelVectorAddCBTrigILP2(char *KernelName, int bs_start, int bs_end, int bs_inc, int gs_start, int gs_end, int gs_inc, int numele) { list<KernelVectorAddCBTrigILP2Params_t*> params; for (int gsx = gs_start; gsx < gs_end; gsx += gs_inc) for (int bsx = bs_start; bsx < bs_end; bsx += bs_inc) params.push_back(new KernelVectorAddCBTrigILP2Params_t(bsx, 1, 1, gsx, 1, 1, numele)); printf("#\n# %s\n#", KernelName); list<KernelVectorAddCBTrigILP2Params_t*>::iterator i = params.begin(); printf("\n%s: compile: params -bs %4d,%d,%d -gs %4d,%d,%d -numele %d", KernelName, (*i)->m_bs.x, (*i)->m_bs.y, (*i)->m_bs.z, (*i)->m_gs.x, (*i)->m_gs.y, (*i)->m_gs.z, (*i)->m_NumberOfElements); for (i++; i != params.end(); ++i) { printf("\n%s: nocompile: params -bs %4d,%d,%d -gs %4d,%d,%d -numele %d", KernelName, (*i)->m_bs.x, (*i)->m_bs.y, (*i)->m_bs.z, (*i)->m_gs.x, (*i)->m_gs.y, (*i)->m_gs.z, (*i)->m_NumberOfElements); } printf("\n"); } // // compute bound version of vector add kernel // __global__ void kernelVectorAddCBTrigILP2(const float *A, const float *B, float *C, float K1, float K2, int numElements) { const int s1 = blockDim.x * gridDim.x; const int stride = blockDim.x * gridDim.x * 2; for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < numElements; i += stride) { // C[i] = sin(K1)*A[i] + cos(K2)*B[i] // C[i+s] = sin(K1)*A[i+s] + cos(K2)*B[i+s] float T1 = A[i]; float T2 = B[i]; float T3 = i + s1 < numElements ? A[i + s1] : 0; float T4 = i + s1 < numElements ? 
B[i + s1] : 0; float T5 = sin(K1); float T6 = cos(K2); float T7 = sin(K1); float T8 = cos(K2); float T9 = T1*T5; float T10 = T2*T6; float T11 = T3*T7; float T12 = T4*T8; C[i] = T9 + T10; if (i + s1 < numElements) C[i + s1] = T11 + T12; } } void LaunchKernelVectorAddCBTrigILP2(dim3& gs, dim3& bs, char **argv, int argc, int nextarg) { printf("\nPreparing %s", KernelVectorAddCBTrigILP2Name); if (strcmp(argv[nextarg], "-numele") == 0) { printf("\nAllocating RAM"); hipError_t err = hipSuccess; int numElements = stoi(argv[nextarg + 1], nullptr); size_t size = numElements * sizeof(float); KernelVectorAddCBTrigILP2Params_t Verify(bs.x, bs.y, bs.z, gs.x, gs.y, gs.z, numElements); float *h_B = new float[numElements]; float *h_C = new float[numElements]; // Verify that allocations succeeded if (h_B == NULL || h_C == NULL) { printf("Failed to allocate host vectors in LaunchKernelVectorAddCBTrigILP2\n"); exit(EXIT_FAILURE); } float *d_A = NULL; err = hipMalloc((void **)&d_A, size); if (err != hipSuccess) { printf("Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } float *d_B = NULL; err = hipMalloc((void **)&d_B, size); if (err != hipSuccess) { printf("Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } float *d_C = NULL; err = hipMalloc((void **)&d_C, size); if (err != hipSuccess) { printf("Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } printf("\nInitializing GPU RAM"); InitRandomSequence(d_A, numElements); InitRandomSequence(d_B, numElements); printf("\nLaunching kernel: kernelVectorAddCBTrigILP2"); printf("\n\tgridsize (%d,%d,%d)", gs.x, gs.y, gs.z); printf("\n\tblocksize (%d,%d,%d)", bs.x, bs.y, bs.z); printf("\n\tNumElements %d", numElements); kernelVectorAddCBTrigILP2 << <gs, bs >> > (d_A, d_B, d_C, 0.0f, 0.0f, numElements); hipDeviceSynchronize(); err = hipGetLastError(); if (err != hipSuccess) { printf("Failed to launch kernelVectorAddCBTrigILP2 (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(h_B, d_B, size, hipMemcpyDeviceToHost); if (err != hipSuccess) { printf("Failed to copy vector B from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost); if (err != hipSuccess) { printf("Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Verify that the result vector is correct printf("\nValidating results ..."); #pragma omp parallel for for (int i = 0; i < numElements; ++i) { if (fabs(h_B[i] - h_C[i]) > 1e-5) { printf("Result verification failed at element %d!\n", i); exit(EXIT_FAILURE); } } printf(" success!\n"); err = hipFree(d_A); if (err != hipSuccess) { printf("Failed to free device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_B); if (err != hipSuccess) { printf("Failed to free device vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_C); if (err != hipSuccess) { printf("Failed to free device vector C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } delete[]h_B; delete[]h_C; } else { printf("\nExpecting -numele, but saw %s", argv[nextarg]); exit(EXIT_FAILURE); } }
3b38cd02e724223a19b546d3101d392425b33780.cu
#include "kernel.h" #include <cuda_runtime.h> #include <list> #include <string> #include <assert.h> #include <omp.h> using namespace::std; typedef struct KernelVectorAddCBTrigILP2Params { public: dim3 m_bs; dim3 m_gs; int m_NumberOfElements; KernelVectorAddCBTrigILP2Params(int bsx, int bsy, int bsz, int gsx, int gsy, int gsz, int numele) : m_bs(bsx, bsy, bsz), m_gs(gsx, gsy, gsz), m_NumberOfElements(numele) { if (bsx < 1) { printf("\n***Error bsx < 1\n"); exit(EXIT_FAILURE); } if (bsx > 1024) { printf("\n***Error bsx > 1024\n"); exit(EXIT_FAILURE); } if (bsy != 1) { printf("\n***Error bsy != 1\n"); exit(EXIT_FAILURE); } if (bsz != 1) { printf("\n***Error bsz != 1\n"); exit(EXIT_FAILURE); } if (gsx < 1) { printf("\n***Error gsx < 1\n"); exit(EXIT_FAILURE); } if (gsy != 1) { printf("\n***Error gsy != 1\n"); exit(EXIT_FAILURE); } if (gsz != 1) { printf("\n***Error gsz != 1\n"); exit(EXIT_FAILURE); } if (numele < 1) { printf("\n***Error numele < 1\n"); exit(EXIT_FAILURE); } } } KernelVectorAddCBTrigILP2Params_t; void QueryKernelVectorAddCBTrigILP2(char *KernelName, int bs_start, int bs_end, int bs_inc, int gs_start, int gs_end, int gs_inc, int numele) { list<KernelVectorAddCBTrigILP2Params_t*> params; for (int gsx = gs_start; gsx < gs_end; gsx += gs_inc) for (int bsx = bs_start; bsx < bs_end; bsx += bs_inc) params.push_back(new KernelVectorAddCBTrigILP2Params_t(bsx, 1, 1, gsx, 1, 1, numele)); printf("#\n# %s\n#", KernelName); list<KernelVectorAddCBTrigILP2Params_t*>::iterator i = params.begin(); printf("\n%s: compile: params -bs %4d,%d,%d -gs %4d,%d,%d -numele %d", KernelName, (*i)->m_bs.x, (*i)->m_bs.y, (*i)->m_bs.z, (*i)->m_gs.x, (*i)->m_gs.y, (*i)->m_gs.z, (*i)->m_NumberOfElements); for (i++; i != params.end(); ++i) { printf("\n%s: nocompile: params -bs %4d,%d,%d -gs %4d,%d,%d -numele %d", KernelName, (*i)->m_bs.x, (*i)->m_bs.y, (*i)->m_bs.z, (*i)->m_gs.x, (*i)->m_gs.y, (*i)->m_gs.z, (*i)->m_NumberOfElements); } printf("\n"); } // // compute bound version of vector add kernel // __global__ void kernelVectorAddCBTrigILP2(const float *A, const float *B, float *C, float K1, float K2, int numElements) { const int s1 = blockDim.x * gridDim.x; const int stride = blockDim.x * gridDim.x * 2; for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < numElements; i += stride) { // C[i] = sin(K1)*A[i] + cos(K2)*B[i] // C[i+s] = sin(K1)*A[i+s] + cos(K2)*B[i+s] float T1 = A[i]; float T2 = B[i]; float T3 = i + s1 < numElements ? A[i + s1] : 0; float T4 = i + s1 < numElements ? 
B[i + s1] : 0; float T5 = sin(K1); float T6 = cos(K2); float T7 = sin(K1); float T8 = cos(K2); float T9 = T1*T5; float T10 = T2*T6; float T11 = T3*T7; float T12 = T4*T8; C[i] = T9 + T10; if (i + s1 < numElements) C[i + s1] = T11 + T12; } } void LaunchKernelVectorAddCBTrigILP2(dim3& gs, dim3& bs, char **argv, int argc, int nextarg) { printf("\nPreparing %s", KernelVectorAddCBTrigILP2Name); if (strcmp(argv[nextarg], "-numele") == 0) { printf("\nAllocating RAM"); cudaError_t err = cudaSuccess; int numElements = stoi(argv[nextarg + 1], nullptr); size_t size = numElements * sizeof(float); KernelVectorAddCBTrigILP2Params_t Verify(bs.x, bs.y, bs.z, gs.x, gs.y, gs.z, numElements); float *h_B = new float[numElements]; float *h_C = new float[numElements]; // Verify that allocations succeeded if (h_B == NULL || h_C == NULL) { printf("Failed to allocate host vectors in LaunchKernelVectorAddCBTrigILP2\n"); exit(EXIT_FAILURE); } float *d_A = NULL; err = cudaMalloc((void **)&d_A, size); if (err != cudaSuccess) { printf("Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } float *d_B = NULL; err = cudaMalloc((void **)&d_B, size); if (err != cudaSuccess) { printf("Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } float *d_C = NULL; err = cudaMalloc((void **)&d_C, size); if (err != cudaSuccess) { printf("Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } printf("\nInitializing GPU RAM"); InitRandomSequence(d_A, numElements); InitRandomSequence(d_B, numElements); printf("\nLaunching kernel: kernelVectorAddCBTrigILP2"); printf("\n\tgridsize (%d,%d,%d)", gs.x, gs.y, gs.z); printf("\n\tblocksize (%d,%d,%d)", bs.x, bs.y, bs.z); printf("\n\tNumElements %d", numElements); kernelVectorAddCBTrigILP2 << <gs, bs >> > (d_A, d_B, d_C, 0.0f, 0.0f, numElements); cudaDeviceSynchronize(); err = cudaGetLastError(); if (err != cudaSuccess) { printf("Failed to launch kernelVectorAddCBTrigILP2 (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(h_B, d_B, size, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { printf("Failed to copy vector B from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { printf("Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Verify that the result vector is correct printf("\nValidating results ..."); #pragma omp parallel for for (int i = 0; i < numElements; ++i) { if (fabs(h_B[i] - h_C[i]) > 1e-5) { printf("Result verification failed at element %d!\n", i); exit(EXIT_FAILURE); } } printf(" success!\n"); err = cudaFree(d_A); if (err != cudaSuccess) { printf("Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_B); if (err != cudaSuccess) { printf("Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_C); if (err != cudaSuccess) { printf("Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } delete[]h_B; delete[]h_C; } else { printf("\nExpecting -numele, but saw %s", argv[nextarg]); exit(EXIT_FAILURE); } }
46132e7f3d63cf8828d1fd1dc553aff079fc494d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "hip/hip_runtime.h" #include <cstdio> #include <cstdint> #include <cstdlib> extern "C" __device__ void ptxTestFunc(int* dst, const int* src); static __global__ void ptxTestKernel(int* dst, const int* src) { ptxTestFunc(dst, src); } int main(void) { hipDeviceProp_t props{ }; auto cudaStatus = hipGetDeviceProperties(&props, 0); if (cudaStatus != hipSuccess) { printf("hipGetDeviceProperties failed: %s\n", hipGetErrorString(cudaStatus)); return 0; } auto const maxBlocksPerSM = props.maxBlocksPerMultiProcessor; auto const maxThreadsPerBlock = props.maxThreadsPerBlock; auto const maxThreadsPerSM = props.maxThreadsPerMultiProcessor; auto const regsPerSM = props.regsPerMultiprocessor; auto const sharedMemSizePerBlock = props.sharedMemPerBlock; auto const constMemSize = props.totalConstMem; printf("Current GPU: %s\n", props.name); printf("compute capability: %d.%d\n", props.major, props.minor); printf("max blocks per SM: %d\n", maxBlocksPerSM); printf("max threads per block: %d\n", maxThreadsPerBlock); printf("max threads per SM: %d\n", maxThreadsPerSM); printf("registers per SM: %d\n", regsPerSM); printf("shared memroy size per block: %zuKB\n", sharedMemSizePerBlock / 1024); printf("constant memory size on the device: %zuKB\n", constMemSize / 1024); puts("\n======== ptxTestKernel kernel info ========"); hipFuncAttributes funcAttrs{ }; cudaStatus = hipFuncGetAttributes(&funcAttrs, ptxTestKernel); if (cudaStatus != hipSuccess) { printf("hipFuncGetAttributes failed: %s\n", hipGetErrorString(cudaStatus)); return 0; } printf("max threads per block: %d\n", funcAttrs.maxThreadsPerBlock); printf("number of registers by each thread: %d\n", funcAttrs.numRegs); printf("local memory size by each thread: %zu bytes\n", funcAttrs.localSizeBytes); printf("shared memory size per block: %zu bytes\n", funcAttrs.sharedSizeBytes); printf("constant memory size: %zu bytes\n", funcAttrs.constSizeBytes); puts(""); constexpr int elemCount = 4096; int* hostSrc = new int[elemCount]; for (int i = 0; i < elemCount; i++) hostSrc[i] = i + 1; int* devDst = nullptr; int* devSrc = nullptr; constexpr auto bufferSize = elemCount * sizeof(*hostSrc); do { cudaStatus = hipMalloc(&devDst, bufferSize); if (cudaStatus != hipSuccess) { printf("hipMalloc devDst failed: %s\n", hipGetErrorString(cudaStatus)); break; } cudaStatus = hipMemcpy(devDst, hostSrc, bufferSize, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { printf("hipMemcpy to devDst failed: %s\n", hipGetErrorString(cudaStatus)); break; } cudaStatus = hipMalloc(&devSrc, bufferSize); if (cudaStatus != hipSuccess) { printf("hipMalloc devSrc failed: %s\n", hipGetErrorString(cudaStatus)); break; } cudaStatus = hipMemcpy(devSrc, hostSrc, bufferSize, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { printf("hipMemcpy to devSrc failed: %s\n", hipGetErrorString(cudaStatus)); break; } constexpr int threadsPerBlock = 256; constexpr auto nBlocks = elemCount / threadsPerBlock; hipLaunchKernelGGL(( ptxTestKernel) , dim3(nBlocks), dim3(threadsPerBlock) , 0, 0, devDst, devSrc); cudaStatus = hipMemcpy(hostSrc, devDst, bufferSize, hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) printf("hipMemcpy to hostSrc failed: %s\n", hipGetErrorString(cudaStatus)); // result verification bool success = true; for (int i = 0; i < elemCount; i++) { if (hostSrc[i] != (i + 1) * 2) { success = false; break; } } printf("Is equal? %s\n", success ? 
"YES" : "NO"); } while (false); if (hostSrc != nullptr) delete[] hostSrc; if (devDst != nullptr) hipFree(devDst); if (devSrc != nullptr) hipFree(devSrc); return 0; }
46132e7f3d63cf8828d1fd1dc553aff079fc494d.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "cuda.h" #include <cstdio> #include <cstdint> #include <cstdlib> extern "C" __device__ void ptxTestFunc(int* dst, const int* src); static __global__ void ptxTestKernel(int* dst, const int* src) { ptxTestFunc(dst, src); } int main(void) { cudaDeviceProp props{ }; auto cudaStatus = cudaGetDeviceProperties(&props, 0); if (cudaStatus != cudaSuccess) { printf("cudaGetDeviceProperties failed: %s\n", cudaGetErrorString(cudaStatus)); return 0; } auto const maxBlocksPerSM = props.maxBlocksPerMultiProcessor; auto const maxThreadsPerBlock = props.maxThreadsPerBlock; auto const maxThreadsPerSM = props.maxThreadsPerMultiProcessor; auto const regsPerSM = props.regsPerMultiprocessor; auto const sharedMemSizePerBlock = props.sharedMemPerBlock; auto const constMemSize = props.totalConstMem; printf("Current GPU: %s\n", props.name); printf("compute capability: %d.%d\n", props.major, props.minor); printf("max blocks per SM: %d\n", maxBlocksPerSM); printf("max threads per block: %d\n", maxThreadsPerBlock); printf("max threads per SM: %d\n", maxThreadsPerSM); printf("registers per SM: %d\n", regsPerSM); printf("shared memroy size per block: %zuKB\n", sharedMemSizePerBlock / 1024); printf("constant memory size on the device: %zuKB\n", constMemSize / 1024); puts("\n======== ptxTestKernel kernel info ========"); cudaFuncAttributes funcAttrs{ }; cudaStatus = cudaFuncGetAttributes(&funcAttrs, ptxTestKernel); if (cudaStatus != cudaSuccess) { printf("cudaFuncGetAttributes failed: %s\n", cudaGetErrorString(cudaStatus)); return 0; } printf("max threads per block: %d\n", funcAttrs.maxThreadsPerBlock); printf("number of registers by each thread: %d\n", funcAttrs.numRegs); printf("local memory size by each thread: %zu bytes\n", funcAttrs.localSizeBytes); printf("shared memory size per block: %zu bytes\n", funcAttrs.sharedSizeBytes); printf("constant memory size: %zu bytes\n", funcAttrs.constSizeBytes); puts(""); constexpr int elemCount = 4096; int* hostSrc = new int[elemCount]; for (int i = 0; i < elemCount; i++) hostSrc[i] = i + 1; int* devDst = nullptr; int* devSrc = nullptr; constexpr auto bufferSize = elemCount * sizeof(*hostSrc); do { cudaStatus = cudaMalloc(&devDst, bufferSize); if (cudaStatus != cudaSuccess) { printf("cudaMalloc devDst failed: %s\n", cudaGetErrorString(cudaStatus)); break; } cudaStatus = cudaMemcpy(devDst, hostSrc, bufferSize, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { printf("cudaMemcpy to devDst failed: %s\n", cudaGetErrorString(cudaStatus)); break; } cudaStatus = cudaMalloc(&devSrc, bufferSize); if (cudaStatus != cudaSuccess) { printf("cudaMalloc devSrc failed: %s\n", cudaGetErrorString(cudaStatus)); break; } cudaStatus = cudaMemcpy(devSrc, hostSrc, bufferSize, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { printf("cudaMemcpy to devSrc failed: %s\n", cudaGetErrorString(cudaStatus)); break; } constexpr int threadsPerBlock = 256; constexpr auto nBlocks = elemCount / threadsPerBlock; ptxTestKernel <<< nBlocks, threadsPerBlock >>> (devDst, devSrc); cudaStatus = cudaMemcpy(hostSrc, devDst, bufferSize, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) printf("cudaMemcpy to hostSrc failed: %s\n", cudaGetErrorString(cudaStatus)); // result verification bool success = true; for (int i = 0; i < elemCount; i++) { if (hostSrc[i] != (i + 1) * 2) { success = false; break; } } printf("Is equal? %s\n", success ? 
"YES" : "NO"); } while (false); if (hostSrc != nullptr) delete[] hostSrc; if (devDst != nullptr) cudaFree(devDst); if (devSrc != nullptr) cudaFree(devSrc); return 0; }
f0da4ab142086ece021d5808f6a9583552923c14.hip
// !!! This is a file automatically generated by hipify!!! #include <unittest/unittest.h> #include <thrust/scatter.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/sequence.h> #include <thrust/fill.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/sequence.h> template <class Vector> void TestScatterSimple(void) { typedef typename Vector::value_type T; Vector map(5); // scatter indices Vector src(5); // source vector Vector dst(8); // destination vector map[0] = 6; map[1] = 3; map[2] = 1; map[3] = 7; map[4] = 2; src[0] = 0; src[1] = 1; src[2] = 2; src[3] = 3; src[4] = 4; dst[0] = 0; dst[1] = 0; dst[2] = 0; dst[3] = 0; dst[4] = 0; dst[5] = 0; dst[6] = 0; dst[7] = 0; thrust::scatter(src.begin(), src.end(), map.begin(), dst.begin()); ASSERT_EQUAL(dst[0], 0); ASSERT_EQUAL(dst[1], 2); ASSERT_EQUAL(dst[2], 4); ASSERT_EQUAL(dst[3], 1); ASSERT_EQUAL(dst[4], 0); ASSERT_EQUAL(dst[5], 0); ASSERT_EQUAL(dst[6], 0); ASSERT_EQUAL(dst[7], 3); } DECLARE_VECTOR_UNITTEST(TestScatterSimple); void TestScatterFromHostToDevice(void) { // source vector thrust::host_vector<int> src(5); src[0] = 0; src[1] = 1; src[2] = 2; src[3] = 3; src[4] = 4; // scatter indices thrust::host_vector<int> h_map(5); h_map[0] = 6; h_map[1] = 3; h_map[2] = 1; h_map[3] = 7; h_map[4] = 2; thrust::device_vector<int> d_map = h_map; // destination vector thrust::device_vector<int> dst(8, (int) 0); // expected result thrust::device_vector<int> result = dst; result[1] = 2; result[2] = 4; result[3] = 1; result[7] = 3; // with map on the host thrust::scatter(src.begin(), src.end(), h_map.begin(), dst.begin()); ASSERT_EQUAL(result, dst); // clear the destination vector thrust::fill(dst.begin(), dst.end(), (int) 0); // with map on the device thrust::scatter(src.begin(), src.end(), d_map.begin(), dst.begin()); ASSERT_EQUAL(result, dst); } DECLARE_UNITTEST(TestScatterFromHostToDevice); void TestScatterFromDeviceToHost(void) { // source vector thrust::device_vector<int> src(5); src[0] = 0; src[1] = 1; src[2] = 2; src[3] = 3; src[4] = 4; // scatter indices thrust::host_vector<int> h_map(5); h_map[0] = 6; h_map[1] = 3; h_map[2] = 1; h_map[3] = 7; h_map[4] = 2; thrust::device_vector<int> d_map = h_map; // destination vector thrust::host_vector<int> dst(8, (int) 0); // expected result thrust::host_vector<int> result = dst; result[1] = 2; result[2] = 4; result[3] = 1; result[7] = 3; // with map on the host thrust::scatter(src.begin(), src.end(), h_map.begin(), dst.begin()); // clear the destination vector thrust::fill(dst.begin(), dst.end(), (int) 0); // with map on the device thrust::scatter(src.begin(), src.end(), d_map.begin(), dst.begin()); } DECLARE_UNITTEST(TestScatterFromDeviceToHost); template <typename T> void TestScatter(const size_t n) { const size_t output_size = ::min((size_t) 10, 2 * n); thrust::host_vector<T> h_input(n, (T) 1); thrust::device_vector<T> d_input(n, (T) 1); thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) h_map[i] = h_map[i] % output_size; thrust::device_vector<unsigned int> d_map = h_map; thrust::host_vector<T> h_output(output_size, (T) 0); thrust::device_vector<T> d_output(output_size, (T) 0); thrust::scatter(h_input.begin(), h_input.end(), h_map.begin(), h_output.begin()); thrust::scatter(d_input.begin(), d_input.end(), d_map.begin(), d_output.begin()); ASSERT_EQUAL(h_output, d_output); } DECLARE_VARIABLE_UNITTEST(TestScatter); template <class Vector> void TestScatterIfSimple(void) { typedef typename Vector::value_type T; 
Vector flg(5); // predicate array Vector map(5); // scatter indices Vector src(5); // source vector Vector dst(8); // destination vector flg[0] = 0; flg[1] = 1; flg[2] = 0; flg[3] = 1; flg[4] = 0; map[0] = 6; map[1] = 3; map[2] = 1; map[3] = 7; map[4] = 2; src[0] = 0; src[1] = 1; src[2] = 2; src[3] = 3; src[4] = 4; dst[0] = 0; dst[1] = 0; dst[2] = 0; dst[3] = 0; dst[4] = 0; dst[5] = 0; dst[6] = 0; dst[7] = 0; thrust::scatter_if(src.begin(), src.end(), map.begin(), flg.begin(), dst.begin()); ASSERT_EQUAL(dst[0], 0); ASSERT_EQUAL(dst[1], 0); ASSERT_EQUAL(dst[2], 0); ASSERT_EQUAL(dst[3], 1); ASSERT_EQUAL(dst[4], 0); ASSERT_EQUAL(dst[5], 0); ASSERT_EQUAL(dst[6], 0); ASSERT_EQUAL(dst[7], 3); } DECLARE_VECTOR_UNITTEST(TestScatterIfSimple); template <typename T> class is_even_scatter_if { public: __host__ __device__ bool operator()(const T i) const { return (i % 2) == 0; } }; template <typename T> void TestScatterIf(const size_t n) { const size_t output_size = ::min((size_t) 10, 2 * n); thrust::host_vector<T> h_input(n, (T) 1); thrust::device_vector<T> d_input(n, (T) 1); thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) h_map[i] = h_map[i] % output_size; thrust::device_vector<unsigned int> d_map = h_map; thrust::host_vector<T> h_output(output_size, (T) 0); thrust::device_vector<T> d_output(output_size, (T) 0); thrust::scatter_if(h_input.begin(), h_input.end(), h_map.begin(), h_map.begin(), h_output.begin(), is_even_scatter_if<unsigned int>()); thrust::scatter_if(d_input.begin(), d_input.end(), d_map.begin(), d_map.begin(), d_output.begin(), is_even_scatter_if<unsigned int>()); ASSERT_EQUAL(h_output, d_output); } DECLARE_VARIABLE_UNITTEST(TestScatterIf); template <typename Vector> void TestScatterCountingIterator(void) { typedef typename Vector::value_type T; Vector source(10); thrust::sequence(source.begin(), source.end(), 0); Vector map(10); thrust::sequence(map.begin(), map.end(), 0); Vector output(10); // source has any_space_tag thrust::fill(output.begin(), output.end(), 0); thrust::scatter(thrust::make_counting_iterator(0), thrust::make_counting_iterator(10), map.begin(), output.begin()); ASSERT_EQUAL(output, map); // map has any_space_tag thrust::fill(output.begin(), output.end(), 0); thrust::scatter(source.begin(), source.end(), thrust::make_counting_iterator(0), output.begin()); ASSERT_EQUAL(output, map); // source and map have any_space_tag thrust::fill(output.begin(), output.end(), 0); thrust::scatter(thrust::make_counting_iterator(0), thrust::make_counting_iterator(10), thrust::make_counting_iterator(0), output.begin()); ASSERT_EQUAL(output, map); } DECLARE_VECTOR_UNITTEST(TestScatterCountingIterator); template <typename Vector> void TestScatterIfCountingIterator(void) { typedef typename Vector::value_type T; Vector source(10); thrust::sequence(source.begin(), source.end(), 0); Vector map(10); thrust::sequence(map.begin(), map.end(), 0); Vector stencil(10, 1); Vector output(10); // source has any_space_tag thrust::fill(output.begin(), output.end(), 0); thrust::scatter_if(thrust::make_counting_iterator(0), thrust::make_counting_iterator(10), map.begin(), stencil.begin(), output.begin()); ASSERT_EQUAL(output, map); // map has any_space_tag thrust::fill(output.begin(), output.end(), 0); thrust::scatter_if(source.begin(), source.end(), thrust::make_counting_iterator(0), stencil.begin(), output.begin()); ASSERT_EQUAL(output, map); // source and map have any_space_tag thrust::fill(output.begin(), output.end(), 0); 
thrust::scatter_if(thrust::make_counting_iterator(0), thrust::make_counting_iterator(10), thrust::make_counting_iterator(0), stencil.begin(), output.begin()); ASSERT_EQUAL(output, map); } DECLARE_VECTOR_UNITTEST(TestScatterIfCountingIterator);
f0da4ab142086ece021d5808f6a9583552923c14.cu
#include <unittest/unittest.h> #include <thrust/scatter.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/sequence.h> #include <thrust/fill.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/sequence.h> template <class Vector> void TestScatterSimple(void) { typedef typename Vector::value_type T; Vector map(5); // scatter indices Vector src(5); // source vector Vector dst(8); // destination vector map[0] = 6; map[1] = 3; map[2] = 1; map[3] = 7; map[4] = 2; src[0] = 0; src[1] = 1; src[2] = 2; src[3] = 3; src[4] = 4; dst[0] = 0; dst[1] = 0; dst[2] = 0; dst[3] = 0; dst[4] = 0; dst[5] = 0; dst[6] = 0; dst[7] = 0; thrust::scatter(src.begin(), src.end(), map.begin(), dst.begin()); ASSERT_EQUAL(dst[0], 0); ASSERT_EQUAL(dst[1], 2); ASSERT_EQUAL(dst[2], 4); ASSERT_EQUAL(dst[3], 1); ASSERT_EQUAL(dst[4], 0); ASSERT_EQUAL(dst[5], 0); ASSERT_EQUAL(dst[6], 0); ASSERT_EQUAL(dst[7], 3); } DECLARE_VECTOR_UNITTEST(TestScatterSimple); void TestScatterFromHostToDevice(void) { // source vector thrust::host_vector<int> src(5); src[0] = 0; src[1] = 1; src[2] = 2; src[3] = 3; src[4] = 4; // scatter indices thrust::host_vector<int> h_map(5); h_map[0] = 6; h_map[1] = 3; h_map[2] = 1; h_map[3] = 7; h_map[4] = 2; thrust::device_vector<int> d_map = h_map; // destination vector thrust::device_vector<int> dst(8, (int) 0); // expected result thrust::device_vector<int> result = dst; result[1] = 2; result[2] = 4; result[3] = 1; result[7] = 3; // with map on the host thrust::scatter(src.begin(), src.end(), h_map.begin(), dst.begin()); ASSERT_EQUAL(result, dst); // clear the destination vector thrust::fill(dst.begin(), dst.end(), (int) 0); // with map on the device thrust::scatter(src.begin(), src.end(), d_map.begin(), dst.begin()); ASSERT_EQUAL(result, dst); } DECLARE_UNITTEST(TestScatterFromHostToDevice); void TestScatterFromDeviceToHost(void) { // source vector thrust::device_vector<int> src(5); src[0] = 0; src[1] = 1; src[2] = 2; src[3] = 3; src[4] = 4; // scatter indices thrust::host_vector<int> h_map(5); h_map[0] = 6; h_map[1] = 3; h_map[2] = 1; h_map[3] = 7; h_map[4] = 2; thrust::device_vector<int> d_map = h_map; // destination vector thrust::host_vector<int> dst(8, (int) 0); // expected result thrust::host_vector<int> result = dst; result[1] = 2; result[2] = 4; result[3] = 1; result[7] = 3; // with map on the host thrust::scatter(src.begin(), src.end(), h_map.begin(), dst.begin()); // clear the destination vector thrust::fill(dst.begin(), dst.end(), (int) 0); // with map on the device thrust::scatter(src.begin(), src.end(), d_map.begin(), dst.begin()); } DECLARE_UNITTEST(TestScatterFromDeviceToHost); template <typename T> void TestScatter(const size_t n) { const size_t output_size = std::min((size_t) 10, 2 * n); thrust::host_vector<T> h_input(n, (T) 1); thrust::device_vector<T> d_input(n, (T) 1); thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) h_map[i] = h_map[i] % output_size; thrust::device_vector<unsigned int> d_map = h_map; thrust::host_vector<T> h_output(output_size, (T) 0); thrust::device_vector<T> d_output(output_size, (T) 0); thrust::scatter(h_input.begin(), h_input.end(), h_map.begin(), h_output.begin()); thrust::scatter(d_input.begin(), d_input.end(), d_map.begin(), d_output.begin()); ASSERT_EQUAL(h_output, d_output); } DECLARE_VARIABLE_UNITTEST(TestScatter); template <class Vector> void TestScatterIfSimple(void) { typedef typename Vector::value_type T; Vector flg(5); // predicate array Vector map(5); // 
scatter indices Vector src(5); // source vector Vector dst(8); // destination vector flg[0] = 0; flg[1] = 1; flg[2] = 0; flg[3] = 1; flg[4] = 0; map[0] = 6; map[1] = 3; map[2] = 1; map[3] = 7; map[4] = 2; src[0] = 0; src[1] = 1; src[2] = 2; src[3] = 3; src[4] = 4; dst[0] = 0; dst[1] = 0; dst[2] = 0; dst[3] = 0; dst[4] = 0; dst[5] = 0; dst[6] = 0; dst[7] = 0; thrust::scatter_if(src.begin(), src.end(), map.begin(), flg.begin(), dst.begin()); ASSERT_EQUAL(dst[0], 0); ASSERT_EQUAL(dst[1], 0); ASSERT_EQUAL(dst[2], 0); ASSERT_EQUAL(dst[3], 1); ASSERT_EQUAL(dst[4], 0); ASSERT_EQUAL(dst[5], 0); ASSERT_EQUAL(dst[6], 0); ASSERT_EQUAL(dst[7], 3); } DECLARE_VECTOR_UNITTEST(TestScatterIfSimple); template <typename T> class is_even_scatter_if { public: __host__ __device__ bool operator()(const T i) const { return (i % 2) == 0; } }; template <typename T> void TestScatterIf(const size_t n) { const size_t output_size = std::min((size_t) 10, 2 * n); thrust::host_vector<T> h_input(n, (T) 1); thrust::device_vector<T> d_input(n, (T) 1); thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) h_map[i] = h_map[i] % output_size; thrust::device_vector<unsigned int> d_map = h_map; thrust::host_vector<T> h_output(output_size, (T) 0); thrust::device_vector<T> d_output(output_size, (T) 0); thrust::scatter_if(h_input.begin(), h_input.end(), h_map.begin(), h_map.begin(), h_output.begin(), is_even_scatter_if<unsigned int>()); thrust::scatter_if(d_input.begin(), d_input.end(), d_map.begin(), d_map.begin(), d_output.begin(), is_even_scatter_if<unsigned int>()); ASSERT_EQUAL(h_output, d_output); } DECLARE_VARIABLE_UNITTEST(TestScatterIf); template <typename Vector> void TestScatterCountingIterator(void) { typedef typename Vector::value_type T; Vector source(10); thrust::sequence(source.begin(), source.end(), 0); Vector map(10); thrust::sequence(map.begin(), map.end(), 0); Vector output(10); // source has any_space_tag thrust::fill(output.begin(), output.end(), 0); thrust::scatter(thrust::make_counting_iterator(0), thrust::make_counting_iterator(10), map.begin(), output.begin()); ASSERT_EQUAL(output, map); // map has any_space_tag thrust::fill(output.begin(), output.end(), 0); thrust::scatter(source.begin(), source.end(), thrust::make_counting_iterator(0), output.begin()); ASSERT_EQUAL(output, map); // source and map have any_space_tag thrust::fill(output.begin(), output.end(), 0); thrust::scatter(thrust::make_counting_iterator(0), thrust::make_counting_iterator(10), thrust::make_counting_iterator(0), output.begin()); ASSERT_EQUAL(output, map); } DECLARE_VECTOR_UNITTEST(TestScatterCountingIterator); template <typename Vector> void TestScatterIfCountingIterator(void) { typedef typename Vector::value_type T; Vector source(10); thrust::sequence(source.begin(), source.end(), 0); Vector map(10); thrust::sequence(map.begin(), map.end(), 0); Vector stencil(10, 1); Vector output(10); // source has any_space_tag thrust::fill(output.begin(), output.end(), 0); thrust::scatter_if(thrust::make_counting_iterator(0), thrust::make_counting_iterator(10), map.begin(), stencil.begin(), output.begin()); ASSERT_EQUAL(output, map); // map has any_space_tag thrust::fill(output.begin(), output.end(), 0); thrust::scatter_if(source.begin(), source.end(), thrust::make_counting_iterator(0), stencil.begin(), output.begin()); ASSERT_EQUAL(output, map); // source and map have any_space_tag thrust::fill(output.begin(), output.end(), 0); thrust::scatter_if(thrust::make_counting_iterator(0), 
thrust::make_counting_iterator(10), thrust::make_counting_iterator(0), stencil.begin(), output.begin()); ASSERT_EQUAL(output, map); } DECLARE_VECTOR_UNITTEST(TestScatterIfCountingIterator);
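This pair is a Thrust unit test rather than raw runtime code; because rocThrust keeps the `thrust::` headers and namespace, hipify leaves the source essentially untouched, and the visible differences are only the generated banner and `::min` in place of `std::min`. For reference, here is a hedged sketch of the `thrust::scatter` pattern that `TestScatterSimple` exercises; the values mirror that test, but this standalone program is an illustration, not the test itself.

```cpp
// dst[map[i]] = src[i] for each i; untouched dst elements keep their initial value.
#include <thrust/device_vector.h>
#include <thrust/scatter.h>

int main() {
    const int src_h[5] = {0, 1, 2, 3, 4};
    const int map_h[5] = {6, 3, 1, 7, 2};
    thrust::device_vector<int> src(src_h, src_h + 5);
    thrust::device_vector<int> map(map_h, map_h + 5);
    thrust::device_vector<int> dst(8, 0);
    thrust::scatter(src.begin(), src.end(), map.begin(), dst.begin());
    // dst is now {0, 2, 4, 1, 0, 0, 0, 3}, matching the assertions in TestScatterSimple.
    return 0;
}
```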
54c2e1126c09636cf78d4ef269e51bc5a43b6d21.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" extern "C" { #include "crop_layer.h" #include "utils.h" #include "hip/hip_runtime.h" #include "image.h" } __device__ float get_pixel_kernel(float *image, int w, int h, int x, int y, int c) { if(x < 0 || x >= w || y < 0 || y >= h) return 0; return image[x + w*(y + c*h)]; } __device__ float3 rgb_to_hsv_kernel(float3 rgb) { float r = rgb.x; float g = rgb.y; float b = rgb.z; float h, s, v; float max = (r > g) ? ( (r > b) ? r : b) : ( (g > b) ? g : b); float min = (r < g) ? ( (r < b) ? r : b) : ( (g < b) ? g : b); float delta = max - min; v = max; if(max == 0){ s = 0; h = -1; }else{ s = delta/max; if(r == max){ h = (g - b) / delta; } else if (g == max) { h = 2 + (b - r) / delta; } else { h = 4 + (r - g) / delta; } if (h < 0) h += 6; } return make_float3(h, s, v); } __device__ float3 hsv_to_rgb_kernel(float3 hsv) { float h = hsv.x; float s = hsv.y; float v = hsv.z; float r, g, b; float f, p, q, t; if (s == 0) { r = g = b = v; } else { int index = (int) floorf(h); f = h - index; p = v*(1-s); q = v*(1-s*f); t = v*(1-s*(1-f)); if(index == 0){ r = v; g = t; b = p; } else if(index == 1){ r = q; g = v; b = p; } else if(index == 2){ r = p; g = v; b = t; } else if(index == 3){ r = p; g = q; b = v; } else if(index == 4){ r = t; g = p; b = v; } else { r = v; g = p; b = q; } } r = (r < 0) ? 0 : ((r > 1) ? 1 : r); g = (g < 0) ? 0 : ((g > 1) ? 1 : g); b = (b < 0) ? 0 : ((b > 1) ? 1 : b); return make_float3(r, g, b); } __device__ float bilinear_interpolate_kernel(float *image, int w, int h, float x, float y, int c) { int ix = (int) floorf(x); int iy = (int) floorf(y); float dx = x - ix; float dy = y - iy; float val = (1-dy) * (1-dx) * get_pixel_kernel(image, w, h, ix, iy, c) + dy * (1-dx) * get_pixel_kernel(image, w, h, ix, iy+1, c) + (1-dy) * dx * get_pixel_kernel(image, w, h, ix+1, iy, c) + dy * dx * get_pixel_kernel(image, w, h, ix+1, iy+1, c); return val; } __global__ void levels_image_kernel(float *image, float *rand, int batch, int w, int h, int train, float saturation, float exposure, float translate, float scale, float shift) { int size = batch * w * h; int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(id >= size) return; int x = id % w; id /= w; int y = id % h; id /= h; float rshift = rand[0]; float gshift = rand[1]; float bshift = rand[2]; float r0 = rand[8*id + 0]; float r1 = rand[8*id + 1]; float r2 = rand[8*id + 2]; float r3 = rand[8*id + 3]; saturation = r0*(saturation - 1) + 1; saturation = (r1 > .5) ? 1./saturation : saturation; exposure = r2*(exposure - 1) + 1; exposure = (r3 > .5) ? 
1./exposure : exposure; size_t offset = id * h * w * 3; image += offset; float r = image[x + w*(y + h*0)]; float g = image[x + w*(y + h*1)]; float b = image[x + w*(y + h*2)]; float3 rgb = make_float3(r,g,b); if(train){ float3 hsv = rgb_to_hsv_kernel(rgb); hsv.y *= saturation; hsv.z *= exposure; rgb = hsv_to_rgb_kernel(hsv); } else { shift = 0; } image[x + w*(y + h*0)] = rgb.x*scale + translate + (rshift - .5)*shift; image[x + w*(y + h*1)] = rgb.y*scale + translate + (gshift - .5)*shift; image[x + w*(y + h*2)] = rgb.z*scale + translate + (bshift - .5)*shift; } __global__ void forward_crop_layer_kernel(float *input, float *rand, int size, int c, int h, int w, int crop_height, int crop_width, int train, int flip, float angle, float *output) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(id >= size) return; float cx = w/2.; float cy = h/2.; int count = id; int j = id % crop_width; id /= crop_width; int i = id % crop_height; id /= crop_height; int k = id % c; id /= c; int b = id; float r4 = rand[8*b + 4]; float r5 = rand[8*b + 5]; float r6 = rand[8*b + 6]; float r7 = rand[8*b + 7]; float dw = (w - crop_width)*r4; float dh = (h - crop_height)*r5; flip = (flip && (r6 > .5)); angle = 2*angle*r7 - angle; if(!train){ dw = (w - crop_width)/2.; dh = (h - crop_height)/2.; flip = 0; angle = 0; } input += w*h*c*b; float x = (flip) ? w - dw - j - 1 : j + dw; float y = i + dh; float rx = cos(angle)*(x-cx) - sin(angle)*(y-cy) + cx; float ry = sin(angle)*(x-cx) + cos(angle)*(y-cy) + cy; output[count] = bilinear_interpolate_kernel(input, w, h, rx, ry, k); } extern "C" void forward_crop_layer_gpu(crop_layer layer, network_state state) { cuda_random(layer.rand_gpu, layer.batch*8); float radians = layer.angle*3.14159265/180.; float scale = 2; float translate = -1; if(layer.noadjust){ scale = 1; translate = 0; } int size = layer.batch * layer.w * layer.h; hipLaunchKernelGGL(( levels_image_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, state.input, layer.rand_gpu, layer.batch, layer.w, layer.h, state.train, layer.saturation, layer.exposure, translate, scale, layer.shift); check_error(hipPeekAtLastError()); size = layer.batch*layer.c*layer.out_w*layer.out_h; hipLaunchKernelGGL(( forward_crop_layer_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, state.input, layer.rand_gpu, size, layer.c, layer.h, layer.w, layer.out_h, layer.out_w, state.train, layer.flip, radians, layer.output_gpu); check_error(hipPeekAtLastError()); /* cuda_pull_array(layer.output_gpu, layer.output, size); image im = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 0*(size/layer.batch)); image im2 = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 1*(size/layer.batch)); image im3 = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 2*(size/layer.batch)); translate_image(im, -translate); scale_image(im, 1/scale); translate_image(im2, -translate); scale_image(im2, 1/scale); translate_image(im3, -translate); scale_image(im3, 1/scale); show_image(im, "cropped"); show_image(im2, "cropped2"); show_image(im3, "cropped3"); cvWaitKey(0); */ }
54c2e1126c09636cf78d4ef269e51bc5a43b6d21.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" extern "C" { #include "crop_layer.h" #include "utils.h" #include "cuda.h" #include "image.h" } __device__ float get_pixel_kernel(float *image, int w, int h, int x, int y, int c) { if(x < 0 || x >= w || y < 0 || y >= h) return 0; return image[x + w*(y + c*h)]; } __device__ float3 rgb_to_hsv_kernel(float3 rgb) { float r = rgb.x; float g = rgb.y; float b = rgb.z; float h, s, v; float max = (r > g) ? ( (r > b) ? r : b) : ( (g > b) ? g : b); float min = (r < g) ? ( (r < b) ? r : b) : ( (g < b) ? g : b); float delta = max - min; v = max; if(max == 0){ s = 0; h = -1; }else{ s = delta/max; if(r == max){ h = (g - b) / delta; } else if (g == max) { h = 2 + (b - r) / delta; } else { h = 4 + (r - g) / delta; } if (h < 0) h += 6; } return make_float3(h, s, v); } __device__ float3 hsv_to_rgb_kernel(float3 hsv) { float h = hsv.x; float s = hsv.y; float v = hsv.z; float r, g, b; float f, p, q, t; if (s == 0) { r = g = b = v; } else { int index = (int) floorf(h); f = h - index; p = v*(1-s); q = v*(1-s*f); t = v*(1-s*(1-f)); if(index == 0){ r = v; g = t; b = p; } else if(index == 1){ r = q; g = v; b = p; } else if(index == 2){ r = p; g = v; b = t; } else if(index == 3){ r = p; g = q; b = v; } else if(index == 4){ r = t; g = p; b = v; } else { r = v; g = p; b = q; } } r = (r < 0) ? 0 : ((r > 1) ? 1 : r); g = (g < 0) ? 0 : ((g > 1) ? 1 : g); b = (b < 0) ? 0 : ((b > 1) ? 1 : b); return make_float3(r, g, b); } __device__ float bilinear_interpolate_kernel(float *image, int w, int h, float x, float y, int c) { int ix = (int) floorf(x); int iy = (int) floorf(y); float dx = x - ix; float dy = y - iy; float val = (1-dy) * (1-dx) * get_pixel_kernel(image, w, h, ix, iy, c) + dy * (1-dx) * get_pixel_kernel(image, w, h, ix, iy+1, c) + (1-dy) * dx * get_pixel_kernel(image, w, h, ix+1, iy, c) + dy * dx * get_pixel_kernel(image, w, h, ix+1, iy+1, c); return val; } __global__ void levels_image_kernel(float *image, float *rand, int batch, int w, int h, int train, float saturation, float exposure, float translate, float scale, float shift) { int size = batch * w * h; int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(id >= size) return; int x = id % w; id /= w; int y = id % h; id /= h; float rshift = rand[0]; float gshift = rand[1]; float bshift = rand[2]; float r0 = rand[8*id + 0]; float r1 = rand[8*id + 1]; float r2 = rand[8*id + 2]; float r3 = rand[8*id + 3]; saturation = r0*(saturation - 1) + 1; saturation = (r1 > .5) ? 1./saturation : saturation; exposure = r2*(exposure - 1) + 1; exposure = (r3 > .5) ? 
1./exposure : exposure; size_t offset = id * h * w * 3; image += offset; float r = image[x + w*(y + h*0)]; float g = image[x + w*(y + h*1)]; float b = image[x + w*(y + h*2)]; float3 rgb = make_float3(r,g,b); if(train){ float3 hsv = rgb_to_hsv_kernel(rgb); hsv.y *= saturation; hsv.z *= exposure; rgb = hsv_to_rgb_kernel(hsv); } else { shift = 0; } image[x + w*(y + h*0)] = rgb.x*scale + translate + (rshift - .5)*shift; image[x + w*(y + h*1)] = rgb.y*scale + translate + (gshift - .5)*shift; image[x + w*(y + h*2)] = rgb.z*scale + translate + (bshift - .5)*shift; } __global__ void forward_crop_layer_kernel(float *input, float *rand, int size, int c, int h, int w, int crop_height, int crop_width, int train, int flip, float angle, float *output) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(id >= size) return; float cx = w/2.; float cy = h/2.; int count = id; int j = id % crop_width; id /= crop_width; int i = id % crop_height; id /= crop_height; int k = id % c; id /= c; int b = id; float r4 = rand[8*b + 4]; float r5 = rand[8*b + 5]; float r6 = rand[8*b + 6]; float r7 = rand[8*b + 7]; float dw = (w - crop_width)*r4; float dh = (h - crop_height)*r5; flip = (flip && (r6 > .5)); angle = 2*angle*r7 - angle; if(!train){ dw = (w - crop_width)/2.; dh = (h - crop_height)/2.; flip = 0; angle = 0; } input += w*h*c*b; float x = (flip) ? w - dw - j - 1 : j + dw; float y = i + dh; float rx = cos(angle)*(x-cx) - sin(angle)*(y-cy) + cx; float ry = sin(angle)*(x-cx) + cos(angle)*(y-cy) + cy; output[count] = bilinear_interpolate_kernel(input, w, h, rx, ry, k); } extern "C" void forward_crop_layer_gpu(crop_layer layer, network_state state) { cuda_random(layer.rand_gpu, layer.batch*8); float radians = layer.angle*3.14159265/180.; float scale = 2; float translate = -1; if(layer.noadjust){ scale = 1; translate = 0; } int size = layer.batch * layer.w * layer.h; levels_image_kernel<<<cuda_gridsize(size), BLOCK>>>(state.input, layer.rand_gpu, layer.batch, layer.w, layer.h, state.train, layer.saturation, layer.exposure, translate, scale, layer.shift); check_error(cudaPeekAtLastError()); size = layer.batch*layer.c*layer.out_w*layer.out_h; forward_crop_layer_kernel<<<cuda_gridsize(size), BLOCK>>>(state.input, layer.rand_gpu, size, layer.c, layer.h, layer.w, layer.out_h, layer.out_w, state.train, layer.flip, radians, layer.output_gpu); check_error(cudaPeekAtLastError()); /* cuda_pull_array(layer.output_gpu, layer.output, size); image im = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 0*(size/layer.batch)); image im2 = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 1*(size/layer.batch)); image im3 = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 2*(size/layer.batch)); translate_image(im, -translate); scale_image(im, 1/scale); translate_image(im2, -translate); scale_image(im2, 1/scale); translate_image(im3, -translate); scale_image(im3, 1/scale); show_image(im, "cropped"); show_image(im2, "cropped2"); show_image(im3, "cropped3"); cvWaitKey(0); */ }
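Beyond the runtime API, this pair shows library-header remapping: `curand.h` to `hiprand/hiprand.h`, `cublas_v2.h` to `rocblas.h`, `cuda_runtime.h`/`cuda.h` to `hip/hip_runtime.h`, plus `cudaPeekAtLastError` to `hipPeekAtLastError` and the usual launch rewrite for both kernels. Below is a hedged sketch of the launch-then-check pattern used by `forward_crop_layer_gpu`; `scaleKernel`, `checkErr`, and `scale_gpu` are hypothetical stand-ins for the kernels and the `check_error` helper in the row above.

```cpp
// Sketch of "launch, then inspect the last error" as done in this pair.
#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>

__global__ void scaleKernel(float *x, int n, float s) {
    int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= s;
}

static void checkErr(hipError_t err) {
    if (err != hipSuccess) {
        fprintf(stderr, "HIP error: %s\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

void scale_gpu(float *x, int n, float s) {
    const int block = 256;
    const int grid = (n + block - 1) / block;
    // CUDA form:   scaleKernel<<<grid, block>>>(x, n, s);  check_error(cudaPeekAtLastError());
    hipLaunchKernelGGL(scaleKernel, dim3(grid), dim3(block), 0, 0, x, n, s);
    checkErr(hipPeekAtLastError());
}
```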
52d4ca871cfc739382bd4ac48aa62829a11d4831.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "dali/operators/math/normalize/normalize.h" #include "dali/core/math_util.h" #include "dali/core/tensor_layout.h" #include "dali/kernels/normalize/normalize_gpu.h" #include "dali/kernels/reduce/reduce_gpu.h" #include "dali/kernels/common/copy.h" namespace dali { using namespace kernels; // NOLINT template <> class Normalize<GPUBackend> : public NormalizeBase<GPUBackend> { public: explicit Normalize(const OpSpec &spec) : NormalizeBase<GPUBackend>(spec) {} private: friend class NormalizeBase<GPUBackend>; template <typename OutputType, typename InputType> void SetupTyped(const DeviceWorkspace &ws); template <typename OutputType, typename InputType> void RunTyped(DeviceWorkspace &ws); void AllocTempStorage(); void FoldMeans(); void FoldStdDev(); template <typename ParamType, typename InputType> MeanGPU<ParamType, InputType> &GetMeanKernel() { return mean_kernel_.create_or_get<MeanGPU<ParamType, InputType>>(); } template <typename ParamType, typename InputType> InvStdDevGPU<ParamType, InputType> &GetInvStdDevKernel() { return stddev_kernel_.create_or_get<InvStdDevGPU<ParamType, InputType>>(); } template <typename OutputType, typename InputType> NormalizeGPU<OutputType, InputType> &GetNormalizeKernel() { return normalize_kernel_.create_or_get<NormalizeGPU<OutputType, InputType>>(); } TensorListView<StorageGPU, float> BroadcastMean(KernelContext &ctx, float value) const; AnyKernelInstance mean_kernel_, stddev_kernel_, normalize_kernel_; ScratchpadAllocator alloc_; }; DALI_REGISTER_OPERATOR(Normalize, Normalize<GPUBackend>, GPU); namespace { template <typename ToUpdate, typename Other> inline void MaxInPlace(ToUpdate &inout, const Other &other) { auto b1 = dali::begin(inout); auto b2 = dali::begin(other); auto e1 = dali::end(inout); auto e2 = dali::end(other); for (; b1 != e1 && b2 != e2; b1++, b2++) { if (*b1 < *b2) { *b1 = *b2; } } } using scratch_sizes_t = std::array<size_t, static_cast<size_t>(AllocType::Count)>; class ScratchpadSnapshot { public: explicit ScratchpadSnapshot(PreallocatedScratchpad &scratch) : scratch_(scratch) { for (size_t i = 0; i < ss_.size(); i++) ss_[i] = scratch_.allocs[i].used(); } ~ScratchpadSnapshot() { restore(); } private: void restore() { scratch_.Clear(); // this doesn't clear the memory - just resets the usage counter to 0 for (size_t i = 0; i < ss_.size(); i++) scratch_.allocs[i].alloc(ss_[i]); } scratch_sizes_t ss_; PreallocatedScratchpad &scratch_; }; template <int ndim> int64_t MaxSampleSize(const TensorListShape<ndim> &tls) { int64_t max_sample_size = 0; for (int i = 0; i < tls.num_samples(); i++) { int64_t v = volume(tls.tensor_shape_span(i)); if (v > max_sample_size) max_sample_size = v; } return max_sample_size; } template <typename T> __global__ void Fill(T *data, size_t count, T value) { auto i = static_cast<size_t>(blockIdx.x) * blockDim.x + 
threadIdx.x; if (i < count) data[i] = value; } } // namespace TensorListView<StorageGPU, float> Normalize<GPUBackend>::BroadcastMean(KernelContext &ctx, float value) const { TensorListView<StorageGPU, float> mean_gpu; mean_gpu.shape = param_shape_; mean_gpu.data.resize(param_shape_.num_samples()); // allocate enough memory to hold the largest sample... int64_t max_sample_size = MaxSampleSize(param_shape_); float *gpu_mean_data = ctx.scratchpad->Allocate<float>(AllocType::GPU, max_sample_size); int grid = div_ceil(max_sample_size, 1024); int block = std::min<int64_t>(max_sample_size, 1024); // ...fill it with given value... hipLaunchKernelGGL(( Fill), dim3(grid), dim3(block), 0, ctx.gpu.stream, gpu_mean_data, max_sample_size, value); // ...and reuse the memory for all samples for (auto &ptr : mean_gpu.data) ptr = gpu_mean_data; return mean_gpu; } template <typename OutputType, typename InputType> void Normalize<GPUBackend>::SetupTyped(const DeviceWorkspace &ws) { auto &input = ws.InputRef<GPUBackend>(0); int nsamples = input.ntensor(); KernelContext ctx; ctx.gpu.stream = ws.stream(); ScratchpadEstimator se; int64_t param_volume = param_shape_.num_elements(); // estimate memory requirements for intermediate buffers if (!has_scalar_mean_) { se.add<float>(AllocType::GPU, param_volume); } else { if (ShouldCalcStdDev()) { // StdDev kernel requires the mean to have the same shape as the output. // We can save memory by broadcasting the mean only to the size of the largest sample // and repeat the pointer for all samples. se.add<float>(AllocType::GPU, MaxSampleSize(param_shape_)); } } if (!has_scalar_stddev_) { se.add<float>(AllocType::GPU, param_volume); } // setup and get memory requirements from kernels auto &norm = GetNormalizeKernel<OutputType, InputType>(); // if stddev is calculated internally, it's already inverse bool scale_is_stddev = !ShouldCalcStdDev(); auto req = norm.Setup(ctx, data_shape_, make_span(axes_), has_scalar_mean_, has_scalar_stddev_, scale_is_stddev); if (ShouldCalcMean()) { auto &mean = GetMeanKernel<float, InputType>(); auto mean_req = mean.Setup(ctx, data_shape_, make_span(axes_), true, batch_norm_); assert(mean_req.output_shapes[0] == param_shape_); MaxInPlace(req.scratch_sizes, mean_req.scratch_sizes); } if (ShouldCalcStdDev()) { auto &stddev = GetInvStdDevKernel<float, InputType>(); auto stddev_req = stddev.Setup(ctx, data_shape_, make_span(axes_), true, batch_norm_); assert(stddev_req.output_shapes[0] == param_shape_); MaxInPlace(req.scratch_sizes, stddev_req.scratch_sizes); } for (size_t i = 0; i < se.sizes.size(); i++) { se.add<uint8_t>(static_cast<AllocType>(i), req.scratch_sizes[i], 64); } alloc_.Reserve(se.sizes); } template <typename OutputType, typename InputType> void Normalize<GPUBackend>::RunTyped(DeviceWorkspace &ws) { auto &input = ws.InputRef<GPUBackend>(0); TensorListView<StorageGPU, const InputType> in_view = view<const InputType>(input); auto &output = ws.OutputRef<GPUBackend>(0); TensorListView<StorageGPU, OutputType> out_view = view<OutputType>(output); output.SetLayout(input.GetLayout()); int nsamples = input.ntensor(); hipStream_t stream = ws.stream(); PreallocatedScratchpad scratch = alloc_.GetScratchpad(); KernelContext ctx; ctx.scratchpad = &scratch; ctx.gpu.stream = stream; // Prepare mean and stddev float scalar_mean = has_scalar_mean_ ? spec_.GetArgument<float>("mean") : 0; float scalar_stddev = has_scalar_stddev_ ? 
spec_.GetArgument<float>("stddev") : 1; OutListGPU<float> mean_gpu, stddev_gpu; if (!has_scalar_mean_) { mean_gpu = scratch.AllocTensorList<AllocType::GPU, float>(param_shape_); } else if (ShouldCalcStdDev()) { mean_gpu = BroadcastMean(ctx, scalar_mean); } if (!has_scalar_stddev_) { stddev_gpu = scratch.AllocTensorList<AllocType::GPU, float>(param_shape_); } if (ShouldCalcMean()) { // We can't just Clear() the scratchpad to reuse it, because temporary buffers are also // stored there - so let's make a snapshot of current allocation state and restore it // after the kernel Run is done. ScratchpadSnapshot snap(scratch); auto &mean_kernel = GetMeanKernel<float, InputType>(); mean_kernel.Run(ctx, mean_gpu, in_view); } else if (has_tensor_mean_) { kernels::copy(mean_gpu, mean_input_, stream); } if (ShouldCalcStdDev()) { ScratchpadSnapshot snap(scratch); auto &stddev_kernel = GetInvStdDevKernel<float, InputType>(); stddev_kernel.Run(ctx, stddev_gpu, in_view, mean_gpu, degrees_of_freedom_, epsilon_); } else if (has_tensor_stddev_) { kernels::copy(stddev_gpu, stddev_input_, stream); } // finally, run the normalize kernel { ScratchpadSnapshot snap(scratch); auto &norm_kernel = GetNormalizeKernel<OutputType, InputType>(); // if stddev is calculated internally, epsilon has already been included float epsilon = ShouldCalcStdDev() ? 0 : epsilon_; if (has_scalar_mean_) { if (has_scalar_stddev_) { norm_kernel.Run(ctx, out_view, in_view, scalar_mean, scalar_stddev, scale_, shift_, epsilon); } else { norm_kernel.Run(ctx, out_view, in_view, scalar_mean, stddev_gpu, scale_, shift_, epsilon); } } else { if (has_scalar_stddev_) { norm_kernel.Run(ctx, out_view, in_view, mean_gpu, scalar_stddev, scale_, shift_, epsilon); } else { norm_kernel.Run(ctx, out_view, in_view, mean_gpu, stddev_gpu, scale_, shift_, epsilon); } } } } } // namespace dali
52d4ca871cfc739382bd4ac48aa62829a11d4831.cu
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "dali/operators/math/normalize/normalize.h" #include "dali/core/math_util.h" #include "dali/core/tensor_layout.h" #include "dali/kernels/normalize/normalize_gpu.h" #include "dali/kernels/reduce/reduce_gpu.h" #include "dali/kernels/common/copy.h" namespace dali { using namespace kernels; // NOLINT template <> class Normalize<GPUBackend> : public NormalizeBase<GPUBackend> { public: explicit Normalize(const OpSpec &spec) : NormalizeBase<GPUBackend>(spec) {} private: friend class NormalizeBase<GPUBackend>; template <typename OutputType, typename InputType> void SetupTyped(const DeviceWorkspace &ws); template <typename OutputType, typename InputType> void RunTyped(DeviceWorkspace &ws); void AllocTempStorage(); void FoldMeans(); void FoldStdDev(); template <typename ParamType, typename InputType> MeanGPU<ParamType, InputType> &GetMeanKernel() { return mean_kernel_.create_or_get<MeanGPU<ParamType, InputType>>(); } template <typename ParamType, typename InputType> InvStdDevGPU<ParamType, InputType> &GetInvStdDevKernel() { return stddev_kernel_.create_or_get<InvStdDevGPU<ParamType, InputType>>(); } template <typename OutputType, typename InputType> NormalizeGPU<OutputType, InputType> &GetNormalizeKernel() { return normalize_kernel_.create_or_get<NormalizeGPU<OutputType, InputType>>(); } TensorListView<StorageGPU, float> BroadcastMean(KernelContext &ctx, float value) const; AnyKernelInstance mean_kernel_, stddev_kernel_, normalize_kernel_; ScratchpadAllocator alloc_; }; DALI_REGISTER_OPERATOR(Normalize, Normalize<GPUBackend>, GPU); namespace { template <typename ToUpdate, typename Other> inline void MaxInPlace(ToUpdate &inout, const Other &other) { auto b1 = dali::begin(inout); auto b2 = dali::begin(other); auto e1 = dali::end(inout); auto e2 = dali::end(other); for (; b1 != e1 && b2 != e2; b1++, b2++) { if (*b1 < *b2) { *b1 = *b2; } } } using scratch_sizes_t = std::array<size_t, static_cast<size_t>(AllocType::Count)>; class ScratchpadSnapshot { public: explicit ScratchpadSnapshot(PreallocatedScratchpad &scratch) : scratch_(scratch) { for (size_t i = 0; i < ss_.size(); i++) ss_[i] = scratch_.allocs[i].used(); } ~ScratchpadSnapshot() { restore(); } private: void restore() { scratch_.Clear(); // this doesn't clear the memory - just resets the usage counter to 0 for (size_t i = 0; i < ss_.size(); i++) scratch_.allocs[i].alloc(ss_[i]); } scratch_sizes_t ss_; PreallocatedScratchpad &scratch_; }; template <int ndim> int64_t MaxSampleSize(const TensorListShape<ndim> &tls) { int64_t max_sample_size = 0; for (int i = 0; i < tls.num_samples(); i++) { int64_t v = volume(tls.tensor_shape_span(i)); if (v > max_sample_size) max_sample_size = v; } return max_sample_size; } template <typename T> __global__ void Fill(T *data, size_t count, T value) { auto i = static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x; if (i < count) data[i] = value; } } // namespace TensorListView<StorageGPU, float> 
Normalize<GPUBackend>::BroadcastMean(KernelContext &ctx, float value) const { TensorListView<StorageGPU, float> mean_gpu; mean_gpu.shape = param_shape_; mean_gpu.data.resize(param_shape_.num_samples()); // allocate enough memory to hold the largest sample... int64_t max_sample_size = MaxSampleSize(param_shape_); float *gpu_mean_data = ctx.scratchpad->Allocate<float>(AllocType::GPU, max_sample_size); int grid = div_ceil(max_sample_size, 1024); int block = std::min<int64_t>(max_sample_size, 1024); // ...fill it with given value... Fill<<<grid, block, 0, ctx.gpu.stream>>>(gpu_mean_data, max_sample_size, value); // ...and reuse the memory for all samples for (auto &ptr : mean_gpu.data) ptr = gpu_mean_data; return mean_gpu; } template <typename OutputType, typename InputType> void Normalize<GPUBackend>::SetupTyped(const DeviceWorkspace &ws) { auto &input = ws.InputRef<GPUBackend>(0); int nsamples = input.ntensor(); KernelContext ctx; ctx.gpu.stream = ws.stream(); ScratchpadEstimator se; int64_t param_volume = param_shape_.num_elements(); // estimate memory requirements for intermediate buffers if (!has_scalar_mean_) { se.add<float>(AllocType::GPU, param_volume); } else { if (ShouldCalcStdDev()) { // StdDev kernel requires the mean to have the same shape as the output. // We can save memory by broadcasting the mean only to the size of the largest sample // and repeat the pointer for all samples. se.add<float>(AllocType::GPU, MaxSampleSize(param_shape_)); } } if (!has_scalar_stddev_) { se.add<float>(AllocType::GPU, param_volume); } // setup and get memory requirements from kernels auto &norm = GetNormalizeKernel<OutputType, InputType>(); // if stddev is calculated internally, it's already inverse bool scale_is_stddev = !ShouldCalcStdDev(); auto req = norm.Setup(ctx, data_shape_, make_span(axes_), has_scalar_mean_, has_scalar_stddev_, scale_is_stddev); if (ShouldCalcMean()) { auto &mean = GetMeanKernel<float, InputType>(); auto mean_req = mean.Setup(ctx, data_shape_, make_span(axes_), true, batch_norm_); assert(mean_req.output_shapes[0] == param_shape_); MaxInPlace(req.scratch_sizes, mean_req.scratch_sizes); } if (ShouldCalcStdDev()) { auto &stddev = GetInvStdDevKernel<float, InputType>(); auto stddev_req = stddev.Setup(ctx, data_shape_, make_span(axes_), true, batch_norm_); assert(stddev_req.output_shapes[0] == param_shape_); MaxInPlace(req.scratch_sizes, stddev_req.scratch_sizes); } for (size_t i = 0; i < se.sizes.size(); i++) { se.add<uint8_t>(static_cast<AllocType>(i), req.scratch_sizes[i], 64); } alloc_.Reserve(se.sizes); } template <typename OutputType, typename InputType> void Normalize<GPUBackend>::RunTyped(DeviceWorkspace &ws) { auto &input = ws.InputRef<GPUBackend>(0); TensorListView<StorageGPU, const InputType> in_view = view<const InputType>(input); auto &output = ws.OutputRef<GPUBackend>(0); TensorListView<StorageGPU, OutputType> out_view = view<OutputType>(output); output.SetLayout(input.GetLayout()); int nsamples = input.ntensor(); cudaStream_t stream = ws.stream(); PreallocatedScratchpad scratch = alloc_.GetScratchpad(); KernelContext ctx; ctx.scratchpad = &scratch; ctx.gpu.stream = stream; // Prepare mean and stddev float scalar_mean = has_scalar_mean_ ? spec_.GetArgument<float>("mean") : 0; float scalar_stddev = has_scalar_stddev_ ? 
spec_.GetArgument<float>("stddev") : 1; OutListGPU<float> mean_gpu, stddev_gpu; if (!has_scalar_mean_) { mean_gpu = scratch.AllocTensorList<AllocType::GPU, float>(param_shape_); } else if (ShouldCalcStdDev()) { mean_gpu = BroadcastMean(ctx, scalar_mean); } if (!has_scalar_stddev_) { stddev_gpu = scratch.AllocTensorList<AllocType::GPU, float>(param_shape_); } if (ShouldCalcMean()) { // We can't just Clear() the scratchpad to reuse it, because temporary buffers are also // stored there - so let's make a snapshot of current allocation state and restore it // after the kernel Run is done. ScratchpadSnapshot snap(scratch); auto &mean_kernel = GetMeanKernel<float, InputType>(); mean_kernel.Run(ctx, mean_gpu, in_view); } else if (has_tensor_mean_) { kernels::copy(mean_gpu, mean_input_, stream); } if (ShouldCalcStdDev()) { ScratchpadSnapshot snap(scratch); auto &stddev_kernel = GetInvStdDevKernel<float, InputType>(); stddev_kernel.Run(ctx, stddev_gpu, in_view, mean_gpu, degrees_of_freedom_, epsilon_); } else if (has_tensor_stddev_) { kernels::copy(stddev_gpu, stddev_input_, stream); } // finally, run the normalize kernel { ScratchpadSnapshot snap(scratch); auto &norm_kernel = GetNormalizeKernel<OutputType, InputType>(); // if stddev is calculated internally, epsilon has already been included float epsilon = ShouldCalcStdDev() ? 0 : epsilon_; if (has_scalar_mean_) { if (has_scalar_stddev_) { norm_kernel.Run(ctx, out_view, in_view, scalar_mean, scalar_stddev, scale_, shift_, epsilon); } else { norm_kernel.Run(ctx, out_view, in_view, scalar_mean, stddev_gpu, scale_, shift_, epsilon); } } else { if (has_scalar_stddev_) { norm_kernel.Run(ctx, out_view, in_view, mean_gpu, scalar_stddev, scale_, shift_, epsilon); } else { norm_kernel.Run(ctx, out_view, in_view, mean_gpu, stddev_gpu, scale_, shift_, epsilon); } } } } } // namespace dali
721091e543eb6c3ff6bb78cd2b681735e969f0c3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include "matrix/math.h" #include "random/rng.h" #include "test_utils.h" namespace MLCommon { namespace Matrix { template <typename Type> __global__ void nativePowerKernel(Type *in, Type *out, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) { out[idx] = in[idx] * in[idx]; } } template <typename Type> void naivePower(Type *in, Type *out, int len) { static const int TPB = 64; int nblks = ceildiv(len, TPB); hipLaunchKernelGGL(( nativePowerKernel<Type>), dim3(nblks), dim3(TPB), 0, 0, in, out, len); CUDA_CHECK(hipPeekAtLastError()); } template <typename Type> __global__ void nativeSqrtKernel(Type *in, Type *out, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) { out[idx] = sqrt(in[idx]); } } template <typename Type> void naiveSqrt(Type *in, Type *out, int len) { static const int TPB = 64; int nblks = ceildiv(len, TPB); hipLaunchKernelGGL(( nativeSqrtKernel<Type>), dim3(nblks), dim3(TPB), 0, 0, in, out, len); CUDA_CHECK(hipPeekAtLastError()); } template <typename Type> __global__ void naiveSignFlipKernel(Type *in, Type *out, int rowCount, int colCount) { int d_i = blockIdx.x * rowCount; int end = d_i + rowCount; if (blockIdx.x < colCount) { Type max = 0.0; int max_index = 0; for (int i = d_i; i < end; i++) { Type val = in[i]; if (val < 0.0) { val = -val; } if (val > max) { max = val; max_index = i; } } for (int i = d_i; i < end; i++) { if (in[max_index] < 0.0) { out[i] = -in[i]; } else { out[i] = in[i]; } } } __syncthreads(); } template <typename Type> void naiveSignFlip(Type *in, Type *out, int rowCount, int colCount) { hipLaunchKernelGGL(( naiveSignFlipKernel<Type>), dim3(colCount), dim3(1), 0, 0, in, out, rowCount, colCount); CUDA_CHECK(hipPeekAtLastError()); } template <typename T> struct MathInputs { T tolerance; int n_row; int n_col; int len; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const MathInputs<T> &dims) { return os; } template <typename T> class MathTest : public ::testing::TestWithParam<MathInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<MathInputs<T>>::GetParam(); Random::Rng<T> r(params.seed); int len = params.len; allocate(in_power, len); allocate(out_power_ref, len); allocate(in_sqrt, len); allocate(out_sqrt_ref, len); allocate(in_sign_flip, len); allocate(out_sign_flip_ref, len); allocate(in_ratio, 4); T in_ratio_h[4] = {1.0, 2.0, 2.0, 3.0}; updateDevice(in_ratio, in_ratio_h, 4); allocate(out_ratio_ref, 4); T out_ratio_ref_h[4] = {0.125, 0.25, 0.25, 0.375}; updateDevice(out_ratio_ref, out_ratio_ref_h, 4); r.uniform(in_power, len, T(-1.0), T(1.0)); r.uniform(in_sqrt, len, T(0.0), T(1.0)); // r.uniform(in_ratio, len, T(0.0), T(1.0)); r.uniform(in_sign_flip, len, T(-100.0), T(100.0)); naivePower(in_power, out_power_ref, len); power(in_power, len); 
naiveSqrt(in_sqrt, out_sqrt_ref, len); seqRoot(in_sqrt, len); auto mgr = makeDefaultAllocator(); ratio(in_ratio, in_ratio, 4, mgr); naiveSignFlip(in_sign_flip, out_sign_flip_ref, params.n_row, params.n_col); signFlip(in_sign_flip, params.n_row, params.n_col); allocate(in_recip, 4); allocate(in_recip_ref, 4); allocate(out_recip, 4); // default threshold is 1e-15 std::vector<T> in_recip_h = {0.1, 0.01, -0.01, 0.1e-16}; std::vector<T> in_recip_ref_h = {10.0, 100.0, -100.0, 0.0}; updateDevice(in_recip, in_recip_h.data(), 4); updateDevice(in_recip_ref, in_recip_ref_h.data(), 4); T recip_scalar = T(1.0); // this `reciprocal()` has to go first bc next one modifies its input reciprocal(in_recip, out_recip, recip_scalar, 4); reciprocal(in_recip, recip_scalar, 4, true); std::vector<T> in_small_val_zero_h = {0.1, 1e-16, -1e-16, -0.1}; std::vector<T> in_small_val_zero_ref_h = {0.1, 0.0, 0.0, -0.1}; allocate(in_smallzero, 4); allocate(out_smallzero, 4); allocate(out_smallzero_ref, 4); updateDevice(in_smallzero, in_small_val_zero_h.data(), 4); updateDevice(out_smallzero_ref, in_small_val_zero_ref_h.data(), 4); setSmallValuesZero(out_smallzero, in_smallzero, 4); setSmallValuesZero(in_smallzero, 4); } void TearDown() override { CUDA_CHECK(hipFree(in_power)); CUDA_CHECK(hipFree(out_power_ref)); CUDA_CHECK(hipFree(in_sqrt)); CUDA_CHECK(hipFree(out_sqrt_ref)); CUDA_CHECK(hipFree(in_ratio)); CUDA_CHECK(hipFree(out_ratio_ref)); CUDA_CHECK(hipFree(in_sign_flip)); CUDA_CHECK(hipFree(out_sign_flip_ref)); CUDA_CHECK(hipFree(in_recip)); CUDA_CHECK(hipFree(in_recip_ref)); CUDA_CHECK(hipFree(out_recip)); CUDA_CHECK(hipFree(in_smallzero)); CUDA_CHECK(hipFree(out_smallzero)); CUDA_CHECK(hipFree(out_smallzero_ref)); } protected: MathInputs<T> params; T *in_power, *out_power_ref, *in_sqrt, *out_sqrt_ref, *in_ratio, *out_ratio_ref, *in_sign_flip, *out_sign_flip_ref, *in_recip, *in_recip_ref, *out_recip, *in_smallzero, *out_smallzero, *out_smallzero_ref; }; const std::vector<MathInputs<float>> inputsf = { {0.00001f, 1024, 1024, 1024 * 1024, 1234ULL}}; const std::vector<MathInputs<double>> inputsd = { {0.00001, 1024, 1024, 1024 * 1024, 1234ULL}}; typedef MathTest<float> MathPowerTestF; TEST_P(MathPowerTestF, Result) { ASSERT_TRUE(devArrMatch(in_power, out_power_ref, params.len, CompareApprox<float>(params.tolerance))); } typedef MathTest<double> MathPowerTestD; TEST_P(MathPowerTestD, Result) { ASSERT_TRUE(devArrMatch(in_power, out_power_ref, params.len, CompareApprox<double>(params.tolerance))); } typedef MathTest<float> MathSqrtTestF; TEST_P(MathSqrtTestF, Result) { ASSERT_TRUE(devArrMatch(in_sqrt, out_sqrt_ref, params.len, CompareApprox<float>(params.tolerance))); } typedef MathTest<double> MathSqrtTestD; TEST_P(MathSqrtTestD, Result) { ASSERT_TRUE(devArrMatch(in_sqrt, out_sqrt_ref, params.len, CompareApprox<double>(params.tolerance))); } typedef MathTest<float> MathRatioTestF; TEST_P(MathRatioTestF, Result) { ASSERT_TRUE(devArrMatch(in_ratio, out_ratio_ref, 4, CompareApprox<float>(params.tolerance))); } typedef MathTest<double> MathRatioTestD; TEST_P(MathRatioTestD, Result) { ASSERT_TRUE(devArrMatch(in_ratio, out_ratio_ref, 4, CompareApprox<double>(params.tolerance))); } typedef MathTest<float> MathSignFlipTestF; TEST_P(MathSignFlipTestF, Result) { ASSERT_TRUE(devArrMatch(in_sign_flip, out_sign_flip_ref, params.len, CompareApprox<float>(params.tolerance))); } typedef MathTest<double> MathSignFlipTestD; TEST_P(MathSignFlipTestD, Result) { ASSERT_TRUE(devArrMatch(in_sign_flip, out_sign_flip_ref, params.len, 
CompareApprox<double>(params.tolerance))); } typedef MathTest<float> MathReciprocalTestF; TEST_P(MathReciprocalTestF, Result) { ASSERT_TRUE(devArrMatch(in_recip, in_recip_ref, 4, CompareApprox<float>(params.tolerance))); // 4-th term tests `setzero=true` functionality, not present in this version of `reciprocal`. ASSERT_TRUE(devArrMatch(out_recip, in_recip_ref, 3, CompareApprox<float>(params.tolerance))); } typedef MathTest<double> MathReciprocalTestD; TEST_P(MathReciprocalTestD, Result) { ASSERT_TRUE(devArrMatch(in_recip, in_recip_ref, 4, CompareApprox<double>(params.tolerance))); // 4-th term tests `setzero=true` functionality, not present in this version of `reciprocal`. ASSERT_TRUE(devArrMatch(out_recip, in_recip_ref, 3, CompareApprox<double>(params.tolerance))); } typedef MathTest<float> MathSetSmallZeroTestF; TEST_P(MathSetSmallZeroTestF, Result) { ASSERT_TRUE(devArrMatch(in_smallzero, out_smallzero_ref, 4, CompareApprox<float>(params.tolerance))); ASSERT_TRUE(devArrMatch(out_smallzero, out_smallzero_ref, 4, CompareApprox<float>(params.tolerance))); } typedef MathTest<double> MathSetSmallZeroTestD; TEST_P(MathSetSmallZeroTestD, Result) { ASSERT_TRUE(devArrMatch(in_smallzero, out_smallzero_ref, 4, CompareApprox<double>(params.tolerance))); ASSERT_TRUE(devArrMatch(out_smallzero, out_smallzero_ref, 4, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(MathTests, MathPowerTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(MathTests, MathPowerTestD, ::testing::ValuesIn(inputsd)); INSTANTIATE_TEST_CASE_P(MathTests, MathSqrtTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(MathTests, MathSqrtTestD, ::testing::ValuesIn(inputsd)); INSTANTIATE_TEST_CASE_P(MathTests, MathRatioTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(MathTests, MathRatioTestD, ::testing::ValuesIn(inputsd)); INSTANTIATE_TEST_CASE_P(MathTests, MathSignFlipTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(MathTests, MathSignFlipTestD, ::testing::ValuesIn(inputsd)); INSTANTIATE_TEST_CASE_P(MathTests, MathReciprocalTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(MathTests, MathReciprocalTestD, ::testing::ValuesIn(inputsd)); INSTANTIATE_TEST_CASE_P(MathTests, MathSetSmallZeroTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(MathTests, MathSetSmallZeroTestD, ::testing::ValuesIn(inputsd)); } // end namespace Matrix } // end namespace MLCommon
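// For reference, a host-side C++ sketch of the column-wise sign flip that naiveSignFlipKernel
// above implements on column-major rowCount x colCount data (illustrative only; hostSignFlip
// is not part of the MLCommon API):
#include <cmath>
#include <cstddef>

template <typename T>
void hostSignFlip(const T *in, T *out, int rowCount, int colCount) {
  for (int c = 0; c < colCount; ++c) {
    const T *col = in + static_cast<std::size_t>(c) * rowCount;
    int maxIdx = 0;
    for (int r = 1; r < rowCount; ++r)
      if (std::fabs(col[r]) > std::fabs(col[maxIdx])) maxIdx = r;
    // if the largest-magnitude entry of the column is negative, negate the whole column
    const T sign = col[maxIdx] < T(0) ? T(-1) : T(1);
    for (int r = 0; r < rowCount; ++r)
      out[static_cast<std::size_t>(c) * rowCount + r] = sign * col[r];
  }
}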
721091e543eb6c3ff6bb78cd2b681735e969f0c3.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include "matrix/math.h" #include "random/rng.h" #include "test_utils.h" namespace MLCommon { namespace Matrix { template <typename Type> __global__ void nativePowerKernel(Type *in, Type *out, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) { out[idx] = in[idx] * in[idx]; } } template <typename Type> void naivePower(Type *in, Type *out, int len) { static const int TPB = 64; int nblks = ceildiv(len, TPB); nativePowerKernel<Type><<<nblks, TPB>>>(in, out, len); CUDA_CHECK(cudaPeekAtLastError()); } template <typename Type> __global__ void nativeSqrtKernel(Type *in, Type *out, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) { out[idx] = sqrt(in[idx]); } } template <typename Type> void naiveSqrt(Type *in, Type *out, int len) { static const int TPB = 64; int nblks = ceildiv(len, TPB); nativeSqrtKernel<Type><<<nblks, TPB>>>(in, out, len); CUDA_CHECK(cudaPeekAtLastError()); } template <typename Type> __global__ void naiveSignFlipKernel(Type *in, Type *out, int rowCount, int colCount) { int d_i = blockIdx.x * rowCount; int end = d_i + rowCount; if (blockIdx.x < colCount) { Type max = 0.0; int max_index = 0; for (int i = d_i; i < end; i++) { Type val = in[i]; if (val < 0.0) { val = -val; } if (val > max) { max = val; max_index = i; } } for (int i = d_i; i < end; i++) { if (in[max_index] < 0.0) { out[i] = -in[i]; } else { out[i] = in[i]; } } } __syncthreads(); } template <typename Type> void naiveSignFlip(Type *in, Type *out, int rowCount, int colCount) { naiveSignFlipKernel<Type><<<colCount, 1>>>(in, out, rowCount, colCount); CUDA_CHECK(cudaPeekAtLastError()); } template <typename T> struct MathInputs { T tolerance; int n_row; int n_col; int len; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const MathInputs<T> &dims) { return os; } template <typename T> class MathTest : public ::testing::TestWithParam<MathInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<MathInputs<T>>::GetParam(); Random::Rng<T> r(params.seed); int len = params.len; allocate(in_power, len); allocate(out_power_ref, len); allocate(in_sqrt, len); allocate(out_sqrt_ref, len); allocate(in_sign_flip, len); allocate(out_sign_flip_ref, len); allocate(in_ratio, 4); T in_ratio_h[4] = {1.0, 2.0, 2.0, 3.0}; updateDevice(in_ratio, in_ratio_h, 4); allocate(out_ratio_ref, 4); T out_ratio_ref_h[4] = {0.125, 0.25, 0.25, 0.375}; updateDevice(out_ratio_ref, out_ratio_ref_h, 4); r.uniform(in_power, len, T(-1.0), T(1.0)); r.uniform(in_sqrt, len, T(0.0), T(1.0)); // r.uniform(in_ratio, len, T(0.0), T(1.0)); r.uniform(in_sign_flip, len, T(-100.0), T(100.0)); naivePower(in_power, out_power_ref, len); power(in_power, len); naiveSqrt(in_sqrt, out_sqrt_ref, len); seqRoot(in_sqrt, len); auto mgr = makeDefaultAllocator(); ratio(in_ratio, in_ratio, 4, mgr); naiveSignFlip(in_sign_flip, out_sign_flip_ref, params.n_row, 
params.n_col); signFlip(in_sign_flip, params.n_row, params.n_col); allocate(in_recip, 4); allocate(in_recip_ref, 4); allocate(out_recip, 4); // default threshold is 1e-15 std::vector<T> in_recip_h = {0.1, 0.01, -0.01, 0.1e-16}; std::vector<T> in_recip_ref_h = {10.0, 100.0, -100.0, 0.0}; updateDevice(in_recip, in_recip_h.data(), 4); updateDevice(in_recip_ref, in_recip_ref_h.data(), 4); T recip_scalar = T(1.0); // this `reciprocal()` has to go first bc next one modifies its input reciprocal(in_recip, out_recip, recip_scalar, 4); reciprocal(in_recip, recip_scalar, 4, true); std::vector<T> in_small_val_zero_h = {0.1, 1e-16, -1e-16, -0.1}; std::vector<T> in_small_val_zero_ref_h = {0.1, 0.0, 0.0, -0.1}; allocate(in_smallzero, 4); allocate(out_smallzero, 4); allocate(out_smallzero_ref, 4); updateDevice(in_smallzero, in_small_val_zero_h.data(), 4); updateDevice(out_smallzero_ref, in_small_val_zero_ref_h.data(), 4); setSmallValuesZero(out_smallzero, in_smallzero, 4); setSmallValuesZero(in_smallzero, 4); } void TearDown() override { CUDA_CHECK(cudaFree(in_power)); CUDA_CHECK(cudaFree(out_power_ref)); CUDA_CHECK(cudaFree(in_sqrt)); CUDA_CHECK(cudaFree(out_sqrt_ref)); CUDA_CHECK(cudaFree(in_ratio)); CUDA_CHECK(cudaFree(out_ratio_ref)); CUDA_CHECK(cudaFree(in_sign_flip)); CUDA_CHECK(cudaFree(out_sign_flip_ref)); CUDA_CHECK(cudaFree(in_recip)); CUDA_CHECK(cudaFree(in_recip_ref)); CUDA_CHECK(cudaFree(out_recip)); CUDA_CHECK(cudaFree(in_smallzero)); CUDA_CHECK(cudaFree(out_smallzero)); CUDA_CHECK(cudaFree(out_smallzero_ref)); } protected: MathInputs<T> params; T *in_power, *out_power_ref, *in_sqrt, *out_sqrt_ref, *in_ratio, *out_ratio_ref, *in_sign_flip, *out_sign_flip_ref, *in_recip, *in_recip_ref, *out_recip, *in_smallzero, *out_smallzero, *out_smallzero_ref; }; const std::vector<MathInputs<float>> inputsf = { {0.00001f, 1024, 1024, 1024 * 1024, 1234ULL}}; const std::vector<MathInputs<double>> inputsd = { {0.00001, 1024, 1024, 1024 * 1024, 1234ULL}}; typedef MathTest<float> MathPowerTestF; TEST_P(MathPowerTestF, Result) { ASSERT_TRUE(devArrMatch(in_power, out_power_ref, params.len, CompareApprox<float>(params.tolerance))); } typedef MathTest<double> MathPowerTestD; TEST_P(MathPowerTestD, Result) { ASSERT_TRUE(devArrMatch(in_power, out_power_ref, params.len, CompareApprox<double>(params.tolerance))); } typedef MathTest<float> MathSqrtTestF; TEST_P(MathSqrtTestF, Result) { ASSERT_TRUE(devArrMatch(in_sqrt, out_sqrt_ref, params.len, CompareApprox<float>(params.tolerance))); } typedef MathTest<double> MathSqrtTestD; TEST_P(MathSqrtTestD, Result) { ASSERT_TRUE(devArrMatch(in_sqrt, out_sqrt_ref, params.len, CompareApprox<double>(params.tolerance))); } typedef MathTest<float> MathRatioTestF; TEST_P(MathRatioTestF, Result) { ASSERT_TRUE(devArrMatch(in_ratio, out_ratio_ref, 4, CompareApprox<float>(params.tolerance))); } typedef MathTest<double> MathRatioTestD; TEST_P(MathRatioTestD, Result) { ASSERT_TRUE(devArrMatch(in_ratio, out_ratio_ref, 4, CompareApprox<double>(params.tolerance))); } typedef MathTest<float> MathSignFlipTestF; TEST_P(MathSignFlipTestF, Result) { ASSERT_TRUE(devArrMatch(in_sign_flip, out_sign_flip_ref, params.len, CompareApprox<float>(params.tolerance))); } typedef MathTest<double> MathSignFlipTestD; TEST_P(MathSignFlipTestD, Result) { ASSERT_TRUE(devArrMatch(in_sign_flip, out_sign_flip_ref, params.len, CompareApprox<double>(params.tolerance))); } typedef MathTest<float> MathReciprocalTestF; TEST_P(MathReciprocalTestF, Result) { ASSERT_TRUE(devArrMatch(in_recip, in_recip_ref, 4, 
CompareApprox<float>(params.tolerance))); // 4-th term tests `setzero=true` functionality, not present in this version of `reciprocal`. ASSERT_TRUE(devArrMatch(out_recip, in_recip_ref, 3, CompareApprox<float>(params.tolerance))); } typedef MathTest<double> MathReciprocalTestD; TEST_P(MathReciprocalTestD, Result) { ASSERT_TRUE(devArrMatch(in_recip, in_recip_ref, 4, CompareApprox<double>(params.tolerance))); // 4-th term tests `setzero=true` functionality, not present in this version of `reciprocal`. ASSERT_TRUE(devArrMatch(out_recip, in_recip_ref, 3, CompareApprox<double>(params.tolerance))); } typedef MathTest<float> MathSetSmallZeroTestF; TEST_P(MathSetSmallZeroTestF, Result) { ASSERT_TRUE(devArrMatch(in_smallzero, out_smallzero_ref, 4, CompareApprox<float>(params.tolerance))); ASSERT_TRUE(devArrMatch(out_smallzero, out_smallzero_ref, 4, CompareApprox<float>(params.tolerance))); } typedef MathTest<double> MathSetSmallZeroTestD; TEST_P(MathSetSmallZeroTestD, Result) { ASSERT_TRUE(devArrMatch(in_smallzero, out_smallzero_ref, 4, CompareApprox<double>(params.tolerance))); ASSERT_TRUE(devArrMatch(out_smallzero, out_smallzero_ref, 4, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(MathTests, MathPowerTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(MathTests, MathPowerTestD, ::testing::ValuesIn(inputsd)); INSTANTIATE_TEST_CASE_P(MathTests, MathSqrtTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(MathTests, MathSqrtTestD, ::testing::ValuesIn(inputsd)); INSTANTIATE_TEST_CASE_P(MathTests, MathRatioTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(MathTests, MathRatioTestD, ::testing::ValuesIn(inputsd)); INSTANTIATE_TEST_CASE_P(MathTests, MathSignFlipTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(MathTests, MathSignFlipTestD, ::testing::ValuesIn(inputsd)); INSTANTIATE_TEST_CASE_P(MathTests, MathReciprocalTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(MathTests, MathReciprocalTestD, ::testing::ValuesIn(inputsd)); INSTANTIATE_TEST_CASE_P(MathTests, MathSetSmallZeroTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(MathTests, MathSetSmallZeroTestD, ::testing::ValuesIn(inputsd)); } // end namespace Matrix } // end namespace MLCommon
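// The reciprocal test above encodes the expected semantics of the thresholded, set-zero
// reciprocal: with the default 1e-15 threshold, near-zero inputs map to 0 instead of
// overflowing. A host-side sketch of that behaviour (the signature is an assumption for
// illustration, not the MLCommon prototype):
#include <cmath>

template <typename T>
void hostReciprocal(const T *in, T *out, int len, T scalar = T(1), T thres = T(1e-15)) {
  for (int i = 0; i < len; ++i)
    out[i] = (std::fabs(in[i]) <= thres) ? T(0) : scalar / in[i];
}
// e.g. {0.1, 0.01, -0.01, 1e-17} -> {10, 100, -100, 0}, matching in_recip_ref_h above.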
003212fae8dba751699bc5e6012b0e5998161878.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "BrokenLineFitOnGPU.h" #include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h" void HelixFitOnGPU::launchBrokenLineKernels(HitsView const *hv, uint32_t hitsInFit, uint32_t maxNumberOfTuples, hipStream_t stream) { assert(tuples_d); auto blockSize = 64; auto numberOfBlocks = (maxNumberOfConcurrentFits_ + blockSize - 1) / blockSize; // Fit internals auto hitsGPU_ = cudautils::make_device_unique<double[]>( maxNumberOfConcurrentFits_ * sizeof(Rfit::Matrix3xNd<4>) / sizeof(double), stream); auto hits_geGPU_ = cudautils::make_device_unique<float[]>( maxNumberOfConcurrentFits_ * sizeof(Rfit::Matrix6x4f) / sizeof(float), stream); auto fast_fit_resultsGPU_ = cudautils::make_device_unique<double[]>( maxNumberOfConcurrentFits_ * sizeof(Rfit::Vector4d) / sizeof(double), stream); for (uint32_t offset = 0; offset < maxNumberOfTuples; offset += maxNumberOfConcurrentFits_) { // fit triplets hipLaunchKernelGGL(( kernelBLFastFit<3>), dim3(numberOfBlocks), dim3(blockSize), 0, stream, tuples_d, tupleMultiplicity_d, hv, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 3, offset); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( kernelBLFit<3>), dim3(numberOfBlocks), dim3(blockSize), 0, stream, tupleMultiplicity_d, bField_, outputSoa_d, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 3, offset); cudaCheck(hipGetLastError()); // fit quads hipLaunchKernelGGL(( kernelBLFastFit<4>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tuples_d, tupleMultiplicity_d, hv, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 4, offset); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( kernelBLFit<4>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tupleMultiplicity_d, bField_, outputSoa_d, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 4, offset); cudaCheck(hipGetLastError()); if (fit5as4_) { // fit penta (only first 4) hipLaunchKernelGGL(( kernelBLFastFit<4>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tuples_d, tupleMultiplicity_d, hv, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 5, offset); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( kernelBLFit<4>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tupleMultiplicity_d, bField_, outputSoa_d, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 5, offset); cudaCheck(hipGetLastError()); } else { // fit penta (all 5) hipLaunchKernelGGL(( kernelBLFastFit<5>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tuples_d, tupleMultiplicity_d, hv, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 5, offset); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( kernelBLFit<5>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tupleMultiplicity_d, bField_, outputSoa_d, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 5, offset); cudaCheck(hipGetLastError()); } } // loop on concurrent fits }
003212fae8dba751699bc5e6012b0e5998161878.cu
#include "BrokenLineFitOnGPU.h" #include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h" void HelixFitOnGPU::launchBrokenLineKernels(HitsView const *hv, uint32_t hitsInFit, uint32_t maxNumberOfTuples, cudaStream_t stream) { assert(tuples_d); auto blockSize = 64; auto numberOfBlocks = (maxNumberOfConcurrentFits_ + blockSize - 1) / blockSize; // Fit internals auto hitsGPU_ = cudautils::make_device_unique<double[]>( maxNumberOfConcurrentFits_ * sizeof(Rfit::Matrix3xNd<4>) / sizeof(double), stream); auto hits_geGPU_ = cudautils::make_device_unique<float[]>( maxNumberOfConcurrentFits_ * sizeof(Rfit::Matrix6x4f) / sizeof(float), stream); auto fast_fit_resultsGPU_ = cudautils::make_device_unique<double[]>( maxNumberOfConcurrentFits_ * sizeof(Rfit::Vector4d) / sizeof(double), stream); for (uint32_t offset = 0; offset < maxNumberOfTuples; offset += maxNumberOfConcurrentFits_) { // fit triplets kernelBLFastFit<3><<<numberOfBlocks, blockSize, 0, stream>>>( tuples_d, tupleMultiplicity_d, hv, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 3, offset); cudaCheck(cudaGetLastError()); kernelBLFit<3><<<numberOfBlocks, blockSize, 0, stream>>>(tupleMultiplicity_d, bField_, outputSoa_d, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 3, offset); cudaCheck(cudaGetLastError()); // fit quads kernelBLFastFit<4><<<numberOfBlocks / 4, blockSize, 0, stream>>>( tuples_d, tupleMultiplicity_d, hv, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 4, offset); cudaCheck(cudaGetLastError()); kernelBLFit<4><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tupleMultiplicity_d, bField_, outputSoa_d, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 4, offset); cudaCheck(cudaGetLastError()); if (fit5as4_) { // fit penta (only first 4) kernelBLFastFit<4><<<numberOfBlocks / 4, blockSize, 0, stream>>>( tuples_d, tupleMultiplicity_d, hv, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 5, offset); cudaCheck(cudaGetLastError()); kernelBLFit<4><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tupleMultiplicity_d, bField_, outputSoa_d, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 5, offset); cudaCheck(cudaGetLastError()); } else { // fit penta (all 5) kernelBLFastFit<5><<<numberOfBlocks / 4, blockSize, 0, stream>>>( tuples_d, tupleMultiplicity_d, hv, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 5, offset); cudaCheck(cudaGetLastError()); kernelBLFit<5><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tupleMultiplicity_d, bField_, outputSoa_d, hitsGPU_.get(), hits_geGPU_.get(), fast_fit_resultsGPU_.get(), 5, offset); cudaCheck(cudaGetLastError()); } } // loop on concurrent fits }
daf07d0ce9eee66ef47c171847f0405a01e41766.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <hip/hip_runtime.h> //1073741824 #define size 1073741824 #define threadsize 1024 __global__ void MatrixAddition(int *a, int *b, int *c){ int id = blockIdx.x *blockDim.x * blockDim.y + threadIdx.y * blockDim.x+ threadIdx.x; c[id] = a[id] + b[id]; } int main(){ const long long int totalsize = size*sizeof(int); long long int summation = 0; float time1, time2 = 0.0; int *matA = (int*)malloc(totalsize); int *matB = (int*)malloc(totalsize); int *matC = (int*)malloc (totalsize); for(int i = 0; i < size;i++){ matA[i] = 1; matB[i] = 2; matC[i] = 0; } dim3 dimGrid(size/threadsize/2,1); dim3 dimBlock(32,32); hipStream_t stream[2]; int *matAD[2]; int *matBD[2]; int *matCD[2]; int *matAP;int *matBP; int *matCP; hipHostMalloc((void**)&matAP,totalsize); hipHostMalloc((void**)&matBP,totalsize); hipHostMalloc((void**)&matCP,totalsize); memcpy(matAP,matA, totalsize); memcpy(matBP,matB, totalsize); memcpy(matCP,matC, totalsize); hipSetDevice(0); hipMalloc((void**)&matAD[0],totalsize/2); hipMalloc((void**)&matBD[0], totalsize/2); hipMalloc((void**)&matCD[0], totalsize/2); hipStreamCreateWithFlags(&stream[0],hipStreamNonBlocking); hipSetDevice(1); hipMalloc((void**)&matAD[1],totalsize/2); hipMalloc((void**)&matBD[1], totalsize/2); hipMalloc((void**)&matCD[1], totalsize/2); hipStreamCreateWithFlags(&stream[1],hipStreamNonBlocking); hipSetDevice(0); hipMemcpyAsync(matAD[0], &matAP[0*size/2], totalsize/2, hipMemcpyHostToDevice,stream[0]); hipMemcpyAsync(matBD[0], &matBP[0*size/2], totalsize/2, hipMemcpyHostToDevice,stream[0]); hipSetDevice(1); hipMemcpyAsync(matAD[1], &matAP[1*size/2], totalsize/2, hipMemcpyHostToDevice,stream[1]); hipMemcpyAsync(matBD[1], &matBP[1*size/2], totalsize/2, hipMemcpyHostToDevice,stream[1]); hipSetDevice(0); hipLaunchKernelGGL(( MatrixAddition), dim3(dimGrid), dim3(dimBlock),0,stream[0], matAD[0],matBD[0], matCD[0]); hipSetDevice(1); hipLaunchKernelGGL(( MatrixAddition), dim3(dimGrid), dim3(dimBlock),0,stream[1], matAD[1],matBD[1], matCD[1]); hipSetDevice(0); hipMemcpyAsync(&matCP[0*size/2], matCD[0], totalsize/2, hipMemcpyDeviceToHost,stream[0]); hipSetDevice(1); hipMemcpyAsync(&matCP[1*size/2], matCD[1], totalsize/2, hipMemcpyDeviceToHost,stream[1]); memcpy(matC, matCP, totalsize); for(int i = 0; i < size; i++){ summation += matCP[i]; } printf("Sum is %lld ", summation); hipFree(matAD); hipFree(matBD); hipFree(matCD); hipHostFree(matAP); hipHostFree(matBP); hipHostFree(matCP); }
daf07d0ce9eee66ef47c171847f0405a01e41766.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <cuda.h> //1073741824 #define size 1073741824 #define threadsize 1024 __global__ void MatrixAddition(int *a, int *b, int *c){ int id = blockIdx.x *blockDim.x * blockDim.y + threadIdx.y * blockDim.x+ threadIdx.x; c[id] = a[id] + b[id]; } int main(){ const long long int totalsize = size*sizeof(int); long long int summation = 0; float time1, time2 = 0.0; int *matA = (int*)malloc(totalsize); int *matB = (int*)malloc(totalsize); int *matC = (int*)malloc (totalsize); for(int i = 0; i < size;i++){ matA[i] = 1; matB[i] = 2; matC[i] = 0; } dim3 dimGrid(size/threadsize/2,1); dim3 dimBlock(32,32); cudaStream_t stream[2]; int *matAD[2]; int *matBD[2]; int *matCD[2]; int *matAP;int *matBP; int *matCP; cudaMallocHost((void**)&matAP,totalsize); cudaMallocHost((void**)&matBP,totalsize); cudaMallocHost((void**)&matCP,totalsize); memcpy(matAP,matA, totalsize); memcpy(matBP,matB, totalsize); memcpy(matCP,matC, totalsize); cudaSetDevice(0); cudaMalloc((void**)&matAD[0],totalsize/2); cudaMalloc((void**)&matBD[0], totalsize/2); cudaMalloc((void**)&matCD[0], totalsize/2); cudaStreamCreateWithFlags(&stream[0],cudaStreamNonBlocking); cudaSetDevice(1); cudaMalloc((void**)&matAD[1],totalsize/2); cudaMalloc((void**)&matBD[1], totalsize/2); cudaMalloc((void**)&matCD[1], totalsize/2); cudaStreamCreateWithFlags(&stream[1],cudaStreamNonBlocking); cudaSetDevice(0); cudaMemcpyAsync(matAD[0], &matAP[0*size/2], totalsize/2, cudaMemcpyHostToDevice,stream[0]); cudaMemcpyAsync(matBD[0], &matBP[0*size/2], totalsize/2, cudaMemcpyHostToDevice,stream[0]); cudaSetDevice(1); cudaMemcpyAsync(matAD[1], &matAP[1*size/2], totalsize/2, cudaMemcpyHostToDevice,stream[1]); cudaMemcpyAsync(matBD[1], &matBP[1*size/2], totalsize/2, cudaMemcpyHostToDevice,stream[1]); cudaSetDevice(0); MatrixAddition<<<dimGrid, dimBlock,0,stream[0]>>>(matAD[0],matBD[0], matCD[0]); cudaSetDevice(1); MatrixAddition<<<dimGrid, dimBlock,0,stream[1]>>>(matAD[1],matBD[1], matCD[1]); cudaSetDevice(0); cudaMemcpyAsync(&matCP[0*size/2], matCD[0], totalsize/2, cudaMemcpyDeviceToHost,stream[0]); cudaSetDevice(1); cudaMemcpyAsync(&matCP[1*size/2], matCD[1], totalsize/2, cudaMemcpyDeviceToHost,stream[1]); memcpy(matC, matCP, totalsize); for(int i = 0; i < size; i++){ summation += matCP[i]; } printf("Sum is %lld ", summation); cudaFree(matAD); cudaFree(matBD); cudaFree(matCD); cudaFreeHost(matAP); cudaFreeHost(matBP); cudaFreeHost(matCP); }
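// Note on the program above: the device-to-host copies are issued with cudaMemcpyAsync on
// non-blocking streams, but the host then reads matCP (the memcpy and the summation loop)
// without waiting for them to finish. A minimal sketch of the missing synchronization step,
// matching the two-stream / two-device layout used above:
#include <cuda_runtime.h>

// Wait for the work queued on each per-device stream before the host touches the pinned buffers.
void syncBothDevices(cudaStream_t streams[2]) {
  for (int dev = 0; dev < 2; ++dev) {
    cudaSetDevice(dev);
    cudaStreamSynchronize(streams[dev]);
  }
}
// e.g. call syncBothDevices(stream) right before memcpy(matC, matCP, totalsize).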
a07bea3ca1b2cd40e8ef022ae308c4c518bbcfe5.hip
// !!! This is a file automatically generated by hipify!!! //////////////////////////////////////////////////////////////////////////// // // Copyright 1993-2015 NVIDIA Corporation. All rights reserved. // // Please refer to the NVIDIA end user license agreement (EULA) associated // with this source code for terms and conditions that govern your use of // this software. Any use, reproduction, disclosure, or distribution of // this software and related documentation outside the terms of the EULA // is strictly prohibited. // //////////////////////////////////////////////////////////////////////////// /* Template project which demonstrates the basics on how to setup a project * example application. * Host code. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes CUDA #include <hip/hip_runtime.h> // includes, project #include <helper_cuda.h> #include <helper_functions.h> // helper functions for SDK examples //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest(int argc, char **argv); extern "C" void computeGold(float *reference, float *idata, const unsigned int len); //////////////////////////////////////////////////////////////////////////////// //! Simple test kernel for device functionality //! @param g_idata input data in global memory //! @param g_odata output data in global memory //////////////////////////////////////////////////////////////////////////////// __global__ void histo_kernel(unsigned char *buffer,long size, unsigned int *histo) { __shared__ unsigned int temp[68]; int dt = 32; temp[threadIdx.x]=0; int i = threadIdx.x + blockIdx.x *blockDim.x; int offset = blockDim.x *gridDim.x; while(i<size){ if (buffer[i] >= 32 && buffer[i] < 97) // histo[buffer[i]-dt]++; atomicAdd(&temp[buffer[i]-dt],1); if (buffer[i] >=97 && buffer[i] <= 122) atomicAdd(&temp[buffer[i] -dt -32],1); // histo[buffer[i] - dt - 32]++; if (buffer[i] > 122 && buffer[i] <= 127 ) // histo[buffer[i] - dt - 32 - 26]++; atomicAdd(&temp[buffer[i]-dt -32-26],1); i+=offset; } __syncthreads(); atomicAdd( &(histo[threadIdx.x]), temp[threadIdx.x] ); } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { runTest(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! 
Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest(int argc, char **argv) { bool bTestResult = true; printf("%s Starting...\n\n", argv[0]); // use command-line specified CUDA device, otherwise use device with highest Gflops/s int devID = findCudaDevice(argc, (const char **)argv); StopWatchInterface *timer = 0; sdkCreateTimer(&timer); sdkStartTimer(&timer); if(argc <= 2){ fprintf(stderr, "Arguments non valide"); return 1; } /*For file input file and output file*/ FILE *f_input; FILE *f_output; /*Will content the number of char in the input file*/ long lSize; /*will content the file in char format*/ char *buffer; /*Open the */ f_input = fopen ( argv[1] ,"r" ); f_output = fopen( argv[2],"w"); if( !f_input ) perror(argv[1]),exit(1); fseek( f_input , 0L , SEEK_END); lSize = ftell( f_input ); rewind( f_input ); //buffer = calloc( 1, lSize+1 ); buffer =(char*) malloc(lSize); if( !buffer ) fclose(f_input),fputs("memory alloc fails",stderr),exit(1); if( 1!=fread( buffer , lSize, 1 , f_input) ) fclose(f_input),free(buffer),fputs("entire read fails",stderr),exit(1); /*allocate device memory*/ unsigned char *dev_buffer; unsigned int *dev_histo; /*Give space in Global memory of GPU to store different variable*/ hipMalloc( (void**)&dev_buffer, lSize); /*Copy from CPU Global memory to GPU Global memory*/ hipMemcpy( dev_buffer, buffer, lSize, hipMemcpyHostToDevice ); /*Create space for histo variable and initialize at 0 each slopt*/ hipMalloc( (void**)&dev_histo, NBR * sizeof( long)); hipMemset( dev_histo, 0, NBR * sizeof( int )); /*Define of the configuration for kernel running*/ hipDeviceProp_t proprieties; hipGetDeviceProperties( &proprieties, 0 ); int multiproc = proprieties.multiProcessorCount; dim3 blocks(multiproc*2,1,1); dim3 threads(1000, 1, 1); // execute the kernel hipLaunchKernelGGL(( histo_kernel), dim3(blocks),dim3(threads), 0, 0, dev_buffer, lSize, dev_histo ); // check if kernel execution generated and error getLastCudaError("Kernel execution failed"); /*Define histo vqriqble and copy on GPU global memory*/ unsigned int histo[NBR]; hipMemcpy( histo, dev_histo,NBR * sizeof( int ),hipMemcpyDeviceToHost); int dt =32; for(int i =0;i< NBR;i++){ if((i>=0 && i<= 31 && (i+dt != 42) && (i+dt != 36)) || (i>58 && i<=64) ) fprintf(f_output, "%c:%d\n",i+dt,histo[i]); if(i>31 && i<= 58 ) fprintf(f_output, "%c:%d\n",i+dt+32,histo[i]); // if(i> 58 && i <=64) // fprintf(f_output, "%c:%d\n",i+dt,histo[i]); if(i>64) fprintf(f_output, "%c:%d\n",i+dt+26,histo[i]); } sdkStopTimer(&timer); printf("Processing time: %f (ms)\n", sdkGetTimerValue(&timer)); sdkDeleteTimer(&timer); hipFree( dev_histo ); hipFree( dev_buffer ); fclose(f_input); fclose(f_output); free( buffer ); exit(bTestResult ? EXIT_SUCCESS : EXIT_FAILURE); }
a07bea3ca1b2cd40e8ef022ae308c4c518bbcfe5.cu
//////////////////////////////////////////////////////////////////////////// // // Copyright 1993-2015 NVIDIA Corporation. All rights reserved. // // Please refer to the NVIDIA end user license agreement (EULA) associated // with this source code for terms and conditions that govern your use of // this software. Any use, reproduction, disclosure, or distribution of // this software and related documentation outside the terms of the EULA // is strictly prohibited. // //////////////////////////////////////////////////////////////////////////// /* Template project which demonstrates the basics on how to setup a project * example application. * Host code. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes CUDA #include <cuda_runtime.h> // includes, project #include <helper_cuda.h> #include <helper_functions.h> // helper functions for SDK examples //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest(int argc, char **argv); extern "C" void computeGold(float *reference, float *idata, const unsigned int len); //////////////////////////////////////////////////////////////////////////////// //! Simple test kernel for device functionality //! @param g_idata input data in global memory //! @param g_odata output data in global memory //////////////////////////////////////////////////////////////////////////////// __global__ void histo_kernel(unsigned char *buffer,long size, unsigned int *histo) { __shared__ unsigned int temp[68]; int dt = 32; temp[threadIdx.x]=0; int i = threadIdx.x + blockIdx.x *blockDim.x; int offset = blockDim.x *gridDim.x; while(i<size){ if (buffer[i] >= 32 && buffer[i] < 97) // histo[buffer[i]-dt]++; atomicAdd(&temp[buffer[i]-dt],1); if (buffer[i] >=97 && buffer[i] <= 122) atomicAdd(&temp[buffer[i] -dt -32],1); // histo[buffer[i] - dt - 32]++; if (buffer[i] > 122 && buffer[i] <= 127 ) // histo[buffer[i] - dt - 32 - 26]++; atomicAdd(&temp[buffer[i]-dt -32-26],1); i+=offset; } __syncthreads(); atomicAdd( &(histo[threadIdx.x]), temp[threadIdx.x] ); } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { runTest(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! 
Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest(int argc, char **argv) { bool bTestResult = true; printf("%s Starting...\n\n", argv[0]); // use command-line specified CUDA device, otherwise use device with highest Gflops/s int devID = findCudaDevice(argc, (const char **)argv); StopWatchInterface *timer = 0; sdkCreateTimer(&timer); sdkStartTimer(&timer); if(argc <= 2){ fprintf(stderr, "Arguments non valide"); return 1; } /*For file input file and output file*/ FILE *f_input; FILE *f_output; /*Will content the number of char in the input file*/ long lSize; /*will content the file in char format*/ char *buffer; /*Open the */ f_input = fopen ( argv[1] ,"r" ); f_output = fopen( argv[2],"w"); if( !f_input ) perror(argv[1]),exit(1); fseek( f_input , 0L , SEEK_END); lSize = ftell( f_input ); rewind( f_input ); //buffer = calloc( 1, lSize+1 ); buffer =(char*) malloc(lSize); if( !buffer ) fclose(f_input),fputs("memory alloc fails",stderr),exit(1); if( 1!=fread( buffer , lSize, 1 , f_input) ) fclose(f_input),free(buffer),fputs("entire read fails",stderr),exit(1); /*allocate device memory*/ unsigned char *dev_buffer; unsigned int *dev_histo; /*Give space in Global memory of GPU to store different variable*/ cudaMalloc( (void**)&dev_buffer, lSize); /*Copy from CPU Global memory to GPU Global memory*/ cudaMemcpy( dev_buffer, buffer, lSize, cudaMemcpyHostToDevice ); /*Create space for histo variable and initialize at 0 each slopt*/ cudaMalloc( (void**)&dev_histo, NBR * sizeof( long)); cudaMemset( dev_histo, 0, NBR * sizeof( int )); /*Define of the configuration for kernel running*/ cudaDeviceProp proprieties; cudaGetDeviceProperties( &proprieties, 0 ); int multiproc = proprieties.multiProcessorCount; dim3 blocks(multiproc*2,1,1); dim3 threads(1000, 1, 1); // execute the kernel histo_kernel<<<blocks,threads>>>( dev_buffer, lSize, dev_histo ); // check if kernel execution generated and error getLastCudaError("Kernel execution failed"); /*Define histo vqriqble and copy on GPU global memory*/ unsigned int histo[NBR]; cudaMemcpy( histo, dev_histo,NBR * sizeof( int ),cudaMemcpyDeviceToHost); int dt =32; for(int i =0;i< NBR;i++){ if((i>=0 && i<= 31 && (i+dt != 42) && (i+dt != 36)) || (i>58 && i<=64) ) fprintf(f_output, "%c:%d\n",i+dt,histo[i]); if(i>31 && i<= 58 ) fprintf(f_output, "%c:%d\n",i+dt+32,histo[i]); // if(i> 58 && i <=64) // fprintf(f_output, "%c:%d\n",i+dt,histo[i]); if(i>64) fprintf(f_output, "%c:%d\n",i+dt+26,histo[i]); } sdkStopTimer(&timer); printf("Processing time: %f (ms)\n", sdkGetTimerValue(&timer)); sdkDeleteTimer(&timer); cudaFree( dev_histo ); cudaFree( dev_buffer ); fclose(f_input); fclose(f_output); free( buffer ); exit(bTestResult ? EXIT_SUCCESS : EXIT_FAILURE); }
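// In histo_kernel above, temp[] has 68 slots but the launch uses 1000 threads per block, so
// the per-thread init (temp[threadIdx.x] = 0) and the final flush index shared memory out of
// bounds whenever threadIdx.x >= 68. A guarded sketch of the same shared-memory histogram,
// assuming 68 bins (NBR itself is presumably defined in a header not shown in this snippet);
// the bin arithmetic is copied from the kernel above, only the bounds handling changes:
__global__ void histo_kernel_guarded(const unsigned char *buffer, long size,
                                     unsigned int *histo) {
  constexpr int kBins = 68;
  __shared__ unsigned int temp[kBins];
  for (int b = threadIdx.x; b < kBins; b += blockDim.x) temp[b] = 0;
  __syncthreads();

  const int dt = 32;
  for (long i = threadIdx.x + (long)blockIdx.x * blockDim.x; i < size;
       i += (long)blockDim.x * gridDim.x) {
    const unsigned char c = buffer[i];
    if (c >= 32 && c < 97)        atomicAdd(&temp[c - dt], 1u);
    else if (c >= 97 && c <= 122) atomicAdd(&temp[c - dt - 32], 1u);
    else if (c > 122 && c <= 127) atomicAdd(&temp[c - dt - 32 - 26], 1u);
  }
  __syncthreads();
  for (int b = threadIdx.x; b < kBins; b += blockDim.x)
    atomicAdd(&histo[b], temp[b]);
}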
9fea3fac56c50c9aa8aadde42fbed8901f3337d0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <utility> #include <memory.h> #include <stdint.h> #include <stdio.h> #include <time.h> #include "../Settings.h" #include "../BitHelper.h" #include "../LZSSInterface.h" #include "../MatchHelper/MatchHelper.h" #define MIN(a, b) \ ((a) < (b) ? (a) : (b)) __global__ void CompressKernel(const uint8_t* deviceInBuf, int inSize, uint8_t* deviceOutBuf, int* deviceOutSize, CompressFlagBlock* deviceFlagOut, int* deviceFlagSize) { __shared__ uint8_t blockBuf[DataBlockSize]; __shared__ PairType blockFlags[DataBlockSize]; auto threadId = threadIdx.x; auto blockId = blockIdx.x; auto blockOffset = blockId * DataBlockSize; auto blockSize = MIN(DataBlockSize, inSize - blockOffset); for (int t = threadId; t < blockSize; t += blockDim.x) { blockBuf[t] = deviceInBuf[blockOffset + t]; } __syncthreads(); for (int t = threadId; t < blockSize; t += blockDim.x) { auto lookbackLength = MIN(WindowSize, t); auto lookaheadLength = MIN(MaxEncodeLength, blockSize - t); int matchOffset, matchLength; if (FindMatch(blockBuf + t - lookbackLength, lookbackLength, blockBuf + t, lookaheadLength, matchOffset, matchLength)) { // Convert offset to backward representation matchOffset = lookbackLength - matchOffset; // Due to the bit limit, minus 1 for exact offset and length blockFlags[t] = ((matchOffset - 1) << PairLengthBits) | (matchLength - 1); } else { blockFlags[t] = 0; } } __syncthreads(); // Collector if (threadId == 0) { CompressFlagBlock compressBlock; memset(&compressBlock, 0, sizeof(CompressFlagBlock)); for (int i = 0; i < blockSize;) { if (blockFlags[i] == 0) { deviceOutBuf[blockOffset + compressBlock.CompressedSize] = blockBuf[i]; ++compressBlock.CompressedSize; PUT_BIT(compressBlock.Flags, compressBlock.NumOfFlags, 0); ++i; } else { memcpy(deviceOutBuf + blockOffset + compressBlock.CompressedSize, &blockFlags[i], sizeof(PairType)); compressBlock.CompressedSize += sizeof(PairType); PUT_BIT(compressBlock.Flags, compressBlock.NumOfFlags, 1); auto matchLength = (blockFlags[i] & (MaxEncodeLength - 1)) + 1; i += matchLength; } ++compressBlock.NumOfFlags; } memcpy(deviceFlagOut + blockId, &compressBlock, sizeof(CompressFlagBlock)); // taken by current flag block atomicAdd(deviceFlagSize, SIZE_OF_FLAGS(compressBlock.NumOfFlags) + sizeof(CompressFlagBlock::NumOfFlags) + sizeof(CompressFlagBlock::CompressedSize)); atomicAdd(deviceOutSize, compressBlock.CompressedSize); } }
9fea3fac56c50c9aa8aadde42fbed8901f3337d0.cu
#include <utility> #include <memory.h> #include <stdint.h> #include <stdio.h> #include <time.h> #include "../Settings.h" #include "../BitHelper.h" #include "../LZSSInterface.h" #include "../MatchHelper/MatchHelper.h" #define MIN(a, b) \ ((a) < (b) ? (a) : (b)) __global__ void CompressKernel(const uint8_t* deviceInBuf, int inSize, uint8_t* deviceOutBuf, int* deviceOutSize, CompressFlagBlock* deviceFlagOut, int* deviceFlagSize) { __shared__ uint8_t blockBuf[DataBlockSize]; __shared__ PairType blockFlags[DataBlockSize]; auto threadId = threadIdx.x; auto blockId = blockIdx.x; auto blockOffset = blockId * DataBlockSize; auto blockSize = MIN(DataBlockSize, inSize - blockOffset); for (int t = threadId; t < blockSize; t += blockDim.x) { blockBuf[t] = deviceInBuf[blockOffset + t]; } __syncthreads(); for (int t = threadId; t < blockSize; t += blockDim.x) { auto lookbackLength = MIN(WindowSize, t); auto lookaheadLength = MIN(MaxEncodeLength, blockSize - t); int matchOffset, matchLength; if (FindMatch(blockBuf + t - lookbackLength, lookbackLength, blockBuf + t, lookaheadLength, matchOffset, matchLength)) { // Convert offset to backward representation matchOffset = lookbackLength - matchOffset; // Due to the bit limit, minus 1 for exact offset and length blockFlags[t] = ((matchOffset - 1) << PairLengthBits) | (matchLength - 1); } else { blockFlags[t] = 0; } } __syncthreads(); // Collector if (threadId == 0) { CompressFlagBlock compressBlock; memset(&compressBlock, 0, sizeof(CompressFlagBlock)); for (int i = 0; i < blockSize;) { if (blockFlags[i] == 0) { deviceOutBuf[blockOffset + compressBlock.CompressedSize] = blockBuf[i]; ++compressBlock.CompressedSize; PUT_BIT(compressBlock.Flags, compressBlock.NumOfFlags, 0); ++i; } else { memcpy(deviceOutBuf + blockOffset + compressBlock.CompressedSize, &blockFlags[i], sizeof(PairType)); compressBlock.CompressedSize += sizeof(PairType); PUT_BIT(compressBlock.Flags, compressBlock.NumOfFlags, 1); auto matchLength = (blockFlags[i] & (MaxEncodeLength - 1)) + 1; i += matchLength; } ++compressBlock.NumOfFlags; } memcpy(deviceFlagOut + blockId, &compressBlock, sizeof(CompressFlagBlock)); // taken by current flag block atomicAdd(deviceFlagSize, SIZE_OF_FLAGS(compressBlock.NumOfFlags) + sizeof(CompressFlagBlock::NumOfFlags) + sizeof(CompressFlagBlock::CompressedSize)); atomicAdd(deviceOutSize, compressBlock.CompressedSize); } }
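// CompressKernel above processes one DataBlockSize-sized chunk of the input per thread block
// (blockOffset = blockIdx.x * DataBlockSize), so a host-side launch would use one block per
// chunk. Illustrative sketch only: this wrapper and the 256-thread block size are assumptions,
// not the project's actual launch code; DataBlockSize and CompressFlagBlock come from the
// headers included above.
void LaunchCompressKernel(const uint8_t *devIn, int inSize, uint8_t *devOut, int *devOutSize,
                          CompressFlagBlock *devFlagOut, int *devFlagSize,
                          cudaStream_t stream) {
  const int numBlocks = (inSize + DataBlockSize - 1) / DataBlockSize;  // one block per chunk
  CompressKernel<<<numBlocks, 256, 0, stream>>>(devIn, inSize, devOut, devOutSize,
                                                devFlagOut, devFlagSize);
}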
4dc73b80883e7ae382878f1ddf2786cdf656beb5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <stdio.h> #include <vector> #include <set> #include <sstream> #include <string> #include <algorithm> #include <math.h> #include <numeric> #include <functional> #define n_nodes {{n_nodes}} #define num_states {{num_states}} extern "C" { __host__ __device__ void changebase(int number, std::vector<unsigned int>& state){ int current_state = num_states - 1; int Number = number; int NumStates = num_states; int quotient = (int)Number/NumStates; int remainder = Number % NumStates; Number = quotient; state[current_state] = remainder; current_state = current_state - 1; while (quotient !=0){ quotient = (int)Number/NumStates; remainder = Number % NumStates; state[current_state] = remainder; Number = quotient; current_state = current_state - 1; }; } __host__ __device__ void update(std::vector<unsigned int> x, std::vector<unsigned int>& vect){ {{functions}} std::transform(vect.begin(),vect.end(),vect.begin(),std::bind2nd(std::modulus<unsigned int>(),3)); }; void print_vect(int temp_vect){ for (const unsigned int& i : temp_vect){ std::cout << i << ' '; }; std::cout << '\n'; } __host__ __device__ void to_string(std::vector<unsigned int> v, std::string& ss ){ ss = std::accumulate(std::next(v.begin()), v.end(), std::to_string(v[0]), // start with first element [](std::string a, unsigned int b) {return a + std::to_string(b);}); } __global__ void AttractorFinder(std::string* result){ int tid = blockDim.x * blockIdx.x + threadIdx.x; // std::vector<unsigned int> start_vect(n_nodes, 0); // std::vector<unsigned int> end_vect(n_nodes, 0); int start_vect[n_nodes]; int end_vect[n_nodes]; std::string end_string; std::set<std::vector<unsigned int>> EmptySet; std::set<std::vector<unsigned int>> visited_states; changebase(tid, start_vect); while(true){ update(start_vect, end_vect); if (visited_states.count(end_vect)==1){break;} visited_states.insert(end_vect); start_vect = end_vect; }; to_string(end_vect, end_string); result[tid] = end_string; }; }
4dc73b80883e7ae382878f1ddf2786cdf656beb5.cu
#include <iostream> #include <stdio.h> #include <vector> #include <set> #include <sstream> #include <string> #include <algorithm> #include <math.h> #include <numeric> #include <functional> #define n_nodes {{n_nodes}} #define num_states {{num_states}} extern "C" { __host__ __device__ void changebase(int number, std::vector<unsigned int>& state){ int current_state = num_states - 1; int Number = number; int NumStates = num_states; int quotient = (int)Number/NumStates; int remainder = Number % NumStates; Number = quotient; state[current_state] = remainder; current_state = current_state - 1; while (quotient !=0){ quotient = (int)Number/NumStates; remainder = Number % NumStates; state[current_state] = remainder; Number = quotient; current_state = current_state - 1; }; } __host__ __device__ void update(std::vector<unsigned int> x, std::vector<unsigned int>& vect){ {{functions}} std::transform(vect.begin(),vect.end(),vect.begin(),std::bind2nd(std::modulus<unsigned int>(),3)); }; void print_vect(int temp_vect){ for (const unsigned int& i : temp_vect){ std::cout << i << ' '; }; std::cout << '\n'; } __host__ __device__ void to_string(std::vector<unsigned int> v, std::string& ss ){ ss = std::accumulate(std::next(v.begin()), v.end(), std::to_string(v[0]), // start with first element [](std::string a, unsigned int b) {return a + std::to_string(b);}); } __global__ void AttractorFinder(std::string* result){ int tid = blockDim.x * blockIdx.x + threadIdx.x; // std::vector<unsigned int> start_vect(n_nodes, 0); // std::vector<unsigned int> end_vect(n_nodes, 0); int start_vect[n_nodes]; int end_vect[n_nodes]; std::string end_string; std::set<std::vector<unsigned int>> EmptySet; std::set<std::vector<unsigned int>> visited_states; changebase(tid, start_vect); while(true){ update(start_vect, end_vect); if (visited_states.count(end_vect)==1){break;} visited_states.insert(end_vect); start_vect = end_vect; }; to_string(end_vect, end_string); result[tid] = end_string; }; }
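// The kernel above uses std::vector, std::string, std::set and std::accumulate, which are
// host-only and will not compile inside __global__/__device__ code (changebase is also called
// with a plain int array although its parameter is a std::vector<unsigned int>&). A
// device-compatible sketch of the base-num_states conversion using plain arrays; n_nodes and
// num_states keep their template-placeholder meaning, and starting at index n_nodes - 1
// assumes one digit per node (the original starts at num_states - 1):
__host__ __device__ void changebase_array(int number, unsigned int state[n_nodes]) {
  for (int i = 0; i < n_nodes; ++i) state[i] = 0;  // zero-fill, like the vector version
  int pos = n_nodes - 1;
  do {
    state[pos--] = number % num_states;
    number /= num_states;
  } while (number != 0 && pos >= 0);
}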
fd35eff3aa87f6e730e06d077d228e0137daf8cc.hip
// !!! This is a file automatically generated by hipify!!! /*------------------------------------------------------------------------- * * CUDA functions for texture-memory interpolation based projection * * This file has the necesary fucntiosn to perform X-ray CBCT projection * operation given a geaometry, angles and image. It uses the 3D texture * memory linear interpolation to uniformily sample a path to integrate the * X-rays. * * CODE by Ander Biguri * Sepideh Hatamikia (arbitrary rotation) * --------------------------------------------------------------------------- * --------------------------------------------------------------------------- * Copyright (c) 2015, University of Bath and CERN- European Organization for * Nuclear Research * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * --------------------------------------------------------------------------- * * Contact: [email protected] * Codes : https://github.com/CERN/TIGRE * --------------------------------------------------------------------------- */ #include <algorithm> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include "ray_interpolated_projection.hpp" #include "mex.h" #include <math.h> #define cudaCheckErrors(msg) \ do { \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ mexPrintf("%s \n",msg);\ hipDeviceReset();\ mexErrMsgIdAndTxt("TIGRE:Ax:interpolated",hipGetErrorString(__err));\ } \ } while (0) // Declare the texture reference. 
#define MAXTREADS 1024 #define PROJ_PER_BLOCK 9 #define PIXEL_SIZE_BLOCK 9 /*GEOMETRY DEFINITION * * Detector plane, behind * |-----------------------------| * | | * | | * | | * | | * | +--------+ | * | / /| | * A Z | / / |*D | * | | +--------+ | | * | | | | | | * | | | *O | + | * --->y | | | / | * / | | |/ | * V X | +--------+ | * |-----------------------------| * * *S * * * * * **/ void CreateTextureInterp(int num_devices,const float* imagedata,Geometry geo,hipArray** d_cuArrTex, hipTextureObject_t *texImage,bool allocate); __constant__ Point3D projParamsArrayDev[4*PROJ_PER_BLOCK]; // Dev means it is on device __constant__ float projFloatsArrayDev[2*PROJ_PER_BLOCK]; // Dev means it is on device __global__ void vecAddInPlaceInterp(float *a, float *b, unsigned long n) { int idx = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (idx < n) a[idx] = a[idx] + b[idx]; } template<bool sphericalrotation> __global__ void kernelPixelDetector( Geometry geo, float* detector, const int currProjSetNumber, const int totalNoOfProjections, hipTextureObject_t tex){ unsigned long y = blockIdx.y * blockDim.y + threadIdx.y; unsigned long x = blockIdx.x * blockDim.x + threadIdx.x; unsigned long projNumber=threadIdx.z; if ((x>= geo.nDetecU) | (y>= geo.nDetecV)| (projNumber>=PROJ_PER_BLOCK)) return; size_t idx = (size_t)(x * geo.nDetecV + y)+ (size_t)projNumber*geo.nDetecV *geo.nDetecU ; int indAlpha = currProjSetNumber*PROJ_PER_BLOCK+projNumber; // This is the ABSOLUTE projection number in the projection array if(indAlpha>=totalNoOfProjections) return; Point3D uvOrigin = projParamsArrayDev[4*projNumber]; // 6*projNumber because we have 6 Point3D values per projection Point3D deltaU = projParamsArrayDev[4*projNumber+1]; Point3D deltaV = projParamsArrayDev[4*projNumber+2]; Point3D source = projParamsArrayDev[4*projNumber+3]; float DSO = projFloatsArrayDev[2*projNumber+0]; float cropdist_init = projFloatsArrayDev[2*projNumber+1]; /////// Get coordinates XYZ of pixel UV int pixelV = geo.nDetecV-y-1; int pixelU = x; float vectX,vectY,vectZ; Point3D P; P.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x); P.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y); P.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z); // Length is the ray length in normalized space float length=__fsqrt_rd((source.x-P.x)*(source.x-P.x)+(source.y-P.y)*(source.y-P.y)+(source.z-P.z)*(source.z-P.z)); //now legth is an integer of Nsamples that are required on this line length=ceilf(__fdividef(length,geo.accuracy));//Divide the directional vector by an integer vectX=__fdividef(P.x -source.x,length); vectY=__fdividef(P.y -source.y,length); vectZ=__fdividef(P.z -source.z,length); // //Integrate over the line float tx,ty,tz; float sum=0; float i; // Because I have no idea how to efficiently cutoff the legth path in 3D, a very upper limit is computed (see maxdistanceCuboid) // for the 3D case. However it would be bad to lose performance in the 3D case // TODO: can ge really improve this? 
if (sphericalrotation){ if ((2*DSO/fminf(fminf(geo.dVoxelX,geo.dVoxelY),geo.dVoxelZ)+cropdist_init)/geo.accuracy < length) length=ceilf((2*DSO/fminf(fminf(geo.dVoxelX,geo.dVoxelY),geo.dVoxelZ)+cropdist_init)/geo.accuracy); } else{ if ((2*DSO/fminf(geo.dVoxelX,geo.dVoxelY)+cropdist_init)/geo.accuracy < length) length=ceilf((2*DSO/fminf(geo.dVoxelX,geo.dVoxelY)+cropdist_init)/geo.accuracy); } //Length is not actually a length, but the amount of memreads with given accuracy ("samples per voxel") for (i=floorf(cropdist_init/geo.accuracy); i<=length; i=i+1){ tx=vectX*i+source.x; ty=vectY*i+source.y; tz=vectZ*i+source.z; sum += tex3D<float>(tex, tx+0.5f, ty+0.5f, tz+0.5f); // this line is 94% of time. } float deltalength=sqrtf((vectX*geo.dVoxelX)*(vectX*geo.dVoxelX)+ (vectY*geo.dVoxelY)*(vectY*geo.dVoxelY)+ (vectZ*geo.dVoxelZ)*(vectZ*geo.dVoxelZ) ); detector[idx]=sum*deltalength; } // legnth(angles)=3 x nagnles, as we have roll, pitch, yaw. int interpolation_projection(float * img, Geometry geo, float** result,float const * const angles,int nangles){ // Prepare for MultiGPU int deviceCount = 0; hipGetDeviceCount(&deviceCount); cudaCheckErrors("Device query fail"); if (deviceCount == 0) { mexErrMsgIdAndTxt("Ax:Interpolated_projection:GPUselect","There are no available device(s) that support CUDA\n"); } // // CODE assumes // 1.-All available devices are usable by this code // 2.-All available devices are equal, they are the same machine (warning trhown) int dev; char * devicenames; hipDeviceProp_t deviceProp; for (dev = 0; dev < deviceCount; dev++) { hipSetDevice(dev); hipGetDeviceProperties(&deviceProp, dev); if (dev>0){ if (strcmp(devicenames,deviceProp.name)!=0){ mexWarnMsgIdAndTxt("Ax:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed. \n Siddon_projection.cu line 275."); break; } } devicenames=deviceProp.name; } // Check free memory size_t mem_GPU_global; checkFreeMemory(deviceCount,&mem_GPU_global); size_t mem_image=(unsigned long long)geo.nVoxelX*(unsigned long long)geo.nVoxelY*(unsigned long long)geo.nVoxelZ*sizeof(float); size_t mem_proj =(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV * sizeof(float); // Does everything fit in the GPUs? bool fits_in_memory=false; unsigned int splits=1; Geometry * geoArray; if (mem_image+2*PROJ_PER_BLOCK*mem_proj<mem_GPU_global){// yes it does fits_in_memory=true; geoArray=(Geometry*)malloc(sizeof(Geometry)); geoArray[0]=geo; } else{// Nope nope. fits_in_memory=false; // Oh dear. // approx free memory we have. We already have left some extra 10% free for internal stuff // we need a second projection memory to combine multi-GPU stuff. 
size_t mem_free=mem_GPU_global-4*PROJ_PER_BLOCK*mem_proj; splits=mem_image/mem_free+1;// Ceil of the truncation geoArray=(Geometry*)malloc(splits*sizeof(Geometry)); splitImageInterp(splits,geo,geoArray,nangles); } // Allocate auiliary memory for projections on the GPU to accumulate partial resutsl float ** dProjection_accum; size_t num_bytes_proj = PROJ_PER_BLOCK*geo.nDetecU*geo.nDetecV * sizeof(float); if (!fits_in_memory){ dProjection_accum=(float**)malloc(2*deviceCount*sizeof(float*)); for (dev = 0; dev < deviceCount; dev++) { hipSetDevice(dev); for (int i = 0; i < 2; ++i){ hipMalloc((void**)&dProjection_accum[dev*2+i], num_bytes_proj); hipMemset(dProjection_accum[dev*2+i],0,num_bytes_proj); cudaCheckErrors("cudaMallocauxiliarty projections fail"); } } } // This is happening regarthless if the image fits on memory float** dProjection=(float**)malloc(2*deviceCount*sizeof(float*)); for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(dev); for (int i = 0; i < 2; ++i){ hipMalloc((void**)&dProjection[dev*2+i], num_bytes_proj); hipMemset(dProjection[dev*2+i] ,0,num_bytes_proj); cudaCheckErrors("hipMalloc projections fail"); } } //Pagelock memory for syncronous copy. // Lets try to make the host memory pinned: // We laredy queried the GPU and assuemd they are the same, thus shoudl have the same attributes. int isHostRegisterSupported; hipDeviceGetAttribute(&isHostRegisterSupported,hipDeviceAttributeHostRegisterSupported,0); // empirical testing shows that when the image split is smaller than 1 (also implies the image is not very big), the time to // pin the memory is greater than the lost time in Syncronously launching the memcpys. This is only worth it when the image is too big. if (isHostRegisterSupported & splits>1){ hipHostRegister(img, (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geo.nVoxelZ*(size_t)sizeof(float),hipHostRegisterPortable); } Point3D source, deltaU, deltaV, uvOrigin; Point3D* projParamsArrayHost; hipHostMalloc((void**)&projParamsArrayHost,4*PROJ_PER_BLOCK*sizeof(Point3D)); float* projFloatsArrayHost; hipHostMalloc((void**)&projFloatsArrayHost,2*PROJ_PER_BLOCK*sizeof(float)); // Create Streams for overlapping memcopy and compute int nStream_device=2; int nStreams=deviceCount*nStream_device; hipStream_t* stream=(hipStream_t*)malloc(nStreams*sizeof(hipStream_t)); for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(dev); for (int i = 0; i < nStream_device; ++i){ hipStreamCreate(&stream[i+dev*nStream_device]); } } cudaCheckErrors("Stream creation fail"); int nangles_device=(nangles+deviceCount-1)/deviceCount; int nangles_last_device=(nangles-(deviceCount-1)*nangles_device); unsigned int noOfKernelCalls = (nangles_device+PROJ_PER_BLOCK-1)/PROJ_PER_BLOCK; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_BLOCK unsigned int last_device_blocks= (nangles_last_device+PROJ_PER_BLOCK-1)/PROJ_PER_BLOCK; // we will use this in the memory management. 
int projection_this_block; hipTextureObject_t *texImg = new hipTextureObject_t[deviceCount]; hipArray **d_cuArrTex = new hipArray*[deviceCount]; for (unsigned int sp=0;sp<splits;sp++){ // Create texture objects for all GPUs size_t linear_idx_start; //First one shoudl always be the same size as all the rest but the last linear_idx_start= (size_t)sp*(size_t)geoArray[0].nVoxelX*(size_t)geoArray[0].nVoxelY*(size_t)geoArray[0].nVoxelZ; CreateTextureInterp(deviceCount,&img[linear_idx_start],geoArray[sp],d_cuArrTex,texImg,!sp); cudaCheckErrors("Texture object creation fail"); int divU,divV; divU=PIXEL_SIZE_BLOCK; divV=PIXEL_SIZE_BLOCK; dim3 grid((geoArray[sp].nDetecU+divU-1)/divU,(geoArray[0].nDetecV+divV-1)/divV,1); dim3 block(divU,divV,PROJ_PER_BLOCK); unsigned int proj_global; unsigned int i; float maxdist; // Now that we have prepared the image (piece of image) and parameters for kernels // we project for all angles. for ( i=0; i<noOfKernelCalls; i++){ for (dev=0;dev<deviceCount;dev++){ float is_spherical=0; hipSetDevice(dev); for(unsigned int j=0; j<PROJ_PER_BLOCK; j++){ proj_global=(i*PROJ_PER_BLOCK+j)+dev*nangles_device; if (proj_global>=nangles) break; if ((i*PROJ_PER_BLOCK+j)>=nangles_device) break; geo.alpha=angles[proj_global*3]; geo.theta=angles[proj_global*3+1]; geo.psi =angles[proj_global*3+2]; is_spherical+=abs(geo.theta)+abs(geo.psi); //precomute distances for faster execution maxdist=maxdistanceCuboid(geo,proj_global); //Precompute per angle constant stuff for speed computeDeltas(geo, proj_global, &uvOrigin, &deltaU, &deltaV, &source); //Ray tracing! projParamsArrayHost[4*j]=uvOrigin; // 6*j because we have 6 Point3D values per projection projParamsArrayHost[4*j+1]=deltaU; projParamsArrayHost[4*j+2]=deltaV; projParamsArrayHost[4*j+3]=source; projFloatsArrayHost[2*j]=geo.DSO[proj_global]; projFloatsArrayHost[2*j+1]=floor(maxdist); } hipMemcpyToSymbolAsync(projParamsArrayDev, projParamsArrayHost, sizeof(Point3D)*4*PROJ_PER_BLOCK,0,hipMemcpyHostToDevice,stream[dev*nStream_device]); hipMemcpyToSymbolAsync(projFloatsArrayDev, projFloatsArrayHost, sizeof(float)*2*PROJ_PER_BLOCK,0,hipMemcpyHostToDevice,stream[dev*nStream_device]); hipStreamSynchronize(stream[dev*nStream_device]); //TODO: we could do this around X and Y axis too, but we would need to compute the new axis of rotation (not possible to know from jsut the angles) if (!is_spherical){ hipLaunchKernelGGL(( kernelPixelDetector<false>), dim3(grid),dim3(block),0,stream[dev*nStream_device], geoArray[sp],dProjection[(i%2)+dev*2],i,nangles_device,texImg[dev]); } else{ hipLaunchKernelGGL(( kernelPixelDetector<true>) , dim3(grid),dim3(block),0,stream[dev*nStream_device], geoArray[sp],dProjection[(i%2)+dev*2],i,nangles_device,texImg[dev]); } } // Now that the computation is happening, we need to either prepare the memory for // combining of the projections (splits>1) or start removing previous results. // If our image does not fit in memory then we need to make sure we accumulate previous results too. if( !fits_in_memory&&sp>0){ // First, grab previous results and put them in the auxiliary variable for (dev = 0; dev < deviceCount; dev++){ projection_this_block=PROJ_PER_BLOCK; hipSetDevice(dev); // this werid code makes sure we dont access bad memory. Its necesary for deviceCount>2 if (dev+1==deviceCount){ // if its the last device if(i+1==last_device_blocks) // If we are in the last block of the last device, how many projections? 
projection_this_block=nangles_last_device-(last_device_blocks-1)*PROJ_PER_BLOCK; if(i+1>last_device_blocks) // As the last device can have less blocs, i may be over it. break; }else{ if(i+1==noOfKernelCalls) // if its not the last device, it can still be the lat block projection_this_block=nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK; } hipMemcpyAsync(dProjection_accum[(i%2)+dev*2], result[i*PROJ_PER_BLOCK+dev*nangles_device], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), hipMemcpyHostToDevice,stream[dev*2+1]); } // Second, take the results from current compute call and add it to the code in execution. for (dev = 0; dev < deviceCount; dev++){ projection_this_block=PROJ_PER_BLOCK; hipSetDevice(dev); // this werid code makes sure we dont access bad memory. Its necesary for deviceCount>2 if (dev+1==deviceCount){ // if its the last device if(i+1==last_device_blocks) // If we are in the last block of the last device, how many projections? projection_this_block=nangles_last_device-(last_device_blocks-1)*PROJ_PER_BLOCK; if(i+1>last_device_blocks) // As the last device can have less blocs, i may be over it. break; }else{ if(i+1==noOfKernelCalls) // if its not the last device, it can still be the lat block projection_this_block=nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK; } hipStreamSynchronize(stream[dev*2+1]); // wait until copy is finished hipLaunchKernelGGL(( vecAddInPlaceInterp), dim3((geo.nDetecU*geo.nDetecV*projection_this_block+MAXTREADS-1)/MAXTREADS),dim3(MAXTREADS),0,stream[dev*2], dProjection[(i%2)+dev*2],dProjection_accum[(i%2)+dev*2],(unsigned long)geo.nDetecU*geo.nDetecV*projection_this_block); } } // Now, lets get out the projections from the previous execution of the kernels. if (i>0){ for (dev = 0; dev < deviceCount; dev++){ projection_this_block=PROJ_PER_BLOCK; hipSetDevice(dev); if (dev+1==deviceCount && i+1==noOfKernelCalls && last_device_blocks!=noOfKernelCalls){ projection_this_block=nangles_last_device-(last_device_blocks-1)*PROJ_PER_BLOCK; } hipMemcpyAsync(result[(i-1)*PROJ_PER_BLOCK+dev*nangles_device], dProjection[(int)(!(i%2))+dev*2], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), hipMemcpyDeviceToHost,stream[dev*2+1]); } } // Make sure Computation on kernels has finished before we launch the next batch. for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(dev); hipStreamSynchronize(stream[dev*2]); } } // We still have the last one to get out, do that one int projection_this_block; for (dev = 0; dev < deviceCount; dev++){ projection_this_block=PROJ_PER_BLOCK; hipSetDevice(dev); // this werid code makes sure we dont access bad memory. Its necesary for deviceCount>2 if (dev+1==deviceCount){ // if its the last device projection_this_block=nangles_last_device-(last_device_blocks-1)*PROJ_PER_BLOCK; if(i>last_device_blocks) // As the last device can have less blocs, i may be over it. 
break; }else{ projection_this_block=nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK; } hipDeviceSynchronize(); cudaCheckErrors("Fail memcopy fail"); hipMemcpyAsync(result[(i-1)*PROJ_PER_BLOCK+dev*nangles_device], dProjection[(int)(!(i%2))+dev*2], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), hipMemcpyDeviceToHost,stream[dev*2+1]); } // Free memory for the next piece of image hipDeviceSynchronize(); } cudaCheckErrors("Main loop fail"); /////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////// for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(dev); hipDestroyTextureObject(texImg[dev]); hipFreeArray(d_cuArrTex[dev]); } // Freeing Stage for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(dev); hipFree(dProjection[dev*2]); hipFree(dProjection[dev*2+1]); } free(dProjection); if(!fits_in_memory){ for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(dev); hipFree(dProjection_accum[dev*2]); hipFree(dProjection_accum[dev*2+1]); } free(dProjection_accum); } freeGeoArray(splits,geoArray); hipHostFree(projParamsArrayHost); for (int i = 0; i < nStreams; ++i) hipStreamDestroy(stream[i]) ; if (isHostRegisterSupported & splits>1){ hipHostUnregister(img); } cudaCheckErrors("hipFree fail"); // hipDeviceReset(); return 0; } void CreateTextureInterp(int num_devices,const float* imagedata,Geometry geo,hipArray** d_cuArrTex, hipTextureObject_t *texImage,bool allocate) { //size_t size_image=geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ; const hipExtent extent = make_hipExtent(geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ); if(allocate){ for (unsigned int i = 0; i < num_devices; i++){ hipSetDevice(i); //hipArray Descriptor hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); //cuda Array hipMalloc3DArray(&d_cuArrTex[i], &channelDesc, extent); cudaCheckErrors("Texture memory allocation fail"); } } for (unsigned int i = 0; i < num_devices; i++){ hipMemcpy3DParms copyParams = {0}; hipSetDevice(i); //Array creation copyParams.srcPtr = make_hipPitchedPtr((void *)imagedata, extent.width*sizeof(float), extent.width, extent.height); copyParams.dstArray = d_cuArrTex[i]; copyParams.extent = extent; copyParams.kind = hipMemcpyHostToDevice; hipMemcpy3DAsync(&copyParams); //cudaCheckErrors("Texture memory data copy fail"); //Array creation End } for (unsigned int i = 0; i < num_devices; i++){ hipSetDevice(i); hipResourceDesc texRes; memset(&texRes, 0, sizeof(hipResourceDesc)); texRes.resType = hipResourceTypeArray; texRes.res.array.array = d_cuArrTex[i]; hipTextureDesc texDescr; memset(&texDescr, 0, sizeof(hipTextureDesc)); texDescr.normalizedCoords = false; if (geo.accuracy>1){ texDescr.filterMode = hipFilterModePoint; geo.accuracy=1; } else{ texDescr.filterMode = hipFilterModeLinear; } texDescr.addressMode[0] = hipAddressModeBorder; texDescr.addressMode[1] = hipAddressModeBorder; texDescr.addressMode[2] = hipAddressModeBorder; texDescr.readMode = hipReadModeElementType; hipCreateTextureObject(&texImage[i], &texRes, &texDescr, NULL); cudaCheckErrors("Texture object creation fail"); } } /* This code generates the geometries needed to split the image properly in * cases where the entire image does not fit in the memory of the GPU **/ void splitImageInterp(unsigned int splits,Geometry geo,Geometry* geoArray, unsigned int nangles){ unsigned long splitsize=(geo.nVoxelZ+splits-1)/splits;// ceil if not divisible for(unsigned int sp=0;sp<splits;sp++){ geoArray[sp]=geo; // All of them are splitsize, but the last one, possible 
geoArray[sp].nVoxelZ=((sp+1)*splitsize<geo.nVoxelZ)? splitsize: geo.nVoxelZ-splitsize*sp; geoArray[sp].sVoxelZ= geoArray[sp].nVoxelZ* geoArray[sp].dVoxelZ; // We need to redefine the offsets, as now each subimage is not aligned in the origin. geoArray[sp].offOrigZ=(float *)malloc(nangles*sizeof(float)); for (unsigned int i=0;i<nangles;i++){ geoArray[sp].offOrigZ[i]=geo.offOrigZ[i]-geo.sVoxelZ/2+sp*geoArray[0].sVoxelZ+geoArray[sp].sVoxelZ/2; } } } /* This code precomputes The location of the source and the Delta U and delta V (in the warped space) * to compute the locations of the x-rays. While it seems verbose and overly-optimized, * it does saves about 30% of each of the kernel calls. Thats something! **/ void computeDeltas(Geometry geo,unsigned int i, Point3D* uvorigin, Point3D* deltaU, Point3D* deltaV, Point3D* source){ Point3D S; S.x=geo.DSO[i]; S.y=0; S.z=0; //End point Point3D P,Pu0,Pv0; P.x =-(geo.DSD[i]-geo.DSO[i]); P.y = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); P.z = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0); Pu0.x=-(geo.DSD[i]-geo.DSO[i]); Pu0.y= geo.dDetecU*(1-((float)geo.nDetecU/2)+0.5); Pu0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0); Pv0.x=-(geo.DSD[i]-geo.DSO[i]); Pv0.y= geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); Pv0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-1); // Geomtric trasnformations: // Now we have the Real world (OXYZ) coordinates of the bottom corner and its two neighbours. // The obkjective is to get a position of the detector in a coordinate system where: // 1-units are voxel size (in each direction can be different) // 2-The image has the its first voxel at (0,0,0) // 3-The image never rotates // To do that, we need to compute the "deltas" the detector, or "by how much // (in new xyz) does the voxels change when and index is added". To do that // several geometric steps needs to be changed //1.Roll,pitch,jaw // The detector can have a small rotation. // according to //"A geometric calibration method for cone beam CT systems" Yang K1, Kwan AL, Miller DF, Boone JM. Med Phys. 2006 Jun;33(6):1695-706. // Only the Z rotation will have a big influence in the image quality when they are small. // Still all rotations are supported // To roll pitch jaw, the detector has to be in centered in OXYZ. 
P.x=0;Pu0.x=0;Pv0.x=0; // Roll pitch yaw rollPitchYaw(geo,i,&P); rollPitchYaw(geo,i,&Pu0); rollPitchYaw(geo,i,&Pv0); //Now ltes translate the detector coordinates to DOD (original position on real coordinate system: P.x=P.x-(geo.DSD[i]-geo.DSO[i]); Pu0.x=Pu0.x-(geo.DSD[i]-geo.DSO[i]); Pv0.x=Pv0.x-(geo.DSD[i]-geo.DSO[i]); //2: Offset detector //S doesnt need to chagne //3: Rotate around RZ RY RZ Point3D Pfinal, Pfinalu0, Pfinalv0; Pfinal.x =P.x; Pfinal.y =P.y +geo.offDetecU[i]; Pfinal.z =P.z +geo.offDetecV[i]; Pfinalu0.x=Pu0.x; Pfinalu0.y=Pu0.y +geo.offDetecU[i]; Pfinalu0.z =Pu0.z +geo.offDetecV[i]; Pfinalv0.x=Pv0.x; Pfinalv0.y=Pv0.y +geo.offDetecU[i]; Pfinalv0.z =Pv0.z +geo.offDetecV[i]; eulerZYZ(geo,&Pfinal); eulerZYZ(geo,&Pfinalu0); eulerZYZ(geo,&Pfinalv0); eulerZYZ(geo,&S); //3: Offset image (instead of offseting image, -offset everything else) Pfinal.x =Pfinal.x-geo.offOrigX[i]; Pfinal.y =Pfinal.y-geo.offOrigY[i]; Pfinal.z =Pfinal.z-geo.offOrigZ[i]; Pfinalu0.x=Pfinalu0.x-geo.offOrigX[i]; Pfinalu0.y=Pfinalu0.y-geo.offOrigY[i]; Pfinalu0.z=Pfinalu0.z-geo.offOrigZ[i]; Pfinalv0.x=Pfinalv0.x-geo.offOrigX[i]; Pfinalv0.y=Pfinalv0.y-geo.offOrigY[i]; Pfinalv0.z=Pfinalv0.z-geo.offOrigZ[i]; S.x=S.x-geo.offOrigX[i]; S.y=S.y-geo.offOrigY[i]; S.z=S.z-geo.offOrigZ[i]; // As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation); Pfinal.x =Pfinal.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinal.y =Pfinal.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinal.z =Pfinal.z +geo.sVoxelZ/2-geo.dVoxelZ/2; Pfinalu0.x=Pfinalu0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalu0.y=Pfinalu0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalu0.z=Pfinalu0.z+geo.sVoxelZ/2-geo.dVoxelZ/2; Pfinalv0.x=Pfinalv0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalv0.y=Pfinalv0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalv0.z=Pfinalv0.z+geo.sVoxelZ/2-geo.dVoxelZ/2; S.x =S.x+geo.sVoxelX/2-geo.dVoxelX/2; S.y =S.y+geo.sVoxelY/2-geo.dVoxelY/2; S.z =S.z +geo.sVoxelZ/2-geo.dVoxelZ/2; //4. Scale everything so dVoxel==1 Pfinal.x =Pfinal.x/geo.dVoxelX; Pfinal.y =Pfinal.y/geo.dVoxelY; Pfinal.z =Pfinal.z/geo.dVoxelZ; Pfinalu0.x=Pfinalu0.x/geo.dVoxelX; Pfinalu0.y=Pfinalu0.y/geo.dVoxelY; Pfinalu0.z=Pfinalu0.z/geo.dVoxelZ; Pfinalv0.x=Pfinalv0.x/geo.dVoxelX; Pfinalv0.y=Pfinalv0.y/geo.dVoxelY; Pfinalv0.z=Pfinalv0.z/geo.dVoxelZ; S.x =S.x/geo.dVoxelX; S.y =S.y/geo.dVoxelY; S.z =S.z/geo.dVoxelZ; //mexPrintf("COR: %f \n",geo.COR[i]); //5. apply COR. Wherever everything was, now its offesetd by a bit. // Only wors for standard rotaiton, not aribtary axis rotation. float CORx, CORy; CORx=-geo.COR[i]*sin(geo.alpha)/geo.dVoxelX; CORy= geo.COR[i]*cos(geo.alpha)/geo.dVoxelY; Pfinal.x+=CORx; Pfinal.y+=CORy; Pfinalu0.x+=CORx; Pfinalu0.y+=CORy; Pfinalv0.x+=CORx; Pfinalv0.y+=CORy; S.x+=CORx; S.y+=CORy; // return *uvorigin=Pfinal; deltaU->x=Pfinalu0.x-Pfinal.x; deltaU->y=Pfinalu0.y-Pfinal.y; deltaU->z=Pfinalu0.z-Pfinal.z; deltaV->x=Pfinalv0.x-Pfinal.x; deltaV->y=Pfinalv0.y-Pfinal.y; deltaV->z=Pfinalv0.z-Pfinal.z; *source=S; } float maxdistanceCuboid(Geometry geo,unsigned int i){ /////////// // Compute initial "t" so we access safely as less as out of bounds as possible. 
////////// float maxCubX,maxCubY,maxCubZ; // Forgetting Z, compute mas distance: diagonal+offset maxCubX=(geo.nVoxelX/2+ abs(geo.offOrigX[i])/geo.dVoxelX); maxCubY=(geo.nVoxelY/2+ abs(geo.offOrigY[i])/geo.dVoxelY); maxCubZ=(geo.nVoxelZ/2+ abs(geo.offOrigZ[i])/geo.dVoxelZ); float a,b; a=geo.DSO[i]/geo.dVoxelX; b=geo.DSO[i]/geo.dVoxelY; // As the return of this value is in "voxel space", the source may have an elliptical curve. // The distance returned is the safe distance that can be skipped for a given angle alpha, before we need to start sampling. if (geo.theta==0.0f & geo.psi==0.0f) // Special case, it will make the code faster return max(a*b/sqrt(a*a*sin(geo.alpha)*sin(geo.alpha)+b*b*cos(geo.alpha)*cos(geo.alpha))- sqrt(maxCubX*maxCubX+maxCubY*maxCubY),0.0f); //TODO: think of more special cases? return max(geo.DSO[i]/max(max(geo.dVoxelX,geo.dVoxelY),geo.dVoxelZ)-sqrt(maxCubX*maxCubX+maxCubY*maxCubY+maxCubZ*maxCubZ),0.0f); } void rollPitchYaw(Geometry geo,unsigned int i, Point3D* point){ Point3D auxPoint; auxPoint.x=point->x; auxPoint.y=point->y; auxPoint.z=point->z; point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x +(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y +(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.z; point->y=sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x +(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y +(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.z; point->z=-sin(geo.dPitch[i])*auxPoint.x +cos(geo.dPitch[i])*sin(geo.dYaw[i])*auxPoint.y +cos(geo.dPitch[i])*cos(geo.dYaw[i])*auxPoint.z; } void eulerZYZ(Geometry geo, Point3D* point){ Point3D auxPoint; auxPoint.x=point->x; auxPoint.y=point->y; auxPoint.z=point->z; point->x=(+cos(geo.alpha)*cos(geo.theta)*cos(geo.psi)-sin(geo.alpha)*sin(geo.psi))*auxPoint.x+ (-cos(geo.alpha)*cos(geo.theta)*sin(geo.psi)-sin(geo.alpha)*cos(geo.psi))*auxPoint.y+ cos(geo.alpha)*sin(geo.theta)*auxPoint.z; point->y=(+sin(geo.alpha)*cos(geo.theta)*cos(geo.psi)+cos(geo.alpha)*sin(geo.psi))*auxPoint.x+ (-sin(geo.alpha)*cos(geo.theta)*sin(geo.psi)+cos(geo.alpha)*cos(geo.psi))*auxPoint.y+ sin(geo.alpha)*sin(geo.theta)*auxPoint.z; point->z=-sin(geo.theta)*cos(geo.psi)*auxPoint.x+ sin(geo.theta)*sin(geo.psi)*auxPoint.y+ cos(geo.theta)*auxPoint.z; } //______________________________________________________________________________ // // Function: freeGeoArray // // Description: Frees the memory from the geometry array for multiGPU. 
//______________________________________________________________________________ void freeGeoArray(unsigned int splits,Geometry* geoArray){ for(unsigned int sp=0;sp<splits;sp++){ free(geoArray[sp].offOrigZ); } free(geoArray); } //______________________________________________________________________________ // // Function: checkFreeMemory // // Description: check available memory on devices //______________________________________________________________________________ void checkFreeMemory(int deviceCount,size_t *mem_GPU_global){ size_t memfree; size_t memtotal; for (int dev = 0; dev < deviceCount; dev++){ hipSetDevice(dev); hipMemGetInfo(&memfree,&memtotal); if(dev==0) *mem_GPU_global=memfree; if(memfree<memtotal/2){ mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n"); } cudaCheckErrors("Check mem error"); *mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global; } *mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95); //*mem_GPU_global= insert your known number here, in bytes. }
fd35eff3aa87f6e730e06d077d228e0137daf8cc.cu
/*------------------------------------------------------------------------- * * CUDA functions for texture-memory interpolation based projection * * This file has the necesary fucntiosn to perform X-ray CBCT projection * operation given a geaometry, angles and image. It uses the 3D texture * memory linear interpolation to uniformily sample a path to integrate the * X-rays. * * CODE by Ander Biguri * Sepideh Hatamikia (arbitrary rotation) * --------------------------------------------------------------------------- * --------------------------------------------------------------------------- * Copyright (c) 2015, University of Bath and CERN- European Organization for * Nuclear Research * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * --------------------------------------------------------------------------- * * Contact: [email protected] * Codes : https://github.com/CERN/TIGRE * --------------------------------------------------------------------------- */ #include <algorithm> #include <cuda_runtime_api.h> #include <cuda.h> #include "ray_interpolated_projection.hpp" #include "mex.h" #include <math.h> #define cudaCheckErrors(msg) \ do { \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ mexPrintf("%s \n",msg);\ cudaDeviceReset();\ mexErrMsgIdAndTxt("TIGRE:Ax:interpolated",cudaGetErrorString(__err));\ } \ } while (0) // Declare the texture reference. 
#define MAXTREADS 1024 #define PROJ_PER_BLOCK 9 #define PIXEL_SIZE_BLOCK 9 /*GEOMETRY DEFINITION * * Detector plane, behind * |-----------------------------| * | | * | | * | | * | | * | +--------+ | * | / /| | * A Z | / / |*D | * | | +--------+ | | * | | | | | | * | | | *O | + | * --->y | | | / | * / | | |/ | * V X | +--------+ | * |-----------------------------| * * *S * * * * * **/ void CreateTextureInterp(int num_devices,const float* imagedata,Geometry geo,cudaArray** d_cuArrTex, cudaTextureObject_t *texImage,bool allocate); __constant__ Point3D projParamsArrayDev[4*PROJ_PER_BLOCK]; // Dev means it is on device __constant__ float projFloatsArrayDev[2*PROJ_PER_BLOCK]; // Dev means it is on device __global__ void vecAddInPlaceInterp(float *a, float *b, unsigned long n) { int idx = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (idx < n) a[idx] = a[idx] + b[idx]; } template<bool sphericalrotation> __global__ void kernelPixelDetector( Geometry geo, float* detector, const int currProjSetNumber, const int totalNoOfProjections, cudaTextureObject_t tex){ unsigned long y = blockIdx.y * blockDim.y + threadIdx.y; unsigned long x = blockIdx.x * blockDim.x + threadIdx.x; unsigned long projNumber=threadIdx.z; if ((x>= geo.nDetecU) | (y>= geo.nDetecV)| (projNumber>=PROJ_PER_BLOCK)) return; size_t idx = (size_t)(x * geo.nDetecV + y)+ (size_t)projNumber*geo.nDetecV *geo.nDetecU ; int indAlpha = currProjSetNumber*PROJ_PER_BLOCK+projNumber; // This is the ABSOLUTE projection number in the projection array if(indAlpha>=totalNoOfProjections) return; Point3D uvOrigin = projParamsArrayDev[4*projNumber]; // 6*projNumber because we have 6 Point3D values per projection Point3D deltaU = projParamsArrayDev[4*projNumber+1]; Point3D deltaV = projParamsArrayDev[4*projNumber+2]; Point3D source = projParamsArrayDev[4*projNumber+3]; float DSO = projFloatsArrayDev[2*projNumber+0]; float cropdist_init = projFloatsArrayDev[2*projNumber+1]; /////// Get coordinates XYZ of pixel UV int pixelV = geo.nDetecV-y-1; int pixelU = x; float vectX,vectY,vectZ; Point3D P; P.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x); P.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y); P.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z); // Length is the ray length in normalized space float length=__fsqrt_rd((source.x-P.x)*(source.x-P.x)+(source.y-P.y)*(source.y-P.y)+(source.z-P.z)*(source.z-P.z)); //now legth is an integer of Nsamples that are required on this line length=ceilf(__fdividef(length,geo.accuracy));//Divide the directional vector by an integer vectX=__fdividef(P.x -source.x,length); vectY=__fdividef(P.y -source.y,length); vectZ=__fdividef(P.z -source.z,length); // //Integrate over the line float tx,ty,tz; float sum=0; float i; // Because I have no idea how to efficiently cutoff the legth path in 3D, a very upper limit is computed (see maxdistanceCuboid) // for the 3D case. However it would be bad to lose performance in the 3D case // TODO: can ge really improve this? 
if (sphericalrotation){ if ((2*DSO/fminf(fminf(geo.dVoxelX,geo.dVoxelY),geo.dVoxelZ)+cropdist_init)/geo.accuracy < length) length=ceilf((2*DSO/fminf(fminf(geo.dVoxelX,geo.dVoxelY),geo.dVoxelZ)+cropdist_init)/geo.accuracy); } else{ if ((2*DSO/fminf(geo.dVoxelX,geo.dVoxelY)+cropdist_init)/geo.accuracy < length) length=ceilf((2*DSO/fminf(geo.dVoxelX,geo.dVoxelY)+cropdist_init)/geo.accuracy); } //Length is not actually a length, but the amount of memreads with given accuracy ("samples per voxel") for (i=floorf(cropdist_init/geo.accuracy); i<=length; i=i+1){ tx=vectX*i+source.x; ty=vectY*i+source.y; tz=vectZ*i+source.z; sum += tex3D<float>(tex, tx+0.5f, ty+0.5f, tz+0.5f); // this line is 94% of time. } float deltalength=sqrtf((vectX*geo.dVoxelX)*(vectX*geo.dVoxelX)+ (vectY*geo.dVoxelY)*(vectY*geo.dVoxelY)+ (vectZ*geo.dVoxelZ)*(vectZ*geo.dVoxelZ) ); detector[idx]=sum*deltalength; } // legnth(angles)=3 x nagnles, as we have roll, pitch, yaw. int interpolation_projection(float * img, Geometry geo, float** result,float const * const angles,int nangles){ // Prepare for MultiGPU int deviceCount = 0; cudaGetDeviceCount(&deviceCount); cudaCheckErrors("Device query fail"); if (deviceCount == 0) { mexErrMsgIdAndTxt("Ax:Interpolated_projection:GPUselect","There are no available device(s) that support CUDA\n"); } // // CODE assumes // 1.-All available devices are usable by this code // 2.-All available devices are equal, they are the same machine (warning trhown) int dev; char * devicenames; cudaDeviceProp deviceProp; for (dev = 0; dev < deviceCount; dev++) { cudaSetDevice(dev); cudaGetDeviceProperties(&deviceProp, dev); if (dev>0){ if (strcmp(devicenames,deviceProp.name)!=0){ mexWarnMsgIdAndTxt("Ax:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed. \n Siddon_projection.cu line 275."); break; } } devicenames=deviceProp.name; } // Check free memory size_t mem_GPU_global; checkFreeMemory(deviceCount,&mem_GPU_global); size_t mem_image=(unsigned long long)geo.nVoxelX*(unsigned long long)geo.nVoxelY*(unsigned long long)geo.nVoxelZ*sizeof(float); size_t mem_proj =(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV * sizeof(float); // Does everything fit in the GPUs? bool fits_in_memory=false; unsigned int splits=1; Geometry * geoArray; if (mem_image+2*PROJ_PER_BLOCK*mem_proj<mem_GPU_global){// yes it does fits_in_memory=true; geoArray=(Geometry*)malloc(sizeof(Geometry)); geoArray[0]=geo; } else{// Nope nope. fits_in_memory=false; // Oh dear. // approx free memory we have. We already have left some extra 10% free for internal stuff // we need a second projection memory to combine multi-GPU stuff. 
size_t mem_free=mem_GPU_global-4*PROJ_PER_BLOCK*mem_proj; splits=mem_image/mem_free+1;// Ceil of the truncation geoArray=(Geometry*)malloc(splits*sizeof(Geometry)); splitImageInterp(splits,geo,geoArray,nangles); } // Allocate auiliary memory for projections on the GPU to accumulate partial resutsl float ** dProjection_accum; size_t num_bytes_proj = PROJ_PER_BLOCK*geo.nDetecU*geo.nDetecV * sizeof(float); if (!fits_in_memory){ dProjection_accum=(float**)malloc(2*deviceCount*sizeof(float*)); for (dev = 0; dev < deviceCount; dev++) { cudaSetDevice(dev); for (int i = 0; i < 2; ++i){ cudaMalloc((void**)&dProjection_accum[dev*2+i], num_bytes_proj); cudaMemset(dProjection_accum[dev*2+i],0,num_bytes_proj); cudaCheckErrors("cudaMallocauxiliarty projections fail"); } } } // This is happening regarthless if the image fits on memory float** dProjection=(float**)malloc(2*deviceCount*sizeof(float*)); for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(dev); for (int i = 0; i < 2; ++i){ cudaMalloc((void**)&dProjection[dev*2+i], num_bytes_proj); cudaMemset(dProjection[dev*2+i] ,0,num_bytes_proj); cudaCheckErrors("cudaMalloc projections fail"); } } //Pagelock memory for syncronous copy. // Lets try to make the host memory pinned: // We laredy queried the GPU and assuemd they are the same, thus shoudl have the same attributes. int isHostRegisterSupported; cudaDeviceGetAttribute(&isHostRegisterSupported,cudaDevAttrHostRegisterSupported,0); // empirical testing shows that when the image split is smaller than 1 (also implies the image is not very big), the time to // pin the memory is greater than the lost time in Syncronously launching the memcpys. This is only worth it when the image is too big. if (isHostRegisterSupported & splits>1){ cudaHostRegister(img, (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geo.nVoxelZ*(size_t)sizeof(float),cudaHostRegisterPortable); } Point3D source, deltaU, deltaV, uvOrigin; Point3D* projParamsArrayHost; cudaMallocHost((void**)&projParamsArrayHost,4*PROJ_PER_BLOCK*sizeof(Point3D)); float* projFloatsArrayHost; cudaMallocHost((void**)&projFloatsArrayHost,2*PROJ_PER_BLOCK*sizeof(float)); // Create Streams for overlapping memcopy and compute int nStream_device=2; int nStreams=deviceCount*nStream_device; cudaStream_t* stream=(cudaStream_t*)malloc(nStreams*sizeof(cudaStream_t)); for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(dev); for (int i = 0; i < nStream_device; ++i){ cudaStreamCreate(&stream[i+dev*nStream_device]); } } cudaCheckErrors("Stream creation fail"); int nangles_device=(nangles+deviceCount-1)/deviceCount; int nangles_last_device=(nangles-(deviceCount-1)*nangles_device); unsigned int noOfKernelCalls = (nangles_device+PROJ_PER_BLOCK-1)/PROJ_PER_BLOCK; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_BLOCK unsigned int last_device_blocks= (nangles_last_device+PROJ_PER_BLOCK-1)/PROJ_PER_BLOCK; // we will use this in the memory management. 
int projection_this_block; cudaTextureObject_t *texImg = new cudaTextureObject_t[deviceCount]; cudaArray **d_cuArrTex = new cudaArray*[deviceCount]; for (unsigned int sp=0;sp<splits;sp++){ // Create texture objects for all GPUs size_t linear_idx_start; //First one shoudl always be the same size as all the rest but the last linear_idx_start= (size_t)sp*(size_t)geoArray[0].nVoxelX*(size_t)geoArray[0].nVoxelY*(size_t)geoArray[0].nVoxelZ; CreateTextureInterp(deviceCount,&img[linear_idx_start],geoArray[sp],d_cuArrTex,texImg,!sp); cudaCheckErrors("Texture object creation fail"); int divU,divV; divU=PIXEL_SIZE_BLOCK; divV=PIXEL_SIZE_BLOCK; dim3 grid((geoArray[sp].nDetecU+divU-1)/divU,(geoArray[0].nDetecV+divV-1)/divV,1); dim3 block(divU,divV,PROJ_PER_BLOCK); unsigned int proj_global; unsigned int i; float maxdist; // Now that we have prepared the image (piece of image) and parameters for kernels // we project for all angles. for ( i=0; i<noOfKernelCalls; i++){ for (dev=0;dev<deviceCount;dev++){ float is_spherical=0; cudaSetDevice(dev); for(unsigned int j=0; j<PROJ_PER_BLOCK; j++){ proj_global=(i*PROJ_PER_BLOCK+j)+dev*nangles_device; if (proj_global>=nangles) break; if ((i*PROJ_PER_BLOCK+j)>=nangles_device) break; geo.alpha=angles[proj_global*3]; geo.theta=angles[proj_global*3+1]; geo.psi =angles[proj_global*3+2]; is_spherical+=abs(geo.theta)+abs(geo.psi); //precomute distances for faster execution maxdist=maxdistanceCuboid(geo,proj_global); //Precompute per angle constant stuff for speed computeDeltas(geo, proj_global, &uvOrigin, &deltaU, &deltaV, &source); //Ray tracing! projParamsArrayHost[4*j]=uvOrigin; // 6*j because we have 6 Point3D values per projection projParamsArrayHost[4*j+1]=deltaU; projParamsArrayHost[4*j+2]=deltaV; projParamsArrayHost[4*j+3]=source; projFloatsArrayHost[2*j]=geo.DSO[proj_global]; projFloatsArrayHost[2*j+1]=floor(maxdist); } cudaMemcpyToSymbolAsync(projParamsArrayDev, projParamsArrayHost, sizeof(Point3D)*4*PROJ_PER_BLOCK,0,cudaMemcpyHostToDevice,stream[dev*nStream_device]); cudaMemcpyToSymbolAsync(projFloatsArrayDev, projFloatsArrayHost, sizeof(float)*2*PROJ_PER_BLOCK,0,cudaMemcpyHostToDevice,stream[dev*nStream_device]); cudaStreamSynchronize(stream[dev*nStream_device]); //TODO: we could do this around X and Y axis too, but we would need to compute the new axis of rotation (not possible to know from jsut the angles) if (!is_spherical){ kernelPixelDetector<false><<<grid,block,0,stream[dev*nStream_device]>>>(geoArray[sp],dProjection[(i%2)+dev*2],i,nangles_device,texImg[dev]); } else{ kernelPixelDetector<true> <<<grid,block,0,stream[dev*nStream_device]>>>(geoArray[sp],dProjection[(i%2)+dev*2],i,nangles_device,texImg[dev]); } } // Now that the computation is happening, we need to either prepare the memory for // combining of the projections (splits>1) or start removing previous results. // If our image does not fit in memory then we need to make sure we accumulate previous results too. if( !fits_in_memory&&sp>0){ // First, grab previous results and put them in the auxiliary variable for (dev = 0; dev < deviceCount; dev++){ projection_this_block=PROJ_PER_BLOCK; cudaSetDevice(dev); // this werid code makes sure we dont access bad memory. Its necesary for deviceCount>2 if (dev+1==deviceCount){ // if its the last device if(i+1==last_device_blocks) // If we are in the last block of the last device, how many projections? 
projection_this_block=nangles_last_device-(last_device_blocks-1)*PROJ_PER_BLOCK; if(i+1>last_device_blocks) // As the last device can have less blocs, i may be over it. break; }else{ if(i+1==noOfKernelCalls) // if its not the last device, it can still be the lat block projection_this_block=nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK; } cudaMemcpyAsync(dProjection_accum[(i%2)+dev*2], result[i*PROJ_PER_BLOCK+dev*nangles_device], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), cudaMemcpyHostToDevice,stream[dev*2+1]); } // Second, take the results from current compute call and add it to the code in execution. for (dev = 0; dev < deviceCount; dev++){ projection_this_block=PROJ_PER_BLOCK; cudaSetDevice(dev); // this werid code makes sure we dont access bad memory. Its necesary for deviceCount>2 if (dev+1==deviceCount){ // if its the last device if(i+1==last_device_blocks) // If we are in the last block of the last device, how many projections? projection_this_block=nangles_last_device-(last_device_blocks-1)*PROJ_PER_BLOCK; if(i+1>last_device_blocks) // As the last device can have less blocs, i may be over it. break; }else{ if(i+1==noOfKernelCalls) // if its not the last device, it can still be the lat block projection_this_block=nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK; } cudaStreamSynchronize(stream[dev*2+1]); // wait until copy is finished vecAddInPlaceInterp<<<(geo.nDetecU*geo.nDetecV*projection_this_block+MAXTREADS-1)/MAXTREADS,MAXTREADS,0,stream[dev*2]>>>(dProjection[(i%2)+dev*2],dProjection_accum[(i%2)+dev*2],(unsigned long)geo.nDetecU*geo.nDetecV*projection_this_block); } } // Now, lets get out the projections from the previous execution of the kernels. if (i>0){ for (dev = 0; dev < deviceCount; dev++){ projection_this_block=PROJ_PER_BLOCK; cudaSetDevice(dev); if (dev+1==deviceCount && i+1==noOfKernelCalls && last_device_blocks!=noOfKernelCalls){ projection_this_block=nangles_last_device-(last_device_blocks-1)*PROJ_PER_BLOCK; } cudaMemcpyAsync(result[(i-1)*PROJ_PER_BLOCK+dev*nangles_device], dProjection[(int)(!(i%2))+dev*2], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*2+1]); } } // Make sure Computation on kernels has finished before we launch the next batch. for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(dev); cudaStreamSynchronize(stream[dev*2]); } } // We still have the last one to get out, do that one int projection_this_block; for (dev = 0; dev < deviceCount; dev++){ projection_this_block=PROJ_PER_BLOCK; cudaSetDevice(dev); // this werid code makes sure we dont access bad memory. Its necesary for deviceCount>2 if (dev+1==deviceCount){ // if its the last device projection_this_block=nangles_last_device-(last_device_blocks-1)*PROJ_PER_BLOCK; if(i>last_device_blocks) // As the last device can have less blocs, i may be over it. 
break; }else{ projection_this_block=nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK; } cudaDeviceSynchronize(); cudaCheckErrors("Fail memcopy fail"); cudaMemcpyAsync(result[(i-1)*PROJ_PER_BLOCK+dev*nangles_device], dProjection[(int)(!(i%2))+dev*2], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*2+1]); } // Free memory for the next piece of image cudaDeviceSynchronize(); } cudaCheckErrors("Main loop fail"); /////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////// for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(dev); cudaDestroyTextureObject(texImg[dev]); cudaFreeArray(d_cuArrTex[dev]); } // Freeing Stage for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(dev); cudaFree(dProjection[dev*2]); cudaFree(dProjection[dev*2+1]); } free(dProjection); if(!fits_in_memory){ for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(dev); cudaFree(dProjection_accum[dev*2]); cudaFree(dProjection_accum[dev*2+1]); } free(dProjection_accum); } freeGeoArray(splits,geoArray); cudaFreeHost(projParamsArrayHost); for (int i = 0; i < nStreams; ++i) cudaStreamDestroy(stream[i]) ; if (isHostRegisterSupported & splits>1){ cudaHostUnregister(img); } cudaCheckErrors("cudaFree fail"); // cudaDeviceReset(); return 0; } void CreateTextureInterp(int num_devices,const float* imagedata,Geometry geo,cudaArray** d_cuArrTex, cudaTextureObject_t *texImage,bool allocate) { //size_t size_image=geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ; const cudaExtent extent = make_cudaExtent(geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ); if(allocate){ for (unsigned int i = 0; i < num_devices; i++){ cudaSetDevice(i); //cudaArray Descriptor cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); //cuda Array cudaMalloc3DArray(&d_cuArrTex[i], &channelDesc, extent); cudaCheckErrors("Texture memory allocation fail"); } } for (unsigned int i = 0; i < num_devices; i++){ cudaMemcpy3DParms copyParams = {0}; cudaSetDevice(i); //Array creation copyParams.srcPtr = make_cudaPitchedPtr((void *)imagedata, extent.width*sizeof(float), extent.width, extent.height); copyParams.dstArray = d_cuArrTex[i]; copyParams.extent = extent; copyParams.kind = cudaMemcpyHostToDevice; cudaMemcpy3DAsync(&copyParams); //cudaCheckErrors("Texture memory data copy fail"); //Array creation End } for (unsigned int i = 0; i < num_devices; i++){ cudaSetDevice(i); cudaResourceDesc texRes; memset(&texRes, 0, sizeof(cudaResourceDesc)); texRes.resType = cudaResourceTypeArray; texRes.res.array.array = d_cuArrTex[i]; cudaTextureDesc texDescr; memset(&texDescr, 0, sizeof(cudaTextureDesc)); texDescr.normalizedCoords = false; if (geo.accuracy>1){ texDescr.filterMode = cudaFilterModePoint; geo.accuracy=1; } else{ texDescr.filterMode = cudaFilterModeLinear; } texDescr.addressMode[0] = cudaAddressModeBorder; texDescr.addressMode[1] = cudaAddressModeBorder; texDescr.addressMode[2] = cudaAddressModeBorder; texDescr.readMode = cudaReadModeElementType; cudaCreateTextureObject(&texImage[i], &texRes, &texDescr, NULL); cudaCheckErrors("Texture object creation fail"); } } /* This code generates the geometries needed to split the image properly in * cases where the entire image does not fit in the memory of the GPU **/ void splitImageInterp(unsigned int splits,Geometry geo,Geometry* geoArray, unsigned int nangles){ unsigned long splitsize=(geo.nVoxelZ+splits-1)/splits;// ceil if not divisible for(unsigned int sp=0;sp<splits;sp++){ geoArray[sp]=geo; // All of them 
are splitsize, but the last one, possible geoArray[sp].nVoxelZ=((sp+1)*splitsize<geo.nVoxelZ)? splitsize: geo.nVoxelZ-splitsize*sp; geoArray[sp].sVoxelZ= geoArray[sp].nVoxelZ* geoArray[sp].dVoxelZ; // We need to redefine the offsets, as now each subimage is not aligned in the origin. geoArray[sp].offOrigZ=(float *)malloc(nangles*sizeof(float)); for (unsigned int i=0;i<nangles;i++){ geoArray[sp].offOrigZ[i]=geo.offOrigZ[i]-geo.sVoxelZ/2+sp*geoArray[0].sVoxelZ+geoArray[sp].sVoxelZ/2; } } } /* This code precomputes The location of the source and the Delta U and delta V (in the warped space) * to compute the locations of the x-rays. While it seems verbose and overly-optimized, * it does saves about 30% of each of the kernel calls. Thats something! **/ void computeDeltas(Geometry geo,unsigned int i, Point3D* uvorigin, Point3D* deltaU, Point3D* deltaV, Point3D* source){ Point3D S; S.x=geo.DSO[i]; S.y=0; S.z=0; //End point Point3D P,Pu0,Pv0; P.x =-(geo.DSD[i]-geo.DSO[i]); P.y = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); P.z = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0); Pu0.x=-(geo.DSD[i]-geo.DSO[i]); Pu0.y= geo.dDetecU*(1-((float)geo.nDetecU/2)+0.5); Pu0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0); Pv0.x=-(geo.DSD[i]-geo.DSO[i]); Pv0.y= geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); Pv0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-1); // Geomtric trasnformations: // Now we have the Real world (OXYZ) coordinates of the bottom corner and its two neighbours. // The obkjective is to get a position of the detector in a coordinate system where: // 1-units are voxel size (in each direction can be different) // 2-The image has the its first voxel at (0,0,0) // 3-The image never rotates // To do that, we need to compute the "deltas" the detector, or "by how much // (in new xyz) does the voxels change when and index is added". To do that // several geometric steps needs to be changed //1.Roll,pitch,jaw // The detector can have a small rotation. // according to //"A geometric calibration method for cone beam CT systems" Yang K1, Kwan AL, Miller DF, Boone JM. Med Phys. 2006 Jun;33(6):1695-706. // Only the Z rotation will have a big influence in the image quality when they are small. // Still all rotations are supported // To roll pitch jaw, the detector has to be in centered in OXYZ. 
    P.x=0;Pu0.x=0;Pv0.x=0;
    // Roll pitch yaw
    rollPitchYaw(geo,i,&P);
    rollPitchYaw(geo,i,&Pu0);
    rollPitchYaw(geo,i,&Pv0);
    // Now let's translate the detector coordinates to DOD (original position on the real coordinate system):
    P.x  =P.x-(geo.DSD[i]-geo.DSO[i]);
    Pu0.x=Pu0.x-(geo.DSD[i]-geo.DSO[i]);
    Pv0.x=Pv0.x-(geo.DSD[i]-geo.DSO[i]);
    //2: Offset detector
    //S doesn't need to change
    //3: Rotate around RZ RY RZ
    Point3D Pfinal, Pfinalu0, Pfinalv0;
    Pfinal.x  =P.x;   Pfinal.y  =P.y  +geo.offDetecU[i]; Pfinal.z  =P.z  +geo.offDetecV[i];
    Pfinalu0.x=Pu0.x; Pfinalu0.y=Pu0.y+geo.offDetecU[i]; Pfinalu0.z=Pu0.z+geo.offDetecV[i];
    Pfinalv0.x=Pv0.x; Pfinalv0.y=Pv0.y+geo.offDetecU[i]; Pfinalv0.z=Pv0.z+geo.offDetecV[i];
    eulerZYZ(geo,&Pfinal);
    eulerZYZ(geo,&Pfinalu0);
    eulerZYZ(geo,&Pfinalv0);
    eulerZYZ(geo,&S);
    //3: Offset image (instead of offsetting the image, -offset everything else)
    Pfinal.x  =Pfinal.x-geo.offOrigX[i];   Pfinal.y  =Pfinal.y-geo.offOrigY[i];   Pfinal.z  =Pfinal.z-geo.offOrigZ[i];
    Pfinalu0.x=Pfinalu0.x-geo.offOrigX[i]; Pfinalu0.y=Pfinalu0.y-geo.offOrigY[i]; Pfinalu0.z=Pfinalu0.z-geo.offOrigZ[i];
    Pfinalv0.x=Pfinalv0.x-geo.offOrigX[i]; Pfinalv0.y=Pfinalv0.y-geo.offOrigY[i]; Pfinalv0.z=Pfinalv0.z-geo.offOrigZ[i];
    S.x=S.x-geo.offOrigX[i]; S.y=S.y-geo.offOrigY[i]; S.z=S.z-geo.offOrigZ[i];
    // As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation).
    Pfinal.x  =Pfinal.x  +geo.sVoxelX/2-geo.dVoxelX/2; Pfinal.y  =Pfinal.y  +geo.sVoxelY/2-geo.dVoxelY/2; Pfinal.z  =Pfinal.z  +geo.sVoxelZ/2-geo.dVoxelZ/2;
    Pfinalu0.x=Pfinalu0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalu0.y=Pfinalu0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalu0.z=Pfinalu0.z+geo.sVoxelZ/2-geo.dVoxelZ/2;
    Pfinalv0.x=Pfinalv0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalv0.y=Pfinalv0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalv0.z=Pfinalv0.z+geo.sVoxelZ/2-geo.dVoxelZ/2;
    S.x       =S.x       +geo.sVoxelX/2-geo.dVoxelX/2; S.y       =S.y       +geo.sVoxelY/2-geo.dVoxelY/2; S.z       =S.z       +geo.sVoxelZ/2-geo.dVoxelZ/2;
    //4. Scale everything so dVoxel==1
    Pfinal.x  =Pfinal.x/geo.dVoxelX;   Pfinal.y  =Pfinal.y/geo.dVoxelY;   Pfinal.z  =Pfinal.z/geo.dVoxelZ;
    Pfinalu0.x=Pfinalu0.x/geo.dVoxelX; Pfinalu0.y=Pfinalu0.y/geo.dVoxelY; Pfinalu0.z=Pfinalu0.z/geo.dVoxelZ;
    Pfinalv0.x=Pfinalv0.x/geo.dVoxelX; Pfinalv0.y=Pfinalv0.y/geo.dVoxelY; Pfinalv0.z=Pfinalv0.z/geo.dVoxelZ;
    S.x       =S.x/geo.dVoxelX;        S.y       =S.y/geo.dVoxelY;        S.z       =S.z/geo.dVoxelZ;
    //mexPrintf("COR: %f \n",geo.COR[i]);
    //5. Apply COR. Wherever everything was, now it's offset by a bit.
    //   Only works for the standard rotation, not arbitrary-axis rotation.
    float CORx, CORy;
    CORx=-geo.COR[i]*sin(geo.alpha)/geo.dVoxelX;
    CORy= geo.COR[i]*cos(geo.alpha)/geo.dVoxelY;
    Pfinal.x+=CORx;   Pfinal.y+=CORy;
    Pfinalu0.x+=CORx; Pfinalu0.y+=CORy;
    Pfinalv0.x+=CORx; Pfinalv0.y+=CORy;
    S.x+=CORx;        S.y+=CORy;
    // return
    *uvorigin=Pfinal;
    deltaU->x=Pfinalu0.x-Pfinal.x; deltaU->y=Pfinalu0.y-Pfinal.y; deltaU->z=Pfinalu0.z-Pfinal.z;
    deltaV->x=Pfinalv0.x-Pfinal.x; deltaV->y=Pfinalv0.y-Pfinal.y; deltaV->z=Pfinalv0.z-Pfinal.z;
    *source=S;
}
float maxdistanceCuboid(Geometry geo,unsigned int i){
    ///////////
    // Compute the initial "t" so that we read out of bounds as little as possible.
    //////////
    float maxCubX,maxCubY,maxCubZ;
    // Ignoring Z, compute the max distance: diagonal+offset
    maxCubX=(geo.nVoxelX/2+ abs(geo.offOrigX[i])/geo.dVoxelX);
    maxCubY=(geo.nVoxelY/2+ abs(geo.offOrigY[i])/geo.dVoxelY);
    maxCubZ=(geo.nVoxelZ/2+ abs(geo.offOrigZ[i])/geo.dVoxelZ);
    float a,b;
    a=geo.DSO[i]/geo.dVoxelX;
    b=geo.DSO[i]/geo.dVoxelY;
    // As the returned value is in "voxel space", the source may trace an elliptical curve.
    // The distance returned is the safe distance that can be skipped for a given angle alpha, before we need to start sampling.
    if (geo.theta==0.0f && geo.psi==0.0f) // Special case, it will make the code faster
        return max(a*b/sqrt(a*a*sin(geo.alpha)*sin(geo.alpha)+b*b*cos(geo.alpha)*cos(geo.alpha))- sqrt(maxCubX*maxCubX+maxCubY*maxCubY),0.0f);
    //TODO: think of more special cases?
    return max(geo.DSO[i]/max(max(geo.dVoxelX,geo.dVoxelY),geo.dVoxelZ)-sqrt(maxCubX*maxCubX+maxCubY*maxCubY+maxCubZ*maxCubZ),0.0f);
}
void rollPitchYaw(Geometry geo,unsigned int i, Point3D* point){
    Point3D auxPoint;
    auxPoint.x=point->x;
    auxPoint.y=point->y;
    auxPoint.z=point->z;
    point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x
            +(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y
            +(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.z;
    point->y=sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x
            +(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y
            +(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.z;
    point->z=-sin(geo.dPitch[i])*auxPoint.x
            +cos(geo.dPitch[i])*sin(geo.dYaw[i])*auxPoint.y
            +cos(geo.dPitch[i])*cos(geo.dYaw[i])*auxPoint.z;
}
void eulerZYZ(Geometry geo, Point3D* point){
    Point3D auxPoint;
    auxPoint.x=point->x;
    auxPoint.y=point->y;
    auxPoint.z=point->z;
    point->x=(+cos(geo.alpha)*cos(geo.theta)*cos(geo.psi)-sin(geo.alpha)*sin(geo.psi))*auxPoint.x+
             (-cos(geo.alpha)*cos(geo.theta)*sin(geo.psi)-sin(geo.alpha)*cos(geo.psi))*auxPoint.y+
             cos(geo.alpha)*sin(geo.theta)*auxPoint.z;
    point->y=(+sin(geo.alpha)*cos(geo.theta)*cos(geo.psi)+cos(geo.alpha)*sin(geo.psi))*auxPoint.x+
             (-sin(geo.alpha)*cos(geo.theta)*sin(geo.psi)+cos(geo.alpha)*cos(geo.psi))*auxPoint.y+
             sin(geo.alpha)*sin(geo.theta)*auxPoint.z;
    point->z=-sin(geo.theta)*cos(geo.psi)*auxPoint.x+
             sin(geo.theta)*sin(geo.psi)*auxPoint.y+
             cos(geo.theta)*auxPoint.z;
}
//______________________________________________________________________________
//
//  Function:       freeGeoArray
//
//  Description:    Frees the memory from the geometry array for multiGPU.
//______________________________________________________________________________
void freeGeoArray(unsigned int splits,Geometry* geoArray){
    for(unsigned int sp=0;sp<splits;sp++){
        free(geoArray[sp].offOrigZ);
    }
    free(geoArray);
}
//______________________________________________________________________________
//
//  Function:       checkFreeMemory
//
//  Description:    Check available memory on devices.
//______________________________________________________________________________
void checkFreeMemory(int deviceCount,size_t *mem_GPU_global){
    size_t memfree;
    size_t memtotal;
    for (int dev = 0; dev < deviceCount; dev++){
        cudaSetDevice(dev);
        cudaMemGetInfo(&memfree,&memtotal);
        if(dev==0) *mem_GPU_global=memfree;
        if(memfree<memtotal/2){
            mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n");
        }
        cudaCheckErrors("Check mem error");
        *mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global;
    }
    *mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95);
    //*mem_GPU_global= insert your known number here, in bytes.
}
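The theta==0 && psi==0 special case of maxdistanceCuboid above is an ellipse-radius evaluation: expressed in (possibly anisotropic) voxel units, the circular source orbit has semi-axes a = DSO/dVoxelX and b = DSO/dVoxelY, and the skippable distance is that ellipse's radius at angle alpha minus the in-plane half-diagonal of the volume. A minimal standalone sketch of the same formula follows; the helper name and parameters are illustrative and not part of the original file.

#include <math.h>

// Illustrative helper: the value returned by maxdistanceCuboid in its special case.
static float safeSkipDistance(float DSO, float dVoxelX, float dVoxelY,
                              float maxCubX, float maxCubY, float alpha)
{
    float a = DSO / dVoxelX;   // orbit semi-axis in voxel units along x
    float b = DSO / dVoxelY;   // orbit semi-axis in voxel units along y
    // Radius of the (a,b) ellipse at gantry angle alpha
    float r = a * b / sqrtf(a*a*sinf(alpha)*sinf(alpha) + b*b*cosf(alpha)*cosf(alpha));
    // Subtract the in-plane half-diagonal of the image cuboid; clamp at zero
    float d = r - sqrtf(maxCubX*maxCubX + maxCubY*maxCubY);
    return d > 0.0f ? d : 0.0f;
}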
6e47b450d25e562c830468ad41d6ea0fcfad4538.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef WITH_CUDA #include "core/context_cuda.h" #include "utils/op_kernel.h" namespace dragon { namespace kernel { /*! ImageData <Tx = ?, Ty = ?, Device = CUDA> */ template <typename Tx, typename Ty> __global__ void _ImageData_NCHW( const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int w = idx % W; const int h = (idx / W) % H; const int c = (idx / W / H) % C; const int n = idx / W / H / C; Ty raw_value = x[((n * H + h) * W + w) * C + c]; #if __CUDA_ARCH__ >= 350 if (mean_values) raw_value -= __ldg(mean_values + c); if (std_values) raw_value /= __ldg(std_values + c); #else if (mean_values) raw_value -= mean_values[c]; if (std_values) raw_value /= std_values[c]; #endif y[idx] = raw_value; } } template <typename Tx, typename Ty> __global__ void _ImageData_NHWC( const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int c = idx % C; Ty raw_value = x[idx]; #if __CUDA_ARCH__ >= 350 if (mean_values) raw_value -= __ldg(mean_values + c); if (std_values) raw_value /= __ldg(std_values + c); #else if (mean_values) raw_value -= mean_values[c]; if (std_values) raw_value /= std_values[c]; #endif y[idx] = raw_value; } } /*! ImageData <Tx = float32, Ty = float32, Device = CUDA> */ template <> void ImageData<float, float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const float* x, float* y, CUDAContext* ctx) { if (data_format == "NCHW") { _ImageData_NCHW<float, float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, N, C, H, W, mean_values, std_values, x, y); } else if (data_format == "NHWC") { _ImageData_NHWC<float, float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, N, C, H, W, mean_values, std_values, x, y); } else LOG(FATAL) << "Unknown data format: " << data_format; } /*! ImageData <Tx = uint8, Ty = float32, Device = CUDA> */ template <> void ImageData<uint8_t, float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const uint8_t* x, float* y, CUDAContext* ctx) { if (data_format == "NCHW") { _ImageData_NCHW<uint8_t, float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, N, C, H, W, mean_values, std_values, x, y); } else if (data_format == "NHWC") { _ImageData_NHWC<uint8_t, float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, N, C, H, W, mean_values, std_values, x, y); } else LOG(FATAL) << "Unknown data format: " << data_format; } /*! 
ImageData <Tx = ?, Ty = float16, Device = CUDA> */ template <typename Tx, typename Ty> __global__ void _ImageDataHalf_NCHW( const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int w = idx % W; const int h = (idx / W) % H; const int c = (idx / W / H) % C; const int n = idx / W / H / C; float raw_value = x[((n * H + h) * W + w) * C + c]; #if __CUDA_ARCH__ >= 350 if (mean_values) raw_value -= __ldg(mean_values + c); if (std_values) raw_value /= __ldg(std_values + c); #else if (mean_values) raw_value -= mean_values[c]; if (std_values) raw_value /= std_values[c]; #endif y[idx] = __float2half(raw_value); } } template <typename Tx, typename Ty> __global__ void _ImageDataHalf_NHWC( const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int c = idx % C; float raw_value = x[idx]; #if __CUDA_ARCH__ >= 350 if (mean_values) raw_value -= __ldg(mean_values + c); if (std_values) raw_value /= __ldg(std_values + c); #else if (mean_values) raw_value -= mean_values[c]; if (std_values) raw_value /= std_values[c]; #endif y[idx] = __float2half(raw_value); } } /*! ImageData <Tx = float32, Ty = float16, Device = CUDA> */ template <> void ImageData<float, float16, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const float* x, float16* y, CUDAContext* ctx) { if (data_format == "NCHW") { _ImageDataHalf_NCHW<float, half> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, N, C, H, W, mean_values, std_values, x, reinterpret_cast<half*>(y)); } else if (data_format == "NHWC") { _ImageDataHalf_NHWC<float, half> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, N, C, H, W, mean_values, std_values, x, reinterpret_cast<half*>(y)); } else LOG(FATAL) << "Unknown data format: " << data_format; } template <> void ImageData<uint8_t, float16, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const uint8_t* x, float16* y, CUDAContext* ctx) { if (data_format == "NCHW") { _ImageDataHalf_NCHW<uint8_t, half> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, N, C, H, W, mean_values, std_values, x, reinterpret_cast<half*>(y)); } else if (data_format == "NHWC") { _ImageDataHalf_NHWC<uint8_t, half> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, N, C, H, W, mean_values, std_values, x, reinterpret_cast<half*>(y)); } else LOG(FATAL) << "Unknown data format: " << data_format; } } // namespace kernel } // namepsace dragon #endif // WITH_CUDA
6e47b450d25e562c830468ad41d6ea0fcfad4538.cu
#ifdef WITH_CUDA #include "core/context_cuda.h" #include "utils/op_kernel.h" namespace dragon { namespace kernel { /*! ImageData <Tx = ?, Ty = ?, Device = CUDA> */ template <typename Tx, typename Ty> __global__ void _ImageData_NCHW( const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int w = idx % W; const int h = (idx / W) % H; const int c = (idx / W / H) % C; const int n = idx / W / H / C; Ty raw_value = x[((n * H + h) * W + w) * C + c]; #if __CUDA_ARCH__ >= 350 if (mean_values) raw_value -= __ldg(mean_values + c); if (std_values) raw_value /= __ldg(std_values + c); #else if (mean_values) raw_value -= mean_values[c]; if (std_values) raw_value /= std_values[c]; #endif y[idx] = raw_value; } } template <typename Tx, typename Ty> __global__ void _ImageData_NHWC( const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int c = idx % C; Ty raw_value = x[idx]; #if __CUDA_ARCH__ >= 350 if (mean_values) raw_value -= __ldg(mean_values + c); if (std_values) raw_value /= __ldg(std_values + c); #else if (mean_values) raw_value -= mean_values[c]; if (std_values) raw_value /= std_values[c]; #endif y[idx] = raw_value; } } /*! ImageData <Tx = float32, Ty = float32, Device = CUDA> */ template <> void ImageData<float, float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const float* x, float* y, CUDAContext* ctx) { if (data_format == "NCHW") { _ImageData_NCHW<float, float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, N, C, H, W, mean_values, std_values, x, y); } else if (data_format == "NHWC") { _ImageData_NHWC<float, float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, N, C, H, W, mean_values, std_values, x, y); } else LOG(FATAL) << "Unknown data format: " << data_format; } /*! ImageData <Tx = uint8, Ty = float32, Device = CUDA> */ template <> void ImageData<uint8_t, float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const uint8_t* x, float* y, CUDAContext* ctx) { if (data_format == "NCHW") { _ImageData_NCHW<uint8_t, float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, N, C, H, W, mean_values, std_values, x, y); } else if (data_format == "NHWC") { _ImageData_NHWC<uint8_t, float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, N, C, H, W, mean_values, std_values, x, y); } else LOG(FATAL) << "Unknown data format: " << data_format; } /*! 
ImageData <Tx = ?, Ty = float16, Device = CUDA> */ template <typename Tx, typename Ty> __global__ void _ImageDataHalf_NCHW( const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int w = idx % W; const int h = (idx / W) % H; const int c = (idx / W / H) % C; const int n = idx / W / H / C; float raw_value = x[((n * H + h) * W + w) * C + c]; #if __CUDA_ARCH__ >= 350 if (mean_values) raw_value -= __ldg(mean_values + c); if (std_values) raw_value /= __ldg(std_values + c); #else if (mean_values) raw_value -= mean_values[c]; if (std_values) raw_value /= std_values[c]; #endif y[idx] = __float2half(raw_value); } } template <typename Tx, typename Ty> __global__ void _ImageDataHalf_NHWC( const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int c = idx % C; float raw_value = x[idx]; #if __CUDA_ARCH__ >= 350 if (mean_values) raw_value -= __ldg(mean_values + c); if (std_values) raw_value /= __ldg(std_values + c); #else if (mean_values) raw_value -= mean_values[c]; if (std_values) raw_value /= std_values[c]; #endif y[idx] = __float2half(raw_value); } } /*! ImageData <Tx = float32, Ty = float16, Device = CUDA> */ template <> void ImageData<float, float16, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const float* x, float16* y, CUDAContext* ctx) { if (data_format == "NCHW") { _ImageDataHalf_NCHW<float, half> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, N, C, H, W, mean_values, std_values, x, reinterpret_cast<half*>(y)); } else if (data_format == "NHWC") { _ImageDataHalf_NHWC<float, half> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, N, C, H, W, mean_values, std_values, x, reinterpret_cast<half*>(y)); } else LOG(FATAL) << "Unknown data format: " << data_format; } template <> void ImageData<uint8_t, float16, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const uint8_t* x, float16* y, CUDAContext* ctx) { if (data_format == "NCHW") { _ImageDataHalf_NCHW<uint8_t, half> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, N, C, H, W, mean_values, std_values, x, reinterpret_cast<half*>(y)); } else if (data_format == "NHWC") { _ImageDataHalf_NHWC<uint8_t, half> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, N, C, H, W, mean_values, std_values, x, reinterpret_cast<half*>(y)); } else LOG(FATAL) << "Unknown data format: " << data_format; } } // namespace kernel } // namepsace dragon #endif // WITH_CUDA
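For reference, the index arithmetic in the _ImageData_NCHW kernel above reads as follows: the flat output index idx is decomposed as (n, c, h, w) in NCHW order while the input is fetched in NHWC order, with optional per-channel mean subtraction and std division. A host-side sketch of the same element-wise computation (illustrative only, not part of the dragon sources):

// CPU reference of _ImageData_NCHW's per-element work, float in / float out.
void imageDataNCHWReference(int N, int C, int H, int W,
                            const float* mean, const float* stdv,
                            const float* x /* NHWC */, float* y /* NCHW */) {
    for (int idx = 0; idx < N * C * H * W; ++idx) {
        int w = idx % W;
        int h = (idx / W) % H;
        int c = (idx / W / H) % C;
        int n = idx / W / H / C;
        float v = x[((n * H + h) * W + w) * C + c];  // NHWC read
        if (mean) v -= mean[c];                      // per-channel mean
        if (stdv) v /= stdv[c];                      // per-channel std
        y[idx] = v;                                  // NCHW write
    }
}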
9a7fc527c8ab73ab06b502013f738e0bc3e10fe8.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/framework/framework.h" #include "oneflow/core/kernel/new_kernel_util.h" #ifdef OF_ENABLE_PROFILER #include <nvtx3/roctracer/roctx.h> #endif // OF_ENABLE_PROFILER namespace oneflow { namespace { #ifdef OF_ENABLE_PROFILER static thread_local HashMap<std::string, nvtxRangeId_t> mark2range_id; #endif } // namespace class NvtxOpKernelState final : public user_op::OpKernelState { public: NvtxOpKernelState() : counter_(0) { #ifndef OF_ENABLE_PROFILER LOG(WARNING) << "To use NVTX, run cmake with -DBUILD_PROFILER=ON"; #endif } ~NvtxOpKernelState() override = default; int64_t counter() const { return counter_; } void IncreaseCount() { counter_ += 1; } private: int64_t counter_; }; class NvtxStartKernel final : public user_op::OpKernel { public: NvtxStartKernel() = default; ~NvtxStartKernel() override = default; std::shared_ptr<user_op::OpKernelState> CreateOpKernelState( user_op::KernelInitContext* ctx) const override { return std::make_shared<NvtxOpKernelState>(); } private: using user_op::OpKernel::Compute; void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state) const override { const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0); user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0); const ShapeView& in_shape = in->shape(); CHECK_EQ(out->shape(), in_shape); const DataType in_data_type = in->data_type(); CHECK_EQ(out->data_type(), in_data_type); Memcpy<DeviceType::kGPU>(ctx->stream(), out->mut_dptr<void>(), in->dptr<void>(), in_shape.elem_cnt() * GetSizeOfDataType(in_data_type)); #ifdef OF_ENABLE_PROFILER auto* kernel_state = dynamic_cast<NvtxOpKernelState*>(state); const std::string mark_prefix = ctx->Attr<std::string>("mark_prefix"); const std::string mark = mark_prefix + "-" + std::to_string(kernel_state->counter()); nvtxRangeId_t range_id = roctxRangeStartA(mark.c_str()); CHECK(mark2range_id.emplace(mark, range_id).second); kernel_state->IncreaseCount(); #endif // OF_ENABLE_PROFILER } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; REGISTER_USER_KERNEL("nvtx_start") .SetCreateFn<NvtxStartKernel>() .SetIsMatchedHob(user_op::HobDeviceType() == DeviceType::kGPU) .SetInplaceProposalFn([](const user_op::InferContext&, user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> { OF_RETURN_IF_ERROR(AddInplaceArgPairFn("out", 0, "in", 0, false)); return Maybe<void>::Ok(); }); class NvtxEndKernel final : public user_op::OpKernel { public: NvtxEndKernel() = default; ~NvtxEndKernel() override = default; std::shared_ptr<user_op::OpKernelState> CreateOpKernelState( user_op::KernelInitContext* ctx) const override { return std::make_shared<NvtxOpKernelState>(); } private: using user_op::OpKernel::Compute; void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state) const override { const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0); user_op::Tensor* 
out = ctx->Tensor4ArgNameAndIndex("out", 0); const ShapeView& in_shape = in->shape(); CHECK_EQ(out->shape(), in_shape); const DataType in_data_type = in->data_type(); CHECK_EQ(out->data_type(), in_data_type); #ifdef OF_ENABLE_PROFILER auto* kernel_state = dynamic_cast<NvtxOpKernelState*>(state); const std::string mark_prefix = ctx->Attr<std::string>("mark_prefix"); const std::string mark = mark_prefix + "-" + std::to_string(kernel_state->counter()); auto it = mark2range_id.find(mark.c_str()); CHECK(it != mark2range_id.end()); nvtxRangeId_t range_id = it->second; mark2range_id.erase(it); roctxRangeStop(range_id); Memcpy<DeviceType::kGPU>(ctx->stream(), out->mut_dptr<void>(), in->dptr<void>(), in_shape.elem_cnt() * GetSizeOfDataType(in_data_type)); kernel_state->IncreaseCount(); #endif } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; REGISTER_USER_KERNEL("nvtx_end") .SetCreateFn<NvtxEndKernel>() .SetIsMatchedHob(user_op::HobDeviceType() == DeviceType::kGPU) .SetInplaceProposalFn([](const user_op::InferContext&, user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> { OF_RETURN_IF_ERROR(AddInplaceArgPairFn("out", 0, "in", 0, false)); return Maybe<void>::Ok(); }); } // namespace oneflow
9a7fc527c8ab73ab06b502013f738e0bc3e10fe8.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/framework/framework.h" #include "oneflow/core/kernel/new_kernel_util.h" #ifdef OF_ENABLE_PROFILER #include <nvtx3/nvToolsExt.h> #endif // OF_ENABLE_PROFILER namespace oneflow { namespace { #ifdef OF_ENABLE_PROFILER static thread_local HashMap<std::string, nvtxRangeId_t> mark2range_id; #endif } // namespace class NvtxOpKernelState final : public user_op::OpKernelState { public: NvtxOpKernelState() : counter_(0) { #ifndef OF_ENABLE_PROFILER LOG(WARNING) << "To use NVTX, run cmake with -DBUILD_PROFILER=ON"; #endif } ~NvtxOpKernelState() override = default; int64_t counter() const { return counter_; } void IncreaseCount() { counter_ += 1; } private: int64_t counter_; }; class NvtxStartKernel final : public user_op::OpKernel { public: NvtxStartKernel() = default; ~NvtxStartKernel() override = default; std::shared_ptr<user_op::OpKernelState> CreateOpKernelState( user_op::KernelInitContext* ctx) const override { return std::make_shared<NvtxOpKernelState>(); } private: using user_op::OpKernel::Compute; void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state) const override { const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0); user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0); const ShapeView& in_shape = in->shape(); CHECK_EQ(out->shape(), in_shape); const DataType in_data_type = in->data_type(); CHECK_EQ(out->data_type(), in_data_type); Memcpy<DeviceType::kGPU>(ctx->stream(), out->mut_dptr<void>(), in->dptr<void>(), in_shape.elem_cnt() * GetSizeOfDataType(in_data_type)); #ifdef OF_ENABLE_PROFILER auto* kernel_state = dynamic_cast<NvtxOpKernelState*>(state); const std::string mark_prefix = ctx->Attr<std::string>("mark_prefix"); const std::string mark = mark_prefix + "-" + std::to_string(kernel_state->counter()); nvtxRangeId_t range_id = nvtxRangeStartA(mark.c_str()); CHECK(mark2range_id.emplace(mark, range_id).second); kernel_state->IncreaseCount(); #endif // OF_ENABLE_PROFILER } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; REGISTER_USER_KERNEL("nvtx_start") .SetCreateFn<NvtxStartKernel>() .SetIsMatchedHob(user_op::HobDeviceType() == DeviceType::kGPU) .SetInplaceProposalFn([](const user_op::InferContext&, user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> { OF_RETURN_IF_ERROR(AddInplaceArgPairFn("out", 0, "in", 0, false)); return Maybe<void>::Ok(); }); class NvtxEndKernel final : public user_op::OpKernel { public: NvtxEndKernel() = default; ~NvtxEndKernel() override = default; std::shared_ptr<user_op::OpKernelState> CreateOpKernelState( user_op::KernelInitContext* ctx) const override { return std::make_shared<NvtxOpKernelState>(); } private: using user_op::OpKernel::Compute; void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state) const override { const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0); user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0); const ShapeView& 
in_shape = in->shape(); CHECK_EQ(out->shape(), in_shape); const DataType in_data_type = in->data_type(); CHECK_EQ(out->data_type(), in_data_type); #ifdef OF_ENABLE_PROFILER auto* kernel_state = dynamic_cast<NvtxOpKernelState*>(state); const std::string mark_prefix = ctx->Attr<std::string>("mark_prefix"); const std::string mark = mark_prefix + "-" + std::to_string(kernel_state->counter()); auto it = mark2range_id.find(mark.c_str()); CHECK(it != mark2range_id.end()); nvtxRangeId_t range_id = it->second; mark2range_id.erase(it); nvtxRangeEnd(range_id); Memcpy<DeviceType::kGPU>(ctx->stream(), out->mut_dptr<void>(), in->dptr<void>(), in_shape.elem_cnt() * GetSizeOfDataType(in_data_type)); kernel_state->IncreaseCount(); #endif } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; REGISTER_USER_KERNEL("nvtx_end") .SetCreateFn<NvtxEndKernel>() .SetIsMatchedHob(user_op::HobDeviceType() == DeviceType::kGPU) .SetInplaceProposalFn([](const user_op::InferContext&, user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> { OF_RETURN_IF_ERROR(AddInplaceArgPairFn("out", 0, "in", 0, false)); return Maybe<void>::Ok(); }); } // namespace oneflow
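The nvtx_start / nvtx_end kernels above bracket a stretch of GPU work with an NVTX range keyed by "<mark_prefix>-<counter>", storing the range id in a thread-local map between the two ops. Stripped of the OneFlow plumbing, the underlying NVTX pattern is simply the following sketch (the label string is a placeholder):

#include <nvtx3/nvToolsExt.h>

void profiledRegionExample() {
    // Open a named range; the returned id ties start and end together,
    // mirroring what NvtxStartKernel stores in mark2range_id.
    nvtxRangeId_t id = nvtxRangeStartA("forward-0");

    // ... enqueue the kernels that should appear inside the range ...

    // Close the same range, as NvtxEndKernel does after looking the id up.
    nvtxRangeEnd(id);
}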
fbbe0c27249e0726b6488711b83258d1f1a5a493.hip
// !!! This is a file automatically generated by hipify!!! #ifndef RAYTRACEKERNELENTRY_CU #define RAYTRACEKERNELENTRY_CU // ======================================================================================= // RaytraceKernelEntry // ======================================================================================= ///--------------------------------------------------------------------------------------- /// \brief Entry point from host to kernel(device) /// /// # RaytraceKernelEntry /// /// 2012->2013 Jarl Larsson ///--------------------------------------------------------------------------------------- #include <vector> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include "RaytraceConstantBuffer.h" #include "KernelHelper.h" // device specific #include "DeviceResources.h" #include "Raytracer.h" #include <RawTexture.h> #pragma comment(lib, "cudart") using std::vector; __global__ void RaytraceKernel(unsigned char *p_outSurface, const int p_width, const int p_height, const size_t p_pitch, float3* p_verts,float3* p_uvs,float3* p_norms,unsigned int p_numVerts, unsigned int* p_indices,unsigned int p_numIndices, float3 p_kdExtents, float3 p_kdPos, TriPart* p_tris, unsigned int p_numTris, DKDNode* p_nodes, DKDLeaf* p_leaflist, unsigned int* p_nodeIndices, unsigned int p_numNodes,unsigned int p_numLeaves,unsigned int p_numNodeIndices) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; float *pixel; // discard pixel if outside if (x >= p_width || y >= p_height) return; // get a pointer to the pixel at (x,y) pixel = (float *)(p_outSurface + y*p_pitch) + 4*x; Raytrace(pixel,x,y, p_width, p_height, p_verts,p_uvs,p_norms,p_numVerts, p_indices,p_numIndices, p_kdExtents,p_kdPos, p_tris, p_numTris, p_nodes, p_leaflist, p_nodeIndices, p_numNodes,p_numLeaves,p_numNodeIndices); } // Executes CUDA kernel extern "C" void RunRaytraceKernel(void* p_cb,void *surface, int width, int height, int pitch, void* p_verts,void* p_uvs,void* p_norms,unsigned int p_numVerts, void* p_indices,unsigned int p_numIndices, RawTexture* p_texture, void* p_kdExtents, void* p_kdPos, void* p_tris, unsigned int p_numTris, void* p_nodes, void* p_leaflist, void* p_nodeIndices, unsigned int p_numNodes,unsigned int p_numLeaves,unsigned int p_numNodeIndices) { // copy to constant buffer hipError_t res = hipMemcpyToSymbol(cb, p_cb, sizeof(RaytraceConstantBuffer)); KernelHelper::assertAndPrint(res,__FILE__,__FUNCTION__,__LINE__); // Allocate texture //float4* input; int ww=656, hh=480; // input = new float4[ww*hh]; // for(int i = 0; i < ww*hh; i++) // { // // r // input[i].x = /*(unsigned char)(256.0f**/(float)i/(float)(ww*hh)/*)*/; // // g // input[i].y = /*(unsigned char)(256.0f*(*/1.0f-((float)i/(float)(ww*hh))/*)*/; // // b // input[i].z = 128; // // a // input[i].w = 0; // } float4* texinput=(float4*)p_texture->m_data; ww=p_texture->m_width; hh=p_texture->m_height; // Allocate array and copy image data hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindFloat); //hipCreateChannelDesc(8, 8, 8, 8, hipChannelFormatKindUnsigned); //hipCreateChannelDesc<uchar4>(); //hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat); hipArray *cuArray; res=hipMallocArray(&cuArray, &channelDesc, ww, hh); KernelHelper::assertAndPrint(res,__FILE__,__FUNCTION__,__LINE__); int s=sizeof(float4); res=hipMemcpyToArray(cuArray, 0, 0, texinput, ww*hh*s, hipMemcpyHostToDevice); 
KernelHelper::assertAndPrint(res,__FILE__,__FUNCTION__,__LINE__); // Set texture parameters tex.addressMode[0] = hipAddressModeWrap; tex.addressMode[1] = hipAddressModeWrap; tex.filterMode = hipFilterModeLinear; tex.normalized = true; // access with normalized texture coordinates // Bind the array to the texture res=hipBindTextureToArray(&tex, cuArray, &channelDesc); KernelHelper::assertAndPrint(res,__FILE__,__FUNCTION__,__LINE__); // Set up dimensions int bd=min(16,max(8,width/40)); if (bd<14) bd=8; else bd=16; dim3 Db = dim3(bd, bd ); // block dimensions are fixed to be 256 threads dim3 Dg = dim3((width+Db.x-1)/Db.x, (height+Db.y-1)/Db.y); //DEBUGPRINT(( ("\n"+toString(width)+" x "+toString(height)+" @ "+toString(1000*reinterpret_cast<RaytraceConstantBuffer*>(p_cb)->b)).c_str() )); hipLaunchKernelGGL(( RaytraceKernel), dim3(Dg),dim3(Db), 0, 0, (unsigned char *)surface, width, height, pitch, (float3*)p_verts, (float3*) p_uvs, (float3*)p_norms,p_numVerts, (unsigned int*)p_indices, p_numIndices, *((float3*)p_kdExtents),*((float3*)p_kdPos), (TriPart*)p_tris,p_numTris, (DKDNode*)p_nodes, (DKDLeaf*)p_leaflist, (unsigned int*)p_nodeIndices, p_numNodes,p_numLeaves,p_numNodeIndices); res = hipDeviceSynchronize(); KernelHelper::assertAndPrint(res,__FILE__,__FUNCTION__,__LINE__); res=hipUnbindTexture(&tex); KernelHelper::assertAndPrint(res,__FILE__,__FUNCTION__,__LINE__); res=hipFreeArray(cuArray); KernelHelper::assertAndPrint(res,__FILE__,__FUNCTION__,__LINE__); //delete [] texinput; } #endif
fbbe0c27249e0726b6488711b83258d1f1a5a493.cu
#ifndef RAYTRACEKERNELENTRY_CU #define RAYTRACEKERNELENTRY_CU // ======================================================================================= // RaytraceKernelEntry // ======================================================================================= ///--------------------------------------------------------------------------------------- /// \brief Entry point from host to kernel(device) /// /// # RaytraceKernelEntry /// /// 2012->2013 Jarl Larsson ///--------------------------------------------------------------------------------------- #include <vector> #include <cuda.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include "RaytraceConstantBuffer.h" #include "KernelHelper.h" // device specific #include "DeviceResources.h" #include "Raytracer.h" #include <RawTexture.h> #pragma comment(lib, "cudart") using std::vector; __global__ void RaytraceKernel(unsigned char *p_outSurface, const int p_width, const int p_height, const size_t p_pitch, float3* p_verts,float3* p_uvs,float3* p_norms,unsigned int p_numVerts, unsigned int* p_indices,unsigned int p_numIndices, float3 p_kdExtents, float3 p_kdPos, TriPart* p_tris, unsigned int p_numTris, DKDNode* p_nodes, DKDLeaf* p_leaflist, unsigned int* p_nodeIndices, unsigned int p_numNodes,unsigned int p_numLeaves,unsigned int p_numNodeIndices) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; float *pixel; // discard pixel if outside if (x >= p_width || y >= p_height) return; // get a pointer to the pixel at (x,y) pixel = (float *)(p_outSurface + y*p_pitch) + 4*x; Raytrace(pixel,x,y, p_width, p_height, p_verts,p_uvs,p_norms,p_numVerts, p_indices,p_numIndices, p_kdExtents,p_kdPos, p_tris, p_numTris, p_nodes, p_leaflist, p_nodeIndices, p_numNodes,p_numLeaves,p_numNodeIndices); } // Executes CUDA kernel extern "C" void RunRaytraceKernel(void* p_cb,void *surface, int width, int height, int pitch, void* p_verts,void* p_uvs,void* p_norms,unsigned int p_numVerts, void* p_indices,unsigned int p_numIndices, RawTexture* p_texture, void* p_kdExtents, void* p_kdPos, void* p_tris, unsigned int p_numTris, void* p_nodes, void* p_leaflist, void* p_nodeIndices, unsigned int p_numNodes,unsigned int p_numLeaves,unsigned int p_numNodeIndices) { // copy to constant buffer cudaError_t res = cudaMemcpyToSymbol(cb, p_cb, sizeof(RaytraceConstantBuffer)); KernelHelper::assertAndPrint(res,__FILE__,__FUNCTION__,__LINE__); // Allocate texture //float4* input; int ww=656, hh=480; // input = new float4[ww*hh]; // for(int i = 0; i < ww*hh; i++) // { // // r // input[i].x = /*(unsigned char)(256.0f**/(float)i/(float)(ww*hh)/*)*/; // // g // input[i].y = /*(unsigned char)(256.0f*(*/1.0f-((float)i/(float)(ww*hh))/*)*/; // // b // input[i].z = 128; // // a // input[i].w = 0; // } float4* texinput=(float4*)p_texture->m_data; ww=p_texture->m_width; hh=p_texture->m_height; // Allocate array and copy image data cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindFloat); //cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsigned); //cudaCreateChannelDesc<uchar4>(); //cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); cudaArray *cuArray; res=cudaMallocArray(&cuArray, &channelDesc, ww, hh); KernelHelper::assertAndPrint(res,__FILE__,__FUNCTION__,__LINE__); int s=sizeof(float4); res=cudaMemcpyToArray(cuArray, 0, 0, texinput, ww*hh*s, cudaMemcpyHostToDevice); KernelHelper::assertAndPrint(res,__FILE__,__FUNCTION__,__LINE__); // Set texture 
parameters tex.addressMode[0] = cudaAddressModeWrap; tex.addressMode[1] = cudaAddressModeWrap; tex.filterMode = cudaFilterModeLinear; tex.normalized = true; // access with normalized texture coordinates // Bind the array to the texture res=cudaBindTextureToArray(&tex, cuArray, &channelDesc); KernelHelper::assertAndPrint(res,__FILE__,__FUNCTION__,__LINE__); // Set up dimensions int bd=min(16,max(8,width/40)); if (bd<14) bd=8; else bd=16; dim3 Db = dim3(bd, bd ); // block dimensions are fixed to be 256 threads dim3 Dg = dim3((width+Db.x-1)/Db.x, (height+Db.y-1)/Db.y); //DEBUGPRINT(( ("\n"+toString(width)+" x "+toString(height)+" @ "+toString(1000*reinterpret_cast<RaytraceConstantBuffer*>(p_cb)->b)).c_str() )); RaytraceKernel<<<Dg,Db>>>((unsigned char *)surface, width, height, pitch, (float3*)p_verts, (float3*) p_uvs, (float3*)p_norms,p_numVerts, (unsigned int*)p_indices, p_numIndices, *((float3*)p_kdExtents),*((float3*)p_kdPos), (TriPart*)p_tris,p_numTris, (DKDNode*)p_nodes, (DKDLeaf*)p_leaflist, (unsigned int*)p_nodeIndices, p_numNodes,p_numLeaves,p_numNodeIndices); res = cudaDeviceSynchronize(); KernelHelper::assertAndPrint(res,__FILE__,__FUNCTION__,__LINE__); res=cudaUnbindTexture(&tex); KernelHelper::assertAndPrint(res,__FILE__,__FUNCTION__,__LINE__); res=cudaFreeArray(cuArray); KernelHelper::assertAndPrint(res,__FILE__,__FUNCTION__,__LINE__); //delete [] texinput; } #endif
0323540364fda49d2af2262c95b6c149a0842cf5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "transpose_cuda.cuh" __global__ void transpose_parallel_per_element(int a[], int b[], size_t N, size_t K) { int i = blockIdx.x * K + threadIdx.x; int j = blockIdx.y * K + threadIdx.y; b[j + i * N] = a[i + j * N]; }
0323540364fda49d2af2262c95b6c149a0842cf5.cu
#include <stdio.h> #include "transpose_cuda.cuh" __global__ void transpose_parallel_per_element(int a[], int b[], size_t N, size_t K) { int i = blockIdx.x * K + threadIdx.x; int j = blockIdx.y * K + threadIdx.y; b[j + i * N] = a[i + j * N]; }
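A possible host-side launch for transpose_parallel_per_element above, assuming a square N x N matrix tiled into K x K thread blocks with N divisible by K; this launcher is an illustration and not part of the original pair:

#include <cuda_runtime.h>

void launchTransposeExample(int* d_a, int* d_b, size_t N, size_t K) {
    dim3 block((unsigned)K, (unsigned)K);             // blockDim must equal K for the kernel's indexing
    dim3 grid((unsigned)(N / K), (unsigned)(N / K));  // assumes N % K == 0
    transpose_parallel_per_element<<<grid, block>>>(d_a, d_b, N, K);
    cudaDeviceSynchronize();                          // error checking omitted for brevity
}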
fec6aff0d8148cbbb7995b604d193ef2d0f4bf81.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel5_minus_4_back; int xdim0_update_halo_kernel5_minus_4_back_h = -1; __constant__ int ydim0_update_halo_kernel5_minus_4_back; int ydim0_update_halo_kernel5_minus_4_back_h = -1; __constant__ int xdim1_update_halo_kernel5_minus_4_back; int xdim1_update_halo_kernel5_minus_4_back_h = -1; __constant__ int ydim1_update_halo_kernel5_minus_4_back; int ydim1_update_halo_kernel5_minus_4_back_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel5_minus_4_back * (y) + \ xdim0_update_halo_kernel5_minus_4_back * \ ydim0_update_halo_kernel5_minus_4_back * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel5_minus_4_back * (y) + \ xdim1_update_halo_kernel5_minus_4_back * \ ydim1_update_halo_kernel5_minus_4_back * (z)) // user function __device__ inline void update_halo_kernel5_minus_4_back_gpu(double *vol_flux_z, double *mass_flux_z, const int *fields) { if (fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z[OPS_ACC0(0, 0, 0)] = -vol_flux_z[OPS_ACC0(0, 0, 4)]; if (fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z[OPS_ACC1(0, 0, 0)] = -mass_flux_z[OPS_ACC1(0, 0, 4)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel5_minus_4_back(double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel5_minus_4_back + idx_z * 1 * 1 * xdim0_update_halo_kernel5_minus_4_back * ydim0_update_halo_kernel5_minus_4_back; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel5_minus_4_back + idx_z * 1 * 1 * xdim1_update_halo_kernel5_minus_4_back * ydim1_update_halo_kernel5_minus_4_back; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel5_minus_4_back_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel5_minus_4_back(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel5_minus_4_back_execute( ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 3, range, 92)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(92, "update_halo_kernel5_minus_4_back"); OPS_kernels[92].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - 
sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel5_minus_4_back_h || ydim0 != ydim0_update_halo_kernel5_minus_4_back_h || xdim1 != xdim1_update_halo_kernel5_minus_4_back_h || ydim1 != ydim1_update_halo_kernel5_minus_4_back_h) { hipMemcpyToSymbol(xdim0_update_halo_kernel5_minus_4_back, &xdim0, sizeof(int)); xdim0_update_halo_kernel5_minus_4_back_h = xdim0; hipMemcpyToSymbol(ydim0_update_halo_kernel5_minus_4_back, &ydim0, sizeof(int)); ydim0_update_halo_kernel5_minus_4_back_h = ydim0; hipMemcpyToSymbol(xdim1_update_halo_kernel5_minus_4_back, &xdim1, sizeof(int)); xdim1_update_halo_kernel5_minus_4_back_h = xdim1; hipMemcpyToSymbol(ydim1_update_halo_kernel5_minus_4_back, &ydim1, sizeof(int)); ydim1_update_halo_kernel5_minus_4_back_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[92].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel5_minus_4_back), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[92].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[92].mpi_time += t2 - t1; OPS_kernels[92].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[92].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void 
ops_par_loop_update_halo_kernel5_minus_4_back(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 92; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 92; for (int i = 0; i < 6; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int)); memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel5_minus_4_back_execute; if (OPS_diags > 1) { ops_timing_realloc(92, "update_halo_kernel5_minus_4_back"); } ops_enqueue_kernel(desc); } #endif
fec6aff0d8148cbbb7995b604d193ef2d0f4bf81.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel5_minus_4_back; int xdim0_update_halo_kernel5_minus_4_back_h = -1; __constant__ int ydim0_update_halo_kernel5_minus_4_back; int ydim0_update_halo_kernel5_minus_4_back_h = -1; __constant__ int xdim1_update_halo_kernel5_minus_4_back; int xdim1_update_halo_kernel5_minus_4_back_h = -1; __constant__ int ydim1_update_halo_kernel5_minus_4_back; int ydim1_update_halo_kernel5_minus_4_back_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel5_minus_4_back * (y) + \ xdim0_update_halo_kernel5_minus_4_back * \ ydim0_update_halo_kernel5_minus_4_back * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel5_minus_4_back * (y) + \ xdim1_update_halo_kernel5_minus_4_back * \ ydim1_update_halo_kernel5_minus_4_back * (z)) // user function __device__ inline void update_halo_kernel5_minus_4_back_gpu(double *vol_flux_z, double *mass_flux_z, const int *fields) { if (fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z[OPS_ACC0(0, 0, 0)] = -vol_flux_z[OPS_ACC0(0, 0, 4)]; if (fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z[OPS_ACC1(0, 0, 0)] = -mass_flux_z[OPS_ACC1(0, 0, 4)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel5_minus_4_back(double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel5_minus_4_back + idx_z * 1 * 1 * xdim0_update_halo_kernel5_minus_4_back * ydim0_update_halo_kernel5_minus_4_back; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel5_minus_4_back + idx_z * 1 * 1 * xdim1_update_halo_kernel5_minus_4_back * ydim1_update_halo_kernel5_minus_4_back; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel5_minus_4_back_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel5_minus_4_back(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel5_minus_4_back_execute( ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 3, range, 92)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(92, "update_halo_kernel5_minus_4_back"); OPS_kernels[92].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * 
n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel5_minus_4_back_h || ydim0 != ydim0_update_halo_kernel5_minus_4_back_h || xdim1 != xdim1_update_halo_kernel5_minus_4_back_h || ydim1 != ydim1_update_halo_kernel5_minus_4_back_h) { cudaMemcpyToSymbol(xdim0_update_halo_kernel5_minus_4_back, &xdim0, sizeof(int)); xdim0_update_halo_kernel5_minus_4_back_h = xdim0; cudaMemcpyToSymbol(ydim0_update_halo_kernel5_minus_4_back, &ydim0, sizeof(int)); ydim0_update_halo_kernel5_minus_4_back_h = ydim0; cudaMemcpyToSymbol(xdim1_update_halo_kernel5_minus_4_back, &xdim1, sizeof(int)); xdim1_update_halo_kernel5_minus_4_back_h = xdim1; cudaMemcpyToSymbol(ydim1_update_halo_kernel5_minus_4_back, &ydim1, sizeof(int)); ydim1_update_halo_kernel5_minus_4_back_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[92].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_update_halo_kernel5_minus_4_back<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[92].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[92].mpi_time += t2 - t1; OPS_kernels[92].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[92].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel5_minus_4_back(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor 
*desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 92; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 92; for (int i = 0; i < 6; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int)); memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel5_minus_4_back_execute; if (OPS_diags > 1) { ops_timing_realloc(92, "update_halo_kernel5_minus_4_back"); } ops_enqueue_kernel(desc); } #endif
ea9251bf39351b7bae5c2f722d11778262c7768c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @author Mark Gates @author Azzam Haidar @generated from zlaset.cu normal z -> d, Fri Sep 11 18:29:21 2015 */ #include "common_magma.h" #include "batched_kernel_param.h" // To deal with really large matrices, this launchs multiple super blocks, // each with up to 64K-1 x 64K-1 thread blocks, which is up to 4194240 x 4194240 matrix with BLK=64. // CUDA architecture 2.0 limits each grid dimension to 64K-1. // Instances arose for vectors used by sparse matrices with M > 4194240, though N is small. const magma_int_t max_blocks = 65535; // BLK_X and BLK_Y need to be equal for dlaset_q to deal with diag & offdiag // when looping over super blocks. // Formerly, BLK_X and BLK_Y could be different. #define BLK_X 64 #define BLK_Y BLK_X /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to dlaset, dlacpy, dlag2s, clag2z, dgeadd. */ static __device__ void dlaset_full_device( int m, int n, double offdiag, double diag, double *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (below diag || above diag || offdiag == diag) */ bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y || ind + BLK_X <= iby || MAGMA_D_EQUAL( offdiag, diag ))); /* do only rows inside matrix */ if ( ind < m ) { A += ind + iby*lda; if ( full ) { // full block-column, off-diagonal block or offdiag == diag #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = offdiag; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( iby+j == ind ) A[j*lda] = diag; else A[j*lda] = offdiag; } } } } /* Similar to dlaset_full, but updates only the diagonal and below. Blocks that are fully above the diagonal exit immediately. Code similar to dlaset, dlacpy, zlat2c, clat2z. */ static __device__ void dlaset_lower_device( int m, int n, double offdiag, double diag, double *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (below diag) */ bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y)); /* do only rows inside matrix, and blocks not above diag */ if ( ind < m && ind + BLK_X > iby ) { A += ind + iby*lda; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = offdiag; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( iby+j == ind ) A[j*lda] = diag; else if ( ind > iby+j ) A[j*lda] = offdiag; } } } } /* Similar to dlaset_full, but updates only the diagonal and above. Blocks that are fully below the diagonal exit immediately. Code similar to dlaset, dlacpy, zlat2c, clat2z. 
*/ static __device__ void dlaset_upper_device( int m, int n, double offdiag, double diag, double *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (above diag) */ bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby)); /* do only rows inside matrix, and blocks not below diag */ if ( ind < m && ind < iby + BLK_Y ) { A += ind + iby*lda; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = offdiag; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( iby+j == ind ) A[j*lda] = diag; else if ( ind < iby+j ) A[j*lda] = offdiag; } } } } ////////////////////////////////////////////////////////////////////////////////////// /* kernel wrappers to call the device functions. */ __global__ void dlaset_full_kernel( int m, int n, double offdiag, double diag, double *dA, int ldda ) { dlaset_full_device(m, n, offdiag, diag, dA, ldda); } __global__ void dlaset_lower_kernel( int m, int n, double offdiag, double diag, double *dA, int ldda ) { dlaset_lower_device(m, n, offdiag, diag, dA, ldda); } __global__ void dlaset_upper_kernel( int m, int n, double offdiag, double diag, double *dA, int ldda ) { dlaset_upper_device(m, n, offdiag, diag, dA, ldda); } ////////////////////////////////////////////////////////////////////////////////////// /* kernel wrappers to call the device functions for the batched routine. */ __global__ void dlaset_full_kernel_batched( int m, int n, double offdiag, double diag, double **dAarray, int ldda ) { int batchid = blockIdx.z; dlaset_full_device(m, n, offdiag, diag, dAarray[batchid], ldda); } __global__ void dlaset_lower_kernel_batched( int m, int n, double offdiag, double diag, double **dAarray, int ldda ) { int batchid = blockIdx.z; dlaset_lower_device(m, n, offdiag, diag, dAarray[batchid], ldda); } __global__ void dlaset_upper_kernel_batched( int m, int n, double offdiag, double diag, double **dAarray, int ldda ) { int batchid = blockIdx.z; dlaset_upper_device(m, n, offdiag, diag, dAarray[batchid], ldda); } ////////////////////////////////////////////////////////////////////////////////////// /** Purpose ------- DLASET_Q initializes a 2-D array A to DIAG on the diagonal and OFFDIAG on the off-diagonals. This is the same as DLASET, but adds queue argument. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA to be set. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part - = MagmaFull: All of the matrix dA @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] offdiag DOUBLE_PRECISION The scalar OFFDIAG. (In LAPACK this is called ALPHA.) @param[in] diag DOUBLE_PRECISION The scalar DIAG. (In LAPACK this is called BETA.) @param[in] dA DOUBLE_PRECISION array, dimension (LDDA,N) The M-by-N matrix dA. If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed; if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed. On exit, A(i,j) = OFFDIAG, 1 <= i <= m, 1 <= j <= n, i != j; and A(i,i) = DIAG, 1 <= i <= min(m,n) @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlaset_q( magma_uplo_t uplo, magma_int_t m, magma_int_t n, double offdiag, double diag, magmaDouble_ptr dA, magma_int_t ldda, magma_queue_t queue) { #define dA(i_, j_) (dA + (i_) + (j_)*ldda) magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m) ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { return; } assert( BLK_X == BLK_Y ); const magma_int_t super_NB = max_blocks*BLK_X; dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) ); dim3 threads( BLK_X, 1 ); dim3 grid; magma_int_t mm, nn; if (uplo == MagmaLower) { for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=0; j < super_grid.y && j <= i; ++j ) { // from left to diagonal nn = (j == super_grid.y-1 ? n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); if ( i == j ) { // diagonal super block hipLaunchKernelGGL(( dlaset_lower_kernel), dim3(grid), dim3(threads), 0, queue , mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda ); } else { // off diagonal super block hipLaunchKernelGGL(( dlaset_full_kernel), dim3(grid), dim3(threads), 0, queue , mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda ); } } } } else if (uplo == MagmaUpper) { for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=i; j < super_grid.y; ++j ) { // from diagonal to right nn = (j == super_grid.y-1 ? n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); if ( i == j ) { // diagonal super block hipLaunchKernelGGL(( dlaset_upper_kernel), dim3(grid), dim3(threads), 0, queue , mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda ); } else { // off diagonal super block hipLaunchKernelGGL(( dlaset_full_kernel), dim3(grid), dim3(threads), 0, queue , mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda ); } } } } else { // if continuous in memory & set to zero, hipMemset is faster. // TODO: use hipMemset2D ? if ( m == ldda && MAGMA_D_EQUAL( offdiag, MAGMA_D_ZERO ) && MAGMA_D_EQUAL( diag, MAGMA_D_ZERO ) ) { size_t size = m*n; hipError_t err = hipMemsetAsync( dA, 0, size*sizeof(double), queue ); assert( err == hipSuccess ); } else { for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=0; j < super_grid.y; ++j ) { // full row nn = (j == super_grid.y-1 ? 
n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); if ( i == j ) { // diagonal super block hipLaunchKernelGGL(( dlaset_full_kernel), dim3(grid), dim3(threads), 0, queue , mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda ); } else { // off diagonal super block hipLaunchKernelGGL(( dlaset_full_kernel), dim3(grid), dim3(threads), 0, queue , mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda ); } } } } } } /** @see magmablas_dlaset_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlaset( magma_uplo_t uplo, magma_int_t m, magma_int_t n, double offdiag, double diag, magmaDouble_ptr dA, magma_int_t ldda ) { magmablas_dlaset_q( uplo, m, n, offdiag, diag, dA, ldda, magma_stream ); } //////////////////////////////////////////////////////////////////////////////////////// extern "C" void magmablas_dlaset_batched( magma_uplo_t uplo, magma_int_t m, magma_int_t n, double offdiag, double diag, magmaDouble_ptr dAarray[], magma_int_t ldda, magma_int_t batchCount, magma_queue_t queue) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m) ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { return; } dim3 threads( BLK_X, 1, 1 ); dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ), batchCount ); if (uplo == MagmaLower) { hipLaunchKernelGGL(( dlaset_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , m, n, offdiag, diag, dAarray, ldda); } else if (uplo == MagmaUpper) { hipLaunchKernelGGL(( dlaset_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , m, n, offdiag, diag, dAarray, ldda); } else { hipLaunchKernelGGL(( dlaset_full_kernel_batched), dim3(grid), dim3(threads), 0, queue , m, n, offdiag, diag, dAarray, ldda); } }
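// A host-side sketch of the super-block decomposition that magmablas_dlaset_q
// performs above for the MagmaLower case: matrices larger than
// max_blocks*BLK_X rows/columns are tiled into super blocks so that no single
// launch exceeds the 64K-1 grid-dimension limit; diagonal super blocks get the
// lower kernel, off-diagonal super blocks get the full kernel with offdiag on
// the diagonal as well. The constants and the ceildiv helper below are local
// stand-ins for BLK_X, max_blocks and magma_ceildiv (illustration only, not
// MAGMA API); the last-block size is written here as m - i*superNB, which
// matches the m % super_NB used above whenever the dimension is not an exact
// multiple of super_NB.
#include <cstdio>

static long long ceildiv_ll(long long a, long long b) { return (a + b - 1) / b; }

static void print_lower_superblock_plan(long long m, long long n)
{
    const long long blk     = 64;            // mirrors BLK_X == BLK_Y
    const long long maxBlks = 65535;         // mirrors max_blocks (64K-1)
    const long long superNB = maxBlks * blk; // rows/cols covered per super block

    const long long superGx = ceildiv_ll(m, superNB);
    const long long superGy = ceildiv_ll(n, superNB);

    for (long long i = 0; i < superGx; ++i) {
        const long long mm = (i == superGx - 1) ? (m - i * superNB) : superNB;
        for (long long j = 0; j <= i && j < superGy; ++j) {   // from left edge to diagonal
            const long long nn = (j == superGy - 1) ? (n - j * superNB) : superNB;
            std::printf("super block (%lld,%lld): %lld x %lld, grid %lld x %lld -> %s\n",
                        i, j, mm, nn, ceildiv_ll(mm, blk), ceildiv_ll(nn, blk),
                        (i == j) ? "dlaset_lower_kernel (contains diagonal)"
                                 : "dlaset_full_kernel (off-diagonal only)");
        }
    }
}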
ea9251bf39351b7bae5c2f722d11778262c7768c.cu
/* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @author Mark Gates @author Azzam Haidar @generated from zlaset.cu normal z -> d, Fri Sep 11 18:29:21 2015 */ #include "common_magma.h" #include "batched_kernel_param.h" // To deal with really large matrices, this launchs multiple super blocks, // each with up to 64K-1 x 64K-1 thread blocks, which is up to 4194240 x 4194240 matrix with BLK=64. // CUDA architecture 2.0 limits each grid dimension to 64K-1. // Instances arose for vectors used by sparse matrices with M > 4194240, though N is small. const magma_int_t max_blocks = 65535; // BLK_X and BLK_Y need to be equal for dlaset_q to deal with diag & offdiag // when looping over super blocks. // Formerly, BLK_X and BLK_Y could be different. #define BLK_X 64 #define BLK_Y BLK_X /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to dlaset, dlacpy, dlag2s, clag2z, dgeadd. */ static __device__ void dlaset_full_device( int m, int n, double offdiag, double diag, double *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (below diag || above diag || offdiag == diag) */ bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y || ind + BLK_X <= iby || MAGMA_D_EQUAL( offdiag, diag ))); /* do only rows inside matrix */ if ( ind < m ) { A += ind + iby*lda; if ( full ) { // full block-column, off-diagonal block or offdiag == diag #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = offdiag; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( iby+j == ind ) A[j*lda] = diag; else A[j*lda] = offdiag; } } } } /* Similar to dlaset_full, but updates only the diagonal and below. Blocks that are fully above the diagonal exit immediately. Code similar to dlaset, dlacpy, zlat2c, clat2z. */ static __device__ void dlaset_lower_device( int m, int n, double offdiag, double diag, double *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (below diag) */ bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y)); /* do only rows inside matrix, and blocks not above diag */ if ( ind < m && ind + BLK_X > iby ) { A += ind + iby*lda; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = offdiag; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( iby+j == ind ) A[j*lda] = diag; else if ( ind > iby+j ) A[j*lda] = offdiag; } } } } /* Similar to dlaset_full, but updates only the diagonal and above. Blocks that are fully below the diagonal exit immediately. Code similar to dlaset, dlacpy, zlat2c, clat2z. 
*/ static __device__ void dlaset_upper_device( int m, int n, double offdiag, double diag, double *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (above diag) */ bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby)); /* do only rows inside matrix, and blocks not below diag */ if ( ind < m && ind < iby + BLK_Y ) { A += ind + iby*lda; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = offdiag; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( iby+j == ind ) A[j*lda] = diag; else if ( ind < iby+j ) A[j*lda] = offdiag; } } } } ////////////////////////////////////////////////////////////////////////////////////// /* kernel wrappers to call the device functions. */ __global__ void dlaset_full_kernel( int m, int n, double offdiag, double diag, double *dA, int ldda ) { dlaset_full_device(m, n, offdiag, diag, dA, ldda); } __global__ void dlaset_lower_kernel( int m, int n, double offdiag, double diag, double *dA, int ldda ) { dlaset_lower_device(m, n, offdiag, diag, dA, ldda); } __global__ void dlaset_upper_kernel( int m, int n, double offdiag, double diag, double *dA, int ldda ) { dlaset_upper_device(m, n, offdiag, diag, dA, ldda); } ////////////////////////////////////////////////////////////////////////////////////// /* kernel wrappers to call the device functions for the batched routine. */ __global__ void dlaset_full_kernel_batched( int m, int n, double offdiag, double diag, double **dAarray, int ldda ) { int batchid = blockIdx.z; dlaset_full_device(m, n, offdiag, diag, dAarray[batchid], ldda); } __global__ void dlaset_lower_kernel_batched( int m, int n, double offdiag, double diag, double **dAarray, int ldda ) { int batchid = blockIdx.z; dlaset_lower_device(m, n, offdiag, diag, dAarray[batchid], ldda); } __global__ void dlaset_upper_kernel_batched( int m, int n, double offdiag, double diag, double **dAarray, int ldda ) { int batchid = blockIdx.z; dlaset_upper_device(m, n, offdiag, diag, dAarray[batchid], ldda); } ////////////////////////////////////////////////////////////////////////////////////// /** Purpose ------- DLASET_Q initializes a 2-D array A to DIAG on the diagonal and OFFDIAG on the off-diagonals. This is the same as DLASET, but adds queue argument. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA to be set. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part - = MagmaFull: All of the matrix dA @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] offdiag DOUBLE_PRECISION The scalar OFFDIAG. (In LAPACK this is called ALPHA.) @param[in] diag DOUBLE_PRECISION The scalar DIAG. (In LAPACK this is called BETA.) @param[in] dA DOUBLE_PRECISION array, dimension (LDDA,N) The M-by-N matrix dA. If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed; if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed. On exit, A(i,j) = OFFDIAG, 1 <= i <= m, 1 <= j <= n, i != j; and A(i,i) = DIAG, 1 <= i <= min(m,n) @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlaset_q( magma_uplo_t uplo, magma_int_t m, magma_int_t n, double offdiag, double diag, magmaDouble_ptr dA, magma_int_t ldda, magma_queue_t queue) { #define dA(i_, j_) (dA + (i_) + (j_)*ldda) magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m) ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { return; } assert( BLK_X == BLK_Y ); const magma_int_t super_NB = max_blocks*BLK_X; dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) ); dim3 threads( BLK_X, 1 ); dim3 grid; magma_int_t mm, nn; if (uplo == MagmaLower) { for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=0; j < super_grid.y && j <= i; ++j ) { // from left to diagonal nn = (j == super_grid.y-1 ? n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); if ( i == j ) { // diagonal super block dlaset_lower_kernel<<< grid, threads, 0, queue >>> ( mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda ); } else { // off diagonal super block dlaset_full_kernel<<< grid, threads, 0, queue >>> ( mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda ); } } } } else if (uplo == MagmaUpper) { for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=i; j < super_grid.y; ++j ) { // from diagonal to right nn = (j == super_grid.y-1 ? n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); if ( i == j ) { // diagonal super block dlaset_upper_kernel<<< grid, threads, 0, queue >>> ( mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda ); } else { // off diagonal super block dlaset_full_kernel<<< grid, threads, 0, queue >>> ( mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda ); } } } } else { // if continuous in memory & set to zero, cudaMemset is faster. // TODO: use cudaMemset2D ? if ( m == ldda && MAGMA_D_EQUAL( offdiag, MAGMA_D_ZERO ) && MAGMA_D_EQUAL( diag, MAGMA_D_ZERO ) ) { size_t size = m*n; cudaError_t err = cudaMemsetAsync( dA, 0, size*sizeof(double), queue ); assert( err == cudaSuccess ); } else { for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=0; j < super_grid.y; ++j ) { // full row nn = (j == super_grid.y-1 ? 
n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); if ( i == j ) { // diagonal super block dlaset_full_kernel<<< grid, threads, 0, queue >>> ( mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda ); } else { // off diagonal super block dlaset_full_kernel<<< grid, threads, 0, queue >>> ( mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda ); } } } } } } /** @see magmablas_dlaset_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlaset( magma_uplo_t uplo, magma_int_t m, magma_int_t n, double offdiag, double diag, magmaDouble_ptr dA, magma_int_t ldda ) { magmablas_dlaset_q( uplo, m, n, offdiag, diag, dA, ldda, magma_stream ); } //////////////////////////////////////////////////////////////////////////////////////// extern "C" void magmablas_dlaset_batched( magma_uplo_t uplo, magma_int_t m, magma_int_t n, double offdiag, double diag, magmaDouble_ptr dAarray[], magma_int_t ldda, magma_int_t batchCount, magma_queue_t queue) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m) ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { return; } dim3 threads( BLK_X, 1, 1 ); dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ), batchCount ); if (uplo == MagmaLower) { dlaset_lower_kernel_batched<<< grid, threads, 0, queue >>> (m, n, offdiag, diag, dAarray, ldda); } else if (uplo == MagmaUpper) { dlaset_upper_kernel_batched<<< grid, threads, 0, queue >>> (m, n, offdiag, diag, dAarray, ldda); } else { dlaset_full_kernel_batched<<< grid, threads, 0, queue >>> (m, n, offdiag, diag, dAarray, ldda); } }
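// A minimal host-side reference of the LASET semantics implemented by the
// kernels above, handy for validating GPU results: every entry of the selected
// part gets OFFDIAG off the diagonal and DIAG on the diagonal, all other
// entries are left untouched. The sketch assumes column-major storage with
// leading dimension lda and uses a plain char selector ('L', 'U', 'F') instead
// of magma_uplo_t; it is an illustration, not part of the MAGMA API.
#include <cstddef>

static void dlaset_host_reference(char uplo, int m, int n,
                                  double offdiag, double diag,
                                  double *A, int lda)
{
    for (int j = 0; j < n; ++j) {
        for (int i = 0; i < m; ++i) {
            const bool inPart = (uplo == 'F') ||
                                (uplo == 'L' && i >= j) ||   // lower triangle/trapezoid
                                (uplo == 'U' && i <= j);     // upper triangle/trapezoid
            if (!inPart) continue;
            A[(size_t)i + (size_t)j * (size_t)lda] = (i == j) ? diag : offdiag;
        }
    }
}
// Example use: dlaset_host_reference('F', m, n, 0.0, 1.0, hA, lda) fills the
// identity-like pattern that magmablas_dlaset(MagmaFull, m, n, 0.0, 1.0, ...)
// should reproduce on the device.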
50b0ec23cd7973b00b8c72032c9d383e1b50b691.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2016 Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. The names of its contributors may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * *********************************************************************************************** * * CARLsim * created by: (MDR) Micah Richert, (JN) Jayram M. Nageswaran * maintained by: * (MA) Mike Avery <[email protected]> * (MB) Michael Beyeler <[email protected]>, * (KDC) Kristofor Carlson <[email protected]> * (TSC) Ting-Shuo Chou <[email protected]> * (HK) Hirak J Kashyap <[email protected]> * * CARLsim v1.0: JM, MDR * CARLsim v2.0/v2.1/v2.2: JM, MDR, MA, MB, KDC * CARLsim3: MB, KDC, TSC * CARLsim4: TSC, HK * * CARLsim available from http://socsci.uci.edu/~jkrichma/CARLsim/ * Ver 12/31/2016 */ #include <snn.h> #include <spike_buffer.h> #include <error_code.h> #include <hip/hip_runtime.h> #define NUM_THREADS 128 #define NUM_BLOCKS 64 #define WARP_SIZE 32 /////////////////////////////////////////////////////////////////// // Some important ideas that explains the GPU execution are as follows: // 1. Each GPU block has a local firing table (called fireTable). The block of threads // reads a bunch of neurons parameters and determines if it needs to fire or not // Whenever a neuron need to fire, it keeps track of the fired neuron in the local // table. When the table is full, we go and write back the fireTable to the global // firing table. // 2. Firing information is maintained in two tables globally (timingTable and the globalFiringTable) // for excitatory neuron population and inhibitory neurons. // The globalFiringTable only stores a sequence of id corresponding to fired neurons. // The timingTable store the total number of fired neurons till the current time step t. // These two tables are flushed and adjusted every second. // This approach requires about half of the memory compared to the traditional AER scheme which // stores the firing time and firing id together. 
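// As a host-side illustration of that two-table layout (the names below are
// stand-ins, not the actual CARLsim buffers): a cumulative per-time-step count
// table plus a flat table of fired neuron ids, so the spikes of time step t
// occupy the index range [timingTable[t], timingTable[t+1]) and no per-spike
// time stamp has to be stored.
#include <vector>
#include <utility>

struct IllustrativeFiringLog {
    std::vector<unsigned int> timingTable;   // cumulative spike counts, one entry per time step (+1)
    std::vector<int>          firingTable;   // fired neuron ids, grouped by time step

    explicit IllustrativeFiringLog(int numSteps) : timingTable(numSteps + 1, 0) {}

    // record the spikes of time step t; must be called once for every t in order
    void recordStep(int t, const std::vector<int>& firedNeurons) {
        firingTable.insert(firingTable.end(), firedNeurons.begin(), firedNeurons.end());
        timingTable[t + 1] = (unsigned int)firingTable.size();
    }

    // index range of the spikes emitted in time step t
    std::pair<unsigned int, unsigned int> stepRange(int t) const {
        return std::make_pair(timingTable[t], timingTable[t + 1]);
    }
};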
// For more details kindly read the enclosed report (report.pdf) in the source directory // // // timeTableD2GPU[0] always is 0 -- index into firingTableD2 // timeTableD2GPU[maxDelay_] -- should be the number of spikes "leftover" from the previous second // timeTableD2GPU[maxDelay_+1]-timeTableD2GPU[maxDelay_] -- should be the number of spikes in the first ms of the current second // timeTableD2GPU[1000+maxDelay_] -- should be the number of spikes in the current second + the leftover spikes. // /////////////////////////////////////////////////////////////////// __device__ unsigned int timeTableD2GPU[TIMING_COUNT]; __device__ unsigned int timeTableD1GPU[TIMING_COUNT]; __device__ unsigned int spikeCountD2SecGPU; __device__ unsigned int spikeCountD1SecGPU; __device__ unsigned int spikeCountD2GPU; __device__ unsigned int spikeCountD1GPU; __device__ unsigned int recBufferIdx; __device__ unsigned int secD2fireCntTest; __device__ unsigned int secD1fireCntTest; __device__ unsigned int spikeCountLastSecLeftD2GPU; __device__ unsigned int spikeCountExtRxD1SecGPU; __device__ unsigned int spikeCountExtRxD2SecGPU; __device__ unsigned int spikeCountExtRxD2GPU; __device__ unsigned int spikeCountExtRxD1GPU; __device__ __constant__ RuntimeData runtimeDataGPU; __device__ __constant__ NetworkConfigRT networkConfigGPU; __device__ __constant__ GroupConfigRT groupConfigsGPU[MAX_GRP_PER_SNN]; __device__ __constant__ float d_mulSynFast[MAX_CONN_PER_SNN]; __device__ __constant__ float d_mulSynSlow[MAX_CONN_PER_SNN]; __device__ int loadBufferCount; __device__ int loadBufferSize; texture <int, 1, hipReadModeElementType> timeTableD2GPU_tex; texture <int, 1, hipReadModeElementType> timeTableD1GPU_tex; texture <int, 1, hipReadModeElementType> groupIdInfo_tex; // groupIDInfo is allocated using hipMalloc thus doesn't require an offset when using textures __device__ int timeTableD1GPU_tex_offset; __device__ int timeTableD2GPU_tex_offset; // example of the quick synaptic table // index cnt // 0000000 - 0 // 0000001 - 0 // 0000010 - 1 // 0100000 - 5 // 0110000 - 4 int quickSynIdTable[256]; __device__ int quickSynIdTableGPU[256]; void initQuickSynIdTable(int netId) { void* devPtr; for(int i = 1; i < 256; i++) { int cnt = 0; while(i) { if(((i >> cnt) & 1) == 1) break; cnt++; assert(cnt <= 7); } quickSynIdTable[i] = cnt; } hipSetDevice(netId); hipGetSymbolAddress(&devPtr, quickSynIdTableGPU); CUDA_CHECK_ERRORS(hipMemcpy( devPtr, quickSynIdTable, sizeof(quickSynIdTable), hipMemcpyHostToDevice)); } __device__ inline bool isPoissonGroup(short int lGrpId) { return (groupConfigsGPU[lGrpId].Type & POISSON_NEURON); } __device__ inline void setFiringBitSynapses(int lNId, int synId) { unsigned int* tmp_I_set_p = ((unsigned int*)((char*)runtimeDataGPU.I_set + ((synId >> 5) * networkConfigGPU.I_setPitch)) + lNId); atomicOr(tmp_I_set_p, 1 << (synId % 32)); } __device__ inline unsigned int* getFiringBitGroupPtr(int lNId, int synId) { return (((unsigned int*)((char*)runtimeDataGPU.I_set + synId * networkConfigGPU.I_setPitch)) + lNId); } __device__ inline int getSTPBufPos(int lNId, int simTime) { return (((simTime + 1) % (networkConfigGPU.maxDelay + 1)) * networkConfigGPU.STP_Pitch + lNId); } __device__ inline int2 getStaticThreadLoad(int bufPos) { return (runtimeDataGPU.neuronAllocation[bufPos]); } __device__ inline bool getPoissonSpike(int lNId) { // Random number value is less than the poisson firing probability // if poisson firing probability is say 1.0 then the random poisson ptr // will always be less than 1.0 and hence it will 
continiously fire return runtimeDataGPU.randNum[lNId - networkConfigGPU.numNReg] * 1000.0f < runtimeDataGPU.poissonFireRate[lNId - networkConfigGPU.numNReg]; } __device__ inline bool getSpikeGenBit(unsigned int nidPos) { const int nidBitPos = nidPos % 32; const int nidIndex = nidPos / 32; return ((runtimeDataGPU.spikeGenBits[nidIndex] >> nidBitPos) & 0x1); } /*! * \brief This device function updates the average firing rate of each neuron, which is required for homeostasis * * \param[in] lNId The neuron id to be updated * \param[in] lGrpId The group id of the neuron */ __device__ inline void updateHomeoStaticState(int lNId, int lGrpId) { // here the homeostasis adjustment runtimeDataGPU.avgFiring[lNId] *= (groupConfigsGPU[lGrpId].avgTimeScale_decay); } /*! * \brief After every time step we update the time table * * Only one cuda thread is required for updating the time table * * \param[in] simTime The current time step */ __global__ void kernel_updateTimeTable(int simTime) { if (threadIdx.x == 0 && blockIdx.x == 0) { timeTableD2GPU[simTime + networkConfigGPU.maxDelay + 1] = spikeCountD2SecGPU + spikeCountLastSecLeftD2GPU; timeTableD1GPU[simTime + networkConfigGPU.maxDelay + 1] = spikeCountD1SecGPU; } __syncthreads(); } ///////////////////////////////////////////////////////////////////////////////// // Device Kernel Function: Intialization of the GPU side of the simulator /// // KERNEL: This kernel is called after initialization of various parameters /// // so that we can reset all required parameters. /// ///////////////////////////////////////////////////////////////////////////////// __global__ void kernel_initGPUMemory() { // FIXME: use parallel access int timeTableIdx = blockIdx.x * blockDim.x + threadIdx.x; if (timeTableIdx < TIMING_COUNT) { timeTableD2GPU[timeTableIdx] = 0; timeTableD1GPU[timeTableIdx] = 0; } if (threadIdx.x == 0 && blockIdx.x == 0) { spikeCountD2SecGPU = 0; spikeCountD1SecGPU = 0; spikeCountD2GPU = 0; spikeCountD1GPU = 0; recBufferIdx = 0; secD2fireCntTest = 0; secD1fireCntTest = 0; spikeCountLastSecLeftD2GPU = 0; spikeCountExtRxD2GPU = 0; spikeCountExtRxD1GPU = 0; spikeCountExtRxD2SecGPU = 0; spikeCountExtRxD1SecGPU = 0; } } // Allocation of the group and its id.. void SNN::allocateGroupId(int netId) { checkAndSetGPUDevice(netId); assert (runtimeData[netId].groupIdInfo == NULL); int3* tempNeuronAllocation = (int3*)malloc(sizeof(int3) * networkConfigs[netId].numGroups); for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) { int3 threadLoad; threadLoad.x = groupConfigs[netId][lGrpId].lStartN; threadLoad.y = groupConfigs[netId][lGrpId].lEndN; threadLoad.z = lGrpId; tempNeuronAllocation[lGrpId] = threadLoad; } CUDA_CHECK_ERRORS(hipMalloc((void**)&runtimeData[netId].groupIdInfo, sizeof(int3) * networkConfigs[netId].numGroups)); CUDA_CHECK_ERRORS(hipMemcpy(runtimeData[netId].groupIdInfo, tempNeuronAllocation, sizeof(int3) * networkConfigs[netId].numGroups, hipMemcpyHostToDevice)); CUDA_CHECK_ERRORS(hipBindTexture(NULL, groupIdInfo_tex, runtimeData[netId].groupIdInfo, sizeof(int3) * networkConfigs[netId].numGroups)); free(tempNeuronAllocation); } /************************ VARIOUS KERNELS FOR FIRING CALCULATION AND FIRING UPDATE ****************************/ // Static Thread Load Allocation... // This function is necessary for static allocation of load that each CUDA-SM needs for its computation. 
// We store the static load allocation using the following format // Neuron starting position (32 bit): Group identification (16) : Buffer size (16 bit) // if we have 3 groups. grp(1) = 400 neurons, grp(2) = 100, grp(3) = 600 // The allocated static table will look as follows.. //------------------------- // start | grp | size //------------------------- // 0 : 0 : 256 // 256 : 0 : 144 // 400 : 1 : 100 // 500 : 2 : 256 // 756 : 2 : 256 // 1012 : 2 : 88 //----------------------- int SNN::allocateStaticLoad(int netId, int bufSize) { checkAndSetGPUDevice(netId); // only one thread does the static load table int bufferCnt = 0; for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) { int grpBufCnt = (int) ceil(1.0f * groupConfigs[netId][lGrpId].numN / bufSize); assert(grpBufCnt >= 0); bufferCnt += grpBufCnt; KERNEL_DEBUG("Grp Size = %d, Total Buffer Cnt = %d, Buffer Cnt = %d", groupConfigs[netId][lGrpId].numN, bufferCnt, grpBufCnt); } assert(bufferCnt > 0); int2* tempNeuronAllocation = (int2*)malloc(sizeof(int2) * bufferCnt); KERNEL_DEBUG("STATIC THREAD ALLOCATION"); KERNEL_DEBUG("------------------------"); KERNEL_DEBUG("Buffer Size = %d, Buffer Count = %d", bufSize, bufferCnt); bufferCnt = 0; for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) { for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId += bufSize) { int2 threadLoad; // starting neuron id is saved... threadLoad.x = lNId; if ((lNId + bufSize - 1) <= groupConfigs[netId][lGrpId].lEndN) // grpID + full size threadLoad.y = (lGrpId + (bufSize << 16)); // can't support group id > 2^16 else // grpID + left-over size threadLoad.y = (lGrpId + ((groupConfigs[netId][lGrpId].lEndN - lNId + 1) << 16)); // can't support group id > 2^16 // fill the static load distribution here... int testGrpId = STATIC_LOAD_GROUP(threadLoad); tempNeuronAllocation[bufferCnt] = threadLoad; KERNEL_DEBUG("%d. Start=%d, size=%d grpId=%d:%s (SpikeMonId=%d) (GroupMonId=%d)", bufferCnt, STATIC_LOAD_START(threadLoad), STATIC_LOAD_SIZE(threadLoad), STATIC_LOAD_GROUP(threadLoad), groupConfigMap[groupConfigs[netId][testGrpId].gGrpId].grpName.c_str(), groupConfigMDMap[groupConfigs[netId][testGrpId].gGrpId].spikeMonitorId, groupConfigMDMap[groupConfigs[netId][testGrpId].gGrpId].groupMonitorId); bufferCnt++; } } assert(runtimeData[netId].allocated == false); // Finally writeback the total bufferCnt // Note down the buffer size for reference KERNEL_DEBUG("GPU loadBufferSize = %d, GPU loadBufferCount = %d", bufSize, bufferCnt); CUDA_CHECK_ERRORS(hipMemcpyToSymbol(loadBufferCount, &bufferCnt, sizeof(int), 0, hipMemcpyHostToDevice)); CUDA_CHECK_ERRORS(hipMemcpyToSymbol(loadBufferSize, &bufSize, sizeof(int), 0, hipMemcpyHostToDevice)); CUDA_CHECK_ERRORS(hipMalloc((void**) &runtimeData[netId].neuronAllocation, sizeof(int2) * bufferCnt)); CUDA_CHECK_ERRORS(hipMemcpy(runtimeData[netId].neuronAllocation, tempNeuronAllocation, sizeof(int2) * bufferCnt, hipMemcpyHostToDevice)); free(tempNeuronAllocation); return bufferCnt; } ////////////////////////////////////////////////// // 1. 
KERNELS used when a specific neuron fires // ////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////// // Device local function: Update the STP Variables /// // update the STPU and STPX variable after firing /// ///////////////////////////////////////////////////////////////////////////////// // update the spike-dependent part of du/dt and dx/dt __device__ void firingUpdateSTP (int nid, int simTime, short int grpId) { // we need to retrieve the STP values from the right buffer position (right before vs. right after the spike) int ind_plus = getSTPBufPos(nid, simTime); int ind_minus = getSTPBufPos(nid, (simTime - 1)); // at this point, stpu[ind_plus] has already been assigned, and the decay applied // so add the spike-dependent part to that // du/dt = -u/tau_F + U * (1-u^-) * \delta(t-t_{spk}) runtimeDataGPU.stpu[ind_plus] += groupConfigsGPU[grpId].STP_U * (1.0f - runtimeDataGPU.stpu[ind_minus]); // dx/dt = (1-x)/tau_D - u^+ * x^- * \delta(t-t_{spk}) runtimeDataGPU.stpx[ind_plus] -= runtimeDataGPU.stpu[ind_plus] * runtimeDataGPU.stpx[ind_minus]; } __device__ void resetFiredNeuron(int lNId, short int lGrpId, int simTime) { // \FIXME \TODO: convert this to use coalesced access by grouping into a // single 16 byte access. This might improve bandwidth performance // This is fully uncoalsced access...need to convert to coalsced access.. if (groupConfigsGPU[lGrpId].WithSTDP) runtimeDataGPU.lastSpikeTime[lNId] = simTime; if (networkConfigGPU.sim_with_homeostasis) { // with homeostasis flag can be used here. runtimeDataGPU.avgFiring[lNId] += 1000/(groupConfigsGPU[lGrpId].avgTimeScale*1000); } } /*! * \brief 1. Copy neuron id from local table to global firing table. 2. Reset all neuron properties of neuron id in local table * * * \param[in] fireTablePtr the local shared memory firing table with neuron ids of fired neuron * \param[in] fireCntD2 the number of neurons in local table that has fired with group's max delay == 1 * \param[in] fireCntD1 the number of neurons in local table that has fired with group's max delay > 1 * \param[in] simTime the current time step, stored as neuron firing time entry */ __device__ void updateSpikeCount(volatile unsigned int& fireCnt, volatile unsigned int& fireCntD1, volatile unsigned int& cntD2, volatile unsigned int& cntD1, volatile int& blkErrCode) { int fireCntD2 = fireCnt - fireCntD1; cntD2 = atomicAdd(&secD2fireCntTest, fireCntD2); cntD1 = atomicAdd(&secD1fireCntTest, fireCntD1); //check for overflow in the firing table size.... if(secD2fireCntTest>networkConfigGPU.maxSpikesD2) { blkErrCode = NEW_FIRE_UPDATE_OVERFLOW_ERROR2; return; } else if(secD1fireCntTest>networkConfigGPU.maxSpikesD1) { blkErrCode = NEW_FIRE_UPDATE_OVERFLOW_ERROR1; return; } blkErrCode = 0; // get a distinct counter to store firing info // into the firing table cntD2 = atomicAdd(&spikeCountD2SecGPU, fireCntD2) + spikeCountLastSecLeftD2GPU; cntD1 = atomicAdd(&spikeCountD1SecGPU, fireCntD1); } // update the firing table... 
__device__ void updateFiringTable(int lNId, short int lGrpId, volatile unsigned int& cntD2, volatile unsigned int& cntD1) { int pos; if (groupConfigsGPU[lGrpId].MaxDelay == 1) { // this group has a delay of only 1 pos = atomicAdd((int*)&cntD1, 1); //runtimeDataGPU.firingTableD1[pos] = SET_FIRING_TABLE(nid, grpId); runtimeDataGPU.firingTableD1[pos] = lNId; } else { // all other groups is dumped here pos = atomicAdd((int*)&cntD2, 1); //runtimeDataGPU.firingTableD2[pos] = SET_FIRING_TABLE(nid, grpId); runtimeDataGPU.firingTableD2[pos] = lNId; } } // update the firing table... __device__ void updateExtFiringTable(int lNId, short int lGrpId) { int pos; if (groupConfigsGPU[lGrpId].MaxDelay == 1) { // this group has a delay of only 1 pos = atomicAdd((int*)&runtimeDataGPU.extFiringTableEndIdxD1[lGrpId], 1); //runtimeDataGPU.firingTableD1[pos] = SET_FIRING_TABLE(nid, grpId); runtimeDataGPU.extFiringTableD1[lGrpId][pos] = lNId + groupConfigsGPU[lGrpId].LtoGOffset; // convert to global neuron id } else { // all other groups is dumped here pos = atomicAdd((int*)&runtimeDataGPU.extFiringTableEndIdxD2[lGrpId], 1); //runtimeDataGPU.firingTableD2[pos] = SET_FIRING_TABLE(nid, grpId); runtimeDataGPU.extFiringTableD2[lGrpId][pos] = lNId + groupConfigsGPU[lGrpId].LtoGOffset; // convert to global neuron id } } __device__ int updateNewFirings(int* fireTablePtr, short int* fireGrpId, volatile unsigned int& fireCnt, volatile unsigned int& fireCntD1, int simTime) { __shared__ volatile unsigned int cntD2; __shared__ volatile unsigned int cntD1; __shared__ volatile int blkErrCode; blkErrCode = 0; if (threadIdx.x == 0) { updateSpikeCount(fireCnt, fireCntD1, cntD2, cntD1, blkErrCode); } __syncthreads(); // if we overflow the spike buffer space that is available, // then we return with an error here... if (blkErrCode) return blkErrCode; for (int i = threadIdx.x; i < fireCnt; i += blockDim.x) { // Read the firing id from the local table..... int lNId = fireTablePtr[i]; updateFiringTable(lNId, fireGrpId[i], cntD2, cntD1); if (groupConfigsGPU[fireGrpId[i]].hasExternalConnect) updateExtFiringTable(lNId, fireGrpId[i]); if (groupConfigsGPU[fireGrpId[i]].WithSTP) firingUpdateSTP(lNId, simTime, fireGrpId[i]); // keep track of number spikes per neuron runtimeDataGPU.nSpikeCnt[lNId]++; // only neurons would do the remaining settings... // pure poisson generators will return without changing anything else.. if (IS_REGULAR_NEURON(lNId, networkConfigGPU.numNReg, networkConfigGPU.numNPois)) resetFiredNeuron(lNId, fireGrpId[i], simTime); } __syncthreads(); return 0; } // zero GPU spike counts __global__ void kernel_resetNSpikeCnt(int lGrpId) { const int totBuffers = loadBufferCount; for (int bufPos = blockIdx.x; bufPos < totBuffers; bufPos += gridDim.x) { // KILLME !!! This can be further optimized .... // instead of reading each neuron group separately ..... // read a whole buffer and use the result ...... 
int2 threadLoad = getStaticThreadLoad(bufPos); int nid = (STATIC_LOAD_START(threadLoad) + threadIdx.x); int lastId = STATIC_LOAD_SIZE(threadLoad); int grpId = STATIC_LOAD_GROUP(threadLoad); if ((lGrpId == ALL || lGrpId == grpId) && (nid <= lastId)) { runtimeDataGPU.nSpikeCnt[nid] = 0; } } } // wrapper to call resetSpikeCnt void SNN::resetSpikeCnt_GPU(int netId, int lGrpId) { assert(runtimeData[netId].memType == GPU_MEM); if (lGrpId == ALL) { checkAndSetGPUDevice(netId); CUDA_CHECK_ERRORS(hipMemset((void*)runtimeData[netId].nSpikeCnt, 0, sizeof(int) * networkConfigs[netId].numN)); } else { checkAndSetGPUDevice(netId); hipLaunchKernelGGL(( kernel_resetNSpikeCnt), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, lGrpId); } } #define LTP_GROUPING_SZ 16 //!< synaptic grouping for LTP Calculation /*! * \brief Computes the STDP update values for each of fired neurons stored in the local firing table. * * \param[in] fireTablePtr the local firing table with neuron ids of fired neuron * \param[in] fireCnt the number of fired neurons in local firing table * \param[in] simTime the current time step, stored as neuron firing time entry */ __device__ void updateLTP(int* fireTablePtr, short int* fireGrpId, volatile unsigned int& fireCnt, int simTime) { for(int pos=threadIdx.x/LTP_GROUPING_SZ; pos < fireCnt; pos += (blockDim.x/LTP_GROUPING_SZ)) { // each neuron has two variable pre and pre_exc // pre: number of pre-neuron // pre_exc: number of neuron had has plastic connections short int grpId = fireGrpId[pos]; // STDP calculation: the post-synaptic neron fires after the arrival of pre-synaptic neuron's spike if (groupConfigsGPU[grpId].WithSTDP) { // MDR, FIXME this probably will cause more thread divergence than need be... int nid = fireTablePtr[pos]; unsigned int end_p = runtimeDataGPU.cumulativePre[nid] + runtimeDataGPU.Npre_plastic[nid]; for(unsigned int p = runtimeDataGPU.cumulativePre[nid] + threadIdx.x % LTP_GROUPING_SZ; p < end_p; p+=LTP_GROUPING_SZ) { int stdp_tDiff = (simTime - runtimeDataGPU.synSpikeTime[p]); if (stdp_tDiff > 0) { if (groupConfigsGPU[grpId].WithESTDP) { // Handle E-STDP curves switch (groupConfigsGPU[grpId].WithESTDPcurve) { case EXP_CURVE: // exponential curve if (stdp_tDiff * groupConfigsGPU[grpId].TAU_PLUS_INV_EXC < 25) runtimeDataGPU.wtChange[p] += STDP(stdp_tDiff, groupConfigsGPU[grpId].ALPHA_PLUS_EXC, groupConfigsGPU[grpId].TAU_PLUS_INV_EXC); break; case TIMING_BASED_CURVE: // sc curve if (stdp_tDiff * groupConfigsGPU[grpId].TAU_PLUS_INV_EXC < 25) { if (stdp_tDiff <= groupConfigsGPU[grpId].GAMMA) runtimeDataGPU.wtChange[p] += groupConfigsGPU[grpId].OMEGA + groupConfigsGPU[grpId].KAPPA * STDP(stdp_tDiff, groupConfigsGPU[grpId].ALPHA_PLUS_EXC, groupConfigsGPU[grpId].TAU_PLUS_INV_EXC); else // stdp_tDiff > GAMMA runtimeDataGPU.wtChange[p] -= STDP(stdp_tDiff, groupConfigsGPU[grpId].ALPHA_PLUS_EXC, groupConfigsGPU[grpId].TAU_PLUS_INV_EXC); } break; default: break; } } if (groupConfigsGPU[grpId].WithISTDP) { // Handle I-STDP curves switch (groupConfigsGPU[grpId].WithISTDPcurve) { case EXP_CURVE: // exponential curve if (stdp_tDiff * groupConfigsGPU[grpId].TAU_PLUS_INV_INB < 25) { // LTP of inhibitory synapse, which decreases synapse weight runtimeDataGPU.wtChange[p] -= STDP(stdp_tDiff, groupConfigsGPU[grpId].ALPHA_PLUS_INB, groupConfigsGPU[grpId].TAU_PLUS_INV_INB); } break; case PULSE_CURVE: // pulse curve if (stdp_tDiff <= groupConfigsGPU[grpId].LAMBDA) { // LTP of inhibitory synapse, which decreases synapse weight runtimeDataGPU.wtChange[p] -= groupConfigsGPU[grpId].BETA_LTP; } else 
if (stdp_tDiff <= groupConfigsGPU[grpId].DELTA) { // LTD of inhibitory syanpse, which increase sysnapse weight runtimeDataGPU.wtChange[p] -= groupConfigsGPU[grpId].BETA_LTD; } break; default: break; } } } } } } __syncthreads(); } #define FIRE_CHUNK_CNT 512 /*! * \brief This kernel is responsible for finding the neurons that need to be fired. * * We use a buffered firing table that allows neuron to gradually load * the buffer and make it easy to carry out the calculations in a single group. * A single function is used for simple neurons and also for poisson neurons. * The function also update LTP * * device access: spikeCountD2SecGPU, spikeCountD1SecGPU * net access: numNReg numNPois, numN, sim_with_stdp, sim_in_testing, sim_with_homeostasis, maxSpikesD1, maxSpikesD2 * grp access: Type, spikeGenFunc, Noffset, withSpikeCounter, spkCntBufPos, StartN, WithSTP, avgTimeScale WithSTDP, WithESTDP, WithISTDP, WithESTDPCurve, With ISTDPCurve, all STDP parameters * rtd access: randNum, poissonFireRate, spkCntBuf, nSpikeCnt, voltage, recovery, Izh_c, Izh_d * cumulativePre, Npre_plastic, (R)synSpikeTime, (W)lastSpikeTime, (W)wtChange, * avgFiring */ __global__ void kernel_findFiring (int simTimeMs, int simTime) { __shared__ volatile unsigned int fireCnt; __shared__ volatile unsigned int fireCntTest; __shared__ volatile unsigned int fireCntD1; __shared__ int fireTable[FIRE_CHUNK_CNT]; __shared__ short int fireGrpId[FIRE_CHUNK_CNT]; __shared__ volatile int errCode; if (threadIdx.x == 0) { fireCnt = 0; // initialize total cnt to 0 fireCntD1 = 0; // initialize d1 cnt to 0 fireCntTest = 0; // initialize test cnt to 0 } const int totBuffers=loadBufferCount; __syncthreads(); for (int bufPos = blockIdx.x; bufPos < totBuffers; bufPos += gridDim.x) { // KILLME !!! This can be further optimized .... // instead of reading each neuron group separately ..... // read a whole buffer and use the result ...... int2 threadLoad = getStaticThreadLoad(bufPos); int lNId = (STATIC_LOAD_START(threadLoad) + threadIdx.x); int lastLNId = STATIC_LOAD_SIZE(threadLoad); short int lGrpId = STATIC_LOAD_GROUP(threadLoad); bool needToWrite = false; // used by all neuron to indicate firing condition int fireId = 0; // threadId is valid and lies within the lastId..... if ((threadIdx.x < lastLNId) && (lNId < networkConfigGPU.numN)) { // Simple poisson spiker uses the poisson firing probability // to detect whether it has fired or not.... 
if(isPoissonGroup(lGrpId)) { // spikes generated by spikeGenFunc if(groupConfigsGPU[lGrpId].isSpikeGenFunc) { unsigned int offset = lNId - groupConfigsGPU[lGrpId].lStartN + groupConfigsGPU[lGrpId].Noffset; needToWrite = getSpikeGenBit(offset); } else { // spikes generated by poission rate needToWrite = getPoissonSpike(lNId); } // Note: valid lastSpikeTime of spike gen neurons is required by userDefinedSpikeGenerator() if (needToWrite) runtimeDataGPU.lastSpikeTime[lNId] = simTime; } else { // Regular neuron if (runtimeDataGPU.curSpike[lNId]) { runtimeDataGPU.curSpike[lNId] = false; needToWrite = true; } // log v, u value if any active neuron monitor is presented if (networkConfigGPU.sim_with_nm && lNId - groupConfigsGPU[lGrpId].lStartN < MAX_NEURON_MON_GRP_SZIE) { int idxBase = networkConfigGPU.numGroups * MAX_NEURON_MON_GRP_SZIE * simTimeMs + lGrpId * MAX_NEURON_MON_GRP_SZIE; runtimeDataGPU.nVBuffer[idxBase + lNId - groupConfigsGPU[lGrpId].lStartN] = runtimeDataGPU.voltage[lNId]; runtimeDataGPU.nUBuffer[idxBase + lNId - groupConfigsGPU[lGrpId].lStartN] = runtimeDataGPU.recovery[lNId]; } } } // loop through a few times to ensure that we have added/processed all spikes that need to be written // if the buffer is small relative to the number of spikes needing to be written, we may have to empty the buffer a few times... for (int c = 0; c < 2; c++) { // we first increment fireCntTest to make sure we haven't filled the buffer if (needToWrite) fireId = atomicAdd((int*)&fireCntTest, 1); // if there is a spike and the buffer still has space... if (needToWrite && (fireId <(FIRE_CHUNK_CNT))) { // get our position in the buffer fireId = atomicAdd((int*)&fireCnt, 1); if (groupConfigsGPU[lGrpId].MaxDelay == 1) atomicAdd((int*)&fireCntD1, 1); // store ID of the fired neuron needToWrite = false; fireTable[fireId] = lNId; fireGrpId[fireId] = lGrpId;//setFireProperties(grpId, isInhib); } __syncthreads(); // the local firing table is full. dump the local firing table to the global firing table before proceeding if (fireCntTest >= (FIRE_CHUNK_CNT)) { // clear the table and update... int retCode = updateNewFirings(fireTable, fireGrpId, fireCnt, fireCntD1, simTime); if (retCode != 0) return; // update based on stdp rule // KILLME !!! if (simTime > 0)) if (networkConfigGPU.sim_with_stdp && !networkConfigGPU.sim_in_testing) updateLTP (fireTable, fireGrpId, fireCnt, simTime); // reset counters if (threadIdx.x == 0) { fireCntD1 = 0; fireCnt = 0; fireCntTest = 0; } } } } __syncthreads(); // few more fired neurons are left. we update their firing state here.. if (fireCnt) { int retCode = updateNewFirings(fireTable, fireGrpId, fireCnt, fireCntD1, simTime); if (retCode != 0) return; if (networkConfigGPU.sim_with_stdp && !networkConfigGPU.sim_in_testing) updateLTP(fireTable, fireGrpId, fireCnt, simTime); } } //******************************** UPDATE CONDUCTANCES AND TOTAL SYNAPTIC CURRENT EVERY TIME STEP ***************************** #define LOG_CURRENT_GROUP 5 /*! * \brief Based on the bitvector used for indicating the presence of spike, the global conductance values are updated. 
* * net access: numNReg, numNPois, I_setPitch, maxDelay, STP_Pitch, sim_with_conductances, sim_with_NMDA_rise, sim_withGABAb_Rise, sNMDA, sGABAb * grp access: WithSTP, STP_A * rtd access: Npre, cumulativePre, I_set, preSynapticIds, grpIds, wt, stpx, stpu, connIdsPreIdx, gAMPA, gGABAa, gNMDA_r, gNMDA_d, gNMDA, gGABAb_r, gGABAb_d, gGABAb * glb access: d_mulSynFast, d_mulSynSlow */ __global__ void kernel_conductanceUpdate (int simTimeMs, int simTimeSec, int simTime) { __shared__ int sh_quickSynIdTable[256]; // Table for quick access for (int i = 0; i < 256; i += blockDim.x) { if ((i + threadIdx.x) < 256) { sh_quickSynIdTable[i + threadIdx.x] = quickSynIdTableGPU[i + threadIdx.x]; } } __syncthreads(); const int totBuffers = loadBufferCount; for (int bufPos = blockIdx.x; bufPos < totBuffers; bufPos += gridDim.x) { // KILLME !!! This can be further optimized .... // instead of reading each neuron group separately ..... // read a whole buffer and use the result ...... int2 threadLoad = getStaticThreadLoad(bufPos); int postNId = STATIC_LOAD_START(threadLoad) + threadIdx.x; int lastNId = STATIC_LOAD_SIZE(threadLoad); if ((threadIdx.x < lastNId) && (IS_REGULAR_NEURON(postNId, networkConfigGPU.numNReg, networkConfigGPU.numNPois))) { // P6-1 // load the initial current due to noise inputs for neuron 'post_nid' // initial values of the conductances for neuron 'post_nid' float AMPA_sum = 0.0f; float NMDA_sum = 0.0f; float NMDA_r_sum = 0.0f; float NMDA_d_sum = 0.0f; float GABAa_sum = 0.0f; float GABAb_sum = 0.0f; float GABAb_r_sum = 0.0f; float GABAb_d_sum = 0.0f; int lmt = runtimeDataGPU.Npre[postNId]; unsigned int cum_pos = runtimeDataGPU.cumulativePre[postNId]; // find the total current to this neuron... for (int j = 0; (lmt) && (j <= ((lmt - 1) >> LOG_CURRENT_GROUP)); j++) { // because of malloc2D operation we are using pitch, post_nid, j to get // actual position of the input current.... 
// int* tmp_I_set_p = ((int*)((char*)runtimeDataGPU.I_set + j * networkConfigGPU.I_setPitch) + post_nid); uint32_t* tmp_I_set_p = getFiringBitGroupPtr(postNId, j); uint32_t tmp_I_set = *tmp_I_set_p; // table lookup based find bits that are set int cnt = 0; int tmp_I_cnt = 0; while (tmp_I_set) { int k = (tmp_I_set >> (8 * cnt)) & 0xff; if (k == 0) { cnt = cnt + 1; continue; } int wt_i = sh_quickSynIdTable[k]; int wtId = (j * 32 + cnt * 8 + wt_i); SynInfo synInfo = runtimeDataGPU.preSynapticIds[cum_pos + wtId]; //uint8_t pre_grpId = GET_CONN_GRP_ID(pre_Id); uint32_t preNId = GET_CONN_NEURON_ID(synInfo); short int preGrpId = runtimeDataGPU.grpIds[preNId]; char type = groupConfigsGPU[preGrpId].Type; // load the synaptic weight for the wtId'th input float change = runtimeDataGPU.wt[cum_pos + wtId]; // Adjust the weight according to STP scaling if (groupConfigsGPU[preGrpId].WithSTP) { int tD = 0; // \FIXME find delay // \FIXME I think pre_nid needs to be adjusted for the delay int ind_minus = getSTPBufPos(preNId, (simTime - tD - 1)); // \FIXME should be adjusted for delay int ind_plus = getSTPBufPos(preNId, (simTime - tD)); // dI/dt = -I/tau_S + A * u^+ * x^- * \delta(t-t_{spk}) change *= groupConfigsGPU[preGrpId].STP_A * runtimeDataGPU.stpx[ind_minus] * runtimeDataGPU.stpu[ind_plus]; } if (networkConfigGPU.sim_with_conductances) { short int connId = runtimeDataGPU.connIdsPreIdx[cum_pos+wtId]; if (type & TARGET_AMPA) AMPA_sum += change * d_mulSynFast[connId]; if (type & TARGET_NMDA) { if (networkConfigGPU.sim_with_NMDA_rise) { NMDA_r_sum += change * d_mulSynSlow[connId] * networkConfigGPU.sNMDA; NMDA_d_sum += change * d_mulSynSlow[connId] * networkConfigGPU.sNMDA; } else { NMDA_sum += change * d_mulSynSlow[connId]; } } if (type & TARGET_GABAa) GABAa_sum += change * d_mulSynFast[connId]; // wt should be negative for GABAa and GABAb if (type & TARGET_GABAb) { // but that is dealt with below if (networkConfigGPU.sim_with_GABAb_rise) { GABAb_r_sum += change * d_mulSynSlow[connId] * networkConfigGPU.sGABAb; GABAb_d_sum += change * d_mulSynSlow[connId] * networkConfigGPU.sGABAb; } else { GABAb_sum += change * d_mulSynSlow[connId]; } } } else { // current based model with STP (CUBA) // updated current for neuron 'post_nid' AMPA_sum += change; } tmp_I_cnt++; tmp_I_set = tmp_I_set & (~(1 << (8 * cnt + wt_i))); } // FIXME: move reset outside kernel for debbuing I_set, resume it later // reset the input if there are any bit'wt set if(tmp_I_cnt) *tmp_I_set_p = 0; __syncthreads(); } __syncthreads(); // P6-2 if (networkConfigGPU.sim_with_conductances) { // don't add mulSynFast/mulSynSlow here, because they depend on the exact pre<->post connection, not // just post_nid runtimeDataGPU.gAMPA[postNId] += AMPA_sum; runtimeDataGPU.gGABAa[postNId] -= GABAa_sum; // wt should be negative for GABAa and GABAb if (networkConfigGPU.sim_with_NMDA_rise) { runtimeDataGPU.gNMDA_r[postNId] += NMDA_r_sum; runtimeDataGPU.gNMDA_d[postNId] += NMDA_d_sum; } else { runtimeDataGPU.gNMDA[postNId] += NMDA_sum; } if (networkConfigGPU.sim_with_GABAb_rise) { runtimeDataGPU.gGABAb_r[postNId] -= GABAb_r_sum; runtimeDataGPU.gGABAb_d[postNId] -= GABAb_d_sum; } else { runtimeDataGPU.gGABAb[postNId] -= GABAb_sum; } } else { runtimeDataGPU.current[postNId] += AMPA_sum; } } } } // single integration step for voltage equation of 4-param Izhikevich __device__ inline float dvdtIzhikevich4(float volt, float recov, float totCurrent, float timeStep = 1.0f) { return (((0.04f * volt + 5.0f) * volt + 140.0f - recov + totCurrent) * timeStep); } // single 
integration step for recovery equation of 4-param Izhikevich __device__ inline float dudtIzhikevich4(float volt, float recov, float izhA, float izhB, float timeStep = 1.0f) { return (izhA * (izhB * volt - recov) * timeStep); } // single integration step for voltage equation of 9-param Izhikevich __device__ inline float dvdtIzhikevich9(float volt, float recov, float invCapac, float izhK, float voltRest, float voltInst, float totCurrent, float timeStep = 1.0f) { return ((izhK * (volt - voltRest) * (volt - voltInst) - recov + totCurrent) * invCapac * timeStep); } // single integration step for recovery equation of 9-param Izhikevich __device__ inline float dudtIzhikevich9(float volt, float recov, float voltRest, float izhA, float izhB, float timeStep = 1.0f) { return (izhA * (izhB * (volt - voltRest) - recov) * timeStep); } __device__ inline float dvdtLIF(float volt, float lif_vReset, float lif_gain, float lif_bias, int lif_tau_m, float totalCurrent, float timeStep=1.0f){ return ((lif_vReset -volt + ((totalCurrent * lif_gain) + lif_bias))/ (float) lif_tau_m) * timeStep; } __device__ float getCompCurrent_GPU(int grpId, int neurId, float const0 = 0.0f, float const1 = 0.0f) { float compCurrent = 0.0f; for (int k = 0; k<groupConfigsGPU[grpId].numCompNeighbors; k++) { int grpIdOther = groupConfigsGPU[grpId].compNeighbors[k]; int neurIdOther = neurId - groupConfigsGPU[grpId].lStartN + groupConfigsGPU[grpIdOther].lStartN; compCurrent += groupConfigsGPU[grpId].compCoupling[k] * ((runtimeDataGPU.voltage[neurIdOther] + const1) - (runtimeDataGPU.voltage[neurId] + const0)); } return compCurrent; } //************************ UPDATE GLOBAL STATE EVERY TIME STEP *******************************************************// /*! * \brief This device function implements the equations of neuron dynamics * * \param[in] nid The neuron id to be updated * \param[in] grpId The group id of the neuron */ __device__ void updateNeuronState(int nid, int grpId, int simTimeMs, bool lastIteration) { float v = runtimeDataGPU.voltage[nid]; float v_next = runtimeDataGPU.nextVoltage[nid]; float u = runtimeDataGPU.recovery[nid]; float I_sum, NMDAtmp; float gNMDA, gGABAb; float k = runtimeDataGPU.Izh_k[nid]; float vr = runtimeDataGPU.Izh_vr[nid]; float vt = runtimeDataGPU.Izh_vt[nid]; float inverse_C = 1.0f / runtimeDataGPU.Izh_C[nid]; float vpeak = runtimeDataGPU.Izh_vpeak[nid]; float a = runtimeDataGPU.Izh_a[nid]; float b = runtimeDataGPU.Izh_b[nid]; // pre-load LIF parameters int lif_tau_m = runtimeDataGPU.lif_tau_m[nid]; int lif_tau_ref = runtimeDataGPU.lif_tau_ref[nid]; int lif_tau_ref_c = runtimeDataGPU.lif_tau_ref_c[nid]; float lif_vTh = runtimeDataGPU.lif_vTh[nid]; float lif_vReset = runtimeDataGPU.lif_vReset[nid]; float lif_gain = runtimeDataGPU.lif_gain[nid]; float lif_bias = runtimeDataGPU.lif_bias[nid]; const float one_sixth = 1.0f / 6.0f; float timeStep = networkConfigGPU.timeStep; float totalCurrent = runtimeDataGPU.extCurrent[nid]; if (networkConfigGPU.sim_with_conductances) { NMDAtmp = (v + 80.0f) * (v + 80.0f) / 60.0f / 60.0f; gNMDA = (networkConfigGPU.sim_with_NMDA_rise) ? (runtimeDataGPU.gNMDA_d[nid] - runtimeDataGPU.gNMDA_r[nid]) : runtimeDataGPU.gNMDA[nid]; gGABAb = (networkConfigGPU.sim_with_GABAb_rise) ? 
(runtimeDataGPU.gGABAb_d[nid] - runtimeDataGPU.gGABAb_r[nid]) : runtimeDataGPU.gGABAb[nid]; I_sum = -(runtimeDataGPU.gAMPA[nid] * (v - 0.0f) + gNMDA * NMDAtmp / (1.0f + NMDAtmp) * (v - 0.0f) + runtimeDataGPU.gGABAa[nid] * (v + 70.0f) + gGABAb * (v + 90.0f)); totalCurrent += I_sum; } else { totalCurrent += runtimeDataGPU.current[nid]; } if (groupConfigsGPU[grpId].withCompartments) { totalCurrent += getCompCurrent_GPU(grpId, nid); } switch (networkConfigGPU.simIntegrationMethod) { case FORWARD_EULER: if (!groupConfigsGPU[grpId].withParamModel_9 && !groupConfigsGPU[grpId].isLIF) { // 4-param Izhikevich // update vpos and upos for the current neuron v_next = v + dvdtIzhikevich4(v, u, totalCurrent, timeStep); if (v_next > 30.0f) { // record spike but keep integrating runtimeDataGPU.curSpike[nid] = true; v_next = runtimeDataGPU.Izh_c[nid]; u += runtimeDataGPU.Izh_d[nid]; } } else if(!groupConfigsGPU[grpId].isLIF) { // 9-param Izhikevich // update vpos and upos for the current neuron v_next = v + dvdtIzhikevich9(v, u, inverse_C, k, vr, vt, totalCurrent, timeStep); if (v_next > vpeak) { runtimeDataGPU.curSpike[nid] = true; v_next = runtimeDataGPU.Izh_c[nid]; u += runtimeDataGPU.Izh_d[nid]; } } else{ if (lif_tau_ref_c > 0){ if(lastIteration){ runtimeDataGPU.lif_tau_ref_c[nid] -= 1; v_next = lif_vReset; } } else{ if (v_next > lif_vTh) { runtimeDataGPU.curSpike[nid] = true; v_next = lif_vReset; if(lastIteration){ runtimeDataGPU.lif_tau_ref_c[nid] = lif_tau_ref; } else{ runtimeDataGPU.lif_tau_ref_c[nid] = lif_tau_ref+1; } } else{ v_next = v + dvdtLIF(v, lif_vReset, lif_gain, lif_bias, lif_tau_m, totalCurrent, timeStep); } } } if (groupConfigsGPU[grpId].isLIF){ if (v_next < lif_vReset) v_next = lif_vReset; } else{ if (v_next < -90.0f) v_next = -90.0f; if (!groupConfigsGPU[grpId].withParamModel_9) { u += dudtIzhikevich4(v_next, u, a, b, timeStep); } else { u += dudtIzhikevich9(v_next, u, vr, a, b, timeStep); } } break; case RUNGE_KUTTA4: if (!groupConfigsGPU[grpId].withParamModel_9 && !groupConfigsGPU[grpId].isLIF) { // 4-param Izhikevich float k1 = dvdtIzhikevich4(v, u, totalCurrent, timeStep); float l1 = dudtIzhikevich4(v, u, a, b, timeStep); float k2 = dvdtIzhikevich4(v + k1 / 2.0f, u + l1 / 2.0f, totalCurrent, timeStep); float l2 = dudtIzhikevich4(v + k1 / 2.0f, u + l1 / 2.0f, a, b, timeStep); float k3 = dvdtIzhikevich4(v + k2 / 2.0f, u + l2 / 2.0f, totalCurrent, timeStep); float l3 = dudtIzhikevich4(v + k2 / 2.0f, u + l2 / 2.0f, a, b, timeStep); float k4 = dvdtIzhikevich4(v + k3, u + l3, totalCurrent, timeStep); float l4 = dudtIzhikevich4(v + k3, u + l3, a, b, timeStep); v_next = v + one_sixth * (k1 + 2.0f * k2 + 2.0f * k3 + k4); if (v_next > 30.0f) { // record spike but keep integrating runtimeDataGPU.curSpike[nid] = true; v_next = runtimeDataGPU.Izh_c[nid]; u += runtimeDataGPU.Izh_d[nid]; } if (v_next < -90.0f) v_next = -90.0f; u += one_sixth * (l1 + 2.0f * l2 + 2.0f * l3 + l4); } else if(!groupConfigsGPU[grpId].isLIF){ // 9-param Izhikevich float k1 = dvdtIzhikevich9(v, u, inverse_C, k, vr, vt, totalCurrent, timeStep); float l1 = dudtIzhikevich9(v, u, vr, a, b, timeStep); float k2 = dvdtIzhikevich9(v + k1 / 2.0f, u + l1 / 2.0f, inverse_C, k, vr, vt, totalCurrent, timeStep); float l2 = dudtIzhikevich9(v + k1 / 2.0f, u + l1 / 2.0f, vr, a, b, timeStep); float k3 = dvdtIzhikevich9(v + k2 / 2.0f, u + l2 / 2.0f, inverse_C, k, vr, vt, totalCurrent, timeStep); float l3 = dudtIzhikevich9(v + k2 / 2.0f, u + l2 / 2.0f, vr, a, b, timeStep); float k4 = dvdtIzhikevich9(v + k3, u + l3, inverse_C, k, vr, vt, 
totalCurrent, timeStep); float l4 = dudtIzhikevich9(v + k3, u + l3, vr, a, b, timeStep); v_next = v + one_sixth * (k1 + 2.0f * k2 + 2.0f * k3 + k4); if (v_next > vpeak) { // record spike but keep integrating runtimeDataGPU.curSpike[nid] = true; v_next = runtimeDataGPU.Izh_c[nid]; u += runtimeDataGPU.Izh_d[nid]; } if (v_next < -90.0f) v_next = -90.0f; u += one_sixth * (l1 + 2.0f * l2 + 2.0f * l3 + l4); } else{ // LIF integration is always FORWARD_EULER if (lif_tau_ref_c > 0){ if(lastIteration){ runtimeDataGPU.lif_tau_ref_c[nid] -= 1; v_next = lif_vReset; } } else{ if (v_next > lif_vTh) { runtimeDataGPU.curSpike[nid] = true; v_next = lif_vReset; if(lastIteration){ runtimeDataGPU.lif_tau_ref_c[nid] = lif_tau_ref; } else{ runtimeDataGPU.lif_tau_ref_c[nid] = lif_tau_ref+1; } } else{ v_next = v + dvdtLIF(v, lif_vReset, lif_gain, lif_bias, lif_tau_m, totalCurrent, timeStep); } } if (v_next < lif_vReset) v_next = lif_vReset; } break; case UNKNOWN_INTEGRATION: default: // unknown integration method assert(false); } if(lastIteration) { if (networkConfigGPU.sim_with_conductances) { runtimeDataGPU.current[nid] = I_sum; } else { // current must be reset here for CUBA and not kernel_STPUpdateAndDecayConductances runtimeDataGPU.current[nid] = 0.0f; } // log i value if any active neuron monitor is presented if (networkConfigGPU.sim_with_nm && nid - groupConfigsGPU[grpId].lStartN < MAX_NEURON_MON_GRP_SZIE) { int idxBase = networkConfigGPU.numGroups * MAX_NEURON_MON_GRP_SZIE * simTimeMs + grpId * MAX_NEURON_MON_GRP_SZIE; runtimeDataGPU.nIBuffer[idxBase + nid - groupConfigsGPU[grpId].lStartN] = totalCurrent; } } runtimeDataGPU.nextVoltage[nid] = v_next; runtimeDataGPU.recovery[nid] = u; } /*! * \brief update neuron state * * This kernel update neurons' membrance potential according to neurons' dynamics model. * This kernel also update variables required by homeostasis * * net access: numN, numNReg, numNPois, sim_with_conductances, sim_with_NMDA_rise, sim_with_GABAb_rise * grp access: WithHomeostasis, avgTimeScale_decay * rtd access: avgFiring, voltage, recovery, gNMDA, gNMDA_r, gNMDA_d, gGABAb, gGABAb_r, gGABAb_d, gAMPA, gGABAa, * current, extCurrent, Izh_a, Izh_b * glb access: */ __global__ void kernel_neuronStateUpdate(int simTimeMs, bool lastIteration) { const int totBuffers = loadBufferCount; // update neuron state for (int bufPos = blockIdx.x; bufPos < totBuffers; bufPos += gridDim.x) { // KILLME !!! This can be further optimized .... // instead of reading each neuron group separately ..... // read a whole buffer and use the result ...... int2 threadLoad = getStaticThreadLoad(bufPos); int nid = (STATIC_LOAD_START(threadLoad) + threadIdx.x); int lastId = STATIC_LOAD_SIZE(threadLoad); int grpId = STATIC_LOAD_GROUP(threadLoad); if ((threadIdx.x < lastId) && (nid < networkConfigGPU.numN)) { if (IS_REGULAR_NEURON(nid, networkConfigGPU.numNReg, networkConfigGPU.numNPois)) { // P7 // update neuron state here.... updateNeuronState(nid, grpId, simTimeMs, lastIteration); // P8 if (groupConfigsGPU[grpId].WithHomeostasis) updateHomeoStaticState(nid, grpId); } } } } /*! 
* \brief Update the state of groups, which includes concentration of dopamine currently * * Update the concentration of neuronmodulator * * net access: numGroups * grp access: WithESTDPtype, WithISTDPtype, baseDP, decayDP * rtd access: grpDA, grpDABuffer * glb access: */ __global__ void kernel_groupStateUpdate(int simTime) { // update group state int grpIdx = blockIdx.x * blockDim.x + threadIdx.x; // P9 if (grpIdx < networkConfigGPU.numGroups) { // decay dopamine concentration if ((groupConfigsGPU[grpIdx].WithESTDPtype == DA_MOD || groupConfigsGPU[grpIdx].WithISTDPtype == DA_MOD) && runtimeDataGPU.grpDA[grpIdx] > groupConfigsGPU[grpIdx].baseDP) { runtimeDataGPU.grpDA[grpIdx] *= groupConfigsGPU[grpIdx].decayDP; } runtimeDataGPU.grpDABuffer[grpIdx * 1000 + simTime] = runtimeDataGPU.grpDA[grpIdx]; // log dopamine concentration } } //******************************** UPDATE STP STATE EVERY TIME STEP ********************************************** /*! * \brief This function is called for updat STP and decay coductance every time step * * net access sim_with_conductance, sim_with_NMDA_rise, sim_with_GABAb_rise, numNReg, numNPois, numN, STP_Pitch, maxDelay * grp access WithSTP * rtd access gAMPA, gNMDA_r, gNMDA_d, gNMDA, gBABAa, gGABAb_r, gGABAb_d, gGABAb * rtd access stpu, stpx */ __global__ void kernel_STPUpdateAndDecayConductances (int t, int sec, int simTime) { const int totBuffers = loadBufferCount; for (int bufPos = blockIdx.x; bufPos < totBuffers; bufPos += gridDim.x) { // KILLME !!! This can be further optimized .... // instead of reading each neuron group separately ..... // read a whole buffer and use the result ...... int2 threadLoad = getStaticThreadLoad(bufPos); int nid = (STATIC_LOAD_START(threadLoad) + threadIdx.x); int lastId = STATIC_LOAD_SIZE(threadLoad); int grpId = STATIC_LOAD_GROUP(threadLoad); // update the conductane parameter of the current neron if (networkConfigGPU.sim_with_conductances && IS_REGULAR_NEURON(nid, networkConfigGPU.numNReg, networkConfigGPU.numNPois)) { runtimeDataGPU.gAMPA[nid] *= networkConfigGPU.dAMPA; if (networkConfigGPU.sim_with_NMDA_rise) { runtimeDataGPU.gNMDA_r[nid] *= networkConfigGPU.rNMDA; runtimeDataGPU.gNMDA_d[nid] *= networkConfigGPU.dNMDA; } else { runtimeDataGPU.gNMDA[nid] *= networkConfigGPU.dNMDA; } runtimeDataGPU.gGABAa[nid] *= networkConfigGPU.dGABAa; if (networkConfigGPU.sim_with_GABAb_rise) { runtimeDataGPU.gGABAb_r[nid] *= networkConfigGPU.rGABAb; runtimeDataGPU.gGABAb_d[nid] *= networkConfigGPU.dGABAb; } else { runtimeDataGPU.gGABAb[nid] *= networkConfigGPU.dGABAb; } } if (groupConfigsGPU[grpId].WithSTP && (threadIdx.x < lastId) && (nid < networkConfigGPU.numN)) { int ind_plus = getSTPBufPos(nid, simTime); int ind_minus = getSTPBufPos(nid, (simTime-1)); // \FIXME sure? runtimeDataGPU.stpu[ind_plus] = runtimeDataGPU.stpu[ind_minus]*(1.0f-groupConfigsGPU[grpId].STP_tau_u_inv); runtimeDataGPU.stpx[ind_plus] = runtimeDataGPU.stpx[ind_minus] + (1.0f-runtimeDataGPU.stpx[ind_minus])*groupConfigsGPU[grpId].STP_tau_x_inv; } } } //********************************UPDATE SYNAPTIC WEIGHTS EVERY SECOND ************************************************************* /*! * \brief This kernel update synaptic weights * * This kernel is called every second to adjust the timingTable and globalFiringTable * We do the following thing: * 1. We discard all firing information that happened more than 1000-maxDelay_ time step. * 2. We move the firing information that happened in the last 1000-maxDelay_ time step to * the begining of the gloalFiringTable. * 3. 
We read each value of "wtChange" and update the value of "synaptic weights wt". * We also clip the "synaptic weight wt" to lie within the required range. */ __device__ void updateSynapticWeights(int nid, unsigned int synId, int grpId, float diff_firing, float homeostasisScale, float baseFiring, float avgTimeScaleInv) { // This function does not get called if the neuron group has all fixed weights. // t_twChange is adjusted by stdpScaleFactor based on frequency of weight updates (e.g., 10ms, 100ms, 1s) float t_wt = runtimeDataGPU.wt[synId]; float t_wtChange = runtimeDataGPU.wtChange[synId]; float t_effectiveWtChange = networkConfigGPU.stdpScaleFactor * t_wtChange; float t_maxWt = runtimeDataGPU.maxSynWt[synId]; switch (groupConfigsGPU[grpId].WithESTDPtype) { case STANDARD: if (groupConfigsGPU[grpId].WithHomeostasis) { // this factor is slow t_wt += (diff_firing*t_wt*homeostasisScale + t_effectiveWtChange) * baseFiring * avgTimeScaleInv / (1.0f+fabs(diff_firing)*50.0f); } else { t_wt += t_effectiveWtChange; } break; case DA_MOD: if (groupConfigsGPU[grpId].WithHomeostasis) { t_effectiveWtChange = runtimeDataGPU.grpDA[grpId] * t_effectiveWtChange; t_wt += (diff_firing*t_wt*homeostasisScale + t_effectiveWtChange) * baseFiring * avgTimeScaleInv / (1.0f+fabs(diff_firing)*50.0f); } else { t_wt += runtimeDataGPU.grpDA[grpId] * t_effectiveWtChange; } break; case UNKNOWN_STDP: default: // we shouldn't even be here if !WithSTDP break; } switch (groupConfigsGPU[grpId].WithISTDPtype) { case STANDARD: if (groupConfigsGPU[grpId].WithHomeostasis) { // this factor is slow t_wt += (diff_firing*t_wt*homeostasisScale + t_effectiveWtChange) * baseFiring * avgTimeScaleInv / (1.0f+fabs(diff_firing)*50.0f); } else { t_wt += t_effectiveWtChange; } break; case DA_MOD: if (groupConfigsGPU[grpId].WithHomeostasis) { t_effectiveWtChange = runtimeDataGPU.grpDA[grpId] * t_effectiveWtChange; t_wt += (diff_firing*t_wt*homeostasisScale + t_effectiveWtChange) * baseFiring * avgTimeScaleInv / (1.0f + fabs(diff_firing)*50.0f); } else { t_wt += runtimeDataGPU.grpDA[grpId] * t_effectiveWtChange; } break; case UNKNOWN_STDP: default: // we shouldn't even be here if !WithSTDP break; } // It's user's choice to decay weight change or not // see setWeightAndWeightChangeUpdate() t_wtChange *= networkConfigGPU.wtChangeDecay; // Check the synapse is excitatory or inhibitory first if (t_maxWt >= 0.0f) { // excitatory synapse if (t_wt >= t_maxWt) t_wt = t_maxWt; if (t_wt < 0.0f) t_wt = 0.0f; } else { // inhibitory synapse if (t_wt <= t_maxWt) t_wt = t_maxWt; if (t_wt > 0.0f) t_wt = 0.0f; } runtimeDataGPU.wt[synId] = t_wt; runtimeDataGPU.wtChange[synId] = t_wtChange; } #define UPWTS_CLUSTERING_SZ 32 /*! * \brief this kernel updates all synaptic weights * * net access: stdpScaleFactor, wtChangeDecay * grp access: homeostasisScale, avgTimeScaleInv, FixedInputWts, WithESTDPtype, WithISTDOtype, WithHomeostasis * rtd access: Npre_plastic, cumulativePre, avgFiring, baseFiringInv, baseFiring, wt, wtChange, maxSynWt * glb access: */ __global__ void kernel_updateWeights() { __shared__ volatile int errCode; __shared__ int startId, lastId, grpId, totBuffers, grpNCnt; __shared__ int2 threadLoad; // added for homeostasis __shared__ float homeostasisScale, avgTimeScaleInv; if(threadIdx.x == 0) { totBuffers = loadBufferCount; grpNCnt = (blockDim.x / UPWTS_CLUSTERING_SZ) + ((blockDim.x % UPWTS_CLUSTERING_SZ) != 0); } __syncthreads(); for (int bufPos = blockIdx.x; bufPos < totBuffers; bufPos += gridDim.x) { // KILLME !!! This can be further optimized .... 
// instead of reading each neuron group separately ..... // read a whole buffer and use the result ...... // if ( threadIdx.x) { // TSC: this could be a performance bug, 127 threads other than the first thread try to read // threadLoad and wirte homeostatsisScale and avgTimeScaleInv at the same time if (threadIdx.x == 0) { threadLoad = getStaticThreadLoad(bufPos); startId = STATIC_LOAD_START(threadLoad); lastId = STATIC_LOAD_SIZE(threadLoad); grpId = STATIC_LOAD_GROUP(threadLoad); // load homestasis parameters if (groupConfigsGPU[grpId].WithHomeostasis) { homeostasisScale = groupConfigsGPU[grpId].homeostasisScale; avgTimeScaleInv = groupConfigsGPU[grpId].avgTimeScaleInv; } else { homeostasisScale = 0.0f; avgTimeScaleInv = 1.0f; } } __syncthreads(); // the weights are fixed for this group.. so dont make any changes on // the weight and continue to the next set of neurons... if (groupConfigsGPU[grpId].FixedInputWts) continue; int nid = (threadIdx.x / UPWTS_CLUSTERING_SZ) + startId; // update the synaptic weights from the synaptic weight derivatives for(; nid < startId + lastId; nid += grpNCnt) { int Npre_plastic = runtimeDataGPU.Npre_plastic[nid]; unsigned int cumulativePre = runtimeDataGPU.cumulativePre[nid]; float diff_firing = 0.0f; float baseFiring = 0.0f; if (groupConfigsGPU[grpId].WithHomeostasis) { diff_firing = (1.0f - runtimeDataGPU.avgFiring[nid] * runtimeDataGPU.baseFiringInv[nid]); baseFiring = runtimeDataGPU.baseFiring[nid]; } const int threadIdGrp = (threadIdx.x % UPWTS_CLUSTERING_SZ); // use 32 threads to update 32 synapses parallely for(unsigned int synIdOffset = cumulativePre; synIdOffset < cumulativePre + Npre_plastic; synIdOffset += UPWTS_CLUSTERING_SZ) { //excitatory connection change the synaptic weights unsigned int synId = synIdOffset + threadIdGrp; if(synId < cumulativePre + Npre_plastic) { updateSynapticWeights(nid, synId, grpId, diff_firing, homeostasisScale, baseFiring, avgTimeScaleInv); } } } } } //********************************UPDATE TABLES AND COUNTERS EVERY SECOND ************************************************************* /*! * \brief This kernel shift the un-processed firing information in firingTableD2 to the beginning of * firingTableD2 for the next second of simulation. * * net access: maxDelay * grp access: N/A * rtd access: firingTableD2 * glb access: timeTableD2GPU */ __global__ void kernel_shiftFiringTable() { int gnthreads = blockDim.x * gridDim.x; for(int p = timeTableD2GPU[999], k = 0; p < timeTableD2GPU[999 + networkConfigGPU.maxDelay + 1]; p += gnthreads, k += gnthreads) { if ((p + threadIdx.x) < timeTableD2GPU[999 + networkConfigGPU.maxDelay + 1]) runtimeDataGPU.firingTableD2[k + threadIdx.x] = runtimeDataGPU.firingTableD2[p + threadIdx.x]; } } /*! * \brief This kernel shift the un-processed firing information in timeTableD1(D2)GPU to the beginning of * timeTableD1(D2)GPU for the next second of simulation. * * After all the threads/blocks had adjusted the firingTableD1(D2)GPU, we update the timeTableD1(D2)GPU * so that the firing information that happended in the last maxDelay_ time step would become * the first maxDelay_ time step firing information for the next second of simulation. * We also reset/update all spike counters to appropriate values as indicated in the second part * of this kernel. 
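 * For example, with maxDelay_ = 20 the cumulative spike counts stored at indices 1001..1020
 * are rebased by subtracting timeTableD1(D2)GPU[1000] and written to indices 1..20, while
 * index 0 remains 0.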
*/ __global__ void kernel_shiftTimeTable() { int maxDelay = networkConfigGPU.maxDelay; if(blockIdx.x == 0) { for(int i = threadIdx.x; i < maxDelay; i += blockDim.x) { // use i+1 instead of just i because timeTableD2GPU[0] should always be 0 timeTableD2GPU[i + 1] = timeTableD2GPU[1000 + i + 1] - timeTableD2GPU[1000]; timeTableD1GPU[i + 1] = timeTableD1GPU[1000 + i + 1] - timeTableD1GPU[1000]; } } __syncthreads(); // reset various counters for the firing information if((blockIdx.x == 0) && (threadIdx.x == 0)) { timeTableD1GPU[maxDelay] = 0; spikeCountD2GPU += spikeCountD2SecGPU; spikeCountD1GPU += spikeCountD1SecGPU; spikeCountD2SecGPU = 0; spikeCountD1SecGPU = 0; spikeCountExtRxD2SecGPU = 0; spikeCountExtRxD1SecGPU = 0; spikeCountLastSecLeftD2GPU = timeTableD2GPU[maxDelay]; secD2fireCntTest = timeTableD2GPU[maxDelay]; secD1fireCntTest = 0; } } //****************************** GENERATE POST-SYNAPTIC CURRENT EVERY TIME-STEP **************************** /* * The sequence of handling an post synaptic spike in GPU mode: * P1. Update synSpikeTime * P2. Update DA,5HT,ACh,NE accordingly * P3. Update STDP wtChange * P4. Load wt into change (temporary variable) * P5. Modulate change by STP (if enabled) * P6-1. Modulate change by d_mulSynSlow and d_mulSynFast * P6-2. Accumulate g(AMPA,NMDA,GABAa,GABAb) or current * P7. Update v(voltage), u(recovery) * P8. Update homeostasis * P9. Decay and log DA,5HT,ACh,NE */ __device__ void generatePostSynapticSpike(int simTime, int preNId, int postNId, int synId) { // get the actual position of the synapses and other variables... unsigned int pos = runtimeDataGPU.cumulativePre[postNId] + synId; short int preGrpId = runtimeDataGPU.grpIds[preNId]; // STP uses preGrpId short int postGrpId = runtimeDataGPU.grpIds[postNId]; // STDP uses postGrpId setFiringBitSynapses(postNId, synId); // P1 runtimeDataGPU.synSpikeTime[pos] = simTime; //uncoalesced access // P2 // Got one spike from dopaminergic neuron, increase dopamine concentration in the target area if (groupConfigsGPU[preGrpId].Type & TARGET_DA) { atomicAdd(&(runtimeDataGPU.grpDA[postGrpId]), 0.04f); } // P3 // STDP calculation: the post-synaptic neuron fires before the arrival of pre-synaptic neuron's spike if (groupConfigsGPU[postGrpId].WithSTDP && !networkConfigGPU.sim_in_testing) { int stdp_tDiff = simTime - runtimeDataGPU.lastSpikeTime[postNId]; if (stdp_tDiff >= 0) { if (groupConfigsGPU[postGrpId].WithESTDP) { // Handle E-STDP curves switch (groupConfigsGPU[postGrpId].WithESTDPcurve) { case EXP_CURVE: // exponential curve case TIMING_BASED_CURVE: // sc curve if (stdp_tDiff * groupConfigsGPU[postGrpId].TAU_MINUS_INV_EXC < 25.0f) runtimeDataGPU.wtChange[pos] += STDP(stdp_tDiff, groupConfigsGPU[postGrpId].ALPHA_MINUS_EXC, groupConfigsGPU[postGrpId].TAU_MINUS_INV_EXC); // uncoalesced access break; default: break; } } if (groupConfigsGPU[postGrpId].WithISTDP) { // Handle I-STDP curves switch (groupConfigsGPU[postGrpId].WithISTDPcurve) { case EXP_CURVE: // exponential curve if ((stdp_tDiff * groupConfigsGPU[postGrpId].TAU_MINUS_INV_INB) < 25.0f) { // LTD of inhibitory syanpse, which increase synapse weight runtimeDataGPU.wtChange[pos] -= STDP(stdp_tDiff, groupConfigsGPU[postGrpId].ALPHA_MINUS_INB, groupConfigsGPU[postGrpId].TAU_MINUS_INV_INB); } break; case PULSE_CURVE: // pulse curve if (stdp_tDiff <= groupConfigsGPU[postGrpId].LAMBDA) { // LTP of inhibitory synapse, which decreases synapse weight runtimeDataGPU.wtChange[pos] -= groupConfigsGPU[postGrpId].BETA_LTP; } else if (stdp_tDiff <= 
groupConfigsGPU[postGrpId].DELTA) { // LTD of inhibitory syanpse, which increase synapse weight runtimeDataGPU.wtChange[pos] -= groupConfigsGPU[postGrpId].BETA_LTD; } break; default: break; } } } } } #define READ_CHUNK_SZ 64 /*! * \brief This kernel updates and generates spikes for delays greater than 1 from the fired neuron. * * The LTD computation is also executed by this kernel. * * net access: maxDelay, I_setPitch, sim_in_testing * grp access: Type, WithSTDP, WithESTDP, WithESTDPcurve, WithISDP, WithISTDPcurve, all STDP parameters * rtd access: firingTableD2, cumulativePost, postDelayInfo, postSynapticIds, cumulativePre, grpIds, * grpDA, I_set, (W)synSpikeTime, (R)lastSpikeTime, wtChange * glb access: spikeCountD2SecGPU, timeTableD2GPU_tex, timeTableD2GPU_tex_offset */ __global__ void kernel_doCurrentUpdateD2(int simTimeMs, int simTimeSec, int simTime) { __shared__ volatile int sh_neuronOffsetTable[READ_CHUNK_SZ + 2]; __shared__ int sh_delayLength[READ_CHUNK_SZ + 2]; __shared__ int sh_delayIndexStart[READ_CHUNK_SZ + 2]; __shared__ int sh_firingId[READ_CHUNK_SZ + 2]; __shared__ volatile int sh_NeuronCnt; const int threadIdWarp = (threadIdx.x % WARP_SIZE); const int warpId = (threadIdx.x / WARP_SIZE); // this variable is used to record the // number of updates done by different blocks if(threadIdx.x<=0) { sh_NeuronCnt = 0; } __syncthreads(); // stores the number of fired neurons at time t int k = tex1Dfetch(timeTableD2GPU_tex, simTimeMs + networkConfigGPU.maxDelay + 1 + timeTableD2GPU_tex_offset) - 1; // stores the number of fired neurons at time (t - maxDelay_) int k_end = tex1Dfetch(timeTableD2GPU_tex, simTimeMs + 1 + timeTableD2GPU_tex_offset); int t_pos = simTimeMs; // we need to read (k-k_end) neurons from the firing // table and do necesary updates for all these post-synaptic // connection in these neurons.. while ((k >= k_end) && (k >= 0)) { // at any point of time EXCIT_READ_CHUNK_SZ neurons // read different firing id from the firing table if (threadIdx.x < READ_CHUNK_SZ) { // use 64 threads int fPos = k - (READ_CHUNK_SZ * blockIdx.x) - threadIdx.x; if ((fPos >= 0) && (fPos >= k_end)) { // get the neuron nid here.... //int val = runtimeDataGPU.firingTableD2[fPos]; //int nid = GET_FIRING_TABLE_NID(val); int nid = runtimeDataGPU.firingTableD2[fPos]; // find the time of firing based on the firing number fPos while (!((fPos >= tex1Dfetch(timeTableD2GPU_tex, t_pos + networkConfigGPU.maxDelay + timeTableD2GPU_tex_offset)) && (fPos < tex1Dfetch(timeTableD2GPU_tex, t_pos + networkConfigGPU.maxDelay + 1 + timeTableD2GPU_tex_offset)))) { t_pos--; } // find the time difference between firing of the neuron and the current time int tD = simTimeMs - t_pos; // find the various delay parameters for neuron 'nid', with a delay of 'tD' //sh_axonDelay[threadIdx.x] = tD; int tPos = (networkConfigGPU.maxDelay + 1) * nid + tD; //sh_firingId[threadIdx.x] = val; sh_firingId[threadIdx.x] = nid; sh_neuronOffsetTable[threadIdx.x] = runtimeDataGPU.cumulativePost[nid]; sh_delayLength[threadIdx.x] = runtimeDataGPU.postDelayInfo[tPos].delay_length; sh_delayIndexStart[threadIdx.x] = runtimeDataGPU.postDelayInfo[tPos].delay_index_start; // This is to indicate that the current thread // has a valid delay parameter for post-synaptic firing generation atomicAdd((int*)&sh_NeuronCnt, 1); } } __syncthreads(); // if cnt is zero than no more neurons need to generate // post-synaptic firing, then we break the loop. 
if (sh_NeuronCnt == 0) { break; } // first WARP_SIZE threads the post synaptic // firing for first neuron, and so on. each of this group // needs to generate (numPostSynapses/maxDelay_) spikes for every fired neuron, every second // for numPostSynapses=500,maxDelay_=20, we need to generate 25 spikes for each fired neuron // for numPostSynapses=600,maxDelay_=20, we need to generate 30 spikes for each fired neuron for (int pos = warpId; pos < sh_NeuronCnt; pos += (NUM_THREADS / WARP_SIZE)) { int delId = threadIdWarp; while (delId < sh_delayLength[pos]) { // get the post synaptic information for specific delay SynInfo postInfo = runtimeDataGPU.postSynapticIds[sh_neuronOffsetTable[pos] + sh_delayIndexStart[pos] + delId]; int postNId = GET_CONN_NEURON_ID(postInfo); // get post-neuron id int synId = GET_CONN_SYN_ID(postInfo); // get synaptic id if (postNId < networkConfigGPU.numN) // test if post-neuron is a local neuron generatePostSynapticSpike(simTime, sh_firingId[pos] /* preNId */, postNId, synId); delId += WARP_SIZE; } } //(for all excitory neurons in table) __syncthreads(); if(threadIdx.x == 0) { sh_NeuronCnt = 0; } k = k - (gridDim.x * READ_CHUNK_SZ); __syncthreads(); } __syncthreads(); } /*! * \brief This kernel updating and generating spikes on connections with a delay of 1ms from the fired neuron. * * This function looks mostly like kernel_doCurrentUpdateD2() but has been optimized for a fixed delay of 1ms. * Ultimately we may merge this kernel with the kernel_doCurrentUpdateD2(). * The LTD computation is also executed by this kernel. * * net access: maxDelay, I_setPitch, sim_in_testing * grp access: Type, grpDA, WithSTDP, WithESTDP, WithISTDP, WithESTDPcurve, WithISTDPcurve, all STDP parameters * rtd access: postSynapticIds, cumulativePre, grpIds, I_set, wtChange, (R)lastSpikeTime, (W)synSpikeTime * glb access: timeTableD1GPU, spikeCountD1SecGPU, firingTableD1 */ __global__ void kernel_doCurrentUpdateD1(int simTimeMs, int simTimeSec, int simTime) { __shared__ volatile int sh_NeuronCnt; __shared__ volatile int sh_neuronOffsetTable[NUM_THREADS / WARP_SIZE + 2]; __shared__ int sh_delayLength[NUM_THREADS / WARP_SIZE + 2]; __shared__ int sh_firingId[NUM_THREADS / WARP_SIZE + 2]; __shared__ int sh_delayIndexStart[NUM_THREADS / WARP_SIZE + 2]; __shared__ int sh_timing; __shared__ int kPosEnd; const int warpId = threadIdx.x / WARP_SIZE; // warp id const int numWarps = blockDim.x / WARP_SIZE; // number of warp const int threadIdWarp = threadIdx.x % WARP_SIZE; // thread id within a warp // load the time table for neuron firing if (threadIdx.x == 0) { sh_timing = timeTableD1GPU[simTimeMs + networkConfigGPU.maxDelay]; // number of fired neurons at simTimeMs - 1 kPosEnd = timeTableD1GPU[simTimeMs + networkConfigGPU.maxDelay + 1]; // number of fired neurons at simTimeMs, which is equal to spikeCountD1SecGPU } __syncthreads(); int kPos = sh_timing + (blockIdx.x * numWarps); __syncthreads(); // Do current update as long as we have some valid neuron while ((kPos >= 0) && (kPos < kPosEnd)) { int fPos = -1; // a group of threads (4 threads) loads the delay information if (threadIdx.x < numWarps) { sh_neuronOffsetTable[threadIdx.x] = -1; fPos = kPos + threadIdx.x; // find the neuron nid and also delay information from fPos if ((fPos >= 0) && (fPos < kPosEnd)) { atomicAdd((int*)&sh_NeuronCnt, 1); //int val = runtimeDataGPU.firingTableD1[fPos]; //int nid = GET_FIRING_TABLE_NID(val); int nid = runtimeDataGPU.firingTableD1[fPos]; int tPos = (networkConfigGPU.maxDelay + 1) * nid; //sh_firingId[threadIdx.x] = 
val; sh_firingId[threadIdx.x] = nid; sh_neuronOffsetTable[threadIdx.x] = runtimeDataGPU.cumulativePost[nid]; sh_delayLength[threadIdx.x] = runtimeDataGPU.postDelayInfo[tPos].delay_length; sh_delayIndexStart[threadIdx.x] = runtimeDataGPU.postDelayInfo[tPos].delay_index_start; } } __syncthreads(); // no more fired neuron from table... we just break from loop if (sh_NeuronCnt == 0) { break; } __syncthreads(); int offset = sh_neuronOffsetTable[warpId]; if (threadIdx.x == 0) { sh_NeuronCnt = 0; } // 32 threads for generatePostSynapticSpike() if (offset >= 0) { int delId = threadIdWarp; while (delId < sh_delayLength[warpId]) { // get the post synaptic information for specific delay SynInfo postInfo = runtimeDataGPU.postSynapticIds[offset + sh_delayIndexStart[warpId] + delId]; int postNId = GET_CONN_NEURON_ID(postInfo); // get post-neuron id int synId = GET_CONN_SYN_ID(postInfo); // get synaptic id if (postNId < networkConfigGPU.numN) // test if post-neuron is a local neuron generatePostSynapticSpike(simTime, sh_firingId[warpId] /* preNId */, postNId, synId); delId += WARP_SIZE; } } __syncthreads(); kPos = kPos + (gridDim.x * numWarps); } } __global__ void kernel_convertExtSpikesD2(int startIdx, int endIdx, int GtoLOffset) { int firingTableIdx = startIdx + blockIdx.x * blockDim.x + threadIdx.x; int spikeCountExtRx = endIdx - startIdx; // received external spike count if (threadIdx.x == 0 && blockIdx.x == 0) { secD2fireCntTest += spikeCountExtRx; spikeCountD2SecGPU += spikeCountExtRx; spikeCountExtRxD2GPU += spikeCountExtRx; spikeCountExtRxD2SecGPU += spikeCountExtRx; } // FIXME: if endIdx - startIdx > 64 * 128 if (firingTableIdx < endIdx) runtimeDataGPU.firingTableD2[firingTableIdx] += GtoLOffset; } __global__ void kernel_convertExtSpikesD1(int startIdx, int endIdx, int GtoLOffset) { int firingTableIdx = startIdx + blockIdx.x * blockDim.x + threadIdx.x; int spikeCountExtRx = endIdx - startIdx; // received external spike count if (threadIdx.x == 0 && blockIdx.x == 0) { secD1fireCntTest += spikeCountExtRx; spikeCountD1SecGPU += spikeCountExtRx; spikeCountExtRxD1GPU += spikeCountExtRx; spikeCountExtRxD1SecGPU += spikeCountExtRx; } // FIXME: if endIdx - startIdx > 64 * 128 if (firingTableIdx < endIdx) runtimeDataGPU.firingTableD1[firingTableIdx] += GtoLOffset; } /*! * \brief this function allocates device (GPU) memory sapce and copies information of pre-connections to it * * This function: * initialize Npre_plasticInv * (allocate and) copy Npre, Npre_plastic, Npre_plasticInv, cumulativePre, preSynapticIds * (allocate and) copy Npost, cumulativePost, postSynapticIds, postDelayInfo * * * \param[in] netId the id of a local network, which is the same as the device (GPU) id * \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied * \param[in] dest pointer to runtime data desitnation * \param[in] src pointer to runtime data source * \param[in] kind the direction of copying * \param[in] allocateMem a flag indicates whether allocating memory space before copying * * \sa allocateSNN_GPU * \since v4.0 */ void SNN::copyPreConnectionInfo(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, hipMemcpyKind kind, bool allocateMem) { checkAndSetGPUDevice(netId); checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, 0); // check that the destination pointer is properly allocated.. 
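	// Determine the neuron range to copy: either the whole local network (lGrpId == ALL) or a
	// single group's [lStartN, lStartN + numN) slice. The synapse range is derived further below
	// from Npre and cumulativePre, which must therefore be copied to the destination first.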
int lengthN, lengthSyn, posN, posSyn; if (lGrpId == ALL) { lengthN = networkConfigs[netId].numNAssigned; posN = 0; } else { lengthN = groupConfigs[netId][lGrpId].numN; posN = groupConfigs[netId][lGrpId].lStartN; } // connection synaptic lengths and cumulative lengths... if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->Npre, sizeof(short) * networkConfigs[netId].numNAssigned)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->Npre[posN], &src->Npre[posN], sizeof(short) * lengthN, kind)); // we don't need these data structures if the network doesn't have any plastic synapses at all if (!sim_with_fixedwts) { // presyn excitatory connections if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->Npre_plastic, sizeof(short) * networkConfigs[netId].numNAssigned)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->Npre_plastic[posN], &src->Npre_plastic[posN], sizeof(short) * lengthN, kind)); // Npre_plasticInv is only used on GPUs, only allocate and copy it during initialization if(allocateMem) { float* Npre_plasticInv = new float[networkConfigs[netId].numNAssigned]; for (int i = 0; i < networkConfigs[netId].numNAssigned; i++) Npre_plasticInv[i] = 1.0f / managerRuntimeData.Npre_plastic[i]; CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->Npre_plasticInv, sizeof(float) * networkConfigs[netId].numNAssigned)); CUDA_CHECK_ERRORS(hipMemcpy(dest->Npre_plasticInv, Npre_plasticInv, sizeof(float) * networkConfigs[netId].numNAssigned, kind)); delete[] Npre_plasticInv; } } // beginning position for the pre-synaptic information if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->cumulativePre, sizeof(int) * networkConfigs[netId].numNAssigned)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->cumulativePre[posN], &src->cumulativePre[posN], sizeof(int) * lengthN, kind)); // Npre, cumulativePre has been copied to destination if (lGrpId == ALL) { lengthSyn = networkConfigs[netId].numPreSynNet; posSyn = 0; } else { lengthSyn = 0; for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++) lengthSyn += dest->Npre[lNId]; posSyn = dest->cumulativePre[groupConfigs[netId][lGrpId].lStartN]; } if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->preSynapticIds, sizeof(SynInfo) * networkConfigs[netId].numPreSynNet)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->preSynapticIds[posSyn], &src->preSynapticIds[posSyn], sizeof(SynInfo) * lengthSyn, kind)); } /*! * \brief this function allocates device (GPU) memory sapce and copies information of post-connections to it * * This function: * (allocate and) copy Npost, cumulativePost, postSynapticIds, postDelayInfo * * * \param[in] netId the id of a local network, which is the same as the device (GPU) id * \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied * \param[in] dest pointer to runtime data desitnation * \param[in] src pointer to runtime data source * \param[in] kind the direction of copying * \param[in] allocateMem a flag indicates whether allocating memory space before copying * * \sa allocateSNN_GPU * \since v4.0 */ void SNN::copyPostConnectionInfo(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, hipMemcpyKind kind, bool allocateMem) { checkAndSetGPUDevice(netId); checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, 0);// check that the destination pointer is properly allocated.. 
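	// Same range computation as in copyPreConnectionInfo(), but applied to the post-synaptic
	// structures: Npost, cumulativePost, postSynapticIds and postDelayInfo (the latter holds
	// (maxDelay + 1) DelayInfo entries per assigned neuron).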
int lengthN, lengthSyn, posN, posSyn; if (lGrpId == ALL) { lengthN = networkConfigs[netId].numNAssigned; posN = 0; } else { lengthN = groupConfigs[netId][lGrpId].numN; posN = groupConfigs[netId][lGrpId].lStartN; } // number of postsynaptic connections if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->Npost, sizeof(short) * networkConfigs[netId].numNAssigned)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->Npost[posN], &src->Npost[posN], sizeof(short) * lengthN, kind)); // beginning position for the post-synaptic information if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->cumulativePost, sizeof(int) * networkConfigs[netId].numNAssigned)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->cumulativePost[posN], &src->cumulativePost[posN], sizeof(int) * lengthN, kind)); // Npost, cumulativePost has been copied to destination if (lGrpId == ALL) { lengthSyn = networkConfigs[netId].numPostSynNet; posSyn = 0; } else { lengthSyn = 0; for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++) lengthSyn += dest->Npost[lNId]; posSyn = dest->cumulativePost[groupConfigs[netId][lGrpId].lStartN]; } // actual post synaptic connection information... if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->postSynapticIds, sizeof(SynInfo) * networkConfigs[netId].numPostSynNet)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->postSynapticIds[posSyn], &src->postSynapticIds[posSyn], sizeof(SynInfo) * lengthSyn, kind)); // static specific mapping and actual post-synaptic delay metric if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->postDelayInfo, sizeof(DelayInfo) * networkConfigs[netId].numNAssigned * (glbNetworkConfig.maxDelay + 1))); CUDA_CHECK_ERRORS(hipMemcpy(&dest->postDelayInfo[posN * (glbNetworkConfig.maxDelay + 1)], &src->postDelayInfo[posN * (glbNetworkConfig.maxDelay + 1)], sizeof(DelayInfo) * lengthN * (glbNetworkConfig.maxDelay + 1), kind)); } void SNN::checkDestSrcPtrs(RuntimeData* dest, RuntimeData* src, hipMemcpyKind kind, bool allocateMem, int lGrpId, int destOffset) { // source should always be allocated assert(src->allocated); if(kind == hipMemcpyHostToDevice) { assert(src->memType == CPU_MEM); assert(dest->memType == GPU_MEM); if (allocateMem) { assert(!dest->allocated); // if allocateMem = true, then the destination must be empty without allocation. assert(lGrpId == ALL); // if allocateMem = true, then we should not specify any specific group. } else { assert(dest->allocated); // if allocateMem = false, then the destination must be allocated. } assert(destOffset == 0); // H-to-D only allows local-to-local copy } else if (kind == hipMemcpyDeviceToHost) { assert(src->memType == GPU_MEM); assert(dest->memType == CPU_MEM); assert(dest->allocated); if (lGrpId == ALL) assert(destOffset == 0); // if copy all content, only local-to-local is allowed } else { KERNEL_ERROR("Wrong Host-Device copy direction"); exitSimulation(1); } } /*! * \brief this function allocates device (GPU) memory sapce and copies AMPA conductance to it * * This function: * (allocate and) copy gAMPA * * This funcion is called by copyNeuronState() and fetchConductanceAMPA(). 
It supports bi-directional copying * * \param[in] netId the id of a local network, which is the same as the device (GPU) id * \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied * \param[in] dest pointer to runtime data desitnation * \param[in] src pointer to runtime data source * \param[in] kind the direction of copy * \param[in] allocateMem a flag indicates whether allocating memory space before copy * \param[in] destOffset the offset of data destination, which is used in local-to-global copy * * \sa copyNeuronState fetchConductanceAMPA * \since v3.0 */ void SNN::copyConductanceAMPA(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, hipMemcpyKind kind, bool allocateMem, int destOffset) { checkAndSetGPUDevice(netId); checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, destOffset);// check that the destination pointer is properly allocated.. assert(isSimulationWithCOBA()); int ptrPos, length; if(lGrpId == ALL) { ptrPos = 0; length = networkConfigs[netId].numNReg; } else { ptrPos = groupConfigs[netId][lGrpId].lStartN; length = groupConfigs[netId][lGrpId].numN; } assert(length <= networkConfigs[netId].numNReg); assert(length > 0); //conductance information assert(src->gAMPA != NULL); if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->gAMPA, sizeof(float) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->gAMPA[ptrPos + destOffset], &src->gAMPA[ptrPos], sizeof(float) * length, kind)); } /*! * \brief this function allocates device (GPU) memory sapce and copies NMDA conductance to it * * This function: * (allocate and) copy gNMDA, gNMDA_r, gNMDA_d * * This funcion is called by copyNeuronState() and fetchConductanceNMDA(). It supports bi-directional copying * * \param[in] netId the id of a local network, which is the same as the device (GPU) id * \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied * \param[in] dest pointer to runtime data desitnation * \param[in] src pointer to runtime data source * \param[in] kind the direction of copy * \param[in] allocateMem a flag indicates whether allocating memory space before copy * \param[in] destOffset the offset of data destination, which is used in local-to-global copy * * \sa copyNeuronState fetchConductanceNMDA * \since v3.0 */ void SNN::copyConductanceNMDA(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, hipMemcpyKind kind, bool allocateMem, int destOffset) { checkAndSetGPUDevice(netId); checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, destOffset);// check that the destination pointer is properly allocated.. 
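	// When the NMDA rise time is modeled (isSimulationWithNMDARise()), the conductance is split
	// into rise and decay components (gNMDA_r, gNMDA_d); otherwise a single decaying gNMDA is
	// used. Only the variant that is actually in use is allocated and copied below.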
assert(isSimulationWithCOBA()); int ptrPos, length; if(lGrpId == ALL) { ptrPos = 0; length = networkConfigs[netId].numNReg; } else { ptrPos = groupConfigs[netId][lGrpId].lStartN; length = groupConfigs[netId][lGrpId].numN; } assert(length <= networkConfigs[netId].numNReg); assert(length > 0); if (isSimulationWithNMDARise()) { assert(src->gNMDA_r != NULL); if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**) &dest->gNMDA_r, sizeof(float) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->gNMDA_r[ptrPos], &src->gNMDA_r[ptrPos], sizeof(float) * length, kind)); assert(src->gNMDA_d != NULL); if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**) &dest->gNMDA_d, sizeof(float) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->gNMDA_d[ptrPos], &src->gNMDA_d[ptrPos], sizeof(float) * length, kind)); } else { assert(src->gNMDA != NULL); if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**) &dest->gNMDA, sizeof(float) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->gNMDA[ptrPos + destOffset], &src->gNMDA[ptrPos], sizeof(float) * length, kind)); } } /*! * \brief this function allocates device (GPU) memory sapce and copies GABAa conductance to it * * This function: * (allocate and) copy gGABAa * * This funcion is called by copyNeuronState() and fetchConductanceGABAa(). It supports bi-directional copying * * \param[in] netId the id of a local network, which is the same as the device (GPU) id * \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied * \param[in] dest pointer to runtime data desitnation * \param[in] src pointer to runtime data source * \param[in] kind the direction of copy * \param[in] allocateMem a flag indicates whether allocating memory space before copy * \param[in] destOffset the offset of data destination, which is used in local-to-global copy * * \sa copyNeuronState fetchConductanceGABAa * \since v3.0 */ void SNN::copyConductanceGABAa(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, hipMemcpyKind kind, bool allocateMem, int destOffset) { checkAndSetGPUDevice(netId); checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, destOffset); // check that the destination pointer is properly allocated.. assert(isSimulationWithCOBA()); int ptrPos, length; if(lGrpId == ALL) { ptrPos = 0; length = networkConfigs[netId].numNReg; } else { ptrPos = groupConfigs[netId][lGrpId].lStartN; length = groupConfigs[netId][lGrpId].numN; } assert(length <= networkConfigs[netId].numNReg); assert(length > 0); assert(src->gGABAa != NULL); if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->gGABAa, sizeof(float) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->gGABAa[ptrPos + destOffset], &src->gGABAa[ptrPos], sizeof(float) * length, kind)); } /*! * \brief this function allocates device (GPU) memory sapce and copies GABAb conductance to it * * This function: * (allocate and) copy gGABAb, gGABAb_r, gGABAb_d * * This funcion is called by copyNeuronState() and fetchConductanceGABAb(). 
It supports bi-directional copying * * \param[in] netId the id of a local network, which is the same as the device (GPU) id * \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied * \param[in] dest pointer to runtime data desitnation * \param[in] src pointer to runtime data source * \param[in] kind the direction of copy * \param[in] allocateMem a flag indicates whether allocating memory space before copy * \param[in] destOffset the offset of data destination, which is used in local-to-global copy * * \sa copyNeuronState fetchConductanceGABAb * \since v3.0 */ void SNN::copyConductanceGABAb(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, hipMemcpyKind kind, bool allocateMem, int destOffset) { checkAndSetGPUDevice(netId); checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, destOffset); // check that the destination pointer is properly allocated.. assert(isSimulationWithCOBA()); int ptrPos, length; if(lGrpId == ALL) { ptrPos = 0; length = networkConfigs[netId].numNReg; } else { ptrPos = groupConfigs[netId][lGrpId].lStartN; length = groupConfigs[netId][lGrpId].numN; } assert(length <= networkConfigs[netId].numNReg); assert(length > 0); if (isSimulationWithGABAbRise()) { assert(src->gGABAb_r != NULL); if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->gGABAb_r, sizeof(float) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->gGABAb_r[ptrPos], &src->gGABAb_r[ptrPos], sizeof(float) * length, kind)); assert(src->gGABAb_d != NULL); if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->gGABAb_d, sizeof(float) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->gGABAb_d[ptrPos], &src->gGABAb_d[ptrPos], sizeof(float) * length, kind)); } else { assert(src->gGABAb != NULL); if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->gGABAb, sizeof(float) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->gGABAb[ptrPos + destOffset], &src->gGABAb[ptrPos], sizeof(float) * length, kind)); } } /*! * \brief this function allocates device (GPU) memory sapce and copies variables related to nueron state to it * * This function: * (allocate and) copy voltage, recovery, current, avgFiring * * This funcion is called by allocateSNN_GPU(). Only copying from host to device is required * * \param[in] netId the id of a local network, which is the same as the device (GPU) id * \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied * \param[in] dest pointer to runtime data desitnation * \param[in] allocateMem a flag indicates whether allocating memory space before copying * * \sa allocateSNN_GPU fetchNeuronState * \since v3.0 */ void SNN::copyNeuronState(int netId, int lGrpId, RuntimeData* dest, hipMemcpyKind kind, bool allocateMem) { checkAndSetGPUDevice(netId); checkDestSrcPtrs(dest, &managerRuntimeData, hipMemcpyHostToDevice, allocateMem, lGrpId, 0); // check that the destination pointer is properly allocated.. 
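	// Host-to-device only: copies recovery, voltage, nextVoltage and current for the selected
	// group(s), plus conductances, external current, spike flags (curSpike), neuron parameters,
	// neuron state buffers (sim_with_nm) and average firing rates when the corresponding
	// features are enabled.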
assert(kind == hipMemcpyHostToDevice); int ptrPos, length; if(lGrpId == ALL) { ptrPos = 0; length = networkConfigs[netId].numNReg; } else { ptrPos = groupConfigs[netId][lGrpId].lStartN; length = groupConfigs[netId][lGrpId].numN; } assert(length <= networkConfigs[netId].numNReg); if (length == 0) return; if(!allocateMem && groupConfigs[netId][lGrpId].Type & POISSON_NEURON) return; if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->recovery, sizeof(float) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->recovery[ptrPos], &managerRuntimeData.recovery[ptrPos], sizeof(float) * length, hipMemcpyHostToDevice)); if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->voltage, sizeof(float) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->voltage[ptrPos], &managerRuntimeData.voltage[ptrPos], sizeof(float) * length, hipMemcpyHostToDevice)); if (allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->nextVoltage, sizeof(float) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->nextVoltage[ptrPos], &managerRuntimeData.nextVoltage[ptrPos], sizeof(float) * length, hipMemcpyHostToDevice)); //neuron input current... if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->current, sizeof(float) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->current[ptrPos], &managerRuntimeData.current[ptrPos], sizeof(float) * length, hipMemcpyHostToDevice)); if (sim_with_conductances) { //conductance information copyConductanceAMPA(netId, lGrpId, dest, &managerRuntimeData, hipMemcpyHostToDevice, allocateMem, 0); copyConductanceNMDA(netId, lGrpId, dest, &managerRuntimeData, hipMemcpyHostToDevice, allocateMem, 0); copyConductanceGABAa(netId, lGrpId, dest, &managerRuntimeData, hipMemcpyHostToDevice, allocateMem, 0); copyConductanceGABAb(netId, lGrpId, dest, &managerRuntimeData, hipMemcpyHostToDevice, allocateMem, 0); } // copying external current needs to be done separately because setExternalCurrent needs to call it, too // do it only from host to device copyExternalCurrent(netId, lGrpId, dest, hipMemcpyHostToDevice, allocateMem); if (allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->curSpike, sizeof(bool) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->curSpike[ptrPos], &managerRuntimeData.curSpike[ptrPos], sizeof(bool) * length, hipMemcpyHostToDevice)); copyNeuronParameters(netId, lGrpId, dest, hipMemcpyHostToDevice, allocateMem); if (networkConfigs[netId].sim_with_nm) copyNeuronStateBuffer(netId, lGrpId, dest, &managerRuntimeData, hipMemcpyHostToDevice, allocateMem); if (sim_with_homeostasis) { //Included to enable homeostasis in GPU_MODE. // Avg. Firing... if (allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->avgFiring, sizeof(float) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->avgFiring[ptrPos], &managerRuntimeData.avgFiring[ptrPos], sizeof(float) * length, hipMemcpyHostToDevice)); } } /*! * \brief this function allocates device (GPU) memory sapce and copies the spike count of each neuron to it * * This function: * (allocate and) copy nSpikeCnt * * This funcion is called by copyAuxiliaryData() and fetchNeuronSpikeCount(). 
It supports bi-directional copying * * \param[in] netId the id of a local network, which is the same as the device (GPU) id * \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied * \param[in] dest pointer to runtime data desitnation * \param[in] src pointer to runtime data source * \param[in] kind the direction of copy * \param[in] allocateMem a flag indicates whether allocating memory space before copy * \param[in] destOffset the offset of data destination, which is used in local-to-global copy * * \sa copyAuxiliaryData fetchNeuronSpikeCount * \since v4.0 */ void SNN::copyNeuronSpikeCount(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, hipMemcpyKind kind, bool allocateMem, int destOffset) { checkAndSetGPUDevice(netId); checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, destOffset);// check that the destination pointer is properly allocated.. int posN, lengthN; if(lGrpId == ALL) { posN = 0; lengthN = networkConfigs[netId].numN; } else { posN = groupConfigs[netId][lGrpId].lStartN; lengthN = groupConfigs[netId][lGrpId].numN; } assert(lengthN > 0 && lengthN <= networkConfigs[netId].numN); // spike count information if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->nSpikeCnt, sizeof(int) * lengthN)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->nSpikeCnt[posN + destOffset], &src->nSpikeCnt[posN], sizeof(int) * lengthN, kind)); } // FIXME: move grpDA(5HT, ACh, NE)Buffer to copyAuxiliaryData /*! * \brief this function allocates device (GPU) memory sapce and copies variables related to group state to it * * This function: * (allocate and) copy grpDA, grp5HT, grpACh, grpNE, grpDABuffer, grp5HTBuffer, grpAChBuffer, grpNEBuffer * * This funcion is called by allocateSNN_GPU() and fetchGroupState(). It supports bi-directional copying * * \param[in] netId the id of a local network, which is the same as the device (GPU) id * \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied * \param[in] dest pointer to runtime data desitnation * \param[in] src pointer to runtime data source * \param[in] kind the direction of copying * \param[in] allocateMem a flag indicates whether allocating memory space before copying * * \sa allocateSNN_GPU fetchGroupState * \since v3.0 */ void SNN::copyGroupState(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, hipMemcpyKind kind, bool allocateMem) { checkAndSetGPUDevice(netId); checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, 0);// check that the destination pointer is properly allocated.. 
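	// grpDA, grp5HT, grpACh and grpNE hold one concentration value per group; the corresponding
	// *Buffer arrays log one value per group per ms of the current second (1000 floats per
	// group), indexed as e.g. grpDABuffer[grpId * 1000 + simTimeMs].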
if (allocateMem) { assert(dest->memType == GPU_MEM && !dest->allocated); CUDA_CHECK_ERRORS(hipMalloc((void**) &dest->grpDA, sizeof(float) * networkConfigs[netId].numGroups)); CUDA_CHECK_ERRORS(hipMalloc((void**) &dest->grp5HT, sizeof(float) * networkConfigs[netId].numGroups)); CUDA_CHECK_ERRORS(hipMalloc((void**) &dest->grpACh, sizeof(float) * networkConfigs[netId].numGroups)); CUDA_CHECK_ERRORS(hipMalloc((void**) &dest->grpNE, sizeof(float) * networkConfigs[netId].numGroups)); } CUDA_CHECK_ERRORS(hipMemcpy(dest->grpDA, src->grpDA, sizeof(float) * networkConfigs[netId].numGroups, kind)); CUDA_CHECK_ERRORS(hipMemcpy(dest->grp5HT, src->grp5HT, sizeof(float) * networkConfigs[netId].numGroups, kind)); CUDA_CHECK_ERRORS(hipMemcpy(dest->grpACh, src->grpACh, sizeof(float) * networkConfigs[netId].numGroups, kind)); CUDA_CHECK_ERRORS(hipMemcpy(dest->grpNE, src->grpNE, sizeof(float) * networkConfigs[netId].numGroups, kind)); if (lGrpId < 0) { if (allocateMem) { assert(dest->memType == GPU_MEM && !dest->allocated); CUDA_CHECK_ERRORS(hipMalloc((void**) &dest->grpDABuffer, sizeof(float) * 1000 * networkConfigs[netId].numGroups)); CUDA_CHECK_ERRORS(hipMalloc((void**) &dest->grp5HTBuffer, sizeof(float) * 1000 * networkConfigs[netId].numGroups)); CUDA_CHECK_ERRORS(hipMalloc((void**) &dest->grpAChBuffer, sizeof(float) * 1000 * networkConfigs[netId].numGroups)); CUDA_CHECK_ERRORS(hipMalloc((void**) &dest->grpNEBuffer, sizeof(float) * 1000 * networkConfigs[netId].numGroups)); } CUDA_CHECK_ERRORS(hipMemcpy(dest->grpDABuffer, src->grpDABuffer, sizeof(float) * 1000 * networkConfigs[netId].numGroups, kind)); CUDA_CHECK_ERRORS(hipMemcpy(dest->grp5HTBuffer, src->grp5HTBuffer, sizeof(float) * 1000 * networkConfigs[netId].numGroups, kind)); CUDA_CHECK_ERRORS(hipMemcpy(dest->grpAChBuffer, src->grpAChBuffer, sizeof(float) * 1000 * networkConfigs[netId].numGroups, kind)); CUDA_CHECK_ERRORS(hipMemcpy(dest->grpNEBuffer, src->grpNEBuffer, sizeof(float) * 1000 * networkConfigs[netId].numGroups, kind)); } else { assert(!allocateMem); CUDA_CHECK_ERRORS(hipMemcpy(&dest->grpDABuffer[lGrpId * 1000], &src->grpDABuffer[lGrpId * 1000], sizeof(float) * 1000, kind)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->grp5HTBuffer[lGrpId * 1000], &src->grp5HTBuffer[lGrpId * 1000], sizeof(float) * 1000, kind)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->grpAChBuffer[lGrpId * 1000], &src->grpAChBuffer[lGrpId * 1000], sizeof(float) * 1000, kind)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->grpNEBuffer[lGrpId * 1000], &src->grpNEBuffer[lGrpId * 1000], sizeof(float) * 1000, kind)); } } /*! * \brief this function allocates device (GPU) memory sapce and copies neural parameters to it * * This function: * (allocate and) copy Izh_a, Izh_b, Izh_c, Izh_d * initialize baseFiringInv * (allocate and) copy baseFiring, baseFiringInv * * This funcion is only called by copyNeuronState(). Only copying direction from host to device is required. * * \param[in] netId the id of a local network, which is the same as the device (GPU) id * \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied * \param[in] dest pointer to runtime data desitnation * \param[in] allocateMem a flag indicates whether allocating memory space before copying * * \sa copyNeuronState * \since v3.0 */ void SNN::copyNeuronParameters(int netId, int lGrpId, RuntimeData* dest, hipMemcpyKind kind, bool allocateMem) { checkAndSetGPUDevice(netId); assert(kind == hipMemcpyHostToDevice); int ptrPos, length; // check that the destination pointer is properly allocated.. 
checkDestSrcPtrs(dest, &managerRuntimeData, hipMemcpyHostToDevice, allocateMem, lGrpId, 0); // check that the destination pointer is properly allocated.. // cannot use checkDestSrcPtrs here because src pointer would be NULL if (dest->allocated && allocateMem) { KERNEL_ERROR("GPU Memory already allocated..."); exitSimulation(1); } // when allocating we are allocating the memory.. we need to do it completely... to avoid memory fragmentation.. if (allocateMem) { assert(lGrpId == ALL); assert(dest->Izh_a == NULL); assert(dest->Izh_b == NULL); assert(dest->Izh_c == NULL); assert(dest->Izh_d == NULL); assert(dest->Izh_C == NULL); assert(dest->Izh_k == NULL); assert(dest->Izh_vr == NULL); assert(dest->Izh_vt == NULL); assert(dest->Izh_vpeak == NULL); assert(dest->lif_tau_m == NULL); //LIF parameters assert(dest->lif_tau_ref == NULL); assert(dest->lif_tau_ref_c == NULL); assert(dest->lif_vTh == NULL); assert(dest->lif_vReset == NULL); assert(dest->lif_gain == NULL); assert(dest->lif_bias == NULL); } if(lGrpId == ALL) { ptrPos = 0; length = networkConfigs[netId].numNReg; } else { ptrPos = groupConfigs[netId][lGrpId].lStartN; length = groupConfigs[netId][lGrpId].numN; } if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->Izh_a, sizeof(float) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->Izh_a[ptrPos], &(managerRuntimeData.Izh_a[ptrPos]), sizeof(float) * length, hipMemcpyHostToDevice)); if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->Izh_b, sizeof(float) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->Izh_b[ptrPos], &(managerRuntimeData.Izh_b[ptrPos]), sizeof(float) * length, hipMemcpyHostToDevice)); if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->Izh_c, sizeof(float) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->Izh_c[ptrPos], &(managerRuntimeData.Izh_c[ptrPos]), sizeof(float) * length, hipMemcpyHostToDevice)); if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->Izh_d, sizeof(float) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->Izh_d[ptrPos], &(managerRuntimeData.Izh_d[ptrPos]), sizeof(float) * length, hipMemcpyHostToDevice)); if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->Izh_C, sizeof(float) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->Izh_C[ptrPos], &(managerRuntimeData.Izh_C[ptrPos]), sizeof(float) * length, hipMemcpyHostToDevice)); if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->Izh_k, sizeof(float) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->Izh_k[ptrPos], &(managerRuntimeData.Izh_k[ptrPos]), sizeof(float) * length, hipMemcpyHostToDevice)); if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->Izh_vr, sizeof(float) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->Izh_vr[ptrPos], &(managerRuntimeData.Izh_vr[ptrPos]), sizeof(float) * length, hipMemcpyHostToDevice)); if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->Izh_vt, sizeof(float) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->Izh_vt[ptrPos], &(managerRuntimeData.Izh_vt[ptrPos]), sizeof(float) * length, hipMemcpyHostToDevice)); if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->Izh_vpeak, sizeof(float) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->Izh_vpeak[ptrPos], &(managerRuntimeData.Izh_vpeak[ptrPos]), sizeof(float) * length, hipMemcpyHostToDevice)); //LIF parameters if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->lif_tau_m, sizeof(int) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->lif_tau_m[ptrPos], &(managerRuntimeData.lif_tau_m[ptrPos]), sizeof(int) * length, hipMemcpyHostToDevice)); if(allocateMem) 
CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->lif_tau_ref, sizeof(int) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->lif_tau_ref[ptrPos], &(managerRuntimeData.lif_tau_ref[ptrPos]), sizeof(int) * length, hipMemcpyHostToDevice)); if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->lif_tau_ref_c, sizeof(int) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->lif_tau_ref_c[ptrPos], &(managerRuntimeData.lif_tau_ref_c[ptrPos]), sizeof(int) * length, hipMemcpyHostToDevice)); if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->lif_vTh, sizeof(float) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->lif_vTh[ptrPos], &(managerRuntimeData.lif_vTh[ptrPos]), sizeof(float) * length, hipMemcpyHostToDevice)); if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->lif_vReset, sizeof(float) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->lif_vReset[ptrPos], &(managerRuntimeData.lif_vReset[ptrPos]), sizeof(float) * length, hipMemcpyHostToDevice)); if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->lif_gain, sizeof(float) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->lif_gain[ptrPos], &(managerRuntimeData.lif_gain[ptrPos]), sizeof(float) * length, hipMemcpyHostToDevice)); if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->lif_bias, sizeof(float) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->lif_bias[ptrPos], &(managerRuntimeData.lif_bias[ptrPos]), sizeof(float) * length, hipMemcpyHostToDevice)); // pre-compute baseFiringInv for fast computation on GPUs. if (sim_with_homeostasis) { float* baseFiringInv = new float[length]; for(int nid = 0; nid < length; nid++) { if (managerRuntimeData.baseFiring[nid] != 0.0f) baseFiringInv[nid] = 1.0f / managerRuntimeData.baseFiring[ptrPos + nid]; else baseFiringInv[nid] = 0.0; } if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->baseFiringInv, sizeof(float) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->baseFiringInv[ptrPos], baseFiringInv, sizeof(float) * length, hipMemcpyHostToDevice)); if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->baseFiring, sizeof(float) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->baseFiring[ptrPos], managerRuntimeData.baseFiring, sizeof(float) * length, hipMemcpyHostToDevice)); delete [] baseFiringInv; } } /*! * \brief this function allocates device (GPU) memory sapce and copies short-term plasticity (STP) state to it * * This function: * initialize STP_Pitch * (allocate and) copy stpu, stpx * * This funcion is called by allocateSNN_GPU() and fetchSTPState(). It supports bi-directional copying * * \param[in] netId the id of a local network, which is the same as the device (GPU) id * \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied * \param[in] dest pointer to runtime data desitnation * \param[in] src pointer to runtime data source * \param[in] kind the direction of copying * \param[in] allocateMem a flag indicates whether allocating memory space before copying * * \sa allocateSNN_GPU fetchSTPState * \since v3.0 */ void SNN::copySTPState(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, hipMemcpyKind kind, bool allocateMem) { checkAndSetGPUDevice(netId); checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, 0); // check that the destination pointer is properly allocated.. 
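	// stpu and stpx live on the device as pitched 2D arrays: (maxDelay + 1) rows of numN floats,
	// one row per buffered time step. STP_Pitch is the row pitch returned by hipMallocPitch()
	// and is converted from bytes to a float count before it is used for indexing.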
// STP feature is optional, do addtional check for memory space if(allocateMem) { assert(dest->stpu == NULL); assert(dest->stpx == NULL); } else { assert(dest->stpu != NULL); assert(dest->stpx != NULL); } assert(src->stpu != NULL); assert(src->stpx != NULL); size_t STP_Pitch; size_t widthInBytes = sizeof(float) * networkConfigs[netId].numN; // if(allocateMem) CUDA_CHECK_ERRORS( hipMalloc( (void**) &dest->stpu, sizeof(float)*networkConfigs[0].numN)); // CUDA_CHECK_ERRORS( hipMemcpy( &dest->stpu[0], &src->stpu[0], sizeof(float)*networkConfigs[0].numN, kind)); // if(allocateMem) CUDA_CHECK_ERRORS( hipMalloc( (void**) &dest->stpx, sizeof(float)*networkConfigs[0].numN)); // CUDA_CHECK_ERRORS( hipMemcpy( &dest->stpx[0], &src->stpx[0], sizeof(float)*networkConfigs[0].numN, kind)); // allocate the stpu and stpx variable if (allocateMem) CUDA_CHECK_ERRORS(hipMallocPitch((void**)&dest->stpu, &networkConfigs[netId].STP_Pitch, widthInBytes, networkConfigs[netId].maxDelay + 1)); if (allocateMem) CUDA_CHECK_ERRORS(hipMallocPitch((void**)&dest->stpx, &STP_Pitch, widthInBytes, networkConfigs[netId].maxDelay + 1)); assert(networkConfigs[netId].STP_Pitch > 0); assert(STP_Pitch > 0); // stp_pitch should be greater than zero assert(STP_Pitch == networkConfigs[netId].STP_Pitch); // we want same Pitch for stpu and stpx assert(networkConfigs[netId].STP_Pitch >= widthInBytes); // stp_pitch should be greater than the width // convert the Pitch value to multiples of float assert(networkConfigs[netId].STP_Pitch % (sizeof(float)) == 0); if (allocateMem) networkConfigs[netId].STP_Pitch = networkConfigs[netId].STP_Pitch / sizeof(float); // fprintf(stderr, "STP_Pitch = %ld, STP_witdhInBytes = %d\n", networkConfigs[0].STP_Pitch, widthInBytes); float* tmp_stp = new float[networkConfigs[netId].numN]; // copy the already generated values of stpx and stpu to the GPU for(int t = 0; t < networkConfigs[netId].maxDelay + 1; t++) { if (kind == hipMemcpyHostToDevice) { // stpu in the CPU might be mapped in a specific way. we want to change the format // to something that is okay with the GPU STP_U and STP_X variable implementation.. for (int n = 0; n < networkConfigs[netId].numN; n++) { int idx = STP_BUF_POS(n, t, glbNetworkConfig.maxDelay); tmp_stp[n] = managerRuntimeData.stpu[idx]; //assert(tmp_stp[n] == 0.0f); // STP is not enabled for all groups } CUDA_CHECK_ERRORS(hipMemcpy(&dest->stpu[t * networkConfigs[netId].STP_Pitch], tmp_stp, sizeof(float) * networkConfigs[netId].numN, hipMemcpyHostToDevice)); for (int n = 0; n < networkConfigs[netId].numN; n++) { int idx = STP_BUF_POS(n, t, glbNetworkConfig.maxDelay); tmp_stp[n] = managerRuntimeData.stpx[idx]; //assert(tmp_stp[n] == 1.0f); // STP is not enabled for all groups } CUDA_CHECK_ERRORS(hipMemcpy(&dest->stpx[t * networkConfigs[netId].STP_Pitch], tmp_stp, sizeof(float) * networkConfigs[netId].numN, hipMemcpyHostToDevice)); } else { CUDA_CHECK_ERRORS(hipMemcpy(tmp_stp, &dest->stpu[t * networkConfigs[netId].STP_Pitch], sizeof(float) * networkConfigs[netId].numN, hipMemcpyDeviceToHost)); for (int n = 0; n < networkConfigs[netId].numN; n++) managerRuntimeData.stpu[STP_BUF_POS(n, t, glbNetworkConfig.maxDelay)] = tmp_stp[n]; CUDA_CHECK_ERRORS(hipMemcpy(tmp_stp, &dest->stpx[t * networkConfigs[netId].STP_Pitch], sizeof(float) * networkConfigs[netId].numN, hipMemcpyDeviceToHost)); for (int n = 0; n < networkConfigs[netId].numN; n++) managerRuntimeData.stpx[STP_BUF_POS(n, t, glbNetworkConfig.maxDelay)] = tmp_stp[n]; } } delete [] tmp_stp; } /*! 
* \brief This function copies networkConfig form host to device * * This function: * copy networkConfig * * \param[in] netId the id of a local network whose networkConfig will be copied to device (GPU) memory * * \since v4.0 */ void SNN::copyNetworkConfig(int netId, hipMemcpyKind kind) { checkAndSetGPUDevice(netId); assert(kind == hipMemcpyHostToDevice); CUDA_CHECK_ERRORS(hipMemcpyToSymbol(networkConfigGPU, &networkConfigs[netId], sizeof(NetworkConfigRT), 0, hipMemcpyHostToDevice)); } /*! * \brief This function copies groupConfigs form host to device * * This function: * copy groupConfigs * * \param[in] netId the id of a local network whose groupConfigs will be copied to device (GPU) memory * * \since v4.0 */ void SNN::copyGroupConfigs(int netId){ checkAndSetGPUDevice(netId); CUDA_CHECK_ERRORS(hipMemcpyToSymbol(groupConfigsGPU, groupConfigs[netId], (networkConfigs[netId].numGroupsAssigned) * sizeof(GroupConfigRT), 0, hipMemcpyHostToDevice)); } /*! * \brief this function copy weight state in device (GPU) memory sapce to main (CPU) memory space * * This function: * copy wt, wtChange synSpikeTime * * This funcion is only called by fetchWeightState(). Only copying direction from device to host is required. * * \param[in] netId the id of a local network, which is the same as the device (GPU) id * \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied * * \sa fetchWeightState * \since v4.0 */ void SNN::copyWeightState(int netId, int lGrpId, hipMemcpyKind kind) { checkAndSetGPUDevice(netId); checkDestSrcPtrs(&managerRuntimeData, &runtimeData[netId], hipMemcpyDeviceToHost, false, lGrpId, 0); // check that the destination pointer is properly allocated.. assert(kind == hipMemcpyDeviceToHost); int lengthSyn, posSyn; // first copy pre-connections info copyPreConnectionInfo(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], hipMemcpyDeviceToHost, false); if (lGrpId == ALL) { lengthSyn = networkConfigs[netId].numPreSynNet; posSyn = 0; } else { lengthSyn = 0; for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++) lengthSyn += managerRuntimeData.Npre[lNId]; posSyn = managerRuntimeData.cumulativePre[groupConfigs[netId][lGrpId].lStartN]; } assert(posSyn < networkConfigs[netId].numPreSynNet || networkConfigs[netId].numPreSynNet == 0); assert(lengthSyn <= networkConfigs[netId].numPreSynNet); CUDA_CHECK_ERRORS(hipMemcpy(&managerRuntimeData.wt[posSyn], &runtimeData[netId].wt[posSyn], sizeof(float) * lengthSyn, hipMemcpyDeviceToHost)); // copy firing time for individual synapses //CUDA_CHECK_ERRORS(hipMemcpy(&managerRuntimeData.synSpikeTime[cumPos_syn], &runtimeData[netId].synSpikeTime[cumPos_syn], sizeof(int) * length_wt, hipMemcpyDeviceToHost)); if ((!sim_with_fixedwts) || sim_with_stdp) { // copy synaptic weight derivative CUDA_CHECK_ERRORS(hipMemcpy(&managerRuntimeData.wtChange[posSyn], &runtimeData[netId].wtChange[posSyn], sizeof(float) * lengthSyn, hipMemcpyDeviceToHost)); } } /*! 
* \brief this function allocates device (GPU) memory sapce and copies variables related to syanpses to it * * This function: * (allocate and) copy wt, wtChange, maxSynWt * * * \param[in] netId the id of a local network, which is the same as the device (GPU) id * \param[in] dest pointer to runtime data desitnation * \param[in] src pointer to runtime data source * \param[in] allocateMem a flag indicates whether allocating memory space before copying * * \sa allocateSNN_GPU * \since v4.0 */ void SNN::copySynapseState(int netId, RuntimeData* dest, RuntimeData* src, hipMemcpyKind kind, bool allocateMem) { checkAndSetGPUDevice(netId); checkDestSrcPtrs(dest, src, kind, allocateMem, ALL, 0); // check that the destination pointer is properly allocated.. assert(networkConfigs[netId].numPreSynNet > 0); // synaptic information based if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->wt, sizeof(float) * networkConfigs[netId].numPreSynNet)); CUDA_CHECK_ERRORS(hipMemcpy(dest->wt, src->wt, sizeof(float) * networkConfigs[netId].numPreSynNet, kind)); // we don't need these data structures if the network doesn't have any plastic synapses at all // they show up in gpuUpdateLTP() and updateSynapticWeights(), two functions that do not get called if // sim_with_fixedwts is set if (!sim_with_fixedwts) { // synaptic weight derivative if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->wtChange, sizeof(float) * networkConfigs[netId].numPreSynNet)); CUDA_CHECK_ERRORS(hipMemcpy(dest->wtChange, src->wtChange, sizeof(float) * networkConfigs[netId].numPreSynNet, kind)); // synaptic weight maximum value if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->maxSynWt, sizeof(float) * networkConfigs[netId].numPreSynNet)); CUDA_CHECK_ERRORS(hipMemcpy(dest->maxSynWt, src->maxSynWt, sizeof(float) * networkConfigs[netId].numPreSynNet, kind)); } } /*! * \brief this function allocates device (GPU) memory sapce and copies auxiliary runtime data to it * * This function: * (allocate and) reset spikeGenBits, poissonFireRate * initialize I_setLength, I_setPitch; (allocate and) reset I_set * (allocate and) copy synSpikeTime, lastSpikeTime * (allocate and) copy nSpikeCnt * (allocate and) copy grpIds, connIdsPreIdx * (allocate and) copy firingTableD1, firingTableD2 * This funcion is only called by allocateSNN_GPU. Therefore, only copying direction from host to device is required * * \param[in] netId the id of local network, which is the same as device (GPU) id * \param[in] dest pointer to runtime data desitnation * \param[in] allocateMem a flag indicates whether allocating memory space before copying * * \sa allocateSNN_GPU * \since v4.0 */ void SNN::copyAuxiliaryData(int netId, int lGrpId, RuntimeData* dest, hipMemcpyKind kind, bool allocateMem) { checkAndSetGPUDevice(netId); checkDestSrcPtrs(dest, &managerRuntimeData, hipMemcpyHostToDevice, allocateMem, ALL, 0); // check that the destination pointer is properly allocated.. 
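// Descriptive note (added): spikeGenBits is a bit vector with one bit per spike-generator neuron,
// packed into 32-bit words, hence the (numNSpikeGen / 32 + 1) ints allocated just below.
// spikeGeneratorUpdate_GPU() clears the manager-side copy, fills it from the SpikeBuffer via
// fillSpikeGenBits(), and copies it to the GPU. Illustrative indexing (assumed here, mirroring the
// I_set packing used elsewhere in this file): the bit for neuron n lives at word (n >> 5), bit (n & 31).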
assert(kind == hipMemcpyHostToDevice); assert(networkConfigs[netId].numN > 0); if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->spikeGenBits, sizeof(int) * (networkConfigs[netId].numNSpikeGen / 32 + 1))); CUDA_CHECK_ERRORS(hipMemset(dest->spikeGenBits, 0, sizeof(int) * (networkConfigs[netId].numNSpikeGen / 32 + 1))); // allocate the poisson neuron poissonFireRate if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->poissonFireRate, sizeof(float) * networkConfigs[netId].numNPois)); CUDA_CHECK_ERRORS(hipMemset(dest->poissonFireRate, 0, sizeof(float) * networkConfigs[netId].numNPois)); // synaptic auxiliary data // I_set: a bit vector indicates which synapse got a spike if(allocateMem) { networkConfigs[netId].I_setLength = ceil(((networkConfigs[netId].maxNumPreSynN) / 32.0f)); CUDA_CHECK_ERRORS(hipMallocPitch((void**)&dest->I_set, &networkConfigs[netId].I_setPitch, sizeof(int) * networkConfigs[netId].numNReg, networkConfigs[netId].I_setLength)); } assert(networkConfigs[netId].I_setPitch > 0 || networkConfigs[netId].maxNumPreSynN == 0); CUDA_CHECK_ERRORS(hipMemset(dest->I_set, 0, networkConfigs[netId].I_setPitch * networkConfigs[netId].I_setLength)); // synSpikeTime: an array indicates the last time when a synapse got a spike if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->synSpikeTime, sizeof(int) * networkConfigs[netId].numPreSynNet)); CUDA_CHECK_ERRORS(hipMemcpy(dest->synSpikeTime, managerRuntimeData.synSpikeTime, sizeof(int) * networkConfigs[netId].numPreSynNet, hipMemcpyHostToDevice)); // neural auxiliary data // lastSpikeTime: an array indicates the last time of a neuron emitting a spike // neuron firing time if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->lastSpikeTime, sizeof(int) * networkConfigs[netId].numNAssigned)); CUDA_CHECK_ERRORS(hipMemcpy(dest->lastSpikeTime, managerRuntimeData.lastSpikeTime, sizeof(int) * networkConfigs[netId].numNAssigned, hipMemcpyHostToDevice)); // auxiliary data for recording spike count of each neuron copyNeuronSpikeCount(netId, lGrpId, dest, &managerRuntimeData, hipMemcpyHostToDevice, true, 0); // quick lookup array for local group ids if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->grpIds, sizeof(short int) * networkConfigs[netId].numNAssigned)); CUDA_CHECK_ERRORS(hipMemcpy(dest->grpIds, managerRuntimeData.grpIds, sizeof(short int) * networkConfigs[netId].numNAssigned, hipMemcpyHostToDevice)); // quick lookup array for conn ids if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->connIdsPreIdx, sizeof(short int) * networkConfigs[netId].numPreSynNet)); CUDA_CHECK_ERRORS(hipMemcpy(dest->connIdsPreIdx, managerRuntimeData.connIdsPreIdx, sizeof(short int) * networkConfigs[netId].numPreSynNet, hipMemcpyHostToDevice)); // firing table if(allocateMem) { assert(dest->firingTableD1 == NULL); assert(dest->firingTableD2 == NULL); } // allocate 1ms firing table if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->firingTableD1, sizeof(int) * networkConfigs[netId].maxSpikesD1)); if (networkConfigs[netId].maxSpikesD1 > 0) CUDA_CHECK_ERRORS(hipMemcpy(dest->firingTableD1, managerRuntimeData.firingTableD1, sizeof(int) * networkConfigs[netId].maxSpikesD1, hipMemcpyHostToDevice)); // allocate 2+ms firing table if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->firingTableD2, sizeof(int) * networkConfigs[netId].maxSpikesD2)); if (networkConfigs[netId].maxSpikesD2 > 0) CUDA_CHECK_ERRORS(hipMemcpy(dest->firingTableD2, managerRuntimeData.firingTableD2, sizeof(int) * networkConfigs[netId].maxSpikesD2, 
hipMemcpyHostToDevice)); // allocate external 1ms firing table if(allocateMem) { void* devPtr; CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->extFiringTableD1, sizeof(int*) * networkConfigs[netId].numGroups)); CUDA_CHECK_ERRORS(hipMemset(dest->extFiringTableD1, 0 /* NULL */, sizeof(int*) * networkConfigs[netId].numGroups)); for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) { if (groupConfigs[netId][lGrpId].hasExternalConnect) { CUDA_CHECK_ERRORS(hipMalloc((void**)&devPtr, sizeof(int) * groupConfigs[netId][lGrpId].numN * NEURON_MAX_FIRING_RATE)); CUDA_CHECK_ERRORS(hipMemset(devPtr, 0, sizeof(int) * groupConfigs[netId][lGrpId].numN * NEURON_MAX_FIRING_RATE)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->extFiringTableD1[lGrpId], &devPtr, sizeof(int*), hipMemcpyHostToDevice)); } } } // allocate external 2+ms firing table if(allocateMem) { void* devPtr; CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->extFiringTableD2, sizeof(int*) * networkConfigs[netId].numGroups)); CUDA_CHECK_ERRORS(hipMemset(dest->extFiringTableD2, 0 /* NULL */, sizeof(int*) * networkConfigs[netId].numGroups)); for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) { if (groupConfigs[netId][lGrpId].hasExternalConnect) { CUDA_CHECK_ERRORS(hipMalloc((void**)&devPtr, sizeof(int) * groupConfigs[netId][lGrpId].numN * NEURON_MAX_FIRING_RATE)); CUDA_CHECK_ERRORS(hipMemset(devPtr, 0, sizeof(int) * groupConfigs[netId][lGrpId].numN * NEURON_MAX_FIRING_RATE)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->extFiringTableD2[lGrpId], &devPtr, sizeof(int*), hipMemcpyHostToDevice)); } } } // allocate external 1ms firing table index if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->extFiringTableEndIdxD1, sizeof(int) * networkConfigs[netId].numGroups)); CUDA_CHECK_ERRORS(hipMemset(dest->extFiringTableEndIdxD1, 0, sizeof(int) * networkConfigs[netId].numGroups)); // allocate external 2+ms firing table index if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->extFiringTableEndIdxD2, sizeof(int) * networkConfigs[netId].numGroups)); CUDA_CHECK_ERRORS(hipMemset(dest->extFiringTableEndIdxD2, 0, sizeof(int) * networkConfigs[netId].numGroups)); } void SNN::copyGrpIdsLookupArray(int netId, hipMemcpyKind kind) { checkAndSetGPUDevice(netId); checkDestSrcPtrs(&managerRuntimeData, &runtimeData[netId], hipMemcpyDeviceToHost, false, ALL, 0);// check that the destination pointer is properly allocated.. assert(kind == hipMemcpyDeviceToHost); CUDA_CHECK_ERRORS(hipMemcpy(managerRuntimeData.grpIds, runtimeData[netId].grpIds, sizeof(short int) * networkConfigs[netId].numNAssigned, hipMemcpyDeviceToHost)); } void SNN::copyConnIdsLookupArray(int netId, hipMemcpyKind kind) { checkAndSetGPUDevice(netId); checkDestSrcPtrs(&managerRuntimeData, &runtimeData[netId], hipMemcpyDeviceToHost, false, ALL, 0);// check that the destination pointer is properly allocated.. assert(kind == hipMemcpyDeviceToHost); CUDA_CHECK_ERRORS(hipMemcpy(managerRuntimeData.connIdsPreIdx, runtimeData[netId].connIdsPreIdx, sizeof(short int) * networkConfigs[netId].numPreSynNet, hipMemcpyDeviceToHost)); } void SNN::copyLastSpikeTime(int netId, hipMemcpyKind kind) { checkAndSetGPUDevice(netId); checkDestSrcPtrs(&managerRuntimeData, &runtimeData[netId], hipMemcpyDeviceToHost, false, ALL, 0); // check that the destination pointer is properly allocated.. 
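// Descriptive note (added): lastSpikeTime was allocated with numNAssigned entries in
// copyAuxiliaryData() (presumably local plus externally connected neurons), but only the first
// numN entries -- the neurons owned by this local network -- are fetched back to the manager below.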
assert(kind == hipMemcpyDeviceToHost); CUDA_CHECK_ERRORS(hipMemcpy(managerRuntimeData.lastSpikeTime, runtimeData[netId].lastSpikeTime, sizeof(int) * networkConfigs[netId].numN, hipMemcpyDeviceToHost)); } // spikeGeneratorUpdate on GPUs.. void SNN::spikeGeneratorUpdate_GPU(int netId) { assert(runtimeData[netId].allocated); assert(runtimeData[netId].memType == GPU_MEM); checkAndSetGPUDevice(netId); // update the random number for poisson spike generator (spikes generated by rate) if ((networkConfigs[netId].numNPois > 0) && (runtimeData[netId].gpuRandGen != NULL)) { hiprandGenerateUniform(runtimeData[netId].gpuRandGen, runtimeData[netId].randNum, networkConfigs[netId].numNPois); } // Use spike generators (user-defined callback function) if (networkConfigs[netId].numNSpikeGen > 0) { assert(managerRuntimeData.spikeGenBits != NULL); // reset the bit status of the spikeGenBits... memset(managerRuntimeData.spikeGenBits, 0, sizeof(int) * (networkConfigs[netId].numNSpikeGen / 32 + 1)); // fill spikeGenBits from SpikeBuffer fillSpikeGenBits(netId); // copy the spikeGenBits from the manager to the GPU.. CUDA_CHECK_ERRORS(hipMemcpy(runtimeData[netId].spikeGenBits, managerRuntimeData.spikeGenBits, sizeof(int) * (networkConfigs[netId].numNSpikeGen / 32 + 1), hipMemcpyHostToDevice)); } } void SNN::findFiring_GPU(int netId) { assert(runtimeData[netId].allocated); assert(runtimeData[netId].memType == GPU_MEM); checkAndSetGPUDevice(netId); hipLaunchKernelGGL(( kernel_findFiring), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, simTimeMs, simTime); CUDA_GET_LAST_ERROR("findFiring kernel failed\n"); } void SNN::updateTimingTable_GPU(int netId) { assert(runtimeData[netId].allocated); assert(runtimeData[netId].memType == GPU_MEM); checkAndSetGPUDevice(netId); hipLaunchKernelGGL(( kernel_updateTimeTable), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, simTimeMs); CUDA_GET_LAST_ERROR("timing Table update kernel failed\n"); } void SNN::doCurrentUpdateD2_GPU(int netId) { assert(runtimeData[netId].allocated); assert(runtimeData[netId].memType == GPU_MEM); checkAndSetGPUDevice(netId); if (networkConfigs[netId].maxDelay > 1) { hipLaunchKernelGGL(( kernel_doCurrentUpdateD2), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, simTimeMs, simTimeSec, simTime); CUDA_GET_LAST_ERROR("Kernel execution failed"); } } void SNN::doCurrentUpdateD1_GPU(int netId) { assert(runtimeData[netId].allocated); assert(runtimeData[netId].memType == GPU_MEM); checkAndSetGPUDevice(netId); hipLaunchKernelGGL(( kernel_doCurrentUpdateD1), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, simTimeMs, simTimeSec, simTime); CUDA_GET_LAST_ERROR("Kernel execution failed"); } void SNN::doSTPUpdateAndDecayCond_GPU(int netId) { assert(runtimeData[netId].memType == GPU_MEM); checkAndSetGPUDevice(netId); if (sim_with_stp || sim_with_conductances) { hipLaunchKernelGGL(( kernel_STPUpdateAndDecayConductances), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, simTimeMs, simTimeSec, simTime); CUDA_GET_LAST_ERROR("STP update\n"); } } void SNN::initGPU(int netId) { checkAndSetGPUDevice(netId); assert(runtimeData[netId].allocated); hipLaunchKernelGGL(( kernel_initGPUMemory), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, ); CUDA_GET_LAST_ERROR("initGPUMemory kernel failed\n"); } void SNN::deleteRuntimeData_GPU(int netId) { assert(runtimeData[netId].memType == GPU_MEM); checkAndSetGPUDevice(netId); // hipFree all device pointers CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].voltage) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].nextVoltage)); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].recovery) 
); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].current) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].extCurrent) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].curSpike)); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].Npre) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].Npre_plastic) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].Npre_plasticInv) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].Npost) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].cumulativePost) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].cumulativePre) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].synSpikeTime) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].wt) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].wtChange) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].maxSynWt) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].nSpikeCnt) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].avgFiring) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].baseFiring) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].baseFiringInv) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].grpDA) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].grp5HT) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].grpACh) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].grpNE) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].grpDABuffer) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].grp5HTBuffer) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].grpAChBuffer) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].grpNEBuffer) ); if (networkConfigs[netId].sim_with_nm) { CUDA_CHECK_ERRORS(hipFree(runtimeData[netId].nVBuffer)); CUDA_CHECK_ERRORS(hipFree(runtimeData[netId].nUBuffer)); CUDA_CHECK_ERRORS(hipFree(runtimeData[netId].nIBuffer)); } CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].grpIds) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].Izh_a) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].Izh_b) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].Izh_c) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].Izh_d) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].Izh_C)); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].Izh_k)); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].Izh_vr)); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].Izh_vt)); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].Izh_vpeak)); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].gAMPA) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].lif_tau_m) ); //LIF parameters CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].lif_tau_ref) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].lif_tau_ref_c) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].lif_vTh) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].lif_vReset) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].lif_gain) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].lif_bias) ); if (sim_with_NMDA_rise) { CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].gNMDA_r) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].gNMDA_d) ); } else { CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].gNMDA) ); } CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].gGABAa) ); if (sim_with_GABAb_rise) { CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].gGABAb_r) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].gGABAb_d) ); } else { CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].gGABAb) ); } CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].stpu) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].stpx) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].connIdsPreIdx) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].groupIdInfo) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].neuronAllocation) 
); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].postDelayInfo) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].postSynapticIds) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].preSynapticIds) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].I_set) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].poissonFireRate) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].lastSpikeTime) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].spikeGenBits) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].firingTableD2) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].firingTableD1) ); int** tempPtrs; tempPtrs = new int*[networkConfigs[netId].numGroups]; // fetch device memory address stored in extFiringTableD2 CUDA_CHECK_ERRORS( hipMemcpy(tempPtrs, runtimeData[netId].extFiringTableD2, sizeof(int*) * networkConfigs[netId].numGroups, hipMemcpyDeviceToHost) ); for (int i = 0; i < networkConfigs[netId].numGroups; i++) CUDA_CHECK_ERRORS( hipFree(tempPtrs[i]) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].extFiringTableD2) ); // fetch device memory address stored in extFiringTableD1 CUDA_CHECK_ERRORS( hipMemcpy(tempPtrs, runtimeData[netId].extFiringTableD1, sizeof(int*) * networkConfigs[netId].numGroups, hipMemcpyDeviceToHost) ); for (int i = 0; i < networkConfigs[netId].numGroups; i++) CUDA_CHECK_ERRORS( hipFree(tempPtrs[i]) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].extFiringTableD1) ); delete [] tempPtrs; CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].extFiringTableEndIdxD2) ); CUDA_CHECK_ERRORS( hipFree(runtimeData[netId].extFiringTableEndIdxD1) ); // delete random numbr generator on GPU(s) // Note: RNG_rand48 objects allocate device memory if (runtimeData[netId].gpuRandGen != NULL) hiprandDestroyGenerator(runtimeData[netId].gpuRandGen); runtimeData[netId].gpuRandGen = NULL; if (runtimeData[netId].randNum != NULL) CUDA_CHECK_ERRORS(hipFree(runtimeData[netId].randNum)); runtimeData[netId].randNum = NULL; } void SNN::globalStateUpdate_C_GPU(int netId) { assert(runtimeData[netId].memType == GPU_MEM); checkAndSetGPUDevice(netId); kernel_conductanceUpdate << <NUM_BLOCKS, NUM_THREADS >> > (simTimeMs, simTimeSec, simTime); CUDA_GET_LAST_ERROR("kernel_conductanceUpdate failed"); // use memset to reset I_set for debugging, resume it later //CUDA_CHECK_ERRORS(hipMemset(runtimeData[netId].I_set, 0, networkConfigs[netId].I_setPitch * networkConfigs[netId].I_setLength)); } void SNN::globalStateUpdate_N_GPU(int netId) { assert(runtimeData[netId].memType == GPU_MEM); checkAndSetGPUDevice(netId); bool lastIteration = false; for (int j = 1; j <= networkConfigs[netId].simNumStepsPerMs; j++) { if (j == networkConfigs[netId].simNumStepsPerMs) lastIteration = true; // update all neuron state (i.e., voltage and recovery), including homeostasis kernel_neuronStateUpdate << <NUM_BLOCKS, NUM_THREADS >> > (simTimeMs, lastIteration); CUDA_GET_LAST_ERROR("Kernel execution failed"); // the above kernel should end with a syncthread statement to be on the safe side CUDA_CHECK_ERRORS(hipMemcpy(&runtimeData[netId].voltage[0], &runtimeData[netId].nextVoltage[0], sizeof(float) * networkConfigs[netId].numNReg, hipMemcpyDeviceToDevice)); } } void SNN::globalStateUpdate_G_GPU(int netId) { assert(runtimeData[netId].memType == GPU_MEM); checkAndSetGPUDevice(netId); // update all group state (i.e., concentration of neuronmodulators) // currently support 4 x 128 groups hipLaunchKernelGGL(( kernel_groupStateUpdate), dim3(4), dim3(NUM_THREADS), 0, 0, simTimeMs); CUDA_GET_LAST_ERROR("Kernel execution failed"); } void 
SNN::assignPoissonFiringRate_GPU(int netId) { assert(runtimeData[netId].memType == GPU_MEM); checkAndSetGPUDevice(netId); for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) { // given group of neurons belong to the poisson group.... if (groupConfigs[netId][lGrpId].isSpikeGenerator) { int lNId = groupConfigs[netId][lGrpId].lStartN; int gGrpId = groupConfigs[netId][lGrpId].gGrpId; PoissonRate* rate = groupConfigMDMap[gGrpId].ratePtr; // if spikeGenFunc group does not have a Poisson pointer, skip if (groupConfigMap[gGrpId].spikeGenFunc || rate == NULL) continue; assert(runtimeData[netId].poissonFireRate != NULL); if (rate->isOnGPU()) { // rates allocated on GPU CUDA_CHECK_ERRORS(hipMemcpy(&runtimeData[netId].poissonFireRate[lNId - networkConfigs[netId].numNReg], rate->getRatePtrGPU(), sizeof(float) * rate->getNumNeurons(), hipMemcpyDeviceToDevice)); }else { // rates allocated on CPU CUDA_CHECK_ERRORS(hipMemcpy(&runtimeData[netId].poissonFireRate[lNId - networkConfigs[netId].numNReg], rate->getRatePtrCPU(), sizeof(float) * rate->getNumNeurons(), hipMemcpyHostToDevice)); } } } } // Note: for temporarily use, might be merged into exchangeExternalSpike void SNN::clearExtFiringTable_GPU(int netId) { assert(runtimeData[netId].memType == GPU_MEM); checkAndSetGPUDevice(netId); CUDA_CHECK_ERRORS(hipMemset(runtimeData[netId].extFiringTableEndIdxD1, 0, sizeof(int) * networkConfigs[netId].numGroups)); CUDA_CHECK_ERRORS(hipMemset(runtimeData[netId].extFiringTableEndIdxD2, 0, sizeof(int) * networkConfigs[netId].numGroups)); } //void SNN::routeSpikes_GPU() { // int firingTableIdxD2, firingTableIdxD1; // int GtoLOffset; // // ToDo: route spikes using routing table. currently only exchange spikes between GPU0 and GPU1 // // GPU0 -> GPU1 // if (!groupPartitionLists[0].empty() && !groupPartitionLists[1].empty()) { // checkAndSetGPUDevice(0); // CUDA_CHECK_ERRORS( hipMemcpy(managerRuntimeData.extFiringTableEndIdxD2, runtimeData[0].extFiringTableEndIdxD2, sizeof(int) * networkConfigs[0].numGroups, hipMemcpyDeviceToHost)); // CUDA_CHECK_ERRORS( hipMemcpy(managerRuntimeData.extFiringTableEndIdxD1, runtimeData[0].extFiringTableEndIdxD1, sizeof(int) * networkConfigs[0].numGroups, hipMemcpyDeviceToHost)); // CUDA_CHECK_ERRORS( hipMemcpy(managerRuntimeData.extFiringTableD2, runtimeData[0].extFiringTableD2, sizeof(int*) * networkConfigs[0].numGroups, hipMemcpyDeviceToHost)); // CUDA_CHECK_ERRORS( hipMemcpy(managerRuntimeData.extFiringTableD1, runtimeData[0].extFiringTableD1, sizeof(int*) * networkConfigs[0].numGroups, hipMemcpyDeviceToHost)); // //KERNEL_DEBUG("GPU0 D1ex:%d/D2ex:%d", managerRuntimeData.extFiringTableEndIdxD1[0], managerRuntimeData.extFiringTableEndIdxD2[0]); // // checkAndSetGPUDevice(1); // CUDA_CHECK_ERRORS( hipMemcpyFromSymbol(managerRuntimeData.timeTableD2, timeTableD2GPU, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, hipMemcpyDeviceToHost)); // CUDA_CHECK_ERRORS( hipMemcpyFromSymbol(managerRuntimeData.timeTableD1, timeTableD1GPU, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, hipMemcpyDeviceToHost)); // firingTableIdxD2 = managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1]; // firingTableIdxD1 = managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1]; // //KERNEL_DEBUG("GPU1 D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2); // // for (int lGrpId = 0; lGrpId < networkConfigs[0].numGroups; lGrpId++) { // if (groupConfigs[0][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD2[lGrpId] > 0) { // 
CUDA_CHECK_ERRORS( hipMemcpyPeer(runtimeData[1].firingTableD2 + firingTableIdxD2, 1, // managerRuntimeData.extFiringTableD2[lGrpId], 0, // sizeof(int) * managerRuntimeData.extFiringTableEndIdxD2[lGrpId])); // // for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[1].begin(); grpIt != groupPartitionLists[1].end(); grpIt++) { // if (grpIt->gGrpId == groupConfigs[0][lGrpId].gGrpId) // GtoLOffset = grpIt->GtoLOffset; // } // // hipLaunchKernelGGL(( kernel_convertExtSpikesD2), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, firingTableIdxD2, // firingTableIdxD2 + managerRuntimeData.extFiringTableEndIdxD2[lGrpId], // GtoLOffset); // [StartIdx, EndIdx) // firingTableIdxD2 += managerRuntimeData.extFiringTableEndIdxD2[lGrpId]; // } // // if (groupConfigs[0][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD1[lGrpId] > 0) { // CUDA_CHECK_ERRORS( hipMemcpyPeer(runtimeData[1].firingTableD1 + firingTableIdxD1, 1, // managerRuntimeData.extFiringTableD1[lGrpId], 0, // sizeof(int) * managerRuntimeData.extFiringTableEndIdxD1[lGrpId])); // // for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[1].begin(); grpIt != groupPartitionLists[1].end(); grpIt++) { // if (grpIt->gGrpId == groupConfigs[0][lGrpId].gGrpId) // GtoLOffset = grpIt->GtoLOffset; // } // // hipLaunchKernelGGL(( kernel_convertExtSpikesD1), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, firingTableIdxD1, // firingTableIdxD1 + managerRuntimeData.extFiringTableEndIdxD1[lGrpId], // GtoLOffset); // [StartIdx, EndIdx) // firingTableIdxD1 += managerRuntimeData.extFiringTableEndIdxD1[lGrpId]; // // } // //KERNEL_DEBUG("GPU1 New D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2); // } // managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD2; // managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD1; // CUDA_CHECK_ERRORS( hipMemcpyToSymbol(timeTableD2GPU, managerRuntimeData.timeTableD2, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, hipMemcpyHostToDevice)); // CUDA_CHECK_ERRORS( hipMemcpyToSymbol(timeTableD1GPU, managerRuntimeData.timeTableD1, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, hipMemcpyHostToDevice)); // } // // // GPU1 -> GPU0 // if (!groupPartitionLists[1].empty() && !groupPartitionLists[0].empty()) { // checkAndSetGPUDevice(1); // CUDA_CHECK_ERRORS( hipMemcpy(managerRuntimeData.extFiringTableEndIdxD2, runtimeData[1].extFiringTableEndIdxD2, sizeof(int) * networkConfigs[1].numGroups, hipMemcpyDeviceToHost)); // CUDA_CHECK_ERRORS( hipMemcpy(managerRuntimeData.extFiringTableEndIdxD1, runtimeData[1].extFiringTableEndIdxD1, sizeof(int) * networkConfigs[1].numGroups, hipMemcpyDeviceToHost)); // CUDA_CHECK_ERRORS( hipMemcpy(managerRuntimeData.extFiringTableD2, runtimeData[1].extFiringTableD2, sizeof(int*) * networkConfigs[1].numGroups, hipMemcpyDeviceToHost)); // CUDA_CHECK_ERRORS( hipMemcpy(managerRuntimeData.extFiringTableD1, runtimeData[1].extFiringTableD1, sizeof(int*) * networkConfigs[1].numGroups, hipMemcpyDeviceToHost)); // //KERNEL_DEBUG("GPU1 D1ex:%d/D2ex:%d", managerRuntimeData.extFiringTableEndIdxD1[0], managerRuntimeData.extFiringTableEndIdxD2[0]); // // checkAndSetGPUDevice(0); // CUDA_CHECK_ERRORS( hipMemcpyFromSymbol(managerRuntimeData.timeTableD2, timeTableD2GPU, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, hipMemcpyDeviceToHost)); // CUDA_CHECK_ERRORS( hipMemcpyFromSymbol(managerRuntimeData.timeTableD1, timeTableD1GPU, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, hipMemcpyDeviceToHost)); // 
firingTableIdxD2 = managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1]; // firingTableIdxD1 = managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1]; // //KERNEL_DEBUG("GPU0 D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2); // // for (int lGrpId = 0; lGrpId < networkConfigs[1].numGroups; lGrpId++) { // if (groupConfigs[1][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD2[lGrpId] > 0) { // CUDA_CHECK_ERRORS( hipMemcpyPeer(runtimeData[0].firingTableD2 + firingTableIdxD2, 0, // managerRuntimeData.extFiringTableD2[lGrpId], 1, // sizeof(int) * managerRuntimeData.extFiringTableEndIdxD2[lGrpId])); // // for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[0].begin(); grpIt != groupPartitionLists[0].end(); grpIt++) { // if (grpIt->gGrpId == groupConfigs[1][lGrpId].gGrpId) // GtoLOffset = grpIt->GtoLOffset; // } // // hipLaunchKernelGGL(( kernel_convertExtSpikesD2), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, firingTableIdxD2, // firingTableIdxD2 + managerRuntimeData.extFiringTableEndIdxD2[lGrpId], // GtoLOffset); // [StartIdx, EndIdx) // firingTableIdxD2 += managerRuntimeData.extFiringTableEndIdxD2[lGrpId]; // } // // if (groupConfigs[1][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD1[lGrpId] > 0) { // CUDA_CHECK_ERRORS( hipMemcpyPeer(runtimeData[0].firingTableD1 + firingTableIdxD1, 0, // managerRuntimeData.extFiringTableD1[lGrpId], 1, // sizeof(int) * managerRuntimeData.extFiringTableEndIdxD1[lGrpId])); // // for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[0].begin(); grpIt != groupPartitionLists[0].end(); grpIt++) { // if (grpIt->gGrpId == groupConfigs[1][lGrpId].gGrpId) // GtoLOffset = grpIt->GtoLOffset; // } // // hipLaunchKernelGGL(( kernel_convertExtSpikesD1), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, firingTableIdxD1, // firingTableIdxD1 + managerRuntimeData.extFiringTableEndIdxD1[lGrpId], // GtoLOffset); // [StartIdx, EndIdx) // firingTableIdxD1 += managerRuntimeData.extFiringTableEndIdxD1[lGrpId]; // } // //KERNEL_DEBUG("GPU0 New D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2); // } // managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD2; // managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD1; // CUDA_CHECK_ERRORS( hipMemcpyToSymbol(timeTableD2GPU, managerRuntimeData.timeTableD2, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, hipMemcpyHostToDevice)); // CUDA_CHECK_ERRORS( hipMemcpyToSymbol(timeTableD1GPU, managerRuntimeData.timeTableD1, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, hipMemcpyHostToDevice)); // } // // // for (std::list<RoutingTableEntry>::iterator rteItr = spikeRoutingTable.begin(); rteItr != spikeRoutingTable.end(); rteItr++) { // int srcNetId = rteItr->srcNetId; // int destNetId = rteItr->destNetId; // assert(srcNetId < CPU_RUNTIME_BASE); // assert(destNetId < CPU_RUNTIME_BASE); // checkAndSetGPUDevice(srcNetId); // CUDA_CHECK_ERRORS( hipMemcpy(managerRuntimeData.extFiringTableEndIdxD2, runtimeData[srcNetId].extFiringTableEndIdxD2, sizeof(int) * networkConfigs[srcNetId].numGroups, hipMemcpyDeviceToHost)); // CUDA_CHECK_ERRORS( hipMemcpy(managerRuntimeData.extFiringTableEndIdxD1, runtimeData[srcNetId].extFiringTableEndIdxD1, sizeof(int) * networkConfigs[srcNetId].numGroups, hipMemcpyDeviceToHost)); // CUDA_CHECK_ERRORS( hipMemcpy(managerRuntimeData.extFiringTableD2, runtimeData[srcNetId].extFiringTableD2, sizeof(int*) * networkConfigs[srcNetId].numGroups, 
hipMemcpyDeviceToHost)); // CUDA_CHECK_ERRORS( hipMemcpy(managerRuntimeData.extFiringTableD1, runtimeData[srcNetId].extFiringTableD1, sizeof(int*) * networkConfigs[srcNetId].numGroups, hipMemcpyDeviceToHost)); // //KERNEL_DEBUG("GPU0 D1ex:%d/D2ex:%d", managerRuntimeData.extFiringTableEndIdxD1[0], managerRuntimeData.extFiringTableEndIdxD2[0]); // // checkAndSetGPUDevice(destNetId); // CUDA_CHECK_ERRORS( hipMemcpyFromSymbol(managerRuntimeData.timeTableD2, timeTableD2GPU, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, hipMemcpyDeviceToHost)); // CUDA_CHECK_ERRORS( hipMemcpyFromSymbol(managerRuntimeData.timeTableD1, timeTableD1GPU, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, hipMemcpyDeviceToHost)); // firingTableIdxD2 = managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1]; // firingTableIdxD1 = managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1]; // //KERNEL_DEBUG("GPU1 D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2); // // for (int lGrpId = 0; lGrpId < networkConfigs[srcNetId].numGroups; lGrpId++) { // if (groupConfigs[srcNetId][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD2[lGrpId] > 0) { // CUDA_CHECK_ERRORS( hipMemcpyPeer(runtimeData[destNetId].firingTableD2 + firingTableIdxD2, destNetId, // managerRuntimeData.extFiringTableD2[lGrpId], srcNetId, // sizeof(int) * managerRuntimeData.extFiringTableEndIdxD2[lGrpId])); // // for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[destNetId].begin(); grpIt != groupPartitionLists[destNetId].end(); grpIt++) { // if (grpIt->gGrpId == groupConfigs[srcNetId][lGrpId].gGrpId) // GtoLOffset = grpIt->GtoLOffset; // } // // hipLaunchKernelGGL(( kernel_convertExtSpikesD2), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, firingTableIdxD2, // firingTableIdxD2 + managerRuntimeData.extFiringTableEndIdxD2[lGrpId], // GtoLOffset); // [StartIdx, EndIdx) // firingTableIdxD2 += managerRuntimeData.extFiringTableEndIdxD2[lGrpId]; // } // // if (groupConfigs[srcNetId][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD1[lGrpId] > 0) { // CUDA_CHECK_ERRORS( hipMemcpyPeer(runtimeData[destNetId].firingTableD1 + firingTableIdxD1, destNetId, // managerRuntimeData.extFiringTableD1[lGrpId], srcNetId, // sizeof(int) * managerRuntimeData.extFiringTableEndIdxD1[lGrpId])); // // for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[destNetId].begin(); grpIt != groupPartitionLists[destNetId].end(); grpIt++) { // if (grpIt->gGrpId == groupConfigs[srcNetId][lGrpId].gGrpId) // GtoLOffset = grpIt->GtoLOffset; // } // // hipLaunchKernelGGL(( kernel_convertExtSpikesD1), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, firingTableIdxD1, // firingTableIdxD1 + managerRuntimeData.extFiringTableEndIdxD1[lGrpId], // GtoLOffset); // [StartIdx, EndIdx) // firingTableIdxD1 += managerRuntimeData.extFiringTableEndIdxD1[lGrpId]; // // } // //KERNEL_DEBUG("GPU1 New D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2); // } // managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD2; // managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD1; // CUDA_CHECK_ERRORS( hipMemcpyToSymbol(timeTableD2GPU, managerRuntimeData.timeTableD2, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, hipMemcpyHostToDevice)); // CUDA_CHECK_ERRORS( hipMemcpyToSymbol(timeTableD1GPU, managerRuntimeData.timeTableD1, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, hipMemcpyHostToDevice)); // } //} /*! 
* \brief This function is called every second by SNN::runNetwork(). It updates the firingTableD1(D2)GPU and * timeTableD1(D2)GPU by removing older firing information. */ void SNN::shiftSpikeTables_F_GPU(int netId) { assert(runtimeData[netId].memType == GPU_MEM); checkAndSetGPUDevice(netId); hipLaunchKernelGGL(( kernel_shiftFiringTable), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, ); } void SNN::shiftSpikeTables_T_GPU(int netId) { assert(runtimeData[netId].memType == GPU_MEM); checkAndSetGPUDevice(netId); hipLaunchKernelGGL(( kernel_shiftTimeTable), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, ); } /* * \brief Update syanptic weights every 10ms, 100ms, or 1000ms * * */ void SNN::updateWeights_GPU(int netId) { assert(sim_in_testing == false); assert(sim_with_fixedwts == false); assert(runtimeData[netId].memType == GPU_MEM); checkAndSetGPUDevice(netId); hipLaunchKernelGGL(( kernel_updateWeights), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, ); } //__global__ void gpu_resetFiringInformation() { // if(threadIdx.x==0 && blockIdx.x==0) { // for(int i = 0; i < ROUNDED_TIMING_COUNT; i++) { // timeTableD2GPU[i] = 0; // timeTableD1GPU[i] = 0; // } // spikeCountD2SecGPU=0; // spikeCountD1SecGPU=0; // secD2fireCntTest=0; // secD1fireCntTest=0; // spikeCountD2GPU=0; // spikeCountD1GPU=0; // // //spikeCountAll1Sec=0;//assigned in fetchSpikeTables() // } // //} // //void SNN::resetFiringInformation_GPU() { // checkAndSetGPUDevice(); // // gpu_resetFiringInformation<<<NUM_BLOCKS,NUM_THREADS>>>(); //} /*! * \brief this function allocates device (GPU) memory sapce and copies external current to it * * This function: * (allocate and) copy extCurrent * * This funcion is called by copyNeuronState() and setExternalCurrent. Only host-to-divice copy is required * * \param[in] netId the id of a local network, which is the same as the device (GPU) id * \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied * \param[in] dest pointer to runtime data desitnation * \param[in] allocateMem a flag indicates whether allocating memory space before copying * * \sa allocateSNN_GPU fetchSTPState * \since v3.0 */ void SNN::copyExternalCurrent(int netId, int lGrpId, RuntimeData* dest, hipMemcpyKind kind, bool allocateMem) { checkAndSetGPUDevice(netId); checkDestSrcPtrs(dest, &managerRuntimeData, hipMemcpyHostToDevice, allocateMem, lGrpId, 0);// check that the destination pointer is properly allocated.. assert(kind == hipMemcpyHostToDevice); int posN, lengthN; if (lGrpId == ALL) { posN = 0; lengthN = networkConfigs[netId].numNReg; } else { assert(lGrpId >= 0); posN = groupConfigs[netId][lGrpId].lStartN; lengthN = groupConfigs[netId][lGrpId].numN; } assert(lengthN >= 0 && lengthN <= networkConfigs[netId].numNReg); // assert NOT poisson neurons //KERNEL_DEBUG("copyExternalCurrent: lGrpId=%d, ptrPos=%d, length=%d, allocate=%s", lGrpId, posN, lengthN, allocateMem?"y":"n"); if(allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->extCurrent, sizeof(float) * lengthN)); CUDA_CHECK_ERRORS(hipMemcpy(&(dest->extCurrent[posN]), &(managerRuntimeData.extCurrent[posN]), sizeof(float) * lengthN, hipMemcpyHostToDevice)); } /*! 
* \brief This function fetch the spike count in all local networks and sum the up */ void SNN::copyNetworkSpikeCount(int netId, hipMemcpyKind kind, unsigned int* spikeCountD1, unsigned int* spikeCountD2, unsigned int* spikeCountExtD1, unsigned int* spikeCountExtD2) { checkAndSetGPUDevice(netId); assert(kind == hipMemcpyDeviceToHost); CUDA_CHECK_ERRORS(hipMemcpyFromSymbol(spikeCountExtD2, spikeCountExtRxD2GPU, sizeof(int), 0, hipMemcpyDeviceToHost)); CUDA_CHECK_ERRORS(hipMemcpyFromSymbol(spikeCountExtD1, spikeCountExtRxD1GPU, sizeof(int), 0, hipMemcpyDeviceToHost)); CUDA_CHECK_ERRORS(hipMemcpyFromSymbol(spikeCountD2, spikeCountD2GPU, sizeof(int), 0, hipMemcpyDeviceToHost)); CUDA_CHECK_ERRORS(hipMemcpyFromSymbol(spikeCountD1, spikeCountD1GPU, sizeof(int), 0, hipMemcpyDeviceToHost)); } /*! * \brief This function fetch spikeTables in the local network specified by netId * * \param[in] netId the id of local network of which timeTableD1(D2) and firingTableD1(D2) are copied to manager runtime data */ void SNN::copySpikeTables(int netId, hipMemcpyKind kind) { unsigned int gpuSpikeCountD1Sec, gpuSpikeCountD2Sec, gpuSpikeCountLastSecLeftD2; checkAndSetGPUDevice(netId); assert(kind == hipMemcpyDeviceToHost); CUDA_CHECK_ERRORS(hipMemcpyFromSymbol(&gpuSpikeCountLastSecLeftD2, spikeCountLastSecLeftD2GPU, sizeof(int), 0, hipMemcpyDeviceToHost)); CUDA_CHECK_ERRORS(hipMemcpyFromSymbol(&gpuSpikeCountD2Sec, spikeCountD2SecGPU, sizeof(int), 0, hipMemcpyDeviceToHost)); CUDA_CHECK_ERRORS(hipMemcpyFromSymbol(&gpuSpikeCountD1Sec, spikeCountD1SecGPU, sizeof(int), 0, hipMemcpyDeviceToHost)); CUDA_CHECK_ERRORS(hipMemcpy(managerRuntimeData.firingTableD2, runtimeData[netId].firingTableD2, sizeof(int)*(gpuSpikeCountD2Sec + gpuSpikeCountLastSecLeftD2), hipMemcpyDeviceToHost)); CUDA_CHECK_ERRORS(hipMemcpy(managerRuntimeData.firingTableD1, runtimeData[netId].firingTableD1, sizeof(int)*gpuSpikeCountD1Sec, hipMemcpyDeviceToHost)); CUDA_CHECK_ERRORS(hipMemcpyFromSymbol(managerRuntimeData.timeTableD2, timeTableD2GPU, sizeof(int)*(1000 + glbNetworkConfig.maxDelay + 1), 0, hipMemcpyDeviceToHost)); CUDA_CHECK_ERRORS(hipMemcpyFromSymbol(managerRuntimeData.timeTableD1, timeTableD1GPU, sizeof(int)*(1000 + glbNetworkConfig.maxDelay + 1), 0, hipMemcpyDeviceToHost)); } /*! * \brief This function fetch neuron state buffer in the local network specified by netId * * This function: * (allocate and) copy * * This funcion is called by copyNeuronState() * * \param[in] netId the id of a local network, which is the same as the device (GPU) id * \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied * \param[in] dest pointer to runtime data desitnation * \param[in] src pointer to runtime data source * \param[in] kind the direction of copy * \param[in] allocateMem a flag indicates whether allocating memory space before copying * * \sa copyNeuronState * \since v4.0 */ void SNN::copyNeuronStateBuffer(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, hipMemcpyKind kind, bool allocateMem) { checkAndSetGPUDevice(netId); checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, 0); // check that the destination pointer is properly allocated.. 
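// Descriptive note (added): nVBuffer, nUBuffer, and nIBuffer are laid out as one fixed-size slab
// per group of MAX_NEURON_MON_GRP_SZIE * 1000 floats (up to MAX_NEURON_MON_GRP_SZIE monitored
// neurons, 1000 samples each, presumably one per ms of the current second). Hence ptrPos below is
// lGrpId * MAX_NEURON_MON_GRP_SZIE * 1000 for a single group, and length spans all groups when
// lGrpId == ALL.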
int ptrPos, length; if (lGrpId == ALL) { ptrPos = 0; length = networkConfigs[netId].numGroups * MAX_NEURON_MON_GRP_SZIE * 1000; } else { ptrPos = lGrpId * MAX_NEURON_MON_GRP_SZIE * 1000; length = MAX_NEURON_MON_GRP_SZIE * 1000; } assert(length <= networkConfigs[netId].numGroups * MAX_NEURON_MON_GRP_SZIE * 1000); assert(length > 0); // neuron information assert(src->nVBuffer != NULL); if (allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->nVBuffer, sizeof(float) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->nVBuffer[ptrPos], &src->nVBuffer[ptrPos], sizeof(float) * length, kind)); assert(src->nUBuffer != NULL); if (allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->nUBuffer, sizeof(float) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->nUBuffer[ptrPos], &src->nUBuffer[ptrPos], sizeof(float) * length, kind)); assert(src->nIBuffer != NULL); if (allocateMem) CUDA_CHECK_ERRORS(hipMalloc((void**)&dest->nIBuffer, sizeof(float) * length)); CUDA_CHECK_ERRORS(hipMemcpy(&dest->nIBuffer[ptrPos], &src->nIBuffer[ptrPos], sizeof(float) * length, kind)); } void SNN::copyTimeTable(int netId, hipMemcpyKind kind) { assert(netId < CPU_RUNTIME_BASE); checkAndSetGPUDevice(netId); if (kind == hipMemcpyDeviceToHost) { CUDA_CHECK_ERRORS(hipMemcpyFromSymbol(managerRuntimeData.timeTableD2, timeTableD2GPU, sizeof(int)*(1000 + glbNetworkConfig.maxDelay + 1), 0, hipMemcpyDeviceToHost)); CUDA_CHECK_ERRORS(hipMemcpyFromSymbol(managerRuntimeData.timeTableD1, timeTableD1GPU, sizeof(int)*(1000 + glbNetworkConfig.maxDelay + 1), 0, hipMemcpyDeviceToHost)); } else { // kind == hipMemcpyHostToDevice CUDA_CHECK_ERRORS(hipMemcpyToSymbol(timeTableD2GPU, managerRuntimeData.timeTableD2, sizeof(int)*(1000 + glbNetworkConfig.maxDelay + 1), 0, hipMemcpyHostToDevice)); CUDA_CHECK_ERRORS(hipMemcpyToSymbol(timeTableD1GPU, managerRuntimeData.timeTableD1, sizeof(int)*(1000 + glbNetworkConfig.maxDelay + 1), 0, hipMemcpyHostToDevice)); } } void SNN::copyExtFiringTable(int netId, hipMemcpyKind kind) { assert(netId < CPU_RUNTIME_BASE); checkAndSetGPUDevice(netId); CUDA_CHECK_ERRORS(hipMemcpy(managerRuntimeData.extFiringTableEndIdxD2, runtimeData[netId].extFiringTableEndIdxD2, sizeof(int) * networkConfigs[netId].numGroups, kind)); CUDA_CHECK_ERRORS(hipMemcpy(managerRuntimeData.extFiringTableEndIdxD1, runtimeData[netId].extFiringTableEndIdxD1, sizeof(int) * networkConfigs[netId].numGroups, kind)); CUDA_CHECK_ERRORS(hipMemcpy(managerRuntimeData.extFiringTableD2, runtimeData[netId].extFiringTableD2, sizeof(int*) * networkConfigs[netId].numGroups, kind)); CUDA_CHECK_ERRORS(hipMemcpy(managerRuntimeData.extFiringTableD1, runtimeData[netId].extFiringTableD1, sizeof(int*) * networkConfigs[netId].numGroups, kind)); //KERNEL_DEBUG("GPU0 D1ex:%d/D2ex:%d", managerRuntimeData.extFiringTableEndIdxD1[0], managerRuntimeData.extFiringTableEndIdxD2[0]); } int SNN::configGPUDevice() { int devCount, devMax; hipDeviceProp_t deviceProp; CUDA_CHECK_ERRORS(hipGetDeviceCount(&devCount)); KERNEL_INFO("CUDA devices Configuration:"); KERNEL_INFO(" - Number of CUDA devices = %9d", devCount); devMax = CUDA_GET_MAXGFLOP_DEVICE_ID(); KERNEL_INFO(" - CUDA device ID with max GFLOPs = %9d", devMax); for (int ithGPU = 0; ithGPU < devCount; ithGPU++) { CUDA_CHECK_ERRORS(hipGetDeviceProperties(&deviceProp, ithGPU)); KERNEL_INFO(" + Use CUDA device[%1d] = %9s", ithGPU, deviceProp.name); KERNEL_INFO(" + CUDA Compute Capability (CC) = %2d.%d", deviceProp.major, deviceProp.minor); } if (deviceProp.major < 2) { // Unmark this when CC 1.3 is deprecated //KERNEL_ERROR("CARLsim 
does not support CUDA devices older than CC 2.0"); //exitSimulation(1); KERNEL_WARN("CUDA device with CC 1.3 will be deprecated in a future release"); } for (int ithGPU = 0; ithGPU < devCount; ithGPU++) { CUDA_CHECK_ERRORS(hipSetDevice(ithGPU)); CUDA_DEVICE_RESET(); } if (devCount >= 2) { // try to setup P2P access if more than 2 GPUs are presented // FIXME: generalize the initialization for mulit-GPUs up to 4 or 8 // enable P2P access int canAccessPeer_0_1, canAccessPeer_1_0; hipDeviceCanAccessPeer(&canAccessPeer_0_1, 0, 1); hipDeviceCanAccessPeer(&canAccessPeer_1_0, 1, 0); // enable peer access between GPU0 and GPU1 if (canAccessPeer_0_1 & canAccessPeer_1_0) { hipSetDevice(0); hipDeviceEnablePeerAccess(1, 0); hipSetDevice(1); hipDeviceEnablePeerAccess(0, 0); KERNEL_INFO("* Peer Access is enabled"); } else { KERNEL_INFO("* Peer Access is not enabled"); } } return devCount; } void SNN::convertExtSpikesD2_GPU(int netId, int startIdx, int endIdx, int GtoLOffset) { checkAndSetGPUDevice(netId); hipLaunchKernelGGL(( kernel_convertExtSpikesD2), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, startIdx, endIdx, GtoLOffset); // [StartIdx, EndIdx) } void SNN::convertExtSpikesD1_GPU(int netId, int startIdx, int endIdx, int GtoLOffset) { checkAndSetGPUDevice(netId); hipLaunchKernelGGL(( kernel_convertExtSpikesD1), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, startIdx, endIdx, GtoLOffset); // [StartIdx, EndIdx) } void SNN::checkAndSetGPUDevice(int netId) { int currentDevice; hipGetDevice(&currentDevice); assert(netId >= 0 && netId < numAvailableGPUs); if (currentDevice != netId) { //KERNEL_DEBUG("Change GPU context from GPU %d to GPU %d", currentDevice, netId); CUDA_CHECK_ERRORS(hipSetDevice(netId)); } } // deprecated //void SNN::copyWeightsGPU(int nid, int src_grp) { // checkAndSetGPUDevice("copyWeightsGPU"); // // assert(nid < numNReg); // unsigned int cumId = managerRuntimeData.cumulativePre[nid]; // float* synWts = &(managerRuntimeData.wt[cumId]); // //TODO: NEEDED TO COMMENT THIS FOR CARLSIM 2.1-2.2 FILEMERGE -- KDC // // assert(cumId >= (nid-numNPois)); // //assert(cumId < numPreSynapses*networkConfigs[0].numN); // // CUDA_CHECK_ERRORS( hipMemcpy( synWts, &runtimeData[0].wt[cumId], sizeof(float)*managerRuntimeData.Npre[nid], hipMemcpyDeviceToHost)); //} // Allocates required memory and then initialize the GPU void SNN::allocateSNN_GPU(int netId) { checkAndSetGPUDevice(netId); // setup memory type of GPU runtime data runtimeData[netId].memType = GPU_MEM; // display some memory management info size_t avail, total, previous; float toMB = ::pow(1024.0f, 2); hipMemGetInfo(&avail, &total); KERNEL_INFO("GPU Memory Management: (Total %2.3f MB)", (float)(total/toMB)); KERNEL_INFO("Data\t\t\tSize\t\tTotal Used\tTotal Available"); KERNEL_INFO("Init:\t\t\t%2.3f MB\t%2.3f MB\t%2.3f MB", (float)(total)/toMB, (float)((total - avail) / toMB), (float)(avail/toMB)); previous=avail; // allocate random number generator on GPU(s) if(runtimeData[netId].gpuRandGen == NULL) { hiprandCreateGenerator(&runtimeData[netId].gpuRandGen, HIPRAND_RNG_PSEUDO_DEFAULT); hiprandSetPseudoRandomGeneratorSeed(runtimeData[netId].gpuRandGen, randSeed_ + netId); } // allocate SNN::runtimeData[0].randNum for random number generators CUDA_CHECK_ERRORS(hipMalloc((void **)&runtimeData[netId].randNum, networkConfigs[netId].numNPois * sizeof(float))); hipMemGetInfo(&avail, &total); KERNEL_INFO("Random Gen:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB", (float)(previous - avail) / toMB, (float)((total - avail) / toMB), (float)(avail / toMB)); previous=avail; // 
initialize runtimeData[0].neuronAllocation, __device__ loadBufferCount, loadBufferSize allocateStaticLoad(netId, NUM_THREADS); allocateGroupId(netId); // this table is useful for quick evaluation of the position of fired neuron // given a sequence of bits denoting the firing.. // initialize __device__ quickSynIdTableGPU[256] initQuickSynIdTable(netId); hipMemGetInfo(&avail, &total); KERNEL_INFO("Static Load:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB", (float)(previous - avail) / toMB, (float)((total - avail) / toMB), (float)(avail / toMB)); previous=avail; // initialize (copy from SNN) runtimeData[0].Npre, runtimeData[0].Npre_plastic, runtimeData[0].Npre_plasticInv, runtimeData[0].cumulativePre // initialize (copy from SNN) runtimeData[0].cumulativePost, runtimeData[0].Npost, runtimeData[0].postDelayInfo // initialize (copy from SNN) runtimeData[0].postSynapticIds, runtimeData[0].preSynapticIds copyPreConnectionInfo(netId, ALL, &runtimeData[netId], &managerRuntimeData, hipMemcpyHostToDevice, true); copyPostConnectionInfo(netId, ALL, &runtimeData[netId], &managerRuntimeData, hipMemcpyHostToDevice, true); hipMemGetInfo(&avail, &total); KERNEL_INFO("Conn Info:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB", (float)(previous - avail) / toMB, (float)((total - avail) / toMB), (float)(avail / toMB)); previous=avail; // initialize (copy from SNN) runtimeData[0].wt, runtimeData[0].wtChange, runtimeData[0].maxSynWt copySynapseState(netId, &runtimeData[netId], &managerRuntimeData, hipMemcpyHostToDevice, true); hipMemGetInfo(&avail, &total); KERNEL_INFO("Syn State:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB", (float)(previous - avail) / toMB, (float)((total - avail) / toMB), (float)(avail / toMB)); previous=avail; // copy the neuron state information to the GPU.. // initialize (copy from managerRuntimeData) runtimeData[0].recovery, runtimeData[0].voltage, runtimeData[0].current // initialize (copy from managerRuntimeData) runtimeData[0].gGABAa, runtimeData[0].gGABAb, runtimeData[0].gAMPA, runtimeData[0].gNMDA // initialize (copy from SNN) runtimeData[0].Izh_a, runtimeData[0].Izh_b, runtimeData[0].Izh_c, runtimeData[0].Izh_d // initialize (copy form SNN) runtimeData[0].baseFiring, runtimeData[0].baseFiringInv // initialize (copy from SNN) runtimeData[0].n(V,U,I)Buffer[] copyNeuronState(netId, ALL, &runtimeData[netId], hipMemcpyHostToDevice, true); // copy STP state, considered as neuron state if (sim_with_stp) { // initialize (copy from SNN) stpu, stpx copySTPState(netId, ALL, &runtimeData[netId], &managerRuntimeData, hipMemcpyHostToDevice, true); } hipMemGetInfo(&avail, &total); KERNEL_INFO("Neuron State:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB", (float)(previous - avail) / toMB, (float)((total - avail) / toMB), (float)(avail / toMB)); previous=avail; // initialize (copy from SNN) runtimeData[0].grpDA(5HT,ACh,NE) // initialize (copy from SNN) runtimeData[0].grpDA(5HT,ACh,NE)Buffer[] copyGroupState(netId, ALL, &runtimeData[netId], &managerRuntimeData, hipMemcpyHostToDevice, true); hipMemGetInfo(&avail, &total); KERNEL_INFO("Group State:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB", (float)(previous - avail) / toMB, (float)((total - avail) / toMB), (float)(avail / toMB)); previous=avail; // initialize (hipMemset) runtimeData[0].I_set, runtimeData[0].poissonFireRate // initialize (copy from SNN) runtimeData[0].firingTableD1, runtimeData[0].firingTableD2 // initialize (hipMalloc) runtimeData[0].spikeGenBits // initialize (copy from managerRuntimeData) runtimeData[0].nSpikeCnt, // initialize (copy from SNN) runtimeData[0].synSpikeTime, 
runtimeData[0].lastSpikeTime copyAuxiliaryData(netId, ALL, &runtimeData[netId], hipMemcpyHostToDevice, true); hipMemGetInfo(&avail, &total); KERNEL_INFO("Auxiliary Data:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB\n\n", (float)(previous - avail) / toMB, (float)((total - avail) / toMB), (float)(avail / toMB)); previous = avail; // copy relevant pointers and network information to GPU CUDA_CHECK_ERRORS(hipMemcpyToSymbol(runtimeDataGPU, &runtimeData[netId], sizeof(RuntimeData), 0, hipMemcpyHostToDevice)); // copy data to from SNN:: to NetworkConfigRT SNN::networkConfigs[0] copyNetworkConfig(netId, hipMemcpyHostToDevice); // FIXME: we can change the group properties such as STDP as the network is running. So, we need a way to updating the GPU when changes are made. // TODO: move mulSynFast, mulSynSlow to ConnectConfig structure // copy connection configs CUDA_CHECK_ERRORS(hipMemcpyToSymbol(d_mulSynFast, mulSynFast, sizeof(float) * networkConfigs[netId].numConnections, 0, hipMemcpyHostToDevice)); CUDA_CHECK_ERRORS(hipMemcpyToSymbol(d_mulSynSlow, mulSynSlow, sizeof(float) * networkConfigs[netId].numConnections, 0, hipMemcpyHostToDevice)); copyGroupConfigs(netId); KERNEL_DEBUG("Transfering group settings to GPU:"); for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroupsAssigned; lGrpId++) { KERNEL_DEBUG("Settings for Group %s:", groupConfigMap[groupConfigs[netId][lGrpId].gGrpId].grpName.c_str()); KERNEL_DEBUG("\tType: %d", (int)groupConfigs[netId][lGrpId].Type); KERNEL_DEBUG("\tNumN: %d", groupConfigs[netId][lGrpId].numN); KERNEL_DEBUG("\tM: %d", groupConfigs[netId][lGrpId].numPostSynapses); KERNEL_DEBUG("\tPreM: %d", groupConfigs[netId][lGrpId].numPreSynapses); KERNEL_DEBUG("\tspikeGenerator: %d", (int)groupConfigs[netId][lGrpId].isSpikeGenerator); KERNEL_DEBUG("\tFixedInputWts: %d", (int)groupConfigs[netId][lGrpId].FixedInputWts); KERNEL_DEBUG("\tMaxDelay: %d", (int)groupConfigs[netId][lGrpId].MaxDelay); KERNEL_DEBUG("\tWithSTDP: %d", (int)groupConfigs[netId][lGrpId].WithSTDP); if (groupConfigs[netId][lGrpId].WithSTDP) { KERNEL_DEBUG("\t\tE-STDP type: %s", stdpType_string[groupConfigs[netId][lGrpId].WithESTDPtype]); KERNEL_DEBUG("\t\tTAU_PLUS_INV_EXC: %f", groupConfigs[netId][lGrpId].TAU_PLUS_INV_EXC); KERNEL_DEBUG("\t\tTAU_MINUS_INV_EXC: %f", groupConfigs[netId][lGrpId].TAU_MINUS_INV_EXC); KERNEL_DEBUG("\t\tALPHA_PLUS_EXC: %f", groupConfigs[netId][lGrpId].ALPHA_PLUS_EXC); KERNEL_DEBUG("\t\tALPHA_MINUS_EXC: %f", groupConfigs[netId][lGrpId].ALPHA_MINUS_EXC); KERNEL_DEBUG("\t\tI-STDP type: %s", stdpType_string[groupConfigs[netId][lGrpId].WithISTDPtype]); KERNEL_DEBUG("\t\tTAU_PLUS_INV_INB: %f", groupConfigs[netId][lGrpId].TAU_PLUS_INV_INB); KERNEL_DEBUG("\t\tTAU_MINUS_INV_INB: %f", groupConfigs[netId][lGrpId].TAU_MINUS_INV_INB); KERNEL_DEBUG("\t\tALPHA_PLUS_INB: %f", groupConfigs[netId][lGrpId].ALPHA_PLUS_INB); KERNEL_DEBUG("\t\tALPHA_MINUS_INB: %f", groupConfigs[netId][lGrpId].ALPHA_MINUS_INB); KERNEL_DEBUG("\t\tLAMBDA: %f", groupConfigs[netId][lGrpId].LAMBDA); KERNEL_DEBUG("\t\tDELTA: %f", groupConfigs[netId][lGrpId].DELTA); KERNEL_DEBUG("\t\tBETA_LTP: %f", groupConfigs[netId][lGrpId].BETA_LTP); KERNEL_DEBUG("\t\tBETA_LTD: %f", groupConfigs[netId][lGrpId].BETA_LTD); } KERNEL_DEBUG("\tWithSTP: %d", (int)groupConfigs[netId][lGrpId].WithSTP); if (groupConfigs[netId][lGrpId].WithSTP) { KERNEL_DEBUG("\t\tSTP_U: %f", groupConfigs[netId][lGrpId].STP_U); // KERNEL_DEBUG("\t\tSTP_tD: %f",groupConfigs[netId][lGrpId].STP_tD); // KERNEL_DEBUG("\t\tSTP_tF: %f",groupConfigs[netId][lGrpId].STP_tF); } 
KERNEL_DEBUG("\tspikeGen: %s", groupConfigs[netId][lGrpId].isSpikeGenFunc ? "is Set" : "is not set "); } // allocation of gpu runtime data is done runtimeData[netId].allocated = true; // map the timing table to texture.. saves a lot of headache in using shared memory void* devPtr; size_t offset; CUDA_CHECK_ERRORS(hipGetSymbolAddress(&devPtr, timeTableD2GPU)); CUDA_CHECK_ERRORS(hipBindTexture(&offset, timeTableD2GPU_tex, devPtr, sizeof(int) * TIMING_COUNT)); offset = offset / sizeof(int); CUDA_CHECK_ERRORS(hipGetSymbolAddress(&devPtr, timeTableD2GPU_tex_offset)); CUDA_CHECK_ERRORS(hipMemcpy(devPtr, &offset, sizeof(int), hipMemcpyHostToDevice)); CUDA_CHECK_ERRORS(hipGetSymbolAddress(&devPtr, timeTableD1GPU)); CUDA_CHECK_ERRORS(hipBindTexture(&offset, timeTableD1GPU_tex, devPtr, sizeof(int) * TIMING_COUNT)); offset = offset / sizeof(int); CUDA_CHECK_ERRORS(hipGetSymbolAddress(&devPtr, timeTableD1GPU_tex_offset)); CUDA_CHECK_ERRORS(hipMemcpy(devPtr, &offset, sizeof(int), hipMemcpyHostToDevice)); initGPU(netId); }
50b0ec23cd7973b00b8c72032c9d383e1b50b691.cu
/* * Copyright (c) 2016 Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. The names of its contributors may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * *********************************************************************************************** * * CARLsim * created by: (MDR) Micah Richert, (JN) Jayram M. Nageswaran * maintained by: * (MA) Mike Avery <[email protected]> * (MB) Michael Beyeler <[email protected]>, * (KDC) Kristofor Carlson <[email protected]> * (TSC) Ting-Shuo Chou <[email protected]> * (HK) Hirak J Kashyap <[email protected]> * * CARLsim v1.0: JM, MDR * CARLsim v2.0/v2.1/v2.2: JM, MDR, MA, MB, KDC * CARLsim3: MB, KDC, TSC * CARLsim4: TSC, HK * * CARLsim available from http://socsci.uci.edu/~jkrichma/CARLsim/ * Ver 12/31/2016 */ #include <snn.h> #include <spike_buffer.h> #include <error_code.h> #include <cuda_runtime.h> #define NUM_THREADS 128 #define NUM_BLOCKS 64 #define WARP_SIZE 32 /////////////////////////////////////////////////////////////////// // Some important ideas that explains the GPU execution are as follows: // 1. Each GPU block has a local firing table (called fireTable). The block of threads // reads a bunch of neurons parameters and determines if it needs to fire or not // Whenever a neuron need to fire, it keeps track of the fired neuron in the local // table. When the table is full, we go and write back the fireTable to the global // firing table. // 2. Firing information is maintained in two tables globally (timingTable and the globalFiringTable) // for excitatory neuron population and inhibitory neurons. // The globalFiringTable only stores a sequence of id corresponding to fired neurons. // The timingTable store the total number of fired neurons till the current time step t. // These two tables are flushed and adjusted every second. // This approach requires about half of the memory compared to the traditional AER scheme which // stores the firing time and firing id together. 
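// As an added illustration (indices assumed from the description below): the ids of neurons that
// fired during millisecond t of the current second and are routed through the 2+ms table occupy
//   firingTableD2[ timeTableD2GPU[t + maxDelay_] ... timeTableD2GPU[t + maxDelay_ + 1] )
// i.e. the timing table holds cumulative spike counts and the firing table holds only neuron ids,
// instead of one (time, id) pair per spike as a traditional AER scheme would store.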
// For more details kindly read the enclosed report (report.pdf) in the source directory // // // timeTableD2GPU[0] always is 0 -- index into firingTableD2 // timeTableD2GPU[maxDelay_] -- should be the number of spikes "leftover" from the previous second // timeTableD2GPU[maxDelay_+1]-timeTableD2GPU[maxDelay_] -- should be the number of spikes in the first ms of the current second // timeTableD2GPU[1000+maxDelay_] -- should be the number of spikes in the current second + the leftover spikes. // /////////////////////////////////////////////////////////////////// __device__ unsigned int timeTableD2GPU[TIMING_COUNT]; __device__ unsigned int timeTableD1GPU[TIMING_COUNT]; __device__ unsigned int spikeCountD2SecGPU; __device__ unsigned int spikeCountD1SecGPU; __device__ unsigned int spikeCountD2GPU; __device__ unsigned int spikeCountD1GPU; __device__ unsigned int recBufferIdx; __device__ unsigned int secD2fireCntTest; __device__ unsigned int secD1fireCntTest; __device__ unsigned int spikeCountLastSecLeftD2GPU; __device__ unsigned int spikeCountExtRxD1SecGPU; __device__ unsigned int spikeCountExtRxD2SecGPU; __device__ unsigned int spikeCountExtRxD2GPU; __device__ unsigned int spikeCountExtRxD1GPU; __device__ __constant__ RuntimeData runtimeDataGPU; __device__ __constant__ NetworkConfigRT networkConfigGPU; __device__ __constant__ GroupConfigRT groupConfigsGPU[MAX_GRP_PER_SNN]; __device__ __constant__ float d_mulSynFast[MAX_CONN_PER_SNN]; __device__ __constant__ float d_mulSynSlow[MAX_CONN_PER_SNN]; __device__ int loadBufferCount; __device__ int loadBufferSize; texture <int, 1, cudaReadModeElementType> timeTableD2GPU_tex; texture <int, 1, cudaReadModeElementType> timeTableD1GPU_tex; texture <int, 1, cudaReadModeElementType> groupIdInfo_tex; // groupIDInfo is allocated using cudaMalloc thus doesn't require an offset when using textures __device__ int timeTableD1GPU_tex_offset; __device__ int timeTableD2GPU_tex_offset; // example of the quick synaptic table // index cnt // 0000000 - 0 // 0000001 - 0 // 0000010 - 1 // 0100000 - 5 // 0110000 - 4 int quickSynIdTable[256]; __device__ int quickSynIdTableGPU[256]; void initQuickSynIdTable(int netId) { void* devPtr; for(int i = 1; i < 256; i++) { int cnt = 0; while(i) { if(((i >> cnt) & 1) == 1) break; cnt++; assert(cnt <= 7); } quickSynIdTable[i] = cnt; } cudaSetDevice(netId); cudaGetSymbolAddress(&devPtr, quickSynIdTableGPU); CUDA_CHECK_ERRORS(cudaMemcpy( devPtr, quickSynIdTable, sizeof(quickSynIdTable), cudaMemcpyHostToDevice)); } __device__ inline bool isPoissonGroup(short int lGrpId) { return (groupConfigsGPU[lGrpId].Type & POISSON_NEURON); } __device__ inline void setFiringBitSynapses(int lNId, int synId) { unsigned int* tmp_I_set_p = ((unsigned int*)((char*)runtimeDataGPU.I_set + ((synId >> 5) * networkConfigGPU.I_setPitch)) + lNId); atomicOr(tmp_I_set_p, 1 << (synId % 32)); } __device__ inline unsigned int* getFiringBitGroupPtr(int lNId, int synId) { return (((unsigned int*)((char*)runtimeDataGPU.I_set + synId * networkConfigGPU.I_setPitch)) + lNId); } __device__ inline int getSTPBufPos(int lNId, int simTime) { return (((simTime + 1) % (networkConfigGPU.maxDelay + 1)) * networkConfigGPU.STP_Pitch + lNId); } __device__ inline int2 getStaticThreadLoad(int bufPos) { return (runtimeDataGPU.neuronAllocation[bufPos]); } __device__ inline bool getPoissonSpike(int lNId) { // Random number value is less than the poisson firing probability // if poisson firing probability is say 1.0 then the random poisson ptr // will always be less than 1.0 and hence it will 
continiously fire return runtimeDataGPU.randNum[lNId - networkConfigGPU.numNReg] * 1000.0f < runtimeDataGPU.poissonFireRate[lNId - networkConfigGPU.numNReg]; } __device__ inline bool getSpikeGenBit(unsigned int nidPos) { const int nidBitPos = nidPos % 32; const int nidIndex = nidPos / 32; return ((runtimeDataGPU.spikeGenBits[nidIndex] >> nidBitPos) & 0x1); } /*! * \brief This device function updates the average firing rate of each neuron, which is required for homeostasis * * \param[in] lNId The neuron id to be updated * \param[in] lGrpId The group id of the neuron */ __device__ inline void updateHomeoStaticState(int lNId, int lGrpId) { // here the homeostasis adjustment runtimeDataGPU.avgFiring[lNId] *= (groupConfigsGPU[lGrpId].avgTimeScale_decay); } /*! * \brief After every time step we update the time table * * Only one cuda thread is required for updating the time table * * \param[in] simTime The current time step */ __global__ void kernel_updateTimeTable(int simTime) { if (threadIdx.x == 0 && blockIdx.x == 0) { timeTableD2GPU[simTime + networkConfigGPU.maxDelay + 1] = spikeCountD2SecGPU + spikeCountLastSecLeftD2GPU; timeTableD1GPU[simTime + networkConfigGPU.maxDelay + 1] = spikeCountD1SecGPU; } __syncthreads(); } ///////////////////////////////////////////////////////////////////////////////// // Device Kernel Function: Intialization of the GPU side of the simulator /// // KERNEL: This kernel is called after initialization of various parameters /// // so that we can reset all required parameters. /// ///////////////////////////////////////////////////////////////////////////////// __global__ void kernel_initGPUMemory() { // FIXME: use parallel access int timeTableIdx = blockIdx.x * blockDim.x + threadIdx.x; if (timeTableIdx < TIMING_COUNT) { timeTableD2GPU[timeTableIdx] = 0; timeTableD1GPU[timeTableIdx] = 0; } if (threadIdx.x == 0 && blockIdx.x == 0) { spikeCountD2SecGPU = 0; spikeCountD1SecGPU = 0; spikeCountD2GPU = 0; spikeCountD1GPU = 0; recBufferIdx = 0; secD2fireCntTest = 0; secD1fireCntTest = 0; spikeCountLastSecLeftD2GPU = 0; spikeCountExtRxD2GPU = 0; spikeCountExtRxD1GPU = 0; spikeCountExtRxD2SecGPU = 0; spikeCountExtRxD1SecGPU = 0; } } // Allocation of the group and its id.. void SNN::allocateGroupId(int netId) { checkAndSetGPUDevice(netId); assert (runtimeData[netId].groupIdInfo == NULL); int3* tempNeuronAllocation = (int3*)malloc(sizeof(int3) * networkConfigs[netId].numGroups); for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) { int3 threadLoad; threadLoad.x = groupConfigs[netId][lGrpId].lStartN; threadLoad.y = groupConfigs[netId][lGrpId].lEndN; threadLoad.z = lGrpId; tempNeuronAllocation[lGrpId] = threadLoad; } CUDA_CHECK_ERRORS(cudaMalloc((void**)&runtimeData[netId].groupIdInfo, sizeof(int3) * networkConfigs[netId].numGroups)); CUDA_CHECK_ERRORS(cudaMemcpy(runtimeData[netId].groupIdInfo, tempNeuronAllocation, sizeof(int3) * networkConfigs[netId].numGroups, cudaMemcpyHostToDevice)); CUDA_CHECK_ERRORS(cudaBindTexture(NULL, groupIdInfo_tex, runtimeData[netId].groupIdInfo, sizeof(int3) * networkConfigs[netId].numGroups)); free(tempNeuronAllocation); } /************************ VARIOUS KERNELS FOR FIRING CALCULATION AND FIRING UPDATE ****************************/ // Static Thread Load Allocation... // This function is necessary for static allocation of load that each CUDA-SM needs for its computation. 
// We store the static load allocation using the following format // Neuron starting position (32 bit): Group identification (16) : Buffer size (16 bit) // if we have 3 groups. grp(1) = 400 neurons, grp(2) = 100, grp(3) = 600 // The allocated static table will look as follows.. //------------------------- // start | grp | size //------------------------- // 0 : 0 : 256 // 256 : 0 : 144 // 400 : 1 : 100 // 500 : 2 : 256 // 756 : 2 : 256 // 1012 : 2 : 88 //----------------------- int SNN::allocateStaticLoad(int netId, int bufSize) { checkAndSetGPUDevice(netId); // only one thread does the static load table int bufferCnt = 0; for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) { int grpBufCnt = (int) ceil(1.0f * groupConfigs[netId][lGrpId].numN / bufSize); assert(grpBufCnt >= 0); bufferCnt += grpBufCnt; KERNEL_DEBUG("Grp Size = %d, Total Buffer Cnt = %d, Buffer Cnt = %d", groupConfigs[netId][lGrpId].numN, bufferCnt, grpBufCnt); } assert(bufferCnt > 0); int2* tempNeuronAllocation = (int2*)malloc(sizeof(int2) * bufferCnt); KERNEL_DEBUG("STATIC THREAD ALLOCATION"); KERNEL_DEBUG("------------------------"); KERNEL_DEBUG("Buffer Size = %d, Buffer Count = %d", bufSize, bufferCnt); bufferCnt = 0; for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) { for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId += bufSize) { int2 threadLoad; // starting neuron id is saved... threadLoad.x = lNId; if ((lNId + bufSize - 1) <= groupConfigs[netId][lGrpId].lEndN) // grpID + full size threadLoad.y = (lGrpId + (bufSize << 16)); // can't support group id > 2^16 else // grpID + left-over size threadLoad.y = (lGrpId + ((groupConfigs[netId][lGrpId].lEndN - lNId + 1) << 16)); // can't support group id > 2^16 // fill the static load distribution here... int testGrpId = STATIC_LOAD_GROUP(threadLoad); tempNeuronAllocation[bufferCnt] = threadLoad; KERNEL_DEBUG("%d. Start=%d, size=%d grpId=%d:%s (SpikeMonId=%d) (GroupMonId=%d)", bufferCnt, STATIC_LOAD_START(threadLoad), STATIC_LOAD_SIZE(threadLoad), STATIC_LOAD_GROUP(threadLoad), groupConfigMap[groupConfigs[netId][testGrpId].gGrpId].grpName.c_str(), groupConfigMDMap[groupConfigs[netId][testGrpId].gGrpId].spikeMonitorId, groupConfigMDMap[groupConfigs[netId][testGrpId].gGrpId].groupMonitorId); bufferCnt++; } } assert(runtimeData[netId].allocated == false); // Finally writeback the total bufferCnt // Note down the buffer size for reference KERNEL_DEBUG("GPU loadBufferSize = %d, GPU loadBufferCount = %d", bufSize, bufferCnt); CUDA_CHECK_ERRORS(cudaMemcpyToSymbol(loadBufferCount, &bufferCnt, sizeof(int), 0, cudaMemcpyHostToDevice)); CUDA_CHECK_ERRORS(cudaMemcpyToSymbol(loadBufferSize, &bufSize, sizeof(int), 0, cudaMemcpyHostToDevice)); CUDA_CHECK_ERRORS(cudaMalloc((void**) &runtimeData[netId].neuronAllocation, sizeof(int2) * bufferCnt)); CUDA_CHECK_ERRORS(cudaMemcpy(runtimeData[netId].neuronAllocation, tempNeuronAllocation, sizeof(int2) * bufferCnt, cudaMemcpyHostToDevice)); free(tempNeuronAllocation); return bufferCnt; } ////////////////////////////////////////////////// // 1. 
KERNELS used when a specific neuron fires // ////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////// // Device local function: Update the STP Variables /// // update the STPU and STPX variable after firing /// ///////////////////////////////////////////////////////////////////////////////// // update the spike-dependent part of du/dt and dx/dt __device__ void firingUpdateSTP (int nid, int simTime, short int grpId) { // we need to retrieve the STP values from the right buffer position (right before vs. right after the spike) int ind_plus = getSTPBufPos(nid, simTime); int ind_minus = getSTPBufPos(nid, (simTime - 1)); // at this point, stpu[ind_plus] has already been assigned, and the decay applied // so add the spike-dependent part to that // du/dt = -u/tau_F + U * (1-u^-) * \delta(t-t_{spk}) runtimeDataGPU.stpu[ind_plus] += groupConfigsGPU[grpId].STP_U * (1.0f - runtimeDataGPU.stpu[ind_minus]); // dx/dt = (1-x)/tau_D - u^+ * x^- * \delta(t-t_{spk}) runtimeDataGPU.stpx[ind_plus] -= runtimeDataGPU.stpu[ind_plus] * runtimeDataGPU.stpx[ind_minus]; } __device__ void resetFiredNeuron(int lNId, short int lGrpId, int simTime) { // \FIXME \TODO: convert this to use coalesced access by grouping into a // single 16 byte access. This might improve bandwidth performance // This is fully uncoalsced access...need to convert to coalsced access.. if (groupConfigsGPU[lGrpId].WithSTDP) runtimeDataGPU.lastSpikeTime[lNId] = simTime; if (networkConfigGPU.sim_with_homeostasis) { // with homeostasis flag can be used here. runtimeDataGPU.avgFiring[lNId] += 1000/(groupConfigsGPU[lGrpId].avgTimeScale*1000); } } /*! * \brief 1. Copy neuron id from local table to global firing table. 2. Reset all neuron properties of neuron id in local table * * * \param[in] fireTablePtr the local shared memory firing table with neuron ids of fired neuron * \param[in] fireCntD2 the number of neurons in local table that has fired with group's max delay == 1 * \param[in] fireCntD1 the number of neurons in local table that has fired with group's max delay > 1 * \param[in] simTime the current time step, stored as neuron firing time entry */ __device__ void updateSpikeCount(volatile unsigned int& fireCnt, volatile unsigned int& fireCntD1, volatile unsigned int& cntD2, volatile unsigned int& cntD1, volatile int& blkErrCode) { int fireCntD2 = fireCnt - fireCntD1; cntD2 = atomicAdd(&secD2fireCntTest, fireCntD2); cntD1 = atomicAdd(&secD1fireCntTest, fireCntD1); //check for overflow in the firing table size.... if(secD2fireCntTest>networkConfigGPU.maxSpikesD2) { blkErrCode = NEW_FIRE_UPDATE_OVERFLOW_ERROR2; return; } else if(secD1fireCntTest>networkConfigGPU.maxSpikesD1) { blkErrCode = NEW_FIRE_UPDATE_OVERFLOW_ERROR1; return; } blkErrCode = 0; // get a distinct counter to store firing info // into the firing table cntD2 = atomicAdd(&spikeCountD2SecGPU, fireCntD2) + spikeCountLastSecLeftD2GPU; cntD1 = atomicAdd(&spikeCountD1SecGPU, fireCntD1); } // update the firing table... 
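// Routing summary (see updateSpikeCount() above and updateFiringTable() below): a fired neuron
// from a group whose MaxDelay == 1 is appended to firingTableD1 at the slot handed out via cntD1,
// while neurons from all other groups are appended to firingTableD2 at a slot based on cntD2
// (which already includes spikeCountLastSecLeftD2GPU, the spikes carried over from the previous
// second). fireCntD1 therefore counts the delay-1 spikes in the local table, and
// fireCntD2 = fireCnt - fireCntD1 counts the spikes destined for the D2 table.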
__device__ void updateFiringTable(int lNId, short int lGrpId, volatile unsigned int& cntD2, volatile unsigned int& cntD1) { int pos; if (groupConfigsGPU[lGrpId].MaxDelay == 1) { // this group has a delay of only 1 pos = atomicAdd((int*)&cntD1, 1); //runtimeDataGPU.firingTableD1[pos] = SET_FIRING_TABLE(nid, grpId); runtimeDataGPU.firingTableD1[pos] = lNId; } else { // all other groups is dumped here pos = atomicAdd((int*)&cntD2, 1); //runtimeDataGPU.firingTableD2[pos] = SET_FIRING_TABLE(nid, grpId); runtimeDataGPU.firingTableD2[pos] = lNId; } } // update the firing table... __device__ void updateExtFiringTable(int lNId, short int lGrpId) { int pos; if (groupConfigsGPU[lGrpId].MaxDelay == 1) { // this group has a delay of only 1 pos = atomicAdd((int*)&runtimeDataGPU.extFiringTableEndIdxD1[lGrpId], 1); //runtimeDataGPU.firingTableD1[pos] = SET_FIRING_TABLE(nid, grpId); runtimeDataGPU.extFiringTableD1[lGrpId][pos] = lNId + groupConfigsGPU[lGrpId].LtoGOffset; // convert to global neuron id } else { // all other groups is dumped here pos = atomicAdd((int*)&runtimeDataGPU.extFiringTableEndIdxD2[lGrpId], 1); //runtimeDataGPU.firingTableD2[pos] = SET_FIRING_TABLE(nid, grpId); runtimeDataGPU.extFiringTableD2[lGrpId][pos] = lNId + groupConfigsGPU[lGrpId].LtoGOffset; // convert to global neuron id } } __device__ int updateNewFirings(int* fireTablePtr, short int* fireGrpId, volatile unsigned int& fireCnt, volatile unsigned int& fireCntD1, int simTime) { __shared__ volatile unsigned int cntD2; __shared__ volatile unsigned int cntD1; __shared__ volatile int blkErrCode; blkErrCode = 0; if (threadIdx.x == 0) { updateSpikeCount(fireCnt, fireCntD1, cntD2, cntD1, blkErrCode); } __syncthreads(); // if we overflow the spike buffer space that is available, // then we return with an error here... if (blkErrCode) return blkErrCode; for (int i = threadIdx.x; i < fireCnt; i += blockDim.x) { // Read the firing id from the local table..... int lNId = fireTablePtr[i]; updateFiringTable(lNId, fireGrpId[i], cntD2, cntD1); if (groupConfigsGPU[fireGrpId[i]].hasExternalConnect) updateExtFiringTable(lNId, fireGrpId[i]); if (groupConfigsGPU[fireGrpId[i]].WithSTP) firingUpdateSTP(lNId, simTime, fireGrpId[i]); // keep track of number spikes per neuron runtimeDataGPU.nSpikeCnt[lNId]++; // only neurons would do the remaining settings... // pure poisson generators will return without changing anything else.. if (IS_REGULAR_NEURON(lNId, networkConfigGPU.numNReg, networkConfigGPU.numNPois)) resetFiredNeuron(lNId, fireGrpId[i], simTime); } __syncthreads(); return 0; } // zero GPU spike counts __global__ void kernel_resetNSpikeCnt(int lGrpId) { const int totBuffers = loadBufferCount; for (int bufPos = blockIdx.x; bufPos < totBuffers; bufPos += gridDim.x) { // KILLME !!! This can be further optimized .... // instead of reading each neuron group separately ..... // read a whole buffer and use the result ...... 
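// Layout reminder for the int2 loaded below (packed by allocateStaticLoad() above):
//   threadLoad.x = first local neuron id of this chunk
//   threadLoad.y = (chunkSize << 16) | groupId
// so STATIC_LOAD_START / STATIC_LOAD_GROUP / STATIC_LOAD_SIZE presumably return .x, the low
// 16 bits of .y, and the high 16 bits of .y, respectively.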
int2 threadLoad = getStaticThreadLoad(bufPos); int nid = (STATIC_LOAD_START(threadLoad) + threadIdx.x); int lastId = STATIC_LOAD_SIZE(threadLoad); int grpId = STATIC_LOAD_GROUP(threadLoad); if ((lGrpId == ALL || lGrpId == grpId) && (nid <= lastId)) { runtimeDataGPU.nSpikeCnt[nid] = 0; } } } // wrapper to call resetSpikeCnt void SNN::resetSpikeCnt_GPU(int netId, int lGrpId) { assert(runtimeData[netId].memType == GPU_MEM); if (lGrpId == ALL) { checkAndSetGPUDevice(netId); CUDA_CHECK_ERRORS(cudaMemset((void*)runtimeData[netId].nSpikeCnt, 0, sizeof(int) * networkConfigs[netId].numN)); } else { checkAndSetGPUDevice(netId); kernel_resetNSpikeCnt<<<NUM_BLOCKS, NUM_THREADS>>>(lGrpId); } } #define LTP_GROUPING_SZ 16 //!< synaptic grouping for LTP Calculation /*! * \brief Computes the STDP update values for each of fired neurons stored in the local firing table. * * \param[in] fireTablePtr the local firing table with neuron ids of fired neuron * \param[in] fireCnt the number of fired neurons in local firing table * \param[in] simTime the current time step, stored as neuron firing time entry */ __device__ void updateLTP(int* fireTablePtr, short int* fireGrpId, volatile unsigned int& fireCnt, int simTime) { for(int pos=threadIdx.x/LTP_GROUPING_SZ; pos < fireCnt; pos += (blockDim.x/LTP_GROUPING_SZ)) { // each neuron has two variable pre and pre_exc // pre: number of pre-neuron // pre_exc: number of neuron had has plastic connections short int grpId = fireGrpId[pos]; // STDP calculation: the post-synaptic neron fires after the arrival of pre-synaptic neuron's spike if (groupConfigsGPU[grpId].WithSTDP) { // MDR, FIXME this probably will cause more thread divergence than need be... int nid = fireTablePtr[pos]; unsigned int end_p = runtimeDataGPU.cumulativePre[nid] + runtimeDataGPU.Npre_plastic[nid]; for(unsigned int p = runtimeDataGPU.cumulativePre[nid] + threadIdx.x % LTP_GROUPING_SZ; p < end_p; p+=LTP_GROUPING_SZ) { int stdp_tDiff = (simTime - runtimeDataGPU.synSpikeTime[p]); if (stdp_tDiff > 0) { if (groupConfigsGPU[grpId].WithESTDP) { // Handle E-STDP curves switch (groupConfigsGPU[grpId].WithESTDPcurve) { case EXP_CURVE: // exponential curve if (stdp_tDiff * groupConfigsGPU[grpId].TAU_PLUS_INV_EXC < 25) runtimeDataGPU.wtChange[p] += STDP(stdp_tDiff, groupConfigsGPU[grpId].ALPHA_PLUS_EXC, groupConfigsGPU[grpId].TAU_PLUS_INV_EXC); break; case TIMING_BASED_CURVE: // sc curve if (stdp_tDiff * groupConfigsGPU[grpId].TAU_PLUS_INV_EXC < 25) { if (stdp_tDiff <= groupConfigsGPU[grpId].GAMMA) runtimeDataGPU.wtChange[p] += groupConfigsGPU[grpId].OMEGA + groupConfigsGPU[grpId].KAPPA * STDP(stdp_tDiff, groupConfigsGPU[grpId].ALPHA_PLUS_EXC, groupConfigsGPU[grpId].TAU_PLUS_INV_EXC); else // stdp_tDiff > GAMMA runtimeDataGPU.wtChange[p] -= STDP(stdp_tDiff, groupConfigsGPU[grpId].ALPHA_PLUS_EXC, groupConfigsGPU[grpId].TAU_PLUS_INV_EXC); } break; default: break; } } if (groupConfigsGPU[grpId].WithISTDP) { // Handle I-STDP curves switch (groupConfigsGPU[grpId].WithISTDPcurve) { case EXP_CURVE: // exponential curve if (stdp_tDiff * groupConfigsGPU[grpId].TAU_PLUS_INV_INB < 25) { // LTP of inhibitory synapse, which decreases synapse weight runtimeDataGPU.wtChange[p] -= STDP(stdp_tDiff, groupConfigsGPU[grpId].ALPHA_PLUS_INB, groupConfigsGPU[grpId].TAU_PLUS_INV_INB); } break; case PULSE_CURVE: // pulse curve if (stdp_tDiff <= groupConfigsGPU[grpId].LAMBDA) { // LTP of inhibitory synapse, which decreases synapse weight runtimeDataGPU.wtChange[p] -= groupConfigsGPU[grpId].BETA_LTP; } else if (stdp_tDiff <= 
groupConfigsGPU[grpId].DELTA) { // LTD of inhibitory syanpse, which increase sysnapse weight runtimeDataGPU.wtChange[p] -= groupConfigsGPU[grpId].BETA_LTD; } break; default: break; } } } } } } __syncthreads(); } #define FIRE_CHUNK_CNT 512 /*! * \brief This kernel is responsible for finding the neurons that need to be fired. * * We use a buffered firing table that allows neuron to gradually load * the buffer and make it easy to carry out the calculations in a single group. * A single function is used for simple neurons and also for poisson neurons. * The function also update LTP * * device access: spikeCountD2SecGPU, spikeCountD1SecGPU * net access: numNReg numNPois, numN, sim_with_stdp, sim_in_testing, sim_with_homeostasis, maxSpikesD1, maxSpikesD2 * grp access: Type, spikeGenFunc, Noffset, withSpikeCounter, spkCntBufPos, StartN, WithSTP, avgTimeScale WithSTDP, WithESTDP, WithISTDP, WithESTDPCurve, With ISTDPCurve, all STDP parameters * rtd access: randNum, poissonFireRate, spkCntBuf, nSpikeCnt, voltage, recovery, Izh_c, Izh_d * cumulativePre, Npre_plastic, (R)synSpikeTime, (W)lastSpikeTime, (W)wtChange, * avgFiring */ __global__ void kernel_findFiring (int simTimeMs, int simTime) { __shared__ volatile unsigned int fireCnt; __shared__ volatile unsigned int fireCntTest; __shared__ volatile unsigned int fireCntD1; __shared__ int fireTable[FIRE_CHUNK_CNT]; __shared__ short int fireGrpId[FIRE_CHUNK_CNT]; __shared__ volatile int errCode; if (threadIdx.x == 0) { fireCnt = 0; // initialize total cnt to 0 fireCntD1 = 0; // initialize d1 cnt to 0 fireCntTest = 0; // initialize test cnt to 0 } const int totBuffers=loadBufferCount; __syncthreads(); for (int bufPos = blockIdx.x; bufPos < totBuffers; bufPos += gridDim.x) { // KILLME !!! This can be further optimized .... // instead of reading each neuron group separately ..... // read a whole buffer and use the result ...... int2 threadLoad = getStaticThreadLoad(bufPos); int lNId = (STATIC_LOAD_START(threadLoad) + threadIdx.x); int lastLNId = STATIC_LOAD_SIZE(threadLoad); short int lGrpId = STATIC_LOAD_GROUP(threadLoad); bool needToWrite = false; // used by all neuron to indicate firing condition int fireId = 0; // threadId is valid and lies within the lastId..... if ((threadIdx.x < lastLNId) && (lNId < networkConfigGPU.numN)) { // Simple poisson spiker uses the poisson firing probability // to detect whether it has fired or not.... 
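// A concrete reading of getPoissonSpike() (assuming randNum is uniform in [0,1)): a neuron with
// poissonFireRate = 50 Hz fires in a given 1 ms step when randNum * 1000 < 50, i.e. with
// probability 0.05 per step.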
if(isPoissonGroup(lGrpId)) { // spikes generated by spikeGenFunc if(groupConfigsGPU[lGrpId].isSpikeGenFunc) { unsigned int offset = lNId - groupConfigsGPU[lGrpId].lStartN + groupConfigsGPU[lGrpId].Noffset; needToWrite = getSpikeGenBit(offset); } else { // spikes generated by poission rate needToWrite = getPoissonSpike(lNId); } // Note: valid lastSpikeTime of spike gen neurons is required by userDefinedSpikeGenerator() if (needToWrite) runtimeDataGPU.lastSpikeTime[lNId] = simTime; } else { // Regular neuron if (runtimeDataGPU.curSpike[lNId]) { runtimeDataGPU.curSpike[lNId] = false; needToWrite = true; } // log v, u value if any active neuron monitor is presented if (networkConfigGPU.sim_with_nm && lNId - groupConfigsGPU[lGrpId].lStartN < MAX_NEURON_MON_GRP_SZIE) { int idxBase = networkConfigGPU.numGroups * MAX_NEURON_MON_GRP_SZIE * simTimeMs + lGrpId * MAX_NEURON_MON_GRP_SZIE; runtimeDataGPU.nVBuffer[idxBase + lNId - groupConfigsGPU[lGrpId].lStartN] = runtimeDataGPU.voltage[lNId]; runtimeDataGPU.nUBuffer[idxBase + lNId - groupConfigsGPU[lGrpId].lStartN] = runtimeDataGPU.recovery[lNId]; } } } // loop through a few times to ensure that we have added/processed all spikes that need to be written // if the buffer is small relative to the number of spikes needing to be written, we may have to empty the buffer a few times... for (int c = 0; c < 2; c++) { // we first increment fireCntTest to make sure we haven't filled the buffer if (needToWrite) fireId = atomicAdd((int*)&fireCntTest, 1); // if there is a spike and the buffer still has space... if (needToWrite && (fireId <(FIRE_CHUNK_CNT))) { // get our position in the buffer fireId = atomicAdd((int*)&fireCnt, 1); if (groupConfigsGPU[lGrpId].MaxDelay == 1) atomicAdd((int*)&fireCntD1, 1); // store ID of the fired neuron needToWrite = false; fireTable[fireId] = lNId; fireGrpId[fireId] = lGrpId;//setFireProperties(grpId, isInhib); } __syncthreads(); // the local firing table is full. dump the local firing table to the global firing table before proceeding if (fireCntTest >= (FIRE_CHUNK_CNT)) { // clear the table and update... int retCode = updateNewFirings(fireTable, fireGrpId, fireCnt, fireCntD1, simTime); if (retCode != 0) return; // update based on stdp rule // KILLME !!! if (simTime > 0)) if (networkConfigGPU.sim_with_stdp && !networkConfigGPU.sim_in_testing) updateLTP (fireTable, fireGrpId, fireCnt, simTime); // reset counters if (threadIdx.x == 0) { fireCntD1 = 0; fireCnt = 0; fireCntTest = 0; } } } } __syncthreads(); // few more fired neurons are left. we update their firing state here.. if (fireCnt) { int retCode = updateNewFirings(fireTable, fireGrpId, fireCnt, fireCntD1, simTime); if (retCode != 0) return; if (networkConfigGPU.sim_with_stdp && !networkConfigGPU.sim_in_testing) updateLTP(fireTable, fireGrpId, fireCnt, simTime); } } //******************************** UPDATE CONDUCTANCES AND TOTAL SYNAPTIC CURRENT EVERY TIME STEP ***************************** #define LOG_CURRENT_GROUP 5 /*! * \brief Based on the bitvector used for indicating the presence of spike, the global conductance values are updated. 
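 * For example, under COBA a spike arriving on connection c with weight w contributes
 * w * d_mulSynFast[c] to the fast conductance (gAMPA or gGABAa) and w * d_mulSynSlow[c] to the
 * slow one (gNMDA or gGABAb) of the post-synaptic neuron; under CUBA the weight is accumulated
 * directly into the neuron's synaptic current.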
* * net access: numNReg, numNPois, I_setPitch, maxDelay, STP_Pitch, sim_with_conductances, sim_with_NMDA_rise, sim_withGABAb_Rise, sNMDA, sGABAb * grp access: WithSTP, STP_A * rtd access: Npre, cumulativePre, I_set, preSynapticIds, grpIds, wt, stpx, stpu, connIdsPreIdx, gAMPA, gGABAa, gNMDA_r, gNMDA_d, gNMDA, gGABAb_r, gGABAb_d, gGABAb * glb access: d_mulSynFast, d_mulSynSlow */ __global__ void kernel_conductanceUpdate (int simTimeMs, int simTimeSec, int simTime) { __shared__ int sh_quickSynIdTable[256]; // Table for quick access for (int i = 0; i < 256; i += blockDim.x) { if ((i + threadIdx.x) < 256) { sh_quickSynIdTable[i + threadIdx.x] = quickSynIdTableGPU[i + threadIdx.x]; } } __syncthreads(); const int totBuffers = loadBufferCount; for (int bufPos = blockIdx.x; bufPos < totBuffers; bufPos += gridDim.x) { // KILLME !!! This can be further optimized .... // instead of reading each neuron group separately ..... // read a whole buffer and use the result ...... int2 threadLoad = getStaticThreadLoad(bufPos); int postNId = STATIC_LOAD_START(threadLoad) + threadIdx.x; int lastNId = STATIC_LOAD_SIZE(threadLoad); if ((threadIdx.x < lastNId) && (IS_REGULAR_NEURON(postNId, networkConfigGPU.numNReg, networkConfigGPU.numNPois))) { // P6-1 // load the initial current due to noise inputs for neuron 'post_nid' // initial values of the conductances for neuron 'post_nid' float AMPA_sum = 0.0f; float NMDA_sum = 0.0f; float NMDA_r_sum = 0.0f; float NMDA_d_sum = 0.0f; float GABAa_sum = 0.0f; float GABAb_sum = 0.0f; float GABAb_r_sum = 0.0f; float GABAb_d_sum = 0.0f; int lmt = runtimeDataGPU.Npre[postNId]; unsigned int cum_pos = runtimeDataGPU.cumulativePre[postNId]; // find the total current to this neuron... for (int j = 0; (lmt) && (j <= ((lmt - 1) >> LOG_CURRENT_GROUP)); j++) { // because of malloc2D operation we are using pitch, post_nid, j to get // actual position of the input current.... 
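// Hand-worked example of the quickSynIdTable decode that follows (illustrative only):
//   tmp_I_set = 0x00000412 -> bits 1, 4 and 10 are set (synapses j*32+1, j*32+4, j*32+10 received a spike)
//   cnt=0: k = 0x12, sh_quickSynIdTable[0x12] = 1 -> wtId = j*32 + 1, clear bit 1  (tmp_I_set = 0x410)
//   cnt=0: k = 0x10, sh_quickSynIdTable[0x10] = 4 -> wtId = j*32 + 4, clear bit 4  (tmp_I_set = 0x400)
//   cnt=0: k = 0x00 -> advance to the next byte (cnt = 1)
//   cnt=1: k = 0x04, sh_quickSynIdTable[0x04] = 2 -> wtId = j*32 + 8 + 2 = j*32 + 10, clear bit 10 (done)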
// int* tmp_I_set_p = ((int*)((char*)runtimeDataGPU.I_set + j * networkConfigGPU.I_setPitch) + post_nid); uint32_t* tmp_I_set_p = getFiringBitGroupPtr(postNId, j); uint32_t tmp_I_set = *tmp_I_set_p; // table lookup based find bits that are set int cnt = 0; int tmp_I_cnt = 0; while (tmp_I_set) { int k = (tmp_I_set >> (8 * cnt)) & 0xff; if (k == 0) { cnt = cnt + 1; continue; } int wt_i = sh_quickSynIdTable[k]; int wtId = (j * 32 + cnt * 8 + wt_i); SynInfo synInfo = runtimeDataGPU.preSynapticIds[cum_pos + wtId]; //uint8_t pre_grpId = GET_CONN_GRP_ID(pre_Id); uint32_t preNId = GET_CONN_NEURON_ID(synInfo); short int preGrpId = runtimeDataGPU.grpIds[preNId]; char type = groupConfigsGPU[preGrpId].Type; // load the synaptic weight for the wtId'th input float change = runtimeDataGPU.wt[cum_pos + wtId]; // Adjust the weight according to STP scaling if (groupConfigsGPU[preGrpId].WithSTP) { int tD = 0; // \FIXME find delay // \FIXME I think pre_nid needs to be adjusted for the delay int ind_minus = getSTPBufPos(preNId, (simTime - tD - 1)); // \FIXME should be adjusted for delay int ind_plus = getSTPBufPos(preNId, (simTime - tD)); // dI/dt = -I/tau_S + A * u^+ * x^- * \delta(t-t_{spk}) change *= groupConfigsGPU[preGrpId].STP_A * runtimeDataGPU.stpx[ind_minus] * runtimeDataGPU.stpu[ind_plus]; } if (networkConfigGPU.sim_with_conductances) { short int connId = runtimeDataGPU.connIdsPreIdx[cum_pos+wtId]; if (type & TARGET_AMPA) AMPA_sum += change * d_mulSynFast[connId]; if (type & TARGET_NMDA) { if (networkConfigGPU.sim_with_NMDA_rise) { NMDA_r_sum += change * d_mulSynSlow[connId] * networkConfigGPU.sNMDA; NMDA_d_sum += change * d_mulSynSlow[connId] * networkConfigGPU.sNMDA; } else { NMDA_sum += change * d_mulSynSlow[connId]; } } if (type & TARGET_GABAa) GABAa_sum += change * d_mulSynFast[connId]; // wt should be negative for GABAa and GABAb if (type & TARGET_GABAb) { // but that is dealt with below if (networkConfigGPU.sim_with_GABAb_rise) { GABAb_r_sum += change * d_mulSynSlow[connId] * networkConfigGPU.sGABAb; GABAb_d_sum += change * d_mulSynSlow[connId] * networkConfigGPU.sGABAb; } else { GABAb_sum += change * d_mulSynSlow[connId]; } } } else { // current based model with STP (CUBA) // updated current for neuron 'post_nid' AMPA_sum += change; } tmp_I_cnt++; tmp_I_set = tmp_I_set & (~(1 << (8 * cnt + wt_i))); } // FIXME: move reset outside kernel for debbuing I_set, resume it later // reset the input if there are any bit'wt set if(tmp_I_cnt) *tmp_I_set_p = 0; __syncthreads(); } __syncthreads(); // P6-2 if (networkConfigGPU.sim_with_conductances) { // don't add mulSynFast/mulSynSlow here, because they depend on the exact pre<->post connection, not // just post_nid runtimeDataGPU.gAMPA[postNId] += AMPA_sum; runtimeDataGPU.gGABAa[postNId] -= GABAa_sum; // wt should be negative for GABAa and GABAb if (networkConfigGPU.sim_with_NMDA_rise) { runtimeDataGPU.gNMDA_r[postNId] += NMDA_r_sum; runtimeDataGPU.gNMDA_d[postNId] += NMDA_d_sum; } else { runtimeDataGPU.gNMDA[postNId] += NMDA_sum; } if (networkConfigGPU.sim_with_GABAb_rise) { runtimeDataGPU.gGABAb_r[postNId] -= GABAb_r_sum; runtimeDataGPU.gGABAb_d[postNId] -= GABAb_d_sum; } else { runtimeDataGPU.gGABAb[postNId] -= GABAb_sum; } } else { runtimeDataGPU.current[postNId] += AMPA_sum; } } } } // single integration step for voltage equation of 4-param Izhikevich __device__ inline float dvdtIzhikevich4(float volt, float recov, float totCurrent, float timeStep = 1.0f) { return (((0.04f * volt + 5.0f) * volt + 140.0f - recov + totCurrent) * timeStep); } // single 
integration step for recovery equation of 4-param Izhikevich __device__ inline float dudtIzhikevich4(float volt, float recov, float izhA, float izhB, float timeStep = 1.0f) { return (izhA * (izhB * volt - recov) * timeStep); } // single integration step for voltage equation of 9-param Izhikevich __device__ inline float dvdtIzhikevich9(float volt, float recov, float invCapac, float izhK, float voltRest, float voltInst, float totCurrent, float timeStep = 1.0f) { return ((izhK * (volt - voltRest) * (volt - voltInst) - recov + totCurrent) * invCapac * timeStep); } // single integration step for recovery equation of 9-param Izhikevich __device__ inline float dudtIzhikevich9(float volt, float recov, float voltRest, float izhA, float izhB, float timeStep = 1.0f) { return (izhA * (izhB * (volt - voltRest) - recov) * timeStep); } __device__ inline float dvdtLIF(float volt, float lif_vReset, float lif_gain, float lif_bias, int lif_tau_m, float totalCurrent, float timeStep=1.0f){ return ((lif_vReset -volt + ((totalCurrent * lif_gain) + lif_bias))/ (float) lif_tau_m) * timeStep; } __device__ float getCompCurrent_GPU(int grpId, int neurId, float const0 = 0.0f, float const1 = 0.0f) { float compCurrent = 0.0f; for (int k = 0; k<groupConfigsGPU[grpId].numCompNeighbors; k++) { int grpIdOther = groupConfigsGPU[grpId].compNeighbors[k]; int neurIdOther = neurId - groupConfigsGPU[grpId].lStartN + groupConfigsGPU[grpIdOther].lStartN; compCurrent += groupConfigsGPU[grpId].compCoupling[k] * ((runtimeDataGPU.voltage[neurIdOther] + const1) - (runtimeDataGPU.voltage[neurId] + const0)); } return compCurrent; } //************************ UPDATE GLOBAL STATE EVERY TIME STEP *******************************************************// /*! * \brief This device function implements the equations of neuron dynamics * * \param[in] nid The neuron id to be updated * \param[in] grpId The group id of the neuron */ __device__ void updateNeuronState(int nid, int grpId, int simTimeMs, bool lastIteration) { float v = runtimeDataGPU.voltage[nid]; float v_next = runtimeDataGPU.nextVoltage[nid]; float u = runtimeDataGPU.recovery[nid]; float I_sum, NMDAtmp; float gNMDA, gGABAb; float k = runtimeDataGPU.Izh_k[nid]; float vr = runtimeDataGPU.Izh_vr[nid]; float vt = runtimeDataGPU.Izh_vt[nid]; float inverse_C = 1.0f / runtimeDataGPU.Izh_C[nid]; float vpeak = runtimeDataGPU.Izh_vpeak[nid]; float a = runtimeDataGPU.Izh_a[nid]; float b = runtimeDataGPU.Izh_b[nid]; // pre-load LIF parameters int lif_tau_m = runtimeDataGPU.lif_tau_m[nid]; int lif_tau_ref = runtimeDataGPU.lif_tau_ref[nid]; int lif_tau_ref_c = runtimeDataGPU.lif_tau_ref_c[nid]; float lif_vTh = runtimeDataGPU.lif_vTh[nid]; float lif_vReset = runtimeDataGPU.lif_vReset[nid]; float lif_gain = runtimeDataGPU.lif_gain[nid]; float lif_bias = runtimeDataGPU.lif_bias[nid]; const float one_sixth = 1.0f / 6.0f; float timeStep = networkConfigGPU.timeStep; float totalCurrent = runtimeDataGPU.extCurrent[nid]; if (networkConfigGPU.sim_with_conductances) { NMDAtmp = (v + 80.0f) * (v + 80.0f) / 60.0f / 60.0f; gNMDA = (networkConfigGPU.sim_with_NMDA_rise) ? (runtimeDataGPU.gNMDA_d[nid] - runtimeDataGPU.gNMDA_r[nid]) : runtimeDataGPU.gNMDA[nid]; gGABAb = (networkConfigGPU.sim_with_GABAb_rise) ? 
(runtimeDataGPU.gGABAb_d[nid] - runtimeDataGPU.gGABAb_r[nid]) : runtimeDataGPU.gGABAb[nid]; I_sum = -(runtimeDataGPU.gAMPA[nid] * (v - 0.0f) + gNMDA * NMDAtmp / (1.0f + NMDAtmp) * (v - 0.0f) + runtimeDataGPU.gGABAa[nid] * (v + 70.0f) + gGABAb * (v + 90.0f)); totalCurrent += I_sum; } else { totalCurrent += runtimeDataGPU.current[nid]; } if (groupConfigsGPU[grpId].withCompartments) { totalCurrent += getCompCurrent_GPU(grpId, nid); } switch (networkConfigGPU.simIntegrationMethod) { case FORWARD_EULER: if (!groupConfigsGPU[grpId].withParamModel_9 && !groupConfigsGPU[grpId].isLIF) { // 4-param Izhikevich // update vpos and upos for the current neuron v_next = v + dvdtIzhikevich4(v, u, totalCurrent, timeStep); if (v_next > 30.0f) { // record spike but keep integrating runtimeDataGPU.curSpike[nid] = true; v_next = runtimeDataGPU.Izh_c[nid]; u += runtimeDataGPU.Izh_d[nid]; } } else if(!groupConfigsGPU[grpId].isLIF) { // 9-param Izhikevich // update vpos and upos for the current neuron v_next = v + dvdtIzhikevich9(v, u, inverse_C, k, vr, vt, totalCurrent, timeStep); if (v_next > vpeak) { runtimeDataGPU.curSpike[nid] = true; v_next = runtimeDataGPU.Izh_c[nid]; u += runtimeDataGPU.Izh_d[nid]; } } else{ if (lif_tau_ref_c > 0){ if(lastIteration){ runtimeDataGPU.lif_tau_ref_c[nid] -= 1; v_next = lif_vReset; } } else{ if (v_next > lif_vTh) { runtimeDataGPU.curSpike[nid] = true; v_next = lif_vReset; if(lastIteration){ runtimeDataGPU.lif_tau_ref_c[nid] = lif_tau_ref; } else{ runtimeDataGPU.lif_tau_ref_c[nid] = lif_tau_ref+1; } } else{ v_next = v + dvdtLIF(v, lif_vReset, lif_gain, lif_bias, lif_tau_m, totalCurrent, timeStep); } } } if (groupConfigsGPU[grpId].isLIF){ if (v_next < lif_vReset) v_next = lif_vReset; } else{ if (v_next < -90.0f) v_next = -90.0f; if (!groupConfigsGPU[grpId].withParamModel_9) { u += dudtIzhikevich4(v_next, u, a, b, timeStep); } else { u += dudtIzhikevich9(v_next, u, vr, a, b, timeStep); } } break; case RUNGE_KUTTA4: if (!groupConfigsGPU[grpId].withParamModel_9 && !groupConfigsGPU[grpId].isLIF) { // 4-param Izhikevich float k1 = dvdtIzhikevich4(v, u, totalCurrent, timeStep); float l1 = dudtIzhikevich4(v, u, a, b, timeStep); float k2 = dvdtIzhikevich4(v + k1 / 2.0f, u + l1 / 2.0f, totalCurrent, timeStep); float l2 = dudtIzhikevich4(v + k1 / 2.0f, u + l1 / 2.0f, a, b, timeStep); float k3 = dvdtIzhikevich4(v + k2 / 2.0f, u + l2 / 2.0f, totalCurrent, timeStep); float l3 = dudtIzhikevich4(v + k2 / 2.0f, u + l2 / 2.0f, a, b, timeStep); float k4 = dvdtIzhikevich4(v + k3, u + l3, totalCurrent, timeStep); float l4 = dudtIzhikevich4(v + k3, u + l3, a, b, timeStep); v_next = v + one_sixth * (k1 + 2.0f * k2 + 2.0f * k3 + k4); if (v_next > 30.0f) { // record spike but keep integrating runtimeDataGPU.curSpike[nid] = true; v_next = runtimeDataGPU.Izh_c[nid]; u += runtimeDataGPU.Izh_d[nid]; } if (v_next < -90.0f) v_next = -90.0f; u += one_sixth * (l1 + 2.0f * l2 + 2.0f * l3 + l4); } else if(!groupConfigsGPU[grpId].isLIF){ // 9-param Izhikevich float k1 = dvdtIzhikevich9(v, u, inverse_C, k, vr, vt, totalCurrent, timeStep); float l1 = dudtIzhikevich9(v, u, vr, a, b, timeStep); float k2 = dvdtIzhikevich9(v + k1 / 2.0f, u + l1 / 2.0f, inverse_C, k, vr, vt, totalCurrent, timeStep); float l2 = dudtIzhikevich9(v + k1 / 2.0f, u + l1 / 2.0f, vr, a, b, timeStep); float k3 = dvdtIzhikevich9(v + k2 / 2.0f, u + l2 / 2.0f, inverse_C, k, vr, vt, totalCurrent, timeStep); float l3 = dudtIzhikevich9(v + k2 / 2.0f, u + l2 / 2.0f, vr, a, b, timeStep); float k4 = dvdtIzhikevich9(v + k3, u + l3, inverse_C, k, vr, vt, 
totalCurrent, timeStep); float l4 = dudtIzhikevich9(v + k3, u + l3, vr, a, b, timeStep); v_next = v + one_sixth * (k1 + 2.0f * k2 + 2.0f * k3 + k4); if (v_next > vpeak) { // record spike but keep integrating runtimeDataGPU.curSpike[nid] = true; v_next = runtimeDataGPU.Izh_c[nid]; u += runtimeDataGPU.Izh_d[nid]; } if (v_next < -90.0f) v_next = -90.0f; u += one_sixth * (l1 + 2.0f * l2 + 2.0f * l3 + l4); } else{ // LIF integration is always FORWARD_EULER if (lif_tau_ref_c > 0){ if(lastIteration){ runtimeDataGPU.lif_tau_ref_c[nid] -= 1; v_next = lif_vReset; } } else{ if (v_next > lif_vTh) { runtimeDataGPU.curSpike[nid] = true; v_next = lif_vReset; if(lastIteration){ runtimeDataGPU.lif_tau_ref_c[nid] = lif_tau_ref; } else{ runtimeDataGPU.lif_tau_ref_c[nid] = lif_tau_ref+1; } } else{ v_next = v + dvdtLIF(v, lif_vReset, lif_gain, lif_bias, lif_tau_m, totalCurrent, timeStep); } } if (v_next < lif_vReset) v_next = lif_vReset; } break; case UNKNOWN_INTEGRATION: default: // unknown integration method assert(false); } if(lastIteration) { if (networkConfigGPU.sim_with_conductances) { runtimeDataGPU.current[nid] = I_sum; } else { // current must be reset here for CUBA and not kernel_STPUpdateAndDecayConductances runtimeDataGPU.current[nid] = 0.0f; } // log i value if any active neuron monitor is presented if (networkConfigGPU.sim_with_nm && nid - groupConfigsGPU[grpId].lStartN < MAX_NEURON_MON_GRP_SZIE) { int idxBase = networkConfigGPU.numGroups * MAX_NEURON_MON_GRP_SZIE * simTimeMs + grpId * MAX_NEURON_MON_GRP_SZIE; runtimeDataGPU.nIBuffer[idxBase + nid - groupConfigsGPU[grpId].lStartN] = totalCurrent; } } runtimeDataGPU.nextVoltage[nid] = v_next; runtimeDataGPU.recovery[nid] = u; } /*! * \brief update neuron state * * This kernel update neurons' membrance potential according to neurons' dynamics model. * This kernel also update variables required by homeostasis * * net access: numN, numNReg, numNPois, sim_with_conductances, sim_with_NMDA_rise, sim_with_GABAb_rise * grp access: WithHomeostasis, avgTimeScale_decay * rtd access: avgFiring, voltage, recovery, gNMDA, gNMDA_r, gNMDA_d, gGABAb, gGABAb_r, gGABAb_d, gAMPA, gGABAa, * current, extCurrent, Izh_a, Izh_b * glb access: */ __global__ void kernel_neuronStateUpdate(int simTimeMs, bool lastIteration) { const int totBuffers = loadBufferCount; // update neuron state for (int bufPos = blockIdx.x; bufPos < totBuffers; bufPos += gridDim.x) { // KILLME !!! This can be further optimized .... // instead of reading each neuron group separately ..... // read a whole buffer and use the result ...... int2 threadLoad = getStaticThreadLoad(bufPos); int nid = (STATIC_LOAD_START(threadLoad) + threadIdx.x); int lastId = STATIC_LOAD_SIZE(threadLoad); int grpId = STATIC_LOAD_GROUP(threadLoad); if ((threadIdx.x < lastId) && (nid < networkConfigGPU.numN)) { if (IS_REGULAR_NEURON(nid, networkConfigGPU.numNReg, networkConfigGPU.numNPois)) { // P7 // update neuron state here.... updateNeuronState(nid, grpId, simTimeMs, lastIteration); // P8 if (groupConfigsGPU[grpId].WithHomeostasis) updateHomeoStaticState(nid, grpId); } } } } /*! 
* \brief Update the state of groups, which includes concentration of dopamine currently * * Update the concentration of neuronmodulator * * net access: numGroups * grp access: WithESTDPtype, WithISTDPtype, baseDP, decayDP * rtd access: grpDA, grpDABuffer * glb access: */ __global__ void kernel_groupStateUpdate(int simTime) { // update group state int grpIdx = blockIdx.x * blockDim.x + threadIdx.x; // P9 if (grpIdx < networkConfigGPU.numGroups) { // decay dopamine concentration if ((groupConfigsGPU[grpIdx].WithESTDPtype == DA_MOD || groupConfigsGPU[grpIdx].WithISTDPtype == DA_MOD) && runtimeDataGPU.grpDA[grpIdx] > groupConfigsGPU[grpIdx].baseDP) { runtimeDataGPU.grpDA[grpIdx] *= groupConfigsGPU[grpIdx].decayDP; } runtimeDataGPU.grpDABuffer[grpIdx * 1000 + simTime] = runtimeDataGPU.grpDA[grpIdx]; // log dopamine concentration } } //******************************** UPDATE STP STATE EVERY TIME STEP ********************************************** /*! * \brief This function is called for updat STP and decay coductance every time step * * net access sim_with_conductance, sim_with_NMDA_rise, sim_with_GABAb_rise, numNReg, numNPois, numN, STP_Pitch, maxDelay * grp access WithSTP * rtd access gAMPA, gNMDA_r, gNMDA_d, gNMDA, gBABAa, gGABAb_r, gGABAb_d, gGABAb * rtd access stpu, stpx */ __global__ void kernel_STPUpdateAndDecayConductances (int t, int sec, int simTime) { const int totBuffers = loadBufferCount; for (int bufPos = blockIdx.x; bufPos < totBuffers; bufPos += gridDim.x) { // KILLME !!! This can be further optimized .... // instead of reading each neuron group separately ..... // read a whole buffer and use the result ...... int2 threadLoad = getStaticThreadLoad(bufPos); int nid = (STATIC_LOAD_START(threadLoad) + threadIdx.x); int lastId = STATIC_LOAD_SIZE(threadLoad); int grpId = STATIC_LOAD_GROUP(threadLoad); // update the conductane parameter of the current neron if (networkConfigGPU.sim_with_conductances && IS_REGULAR_NEURON(nid, networkConfigGPU.numNReg, networkConfigGPU.numNPois)) { runtimeDataGPU.gAMPA[nid] *= networkConfigGPU.dAMPA; if (networkConfigGPU.sim_with_NMDA_rise) { runtimeDataGPU.gNMDA_r[nid] *= networkConfigGPU.rNMDA; runtimeDataGPU.gNMDA_d[nid] *= networkConfigGPU.dNMDA; } else { runtimeDataGPU.gNMDA[nid] *= networkConfigGPU.dNMDA; } runtimeDataGPU.gGABAa[nid] *= networkConfigGPU.dGABAa; if (networkConfigGPU.sim_with_GABAb_rise) { runtimeDataGPU.gGABAb_r[nid] *= networkConfigGPU.rGABAb; runtimeDataGPU.gGABAb_d[nid] *= networkConfigGPU.dGABAb; } else { runtimeDataGPU.gGABAb[nid] *= networkConfigGPU.dGABAb; } } if (groupConfigsGPU[grpId].WithSTP && (threadIdx.x < lastId) && (nid < networkConfigGPU.numN)) { int ind_plus = getSTPBufPos(nid, simTime); int ind_minus = getSTPBufPos(nid, (simTime-1)); // \FIXME sure? runtimeDataGPU.stpu[ind_plus] = runtimeDataGPU.stpu[ind_minus]*(1.0f-groupConfigsGPU[grpId].STP_tau_u_inv); runtimeDataGPU.stpx[ind_plus] = runtimeDataGPU.stpx[ind_minus] + (1.0f-runtimeDataGPU.stpx[ind_minus])*groupConfigsGPU[grpId].STP_tau_x_inv; } } } //********************************UPDATE SYNAPTIC WEIGHTS EVERY SECOND ************************************************************* /*! * \brief This kernel update synaptic weights * * This kernel is called every second to adjust the timingTable and globalFiringTable * We do the following thing: * 1. We discard all firing information that happened more than 1000-maxDelay_ time step. * 2. We move the firing information that happened in the last 1000-maxDelay_ time step to * the begining of the gloalFiringTable. * 3. 
We read each value of "wtChange" and update the value of "synaptic weights wt". * We also clip the "synaptic weight wt" to lie within the required range. */ __device__ void updateSynapticWeights(int nid, unsigned int synId, int grpId, float diff_firing, float homeostasisScale, float baseFiring, float avgTimeScaleInv) { // This function does not get called if the neuron group has all fixed weights. // t_twChange is adjusted by stdpScaleFactor based on frequency of weight updates (e.g., 10ms, 100ms, 1s) float t_wt = runtimeDataGPU.wt[synId]; float t_wtChange = runtimeDataGPU.wtChange[synId]; float t_effectiveWtChange = networkConfigGPU.stdpScaleFactor * t_wtChange; float t_maxWt = runtimeDataGPU.maxSynWt[synId]; switch (groupConfigsGPU[grpId].WithESTDPtype) { case STANDARD: if (groupConfigsGPU[grpId].WithHomeostasis) { // this factor is slow t_wt += (diff_firing*t_wt*homeostasisScale + t_effectiveWtChange) * baseFiring * avgTimeScaleInv / (1.0f+fabs(diff_firing)*50.0f); } else { t_wt += t_effectiveWtChange; } break; case DA_MOD: if (groupConfigsGPU[grpId].WithHomeostasis) { t_effectiveWtChange = runtimeDataGPU.grpDA[grpId] * t_effectiveWtChange; t_wt += (diff_firing*t_wt*homeostasisScale + t_effectiveWtChange) * baseFiring * avgTimeScaleInv / (1.0f+fabs(diff_firing)*50.0f); } else { t_wt += runtimeDataGPU.grpDA[grpId] * t_effectiveWtChange; } break; case UNKNOWN_STDP: default: // we shouldn't even be here if !WithSTDP break; } switch (groupConfigsGPU[grpId].WithISTDPtype) { case STANDARD: if (groupConfigsGPU[grpId].WithHomeostasis) { // this factor is slow t_wt += (diff_firing*t_wt*homeostasisScale + t_effectiveWtChange) * baseFiring * avgTimeScaleInv / (1.0f+fabs(diff_firing)*50.0f); } else { t_wt += t_effectiveWtChange; } break; case DA_MOD: if (groupConfigsGPU[grpId].WithHomeostasis) { t_effectiveWtChange = runtimeDataGPU.grpDA[grpId] * t_effectiveWtChange; t_wt += (diff_firing*t_wt*homeostasisScale + t_effectiveWtChange) * baseFiring * avgTimeScaleInv / (1.0f + fabs(diff_firing)*50.0f); } else { t_wt += runtimeDataGPU.grpDA[grpId] * t_effectiveWtChange; } break; case UNKNOWN_STDP: default: // we shouldn't even be here if !WithSTDP break; } // It's user's choice to decay weight change or not // see setWeightAndWeightChangeUpdate() t_wtChange *= networkConfigGPU.wtChangeDecay; // Check the synapse is excitatory or inhibitory first if (t_maxWt >= 0.0f) { // excitatory synapse if (t_wt >= t_maxWt) t_wt = t_maxWt; if (t_wt < 0.0f) t_wt = 0.0f; } else { // inhibitory synapse if (t_wt <= t_maxWt) t_wt = t_maxWt; if (t_wt > 0.0f) t_wt = 0.0f; } runtimeDataGPU.wt[synId] = t_wt; runtimeDataGPU.wtChange[synId] = t_wtChange; } #define UPWTS_CLUSTERING_SZ 32 /*! * \brief this kernel updates all synaptic weights * * net access: stdpScaleFactor, wtChangeDecay * grp access: homeostasisScale, avgTimeScaleInv, FixedInputWts, WithESTDPtype, WithISTDOtype, WithHomeostasis * rtd access: Npre_plastic, cumulativePre, avgFiring, baseFiringInv, baseFiring, wt, wtChange, maxSynWt * glb access: */ __global__ void kernel_updateWeights() { __shared__ volatile int errCode; __shared__ int startId, lastId, grpId, totBuffers, grpNCnt; __shared__ int2 threadLoad; // added for homeostasis __shared__ float homeostasisScale, avgTimeScaleInv; if(threadIdx.x == 0) { totBuffers = loadBufferCount; grpNCnt = (blockDim.x / UPWTS_CLUSTERING_SZ) + ((blockDim.x % UPWTS_CLUSTERING_SZ) != 0); } __syncthreads(); for (int bufPos = blockIdx.x; bufPos < totBuffers; bufPos += gridDim.x) { // KILLME !!! This can be further optimized .... 
// instead of reading each neuron group separately ..... // read a whole buffer and use the result ...... // if ( threadIdx.x) { // TSC: this could be a performance bug, 127 threads other than the first thread try to read // threadLoad and wirte homeostatsisScale and avgTimeScaleInv at the same time if (threadIdx.x == 0) { threadLoad = getStaticThreadLoad(bufPos); startId = STATIC_LOAD_START(threadLoad); lastId = STATIC_LOAD_SIZE(threadLoad); grpId = STATIC_LOAD_GROUP(threadLoad); // load homestasis parameters if (groupConfigsGPU[grpId].WithHomeostasis) { homeostasisScale = groupConfigsGPU[grpId].homeostasisScale; avgTimeScaleInv = groupConfigsGPU[grpId].avgTimeScaleInv; } else { homeostasisScale = 0.0f; avgTimeScaleInv = 1.0f; } } __syncthreads(); // the weights are fixed for this group.. so dont make any changes on // the weight and continue to the next set of neurons... if (groupConfigsGPU[grpId].FixedInputWts) continue; int nid = (threadIdx.x / UPWTS_CLUSTERING_SZ) + startId; // update the synaptic weights from the synaptic weight derivatives for(; nid < startId + lastId; nid += grpNCnt) { int Npre_plastic = runtimeDataGPU.Npre_plastic[nid]; unsigned int cumulativePre = runtimeDataGPU.cumulativePre[nid]; float diff_firing = 0.0f; float baseFiring = 0.0f; if (groupConfigsGPU[grpId].WithHomeostasis) { diff_firing = (1.0f - runtimeDataGPU.avgFiring[nid] * runtimeDataGPU.baseFiringInv[nid]); baseFiring = runtimeDataGPU.baseFiring[nid]; } const int threadIdGrp = (threadIdx.x % UPWTS_CLUSTERING_SZ); // use 32 threads to update 32 synapses parallely for(unsigned int synIdOffset = cumulativePre; synIdOffset < cumulativePre + Npre_plastic; synIdOffset += UPWTS_CLUSTERING_SZ) { //excitatory connection change the synaptic weights unsigned int synId = synIdOffset + threadIdGrp; if(synId < cumulativePre + Npre_plastic) { updateSynapticWeights(nid, synId, grpId, diff_firing, homeostasisScale, baseFiring, avgTimeScaleInv); } } } } } //********************************UPDATE TABLES AND COUNTERS EVERY SECOND ************************************************************* /*! * \brief This kernel shift the un-processed firing information in firingTableD2 to the beginning of * firingTableD2 for the next second of simulation. * * net access: maxDelay * grp access: N/A * rtd access: firingTableD2 * glb access: timeTableD2GPU */ __global__ void kernel_shiftFiringTable() { int gnthreads = blockDim.x * gridDim.x; for(int p = timeTableD2GPU[999], k = 0; p < timeTableD2GPU[999 + networkConfigGPU.maxDelay + 1]; p += gnthreads, k += gnthreads) { if ((p + threadIdx.x) < timeTableD2GPU[999 + networkConfigGPU.maxDelay + 1]) runtimeDataGPU.firingTableD2[k + threadIdx.x] = runtimeDataGPU.firingTableD2[p + threadIdx.x]; } } /*! * \brief This kernel shift the un-processed firing information in timeTableD1(D2)GPU to the beginning of * timeTableD1(D2)GPU for the next second of simulation. * * After all the threads/blocks had adjusted the firingTableD1(D2)GPU, we update the timeTableD1(D2)GPU * so that the firing information that happended in the last maxDelay_ time step would become * the first maxDelay_ time step firing information for the next second of simulation. * We also reset/update all spike counters to appropriate values as indicated in the second part * of this kernel. 
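 * For example, with maxDelay_ = 20 the first block sets
 * timeTableD2GPU[i+1] = timeTableD2GPU[1000+i+1] - timeTableD2GPU[1000] for i = 0..19 (and likewise
 * for timeTableD1GPU), i.e. the cumulative counts of the last 20 ms of the finished second, re-based
 * to zero, become the first maxDelay_ entries of the next second; spikeCountLastSecLeftD2GPU is then
 * reloaded from timeTableD2GPU[maxDelay_].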
*/ __global__ void kernel_shiftTimeTable() { int maxDelay = networkConfigGPU.maxDelay; if(blockIdx.x == 0) { for(int i = threadIdx.x; i < maxDelay; i += blockDim.x) { // use i+1 instead of just i because timeTableD2GPU[0] should always be 0 timeTableD2GPU[i + 1] = timeTableD2GPU[1000 + i + 1] - timeTableD2GPU[1000]; timeTableD1GPU[i + 1] = timeTableD1GPU[1000 + i + 1] - timeTableD1GPU[1000]; } } __syncthreads(); // reset various counters for the firing information if((blockIdx.x == 0) && (threadIdx.x == 0)) { timeTableD1GPU[maxDelay] = 0; spikeCountD2GPU += spikeCountD2SecGPU; spikeCountD1GPU += spikeCountD1SecGPU; spikeCountD2SecGPU = 0; spikeCountD1SecGPU = 0; spikeCountExtRxD2SecGPU = 0; spikeCountExtRxD1SecGPU = 0; spikeCountLastSecLeftD2GPU = timeTableD2GPU[maxDelay]; secD2fireCntTest = timeTableD2GPU[maxDelay]; secD1fireCntTest = 0; } } //****************************** GENERATE POST-SYNAPTIC CURRENT EVERY TIME-STEP **************************** /* * The sequence of handling an post synaptic spike in GPU mode: * P1. Update synSpikeTime * P2. Update DA,5HT,ACh,NE accordingly * P3. Update STDP wtChange * P4. Load wt into change (temporary variable) * P5. Modulate change by STP (if enabled) * P6-1. Modulate change by d_mulSynSlow and d_mulSynFast * P6-2. Accumulate g(AMPA,NMDA,GABAa,GABAb) or current * P7. Update v(voltage), u(recovery) * P8. Update homeostasis * P9. Decay and log DA,5HT,ACh,NE */ __device__ void generatePostSynapticSpike(int simTime, int preNId, int postNId, int synId) { // get the actual position of the synapses and other variables... unsigned int pos = runtimeDataGPU.cumulativePre[postNId] + synId; short int preGrpId = runtimeDataGPU.grpIds[preNId]; // STP uses preGrpId short int postGrpId = runtimeDataGPU.grpIds[postNId]; // STDP uses postGrpId setFiringBitSynapses(postNId, synId); // P1 runtimeDataGPU.synSpikeTime[pos] = simTime; //uncoalesced access // P2 // Got one spike from dopaminergic neuron, increase dopamine concentration in the target area if (groupConfigsGPU[preGrpId].Type & TARGET_DA) { atomicAdd(&(runtimeDataGPU.grpDA[postGrpId]), 0.04f); } // P3 // STDP calculation: the post-synaptic neuron fires before the arrival of pre-synaptic neuron's spike if (groupConfigsGPU[postGrpId].WithSTDP && !networkConfigGPU.sim_in_testing) { int stdp_tDiff = simTime - runtimeDataGPU.lastSpikeTime[postNId]; if (stdp_tDiff >= 0) { if (groupConfigsGPU[postGrpId].WithESTDP) { // Handle E-STDP curves switch (groupConfigsGPU[postGrpId].WithESTDPcurve) { case EXP_CURVE: // exponential curve case TIMING_BASED_CURVE: // sc curve if (stdp_tDiff * groupConfigsGPU[postGrpId].TAU_MINUS_INV_EXC < 25.0f) runtimeDataGPU.wtChange[pos] += STDP(stdp_tDiff, groupConfigsGPU[postGrpId].ALPHA_MINUS_EXC, groupConfigsGPU[postGrpId].TAU_MINUS_INV_EXC); // uncoalesced access break; default: break; } } if (groupConfigsGPU[postGrpId].WithISTDP) { // Handle I-STDP curves switch (groupConfigsGPU[postGrpId].WithISTDPcurve) { case EXP_CURVE: // exponential curve if ((stdp_tDiff * groupConfigsGPU[postGrpId].TAU_MINUS_INV_INB) < 25.0f) { // LTD of inhibitory syanpse, which increase synapse weight runtimeDataGPU.wtChange[pos] -= STDP(stdp_tDiff, groupConfigsGPU[postGrpId].ALPHA_MINUS_INB, groupConfigsGPU[postGrpId].TAU_MINUS_INV_INB); } break; case PULSE_CURVE: // pulse curve if (stdp_tDiff <= groupConfigsGPU[postGrpId].LAMBDA) { // LTP of inhibitory synapse, which decreases synapse weight runtimeDataGPU.wtChange[pos] -= groupConfigsGPU[postGrpId].BETA_LTP; } else if (stdp_tDiff <= 
groupConfigsGPU[postGrpId].DELTA) { // LTD of inhibitory syanpse, which increase synapse weight runtimeDataGPU.wtChange[pos] -= groupConfigsGPU[postGrpId].BETA_LTD; } break; default: break; } } } } } #define READ_CHUNK_SZ 64 /*! * \brief This kernel updates and generates spikes for delays greater than 1 from the fired neuron. * * The LTD computation is also executed by this kernel. * * net access: maxDelay, I_setPitch, sim_in_testing * grp access: Type, WithSTDP, WithESTDP, WithESTDPcurve, WithISDP, WithISTDPcurve, all STDP parameters * rtd access: firingTableD2, cumulativePost, postDelayInfo, postSynapticIds, cumulativePre, grpIds, * grpDA, I_set, (W)synSpikeTime, (R)lastSpikeTime, wtChange * glb access: spikeCountD2SecGPU, timeTableD2GPU_tex, timeTableD2GPU_tex_offset */ __global__ void kernel_doCurrentUpdateD2(int simTimeMs, int simTimeSec, int simTime) { __shared__ volatile int sh_neuronOffsetTable[READ_CHUNK_SZ + 2]; __shared__ int sh_delayLength[READ_CHUNK_SZ + 2]; __shared__ int sh_delayIndexStart[READ_CHUNK_SZ + 2]; __shared__ int sh_firingId[READ_CHUNK_SZ + 2]; __shared__ volatile int sh_NeuronCnt; const int threadIdWarp = (threadIdx.x % WARP_SIZE); const int warpId = (threadIdx.x / WARP_SIZE); // this variable is used to record the // number of updates done by different blocks if(threadIdx.x<=0) { sh_NeuronCnt = 0; } __syncthreads(); // stores the number of fired neurons at time t int k = tex1Dfetch(timeTableD2GPU_tex, simTimeMs + networkConfigGPU.maxDelay + 1 + timeTableD2GPU_tex_offset) - 1; // stores the number of fired neurons at time (t - maxDelay_) int k_end = tex1Dfetch(timeTableD2GPU_tex, simTimeMs + 1 + timeTableD2GPU_tex_offset); int t_pos = simTimeMs; // we need to read (k-k_end) neurons from the firing // table and do necesary updates for all these post-synaptic // connection in these neurons.. while ((k >= k_end) && (k >= 0)) { // at any point of time EXCIT_READ_CHUNK_SZ neurons // read different firing id from the firing table if (threadIdx.x < READ_CHUNK_SZ) { // use 64 threads int fPos = k - (READ_CHUNK_SZ * blockIdx.x) - threadIdx.x; if ((fPos >= 0) && (fPos >= k_end)) { // get the neuron nid here.... //int val = runtimeDataGPU.firingTableD2[fPos]; //int nid = GET_FIRING_TABLE_NID(val); int nid = runtimeDataGPU.firingTableD2[fPos]; // find the time of firing based on the firing number fPos while (!((fPos >= tex1Dfetch(timeTableD2GPU_tex, t_pos + networkConfigGPU.maxDelay + timeTableD2GPU_tex_offset)) && (fPos < tex1Dfetch(timeTableD2GPU_tex, t_pos + networkConfigGPU.maxDelay + 1 + timeTableD2GPU_tex_offset)))) { t_pos--; } // find the time difference between firing of the neuron and the current time int tD = simTimeMs - t_pos; // find the various delay parameters for neuron 'nid', with a delay of 'tD' //sh_axonDelay[threadIdx.x] = tD; int tPos = (networkConfigGPU.maxDelay + 1) * nid + tD; //sh_firingId[threadIdx.x] = val; sh_firingId[threadIdx.x] = nid; sh_neuronOffsetTable[threadIdx.x] = runtimeDataGPU.cumulativePost[nid]; sh_delayLength[threadIdx.x] = runtimeDataGPU.postDelayInfo[tPos].delay_length; sh_delayIndexStart[threadIdx.x] = runtimeDataGPU.postDelayInfo[tPos].delay_index_start; // This is to indicate that the current thread // has a valid delay parameter for post-synaptic firing generation atomicAdd((int*)&sh_NeuronCnt, 1); } } __syncthreads(); // if cnt is zero than no more neurons need to generate // post-synaptic firing, then we break the loop. 
if (sh_NeuronCnt == 0) { break; } // first WARP_SIZE threads the post synaptic // firing for first neuron, and so on. each of this group // needs to generate (numPostSynapses/maxDelay_) spikes for every fired neuron, every second // for numPostSynapses=500,maxDelay_=20, we need to generate 25 spikes for each fired neuron // for numPostSynapses=600,maxDelay_=20, we need to generate 30 spikes for each fired neuron for (int pos = warpId; pos < sh_NeuronCnt; pos += (NUM_THREADS / WARP_SIZE)) { int delId = threadIdWarp; while (delId < sh_delayLength[pos]) { // get the post synaptic information for specific delay SynInfo postInfo = runtimeDataGPU.postSynapticIds[sh_neuronOffsetTable[pos] + sh_delayIndexStart[pos] + delId]; int postNId = GET_CONN_NEURON_ID(postInfo); // get post-neuron id int synId = GET_CONN_SYN_ID(postInfo); // get synaptic id if (postNId < networkConfigGPU.numN) // test if post-neuron is a local neuron generatePostSynapticSpike(simTime, sh_firingId[pos] /* preNId */, postNId, synId); delId += WARP_SIZE; } } //(for all excitory neurons in table) __syncthreads(); if(threadIdx.x == 0) { sh_NeuronCnt = 0; } k = k - (gridDim.x * READ_CHUNK_SZ); __syncthreads(); } __syncthreads(); } /*! * \brief This kernel updating and generating spikes on connections with a delay of 1ms from the fired neuron. * * This function looks mostly like kernel_doCurrentUpdateD2() but has been optimized for a fixed delay of 1ms. * Ultimately we may merge this kernel with the kernel_doCurrentUpdateD2(). * The LTD computation is also executed by this kernel. * * net access: maxDelay, I_setPitch, sim_in_testing * grp access: Type, grpDA, WithSTDP, WithESTDP, WithISTDP, WithESTDPcurve, WithISTDPcurve, all STDP parameters * rtd access: postSynapticIds, cumulativePre, grpIds, I_set, wtChange, (R)lastSpikeTime, (W)synSpikeTime * glb access: timeTableD1GPU, spikeCountD1SecGPU, firingTableD1 */ __global__ void kernel_doCurrentUpdateD1(int simTimeMs, int simTimeSec, int simTime) { __shared__ volatile int sh_NeuronCnt; __shared__ volatile int sh_neuronOffsetTable[NUM_THREADS / WARP_SIZE + 2]; __shared__ int sh_delayLength[NUM_THREADS / WARP_SIZE + 2]; __shared__ int sh_firingId[NUM_THREADS / WARP_SIZE + 2]; __shared__ int sh_delayIndexStart[NUM_THREADS / WARP_SIZE + 2]; __shared__ int sh_timing; __shared__ int kPosEnd; const int warpId = threadIdx.x / WARP_SIZE; // warp id const int numWarps = blockDim.x / WARP_SIZE; // number of warp const int threadIdWarp = threadIdx.x % WARP_SIZE; // thread id within a warp // load the time table for neuron firing if (threadIdx.x == 0) { sh_timing = timeTableD1GPU[simTimeMs + networkConfigGPU.maxDelay]; // number of fired neurons at simTimeMs - 1 kPosEnd = timeTableD1GPU[simTimeMs + networkConfigGPU.maxDelay + 1]; // number of fired neurons at simTimeMs, which is equal to spikeCountD1SecGPU } __syncthreads(); int kPos = sh_timing + (blockIdx.x * numWarps); __syncthreads(); // Do current update as long as we have some valid neuron while ((kPos >= 0) && (kPos < kPosEnd)) { int fPos = -1; // a group of threads (4 threads) loads the delay information if (threadIdx.x < numWarps) { sh_neuronOffsetTable[threadIdx.x] = -1; fPos = kPos + threadIdx.x; // find the neuron nid and also delay information from fPos if ((fPos >= 0) && (fPos < kPosEnd)) { atomicAdd((int*)&sh_NeuronCnt, 1); //int val = runtimeDataGPU.firingTableD1[fPos]; //int nid = GET_FIRING_TABLE_NID(val); int nid = runtimeDataGPU.firingTableD1[fPos]; int tPos = (networkConfigGPU.maxDelay + 1) * nid; //sh_firingId[threadIdx.x] = 
val; sh_firingId[threadIdx.x] = nid; sh_neuronOffsetTable[threadIdx.x] = runtimeDataGPU.cumulativePost[nid]; sh_delayLength[threadIdx.x] = runtimeDataGPU.postDelayInfo[tPos].delay_length; sh_delayIndexStart[threadIdx.x] = runtimeDataGPU.postDelayInfo[tPos].delay_index_start; } } __syncthreads(); // no more fired neuron from table... we just break from loop if (sh_NeuronCnt == 0) { break; } __syncthreads(); int offset = sh_neuronOffsetTable[warpId]; if (threadIdx.x == 0) { sh_NeuronCnt = 0; } // 32 threads for generatePostSynapticSpike() if (offset >= 0) { int delId = threadIdWarp; while (delId < sh_delayLength[warpId]) { // get the post synaptic information for specific delay SynInfo postInfo = runtimeDataGPU.postSynapticIds[offset + sh_delayIndexStart[warpId] + delId]; int postNId = GET_CONN_NEURON_ID(postInfo); // get post-neuron id int synId = GET_CONN_SYN_ID(postInfo); // get synaptic id if (postNId < networkConfigGPU.numN) // test if post-neuron is a local neuron generatePostSynapticSpike(simTime, sh_firingId[warpId] /* preNId */, postNId, synId); delId += WARP_SIZE; } } __syncthreads(); kPos = kPos + (gridDim.x * numWarps); } } __global__ void kernel_convertExtSpikesD2(int startIdx, int endIdx, int GtoLOffset) { int firingTableIdx = startIdx + blockIdx.x * blockDim.x + threadIdx.x; int spikeCountExtRx = endIdx - startIdx; // received external spike count if (threadIdx.x == 0 && blockIdx.x == 0) { secD2fireCntTest += spikeCountExtRx; spikeCountD2SecGPU += spikeCountExtRx; spikeCountExtRxD2GPU += spikeCountExtRx; spikeCountExtRxD2SecGPU += spikeCountExtRx; } // FIXME: if endIdx - startIdx > 64 * 128 if (firingTableIdx < endIdx) runtimeDataGPU.firingTableD2[firingTableIdx] += GtoLOffset; } __global__ void kernel_convertExtSpikesD1(int startIdx, int endIdx, int GtoLOffset) { int firingTableIdx = startIdx + blockIdx.x * blockDim.x + threadIdx.x; int spikeCountExtRx = endIdx - startIdx; // received external spike count if (threadIdx.x == 0 && blockIdx.x == 0) { secD1fireCntTest += spikeCountExtRx; spikeCountD1SecGPU += spikeCountExtRx; spikeCountExtRxD1GPU += spikeCountExtRx; spikeCountExtRxD1SecGPU += spikeCountExtRx; } // FIXME: if endIdx - startIdx > 64 * 128 if (firingTableIdx < endIdx) runtimeDataGPU.firingTableD1[firingTableIdx] += GtoLOffset; } /*! * \brief this function allocates device (GPU) memory sapce and copies information of pre-connections to it * * This function: * initialize Npre_plasticInv * (allocate and) copy Npre, Npre_plastic, Npre_plasticInv, cumulativePre, preSynapticIds * (allocate and) copy Npost, cumulativePost, postSynapticIds, postDelayInfo * * * \param[in] netId the id of a local network, which is the same as the device (GPU) id * \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied * \param[in] dest pointer to runtime data desitnation * \param[in] src pointer to runtime data source * \param[in] kind the direction of copying * \param[in] allocateMem a flag indicates whether allocating memory space before copying * * \sa allocateSNN_GPU * \since v4.0 */ void SNN::copyPreConnectionInfo(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, cudaMemcpyKind kind, bool allocateMem) { checkAndSetGPUDevice(netId); checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, 0); // check that the destination pointer is properly allocated.. 
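	// Note: when lGrpId == ALL, the whole assigned neuron range [0, numNAssigned) is processed;
	// otherwise only the contiguous slice [lStartN, lStartN + numN) of the requested local group is
	// copied. The other copy helpers below follow the same pattern with their respective lengths.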
int lengthN, lengthSyn, posN, posSyn; if (lGrpId == ALL) { lengthN = networkConfigs[netId].numNAssigned; posN = 0; } else { lengthN = groupConfigs[netId][lGrpId].numN; posN = groupConfigs[netId][lGrpId].lStartN; } // connection synaptic lengths and cumulative lengths... if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->Npre, sizeof(short) * networkConfigs[netId].numNAssigned)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->Npre[posN], &src->Npre[posN], sizeof(short) * lengthN, kind)); // we don't need these data structures if the network doesn't have any plastic synapses at all if (!sim_with_fixedwts) { // presyn excitatory connections if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->Npre_plastic, sizeof(short) * networkConfigs[netId].numNAssigned)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->Npre_plastic[posN], &src->Npre_plastic[posN], sizeof(short) * lengthN, kind)); // Npre_plasticInv is only used on GPUs, only allocate and copy it during initialization if(allocateMem) { float* Npre_plasticInv = new float[networkConfigs[netId].numNAssigned]; for (int i = 0; i < networkConfigs[netId].numNAssigned; i++) Npre_plasticInv[i] = 1.0f / managerRuntimeData.Npre_plastic[i]; CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->Npre_plasticInv, sizeof(float) * networkConfigs[netId].numNAssigned)); CUDA_CHECK_ERRORS(cudaMemcpy(dest->Npre_plasticInv, Npre_plasticInv, sizeof(float) * networkConfigs[netId].numNAssigned, kind)); delete[] Npre_plasticInv; } } // beginning position for the pre-synaptic information if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->cumulativePre, sizeof(int) * networkConfigs[netId].numNAssigned)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->cumulativePre[posN], &src->cumulativePre[posN], sizeof(int) * lengthN, kind)); // Npre, cumulativePre has been copied to destination if (lGrpId == ALL) { lengthSyn = networkConfigs[netId].numPreSynNet; posSyn = 0; } else { lengthSyn = 0; for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++) lengthSyn += dest->Npre[lNId]; posSyn = dest->cumulativePre[groupConfigs[netId][lGrpId].lStartN]; } if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->preSynapticIds, sizeof(SynInfo) * networkConfigs[netId].numPreSynNet)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->preSynapticIds[posSyn], &src->preSynapticIds[posSyn], sizeof(SynInfo) * lengthSyn, kind)); } /*! * \brief this function allocates device (GPU) memory sapce and copies information of post-connections to it * * This function: * (allocate and) copy Npost, cumulativePost, postSynapticIds, postDelayInfo * * * \param[in] netId the id of a local network, which is the same as the device (GPU) id * \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied * \param[in] dest pointer to runtime data desitnation * \param[in] src pointer to runtime data source * \param[in] kind the direction of copying * \param[in] allocateMem a flag indicates whether allocating memory space before copying * * \sa allocateSNN_GPU * \since v4.0 */ void SNN::copyPostConnectionInfo(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, cudaMemcpyKind kind, bool allocateMem) { checkAndSetGPUDevice(netId); checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, 0);// check that the destination pointer is properly allocated.. 
int lengthN, lengthSyn, posN, posSyn; if (lGrpId == ALL) { lengthN = networkConfigs[netId].numNAssigned; posN = 0; } else { lengthN = groupConfigs[netId][lGrpId].numN; posN = groupConfigs[netId][lGrpId].lStartN; } // number of postsynaptic connections if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->Npost, sizeof(short) * networkConfigs[netId].numNAssigned)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->Npost[posN], &src->Npost[posN], sizeof(short) * lengthN, kind)); // beginning position for the post-synaptic information if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->cumulativePost, sizeof(int) * networkConfigs[netId].numNAssigned)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->cumulativePost[posN], &src->cumulativePost[posN], sizeof(int) * lengthN, kind)); // Npost, cumulativePost has been copied to destination if (lGrpId == ALL) { lengthSyn = networkConfigs[netId].numPostSynNet; posSyn = 0; } else { lengthSyn = 0; for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++) lengthSyn += dest->Npost[lNId]; posSyn = dest->cumulativePost[groupConfigs[netId][lGrpId].lStartN]; } // actual post synaptic connection information... if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->postSynapticIds, sizeof(SynInfo) * networkConfigs[netId].numPostSynNet)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->postSynapticIds[posSyn], &src->postSynapticIds[posSyn], sizeof(SynInfo) * lengthSyn, kind)); // static specific mapping and actual post-synaptic delay metric if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->postDelayInfo, sizeof(DelayInfo) * networkConfigs[netId].numNAssigned * (glbNetworkConfig.maxDelay + 1))); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->postDelayInfo[posN * (glbNetworkConfig.maxDelay + 1)], &src->postDelayInfo[posN * (glbNetworkConfig.maxDelay + 1)], sizeof(DelayInfo) * lengthN * (glbNetworkConfig.maxDelay + 1), kind)); } void SNN::checkDestSrcPtrs(RuntimeData* dest, RuntimeData* src, cudaMemcpyKind kind, bool allocateMem, int lGrpId, int destOffset) { // source should always be allocated assert(src->allocated); if(kind == cudaMemcpyHostToDevice) { assert(src->memType == CPU_MEM); assert(dest->memType == GPU_MEM); if (allocateMem) { assert(!dest->allocated); // if allocateMem = true, then the destination must be empty without allocation. assert(lGrpId == ALL); // if allocateMem = true, then we should not specify any specific group. } else { assert(dest->allocated); // if allocateMem = false, then the destination must be allocated. } assert(destOffset == 0); // H-to-D only allows local-to-local copy } else if (kind == cudaMemcpyDeviceToHost) { assert(src->memType == GPU_MEM); assert(dest->memType == CPU_MEM); assert(dest->allocated); if (lGrpId == ALL) assert(destOffset == 0); // if copy all content, only local-to-local is allowed } else { KERNEL_ERROR("Wrong Host-Device copy direction"); exitSimulation(1); } } /*! * \brief this function allocates device (GPU) memory sapce and copies AMPA conductance to it * * This function: * (allocate and) copy gAMPA * * This funcion is called by copyNeuronState() and fetchConductanceAMPA(). 
It supports bi-directional copying
 *
 * \param[in] netId the id of a local network, which is the same as the device (GPU) id
 * \param[in] lGrpId the local group id in a local network, which specifies the group(s) to be copied
 * \param[in] dest pointer to runtime data destination
 * \param[in] src pointer to runtime data source
 * \param[in] kind the direction of copy
 * \param[in] allocateMem a flag indicating whether to allocate memory space before copying
 * \param[in] destOffset the offset of the data destination, which is used in local-to-global copy
 *
 * \sa copyNeuronState fetchConductanceAMPA
 * \since v3.0
 */
void SNN::copyConductanceAMPA(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, cudaMemcpyKind kind, bool allocateMem, int destOffset) {
	checkAndSetGPUDevice(netId);
	checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, destOffset); // check that the destination pointer is properly allocated..

	assert(isSimulationWithCOBA());

	int ptrPos, length;
	if(lGrpId == ALL) {
		ptrPos = 0;
		length = networkConfigs[netId].numNReg;
	} else {
		ptrPos = groupConfigs[netId][lGrpId].lStartN;
		length = groupConfigs[netId][lGrpId].numN;
	}
	assert(length <= networkConfigs[netId].numNReg);
	assert(length > 0);

	// conductance information
	assert(src->gAMPA != NULL);
	if(allocateMem)
		CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->gAMPA, sizeof(float) * length));
	CUDA_CHECK_ERRORS(cudaMemcpy(&dest->gAMPA[ptrPos + destOffset], &src->gAMPA[ptrPos], sizeof(float) * length, kind));
}

/*!
 * \brief this function allocates device (GPU) memory space and copies NMDA conductance to it
 *
 * This function:
 * (allocate and) copy gNMDA, gNMDA_r, gNMDA_d
 *
 * This function is called by copyNeuronState() and fetchConductanceNMDA(). It supports bi-directional copying
 *
 * \param[in] netId the id of a local network, which is the same as the device (GPU) id
 * \param[in] lGrpId the local group id in a local network, which specifies the group(s) to be copied
 * \param[in] dest pointer to runtime data destination
 * \param[in] src pointer to runtime data source
 * \param[in] kind the direction of copy
 * \param[in] allocateMem a flag indicating whether to allocate memory space before copying
 * \param[in] destOffset the offset of the data destination, which is used in local-to-global copy
 *
 * \sa copyNeuronState fetchConductanceNMDA
 * \since v3.0
 */
void SNN::copyConductanceNMDA(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, cudaMemcpyKind kind, bool allocateMem, int destOffset) {
	checkAndSetGPUDevice(netId);
	checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, destOffset); // check that the destination pointer is properly allocated..
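	// Note: when the simulation models an NMDA rise time (isSimulationWithNMDARise()), the conductance
	// is split into a rising component gNMDA_r and a decaying component gNMDA_d; otherwise a single
	// gNMDA buffer is used. Only the single-buffer path applies destOffset, mirroring copyConductanceAMPA().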
assert(isSimulationWithCOBA()); int ptrPos, length; if(lGrpId == ALL) { ptrPos = 0; length = networkConfigs[netId].numNReg; } else { ptrPos = groupConfigs[netId][lGrpId].lStartN; length = groupConfigs[netId][lGrpId].numN; } assert(length <= networkConfigs[netId].numNReg); assert(length > 0); if (isSimulationWithNMDARise()) { assert(src->gNMDA_r != NULL); if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**) &dest->gNMDA_r, sizeof(float) * length)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->gNMDA_r[ptrPos], &src->gNMDA_r[ptrPos], sizeof(float) * length, kind)); assert(src->gNMDA_d != NULL); if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**) &dest->gNMDA_d, sizeof(float) * length)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->gNMDA_d[ptrPos], &src->gNMDA_d[ptrPos], sizeof(float) * length, kind)); } else { assert(src->gNMDA != NULL); if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**) &dest->gNMDA, sizeof(float) * length)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->gNMDA[ptrPos + destOffset], &src->gNMDA[ptrPos], sizeof(float) * length, kind)); } } /*! * \brief this function allocates device (GPU) memory sapce and copies GABAa conductance to it * * This function: * (allocate and) copy gGABAa * * This funcion is called by copyNeuronState() and fetchConductanceGABAa(). It supports bi-directional copying * * \param[in] netId the id of a local network, which is the same as the device (GPU) id * \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied * \param[in] dest pointer to runtime data desitnation * \param[in] src pointer to runtime data source * \param[in] kind the direction of copy * \param[in] allocateMem a flag indicates whether allocating memory space before copy * \param[in] destOffset the offset of data destination, which is used in local-to-global copy * * \sa copyNeuronState fetchConductanceGABAa * \since v3.0 */ void SNN::copyConductanceGABAa(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, cudaMemcpyKind kind, bool allocateMem, int destOffset) { checkAndSetGPUDevice(netId); checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, destOffset); // check that the destination pointer is properly allocated.. assert(isSimulationWithCOBA()); int ptrPos, length; if(lGrpId == ALL) { ptrPos = 0; length = networkConfigs[netId].numNReg; } else { ptrPos = groupConfigs[netId][lGrpId].lStartN; length = groupConfigs[netId][lGrpId].numN; } assert(length <= networkConfigs[netId].numNReg); assert(length > 0); assert(src->gGABAa != NULL); if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->gGABAa, sizeof(float) * length)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->gGABAa[ptrPos + destOffset], &src->gGABAa[ptrPos], sizeof(float) * length, kind)); } /*! * \brief this function allocates device (GPU) memory sapce and copies GABAb conductance to it * * This function: * (allocate and) copy gGABAb, gGABAb_r, gGABAb_d * * This funcion is called by copyNeuronState() and fetchConductanceGABAb(). 
It supports bi-directional copying
 *
 * \param[in] netId the id of a local network, which is the same as the device (GPU) id
 * \param[in] lGrpId the local group id in a local network, which specifies the group(s) to be copied
 * \param[in] dest pointer to runtime data destination
 * \param[in] src pointer to runtime data source
 * \param[in] kind the direction of copy
 * \param[in] allocateMem a flag indicating whether to allocate memory space before copying
 * \param[in] destOffset the offset of the data destination, which is used in local-to-global copy
 *
 * \sa copyNeuronState fetchConductanceGABAb
 * \since v3.0
 */
void SNN::copyConductanceGABAb(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, cudaMemcpyKind kind, bool allocateMem, int destOffset) {
	checkAndSetGPUDevice(netId);
	checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, destOffset); // check that the destination pointer is properly allocated..

	assert(isSimulationWithCOBA());

	int ptrPos, length;
	if(lGrpId == ALL) {
		ptrPos = 0;
		length = networkConfigs[netId].numNReg;
	} else {
		ptrPos = groupConfigs[netId][lGrpId].lStartN;
		length = groupConfigs[netId][lGrpId].numN;
	}
	assert(length <= networkConfigs[netId].numNReg);
	assert(length > 0);

	if (isSimulationWithGABAbRise()) {
		assert(src->gGABAb_r != NULL);
		if(allocateMem)
			CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->gGABAb_r, sizeof(float) * length));
		CUDA_CHECK_ERRORS(cudaMemcpy(&dest->gGABAb_r[ptrPos], &src->gGABAb_r[ptrPos], sizeof(float) * length, kind));

		assert(src->gGABAb_d != NULL);
		if(allocateMem)
			CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->gGABAb_d, sizeof(float) * length));
		CUDA_CHECK_ERRORS(cudaMemcpy(&dest->gGABAb_d[ptrPos], &src->gGABAb_d[ptrPos], sizeof(float) * length, kind));
	} else {
		assert(src->gGABAb != NULL);
		if(allocateMem)
			CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->gGABAb, sizeof(float) * length));
		CUDA_CHECK_ERRORS(cudaMemcpy(&dest->gGABAb[ptrPos + destOffset], &src->gGABAb[ptrPos], sizeof(float) * length, kind));
	}
}

/*!
 * \brief this function allocates device (GPU) memory space and copies variables related to neuron state to it
 *
 * This function:
 * (allocate and) copy voltage, recovery, current, avgFiring
 *
 * This function is called by allocateSNN_GPU(). Only copying from host to device is required
 *
 * \param[in] netId the id of a local network, which is the same as the device (GPU) id
 * \param[in] lGrpId the local group id in a local network, which specifies the group(s) to be copied
 * \param[in] dest pointer to runtime data destination
 * \param[in] allocateMem a flag indicating whether to allocate memory space before copying
 *
 * \sa allocateSNN_GPU fetchNeuronState
 * \since v3.0
 */
void SNN::copyNeuronState(int netId, int lGrpId, RuntimeData* dest, cudaMemcpyKind kind, bool allocateMem) {
	checkAndSetGPUDevice(netId);
	checkDestSrcPtrs(dest, &managerRuntimeData, cudaMemcpyHostToDevice, allocateMem, lGrpId, 0); // check that the destination pointer is properly allocated..
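	// Note: neuron state is only ever pushed host-to-device here (see the kind assert below);
	// presumably because Poisson neurons carry no integrable state, the early return further down
	// skips Poisson groups whenever their buffers have already been allocated.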
assert(kind == cudaMemcpyHostToDevice); int ptrPos, length; if(lGrpId == ALL) { ptrPos = 0; length = networkConfigs[netId].numNReg; } else { ptrPos = groupConfigs[netId][lGrpId].lStartN; length = groupConfigs[netId][lGrpId].numN; } assert(length <= networkConfigs[netId].numNReg); if (length == 0) return; if(!allocateMem && groupConfigs[netId][lGrpId].Type & POISSON_NEURON) return; if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->recovery, sizeof(float) * length)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->recovery[ptrPos], &managerRuntimeData.recovery[ptrPos], sizeof(float) * length, cudaMemcpyHostToDevice)); if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->voltage, sizeof(float) * length)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->voltage[ptrPos], &managerRuntimeData.voltage[ptrPos], sizeof(float) * length, cudaMemcpyHostToDevice)); if (allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->nextVoltage, sizeof(float) * length)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->nextVoltage[ptrPos], &managerRuntimeData.nextVoltage[ptrPos], sizeof(float) * length, cudaMemcpyHostToDevice)); //neuron input current... if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->current, sizeof(float) * length)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->current[ptrPos], &managerRuntimeData.current[ptrPos], sizeof(float) * length, cudaMemcpyHostToDevice)); if (sim_with_conductances) { //conductance information copyConductanceAMPA(netId, lGrpId, dest, &managerRuntimeData, cudaMemcpyHostToDevice, allocateMem, 0); copyConductanceNMDA(netId, lGrpId, dest, &managerRuntimeData, cudaMemcpyHostToDevice, allocateMem, 0); copyConductanceGABAa(netId, lGrpId, dest, &managerRuntimeData, cudaMemcpyHostToDevice, allocateMem, 0); copyConductanceGABAb(netId, lGrpId, dest, &managerRuntimeData, cudaMemcpyHostToDevice, allocateMem, 0); } // copying external current needs to be done separately because setExternalCurrent needs to call it, too // do it only from host to device copyExternalCurrent(netId, lGrpId, dest, cudaMemcpyHostToDevice, allocateMem); if (allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->curSpike, sizeof(bool) * length)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->curSpike[ptrPos], &managerRuntimeData.curSpike[ptrPos], sizeof(bool) * length, cudaMemcpyHostToDevice)); copyNeuronParameters(netId, lGrpId, dest, cudaMemcpyHostToDevice, allocateMem); if (networkConfigs[netId].sim_with_nm) copyNeuronStateBuffer(netId, lGrpId, dest, &managerRuntimeData, cudaMemcpyHostToDevice, allocateMem); if (sim_with_homeostasis) { //Included to enable homeostasis in GPU_MODE. // Avg. Firing... if (allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->avgFiring, sizeof(float) * length)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->avgFiring[ptrPos], &managerRuntimeData.avgFiring[ptrPos], sizeof(float) * length, cudaMemcpyHostToDevice)); } } /*! * \brief this function allocates device (GPU) memory sapce and copies the spike count of each neuron to it * * This function: * (allocate and) copy nSpikeCnt * * This funcion is called by copyAuxiliaryData() and fetchNeuronSpikeCount(). 
It supports bi-directional copying
 *
 * \param[in] netId the id of a local network, which is the same as the device (GPU) id
 * \param[in] lGrpId the local group id in a local network, which specifies the group(s) to be copied
 * \param[in] dest pointer to runtime data destination
 * \param[in] src pointer to runtime data source
 * \param[in] kind the direction of copy
 * \param[in] allocateMem a flag indicating whether to allocate memory space before copying
 * \param[in] destOffset the offset of the data destination, which is used in local-to-global copy
 *
 * \sa copyAuxiliaryData fetchNeuronSpikeCount
 * \since v4.0
 */
void SNN::copyNeuronSpikeCount(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, cudaMemcpyKind kind, bool allocateMem, int destOffset) {
	checkAndSetGPUDevice(netId);
	checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, destOffset); // check that the destination pointer is properly allocated..

	int posN, lengthN;
	if(lGrpId == ALL) {
		posN = 0;
		lengthN = networkConfigs[netId].numN;
	} else {
		posN = groupConfigs[netId][lGrpId].lStartN;
		lengthN = groupConfigs[netId][lGrpId].numN;
	}
	assert(lengthN > 0 && lengthN <= networkConfigs[netId].numN);

	// spike count information
	if(allocateMem)
		CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->nSpikeCnt, sizeof(int) * lengthN));
	CUDA_CHECK_ERRORS(cudaMemcpy(&dest->nSpikeCnt[posN + destOffset], &src->nSpikeCnt[posN], sizeof(int) * lengthN, kind));
}

// FIXME: move grpDA(5HT, ACh, NE)Buffer to copyAuxiliaryData
/*!
 * \brief this function allocates device (GPU) memory space and copies variables related to group state to it
 *
 * This function:
 * (allocate and) copy grpDA, grp5HT, grpACh, grpNE, grpDABuffer, grp5HTBuffer, grpAChBuffer, grpNEBuffer
 *
 * This function is called by allocateSNN_GPU() and fetchGroupState(). It supports bi-directional copying
 *
 * \param[in] netId the id of a local network, which is the same as the device (GPU) id
 * \param[in] lGrpId the local group id in a local network, which specifies the group(s) to be copied
 * \param[in] dest pointer to runtime data destination
 * \param[in] src pointer to runtime data source
 * \param[in] kind the direction of copying
 * \param[in] allocateMem a flag indicating whether to allocate memory space before copying
 *
 * \sa allocateSNN_GPU fetchGroupState
 * \since v3.0
 */
void SNN::copyGroupState(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, cudaMemcpyKind kind, bool allocateMem) {
	checkAndSetGPUDevice(netId);
	checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, 0); // check that the destination pointer is properly allocated..
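	// Note: grpDA/grp5HT/grpACh/grpNE hold a single concentration value per group, while the
	// corresponding *Buffer arrays are sized 1000 * numGroups below -- presumably one sample per ms of
	// the current second -- which is why per-group copies below transfer 1000-entry blocks.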
if (allocateMem) { assert(dest->memType == GPU_MEM && !dest->allocated); CUDA_CHECK_ERRORS(cudaMalloc((void**) &dest->grpDA, sizeof(float) * networkConfigs[netId].numGroups)); CUDA_CHECK_ERRORS(cudaMalloc((void**) &dest->grp5HT, sizeof(float) * networkConfigs[netId].numGroups)); CUDA_CHECK_ERRORS(cudaMalloc((void**) &dest->grpACh, sizeof(float) * networkConfigs[netId].numGroups)); CUDA_CHECK_ERRORS(cudaMalloc((void**) &dest->grpNE, sizeof(float) * networkConfigs[netId].numGroups)); } CUDA_CHECK_ERRORS(cudaMemcpy(dest->grpDA, src->grpDA, sizeof(float) * networkConfigs[netId].numGroups, kind)); CUDA_CHECK_ERRORS(cudaMemcpy(dest->grp5HT, src->grp5HT, sizeof(float) * networkConfigs[netId].numGroups, kind)); CUDA_CHECK_ERRORS(cudaMemcpy(dest->grpACh, src->grpACh, sizeof(float) * networkConfigs[netId].numGroups, kind)); CUDA_CHECK_ERRORS(cudaMemcpy(dest->grpNE, src->grpNE, sizeof(float) * networkConfigs[netId].numGroups, kind)); if (lGrpId < 0) { if (allocateMem) { assert(dest->memType == GPU_MEM && !dest->allocated); CUDA_CHECK_ERRORS(cudaMalloc((void**) &dest->grpDABuffer, sizeof(float) * 1000 * networkConfigs[netId].numGroups)); CUDA_CHECK_ERRORS(cudaMalloc((void**) &dest->grp5HTBuffer, sizeof(float) * 1000 * networkConfigs[netId].numGroups)); CUDA_CHECK_ERRORS(cudaMalloc((void**) &dest->grpAChBuffer, sizeof(float) * 1000 * networkConfigs[netId].numGroups)); CUDA_CHECK_ERRORS(cudaMalloc((void**) &dest->grpNEBuffer, sizeof(float) * 1000 * networkConfigs[netId].numGroups)); } CUDA_CHECK_ERRORS(cudaMemcpy(dest->grpDABuffer, src->grpDABuffer, sizeof(float) * 1000 * networkConfigs[netId].numGroups, kind)); CUDA_CHECK_ERRORS(cudaMemcpy(dest->grp5HTBuffer, src->grp5HTBuffer, sizeof(float) * 1000 * networkConfigs[netId].numGroups, kind)); CUDA_CHECK_ERRORS(cudaMemcpy(dest->grpAChBuffer, src->grpAChBuffer, sizeof(float) * 1000 * networkConfigs[netId].numGroups, kind)); CUDA_CHECK_ERRORS(cudaMemcpy(dest->grpNEBuffer, src->grpNEBuffer, sizeof(float) * 1000 * networkConfigs[netId].numGroups, kind)); } else { assert(!allocateMem); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->grpDABuffer[lGrpId * 1000], &src->grpDABuffer[lGrpId * 1000], sizeof(float) * 1000, kind)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->grp5HTBuffer[lGrpId * 1000], &src->grp5HTBuffer[lGrpId * 1000], sizeof(float) * 1000, kind)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->grpAChBuffer[lGrpId * 1000], &src->grpAChBuffer[lGrpId * 1000], sizeof(float) * 1000, kind)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->grpNEBuffer[lGrpId * 1000], &src->grpNEBuffer[lGrpId * 1000], sizeof(float) * 1000, kind)); } } /*! * \brief this function allocates device (GPU) memory sapce and copies neural parameters to it * * This function: * (allocate and) copy Izh_a, Izh_b, Izh_c, Izh_d * initialize baseFiringInv * (allocate and) copy baseFiring, baseFiringInv * * This funcion is only called by copyNeuronState(). Only copying direction from host to device is required. 
* * \param[in] netId the id of a local network, which is the same as the device (GPU) id * \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied * \param[in] dest pointer to runtime data desitnation * \param[in] allocateMem a flag indicates whether allocating memory space before copying * * \sa copyNeuronState * \since v3.0 */ void SNN::copyNeuronParameters(int netId, int lGrpId, RuntimeData* dest, cudaMemcpyKind kind, bool allocateMem) { checkAndSetGPUDevice(netId); assert(kind == cudaMemcpyHostToDevice); int ptrPos, length; // check that the destination pointer is properly allocated.. checkDestSrcPtrs(dest, &managerRuntimeData, cudaMemcpyHostToDevice, allocateMem, lGrpId, 0); // check that the destination pointer is properly allocated.. // cannot use checkDestSrcPtrs here because src pointer would be NULL if (dest->allocated && allocateMem) { KERNEL_ERROR("GPU Memory already allocated..."); exitSimulation(1); } // when allocating we are allocating the memory.. we need to do it completely... to avoid memory fragmentation.. if (allocateMem) { assert(lGrpId == ALL); assert(dest->Izh_a == NULL); assert(dest->Izh_b == NULL); assert(dest->Izh_c == NULL); assert(dest->Izh_d == NULL); assert(dest->Izh_C == NULL); assert(dest->Izh_k == NULL); assert(dest->Izh_vr == NULL); assert(dest->Izh_vt == NULL); assert(dest->Izh_vpeak == NULL); assert(dest->lif_tau_m == NULL); //LIF parameters assert(dest->lif_tau_ref == NULL); assert(dest->lif_tau_ref_c == NULL); assert(dest->lif_vTh == NULL); assert(dest->lif_vReset == NULL); assert(dest->lif_gain == NULL); assert(dest->lif_bias == NULL); } if(lGrpId == ALL) { ptrPos = 0; length = networkConfigs[netId].numNReg; } else { ptrPos = groupConfigs[netId][lGrpId].lStartN; length = groupConfigs[netId][lGrpId].numN; } if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->Izh_a, sizeof(float) * length)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->Izh_a[ptrPos], &(managerRuntimeData.Izh_a[ptrPos]), sizeof(float) * length, cudaMemcpyHostToDevice)); if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->Izh_b, sizeof(float) * length)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->Izh_b[ptrPos], &(managerRuntimeData.Izh_b[ptrPos]), sizeof(float) * length, cudaMemcpyHostToDevice)); if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->Izh_c, sizeof(float) * length)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->Izh_c[ptrPos], &(managerRuntimeData.Izh_c[ptrPos]), sizeof(float) * length, cudaMemcpyHostToDevice)); if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->Izh_d, sizeof(float) * length)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->Izh_d[ptrPos], &(managerRuntimeData.Izh_d[ptrPos]), sizeof(float) * length, cudaMemcpyHostToDevice)); if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->Izh_C, sizeof(float) * length)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->Izh_C[ptrPos], &(managerRuntimeData.Izh_C[ptrPos]), sizeof(float) * length, cudaMemcpyHostToDevice)); if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->Izh_k, sizeof(float) * length)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->Izh_k[ptrPos], &(managerRuntimeData.Izh_k[ptrPos]), sizeof(float) * length, cudaMemcpyHostToDevice)); if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->Izh_vr, sizeof(float) * length)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->Izh_vr[ptrPos], &(managerRuntimeData.Izh_vr[ptrPos]), sizeof(float) * length, cudaMemcpyHostToDevice)); if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->Izh_vt, sizeof(float) * length)); 
CUDA_CHECK_ERRORS(cudaMemcpy(&dest->Izh_vt[ptrPos], &(managerRuntimeData.Izh_vt[ptrPos]), sizeof(float) * length, cudaMemcpyHostToDevice)); if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->Izh_vpeak, sizeof(float) * length)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->Izh_vpeak[ptrPos], &(managerRuntimeData.Izh_vpeak[ptrPos]), sizeof(float) * length, cudaMemcpyHostToDevice)); //LIF parameters if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->lif_tau_m, sizeof(int) * length)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->lif_tau_m[ptrPos], &(managerRuntimeData.lif_tau_m[ptrPos]), sizeof(int) * length, cudaMemcpyHostToDevice)); if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->lif_tau_ref, sizeof(int) * length)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->lif_tau_ref[ptrPos], &(managerRuntimeData.lif_tau_ref[ptrPos]), sizeof(int) * length, cudaMemcpyHostToDevice)); if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->lif_tau_ref_c, sizeof(int) * length)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->lif_tau_ref_c[ptrPos], &(managerRuntimeData.lif_tau_ref_c[ptrPos]), sizeof(int) * length, cudaMemcpyHostToDevice)); if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->lif_vTh, sizeof(float) * length)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->lif_vTh[ptrPos], &(managerRuntimeData.lif_vTh[ptrPos]), sizeof(float) * length, cudaMemcpyHostToDevice)); if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->lif_vReset, sizeof(float) * length)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->lif_vReset[ptrPos], &(managerRuntimeData.lif_vReset[ptrPos]), sizeof(float) * length, cudaMemcpyHostToDevice)); if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->lif_gain, sizeof(float) * length)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->lif_gain[ptrPos], &(managerRuntimeData.lif_gain[ptrPos]), sizeof(float) * length, cudaMemcpyHostToDevice)); if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->lif_bias, sizeof(float) * length)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->lif_bias[ptrPos], &(managerRuntimeData.lif_bias[ptrPos]), sizeof(float) * length, cudaMemcpyHostToDevice)); // pre-compute baseFiringInv for fast computation on GPUs. if (sim_with_homeostasis) { float* baseFiringInv = new float[length]; for(int nid = 0; nid < length; nid++) { if (managerRuntimeData.baseFiring[nid] != 0.0f) baseFiringInv[nid] = 1.0f / managerRuntimeData.baseFiring[ptrPos + nid]; else baseFiringInv[nid] = 0.0; } if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->baseFiringInv, sizeof(float) * length)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->baseFiringInv[ptrPos], baseFiringInv, sizeof(float) * length, cudaMemcpyHostToDevice)); if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->baseFiring, sizeof(float) * length)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->baseFiring[ptrPos], managerRuntimeData.baseFiring, sizeof(float) * length, cudaMemcpyHostToDevice)); delete [] baseFiringInv; } } /*! * \brief this function allocates device (GPU) memory sapce and copies short-term plasticity (STP) state to it * * This function: * initialize STP_Pitch * (allocate and) copy stpu, stpx * * This funcion is called by allocateSNN_GPU() and fetchSTPState(). 
It supports bi-directional copying
 *
 * \param[in] netId the id of a local network, which is the same as the device (GPU) id
 * \param[in] lGrpId the local group id in a local network, which specifies the group(s) to be copied
 * \param[in] dest pointer to runtime data destination
 * \param[in] src pointer to runtime data source
 * \param[in] kind the direction of copying
 * \param[in] allocateMem a flag indicating whether to allocate memory space before copying
 *
 * \sa allocateSNN_GPU fetchSTPState
 * \since v3.0
 */
void SNN::copySTPState(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, cudaMemcpyKind kind, bool allocateMem) {
	checkAndSetGPUDevice(netId);
	checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, 0); // check that the destination pointer is properly allocated..

	// STP is an optional feature, so do an additional check on the memory pointers
	if(allocateMem) {
		assert(dest->stpu == NULL);
		assert(dest->stpx == NULL);
	} else {
		assert(dest->stpu != NULL);
		assert(dest->stpx != NULL);
	}
	assert(src->stpu != NULL);
	assert(src->stpx != NULL);

	size_t STP_Pitch;
	size_t widthInBytes = sizeof(float) * networkConfigs[netId].numN;

	// if(allocateMem) CUDA_CHECK_ERRORS( cudaMalloc( (void**) &dest->stpu, sizeof(float)*networkConfigs[0].numN));
	// CUDA_CHECK_ERRORS( cudaMemcpy( &dest->stpu[0], &src->stpu[0], sizeof(float)*networkConfigs[0].numN, kind));

	// if(allocateMem) CUDA_CHECK_ERRORS( cudaMalloc( (void**) &dest->stpx, sizeof(float)*networkConfigs[0].numN));
	// CUDA_CHECK_ERRORS( cudaMemcpy( &dest->stpx[0], &src->stpx[0], sizeof(float)*networkConfigs[0].numN, kind));

	// allocate the stpu and stpx variables
	if (allocateMem)
		CUDA_CHECK_ERRORS(cudaMallocPitch((void**)&dest->stpu, &networkConfigs[netId].STP_Pitch, widthInBytes, networkConfigs[netId].maxDelay + 1));
	if (allocateMem)
		CUDA_CHECK_ERRORS(cudaMallocPitch((void**)&dest->stpx, &STP_Pitch, widthInBytes, networkConfigs[netId].maxDelay + 1));

	assert(networkConfigs[netId].STP_Pitch > 0);
	assert(STP_Pitch > 0); // stp_pitch should be greater than zero
	assert(STP_Pitch == networkConfigs[netId].STP_Pitch); // we want the same pitch for stpu and stpx
	assert(networkConfigs[netId].STP_Pitch >= widthInBytes); // stp_pitch should be greater than the width

	// convert the pitch value to multiples of float
	assert(networkConfigs[netId].STP_Pitch % (sizeof(float)) == 0);
	if (allocateMem)
		networkConfigs[netId].STP_Pitch = networkConfigs[netId].STP_Pitch / sizeof(float);

	// fprintf(stderr, "STP_Pitch = %ld, STP_witdhInBytes = %d\n", networkConfigs[0].STP_Pitch, widthInBytes);

	float* tmp_stp = new float[networkConfigs[netId].numN];
	// copy the already generated values of stpx and stpu to the GPU
	for(int t = 0; t < networkConfigs[netId].maxDelay + 1; t++) {
		if (kind == cudaMemcpyHostToDevice) {
			// stpu on the CPU may be laid out in a different format; we convert it here
			// to the layout expected by the GPU STP_U and STP_X implementation..
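			// Note: on the host, stpu/stpx are indexed via STP_BUF_POS(n, t, maxDelay), whereas on the
			// device each time step t occupies one pitched row of STP_Pitch floats, so element (n, t)
			// ends up at stpu[t * STP_Pitch + n]. The loops below repack one row at a time through the
			// tmp_stp staging buffer.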
for (int n = 0; n < networkConfigs[netId].numN; n++) { int idx = STP_BUF_POS(n, t, glbNetworkConfig.maxDelay); tmp_stp[n] = managerRuntimeData.stpu[idx]; //assert(tmp_stp[n] == 0.0f); // STP is not enabled for all groups } CUDA_CHECK_ERRORS(cudaMemcpy(&dest->stpu[t * networkConfigs[netId].STP_Pitch], tmp_stp, sizeof(float) * networkConfigs[netId].numN, cudaMemcpyHostToDevice)); for (int n = 0; n < networkConfigs[netId].numN; n++) { int idx = STP_BUF_POS(n, t, glbNetworkConfig.maxDelay); tmp_stp[n] = managerRuntimeData.stpx[idx]; //assert(tmp_stp[n] == 1.0f); // STP is not enabled for all groups } CUDA_CHECK_ERRORS(cudaMemcpy(&dest->stpx[t * networkConfigs[netId].STP_Pitch], tmp_stp, sizeof(float) * networkConfigs[netId].numN, cudaMemcpyHostToDevice)); } else { CUDA_CHECK_ERRORS(cudaMemcpy(tmp_stp, &dest->stpu[t * networkConfigs[netId].STP_Pitch], sizeof(float) * networkConfigs[netId].numN, cudaMemcpyDeviceToHost)); for (int n = 0; n < networkConfigs[netId].numN; n++) managerRuntimeData.stpu[STP_BUF_POS(n, t, glbNetworkConfig.maxDelay)] = tmp_stp[n]; CUDA_CHECK_ERRORS(cudaMemcpy(tmp_stp, &dest->stpx[t * networkConfigs[netId].STP_Pitch], sizeof(float) * networkConfigs[netId].numN, cudaMemcpyDeviceToHost)); for (int n = 0; n < networkConfigs[netId].numN; n++) managerRuntimeData.stpx[STP_BUF_POS(n, t, glbNetworkConfig.maxDelay)] = tmp_stp[n]; } } delete [] tmp_stp; } /*! * \brief This function copies networkConfig form host to device * * This function: * copy networkConfig * * \param[in] netId the id of a local network whose networkConfig will be copied to device (GPU) memory * * \since v4.0 */ void SNN::copyNetworkConfig(int netId, cudaMemcpyKind kind) { checkAndSetGPUDevice(netId); assert(kind == cudaMemcpyHostToDevice); CUDA_CHECK_ERRORS(cudaMemcpyToSymbol(networkConfigGPU, &networkConfigs[netId], sizeof(NetworkConfigRT), 0, cudaMemcpyHostToDevice)); } /*! * \brief This function copies groupConfigs form host to device * * This function: * copy groupConfigs * * \param[in] netId the id of a local network whose groupConfigs will be copied to device (GPU) memory * * \since v4.0 */ void SNN::copyGroupConfigs(int netId){ checkAndSetGPUDevice(netId); CUDA_CHECK_ERRORS(cudaMemcpyToSymbol(groupConfigsGPU, groupConfigs[netId], (networkConfigs[netId].numGroupsAssigned) * sizeof(GroupConfigRT), 0, cudaMemcpyHostToDevice)); } /*! * \brief this function copy weight state in device (GPU) memory sapce to main (CPU) memory space * * This function: * copy wt, wtChange synSpikeTime * * This funcion is only called by fetchWeightState(). Only copying direction from device to host is required. * * \param[in] netId the id of a local network, which is the same as the device (GPU) id * \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied * * \sa fetchWeightState * \since v4.0 */ void SNN::copyWeightState(int netId, int lGrpId, cudaMemcpyKind kind) { checkAndSetGPUDevice(netId); checkDestSrcPtrs(&managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, lGrpId, 0); // check that the destination pointer is properly allocated.. 
assert(kind == cudaMemcpyDeviceToHost); int lengthSyn, posSyn; // first copy pre-connections info copyPreConnectionInfo(netId, lGrpId, &managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false); if (lGrpId == ALL) { lengthSyn = networkConfigs[netId].numPreSynNet; posSyn = 0; } else { lengthSyn = 0; for (int lNId = groupConfigs[netId][lGrpId].lStartN; lNId <= groupConfigs[netId][lGrpId].lEndN; lNId++) lengthSyn += managerRuntimeData.Npre[lNId]; posSyn = managerRuntimeData.cumulativePre[groupConfigs[netId][lGrpId].lStartN]; } assert(posSyn < networkConfigs[netId].numPreSynNet || networkConfigs[netId].numPreSynNet == 0); assert(lengthSyn <= networkConfigs[netId].numPreSynNet); CUDA_CHECK_ERRORS(cudaMemcpy(&managerRuntimeData.wt[posSyn], &runtimeData[netId].wt[posSyn], sizeof(float) * lengthSyn, cudaMemcpyDeviceToHost)); // copy firing time for individual synapses //CUDA_CHECK_ERRORS(cudaMemcpy(&managerRuntimeData.synSpikeTime[cumPos_syn], &runtimeData[netId].synSpikeTime[cumPos_syn], sizeof(int) * length_wt, cudaMemcpyDeviceToHost)); if ((!sim_with_fixedwts) || sim_with_stdp) { // copy synaptic weight derivative CUDA_CHECK_ERRORS(cudaMemcpy(&managerRuntimeData.wtChange[posSyn], &runtimeData[netId].wtChange[posSyn], sizeof(float) * lengthSyn, cudaMemcpyDeviceToHost)); } } /*! * \brief this function allocates device (GPU) memory sapce and copies variables related to syanpses to it * * This function: * (allocate and) copy wt, wtChange, maxSynWt * * * \param[in] netId the id of a local network, which is the same as the device (GPU) id * \param[in] dest pointer to runtime data desitnation * \param[in] src pointer to runtime data source * \param[in] allocateMem a flag indicates whether allocating memory space before copying * * \sa allocateSNN_GPU * \since v4.0 */ void SNN::copySynapseState(int netId, RuntimeData* dest, RuntimeData* src, cudaMemcpyKind kind, bool allocateMem) { checkAndSetGPUDevice(netId); checkDestSrcPtrs(dest, src, kind, allocateMem, ALL, 0); // check that the destination pointer is properly allocated.. assert(networkConfigs[netId].numPreSynNet > 0); // synaptic information based if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->wt, sizeof(float) * networkConfigs[netId].numPreSynNet)); CUDA_CHECK_ERRORS(cudaMemcpy(dest->wt, src->wt, sizeof(float) * networkConfigs[netId].numPreSynNet, kind)); // we don't need these data structures if the network doesn't have any plastic synapses at all // they show up in gpuUpdateLTP() and updateSynapticWeights(), two functions that do not get called if // sim_with_fixedwts is set if (!sim_with_fixedwts) { // synaptic weight derivative if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->wtChange, sizeof(float) * networkConfigs[netId].numPreSynNet)); CUDA_CHECK_ERRORS(cudaMemcpy(dest->wtChange, src->wtChange, sizeof(float) * networkConfigs[netId].numPreSynNet, kind)); // synaptic weight maximum value if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->maxSynWt, sizeof(float) * networkConfigs[netId].numPreSynNet)); CUDA_CHECK_ERRORS(cudaMemcpy(dest->maxSynWt, src->maxSynWt, sizeof(float) * networkConfigs[netId].numPreSynNet, kind)); } } /*! 
* \brief this function allocates device (GPU) memory sapce and copies auxiliary runtime data to it * * This function: * (allocate and) reset spikeGenBits, poissonFireRate * initialize I_setLength, I_setPitch; (allocate and) reset I_set * (allocate and) copy synSpikeTime, lastSpikeTime * (allocate and) copy nSpikeCnt * (allocate and) copy grpIds, connIdsPreIdx * (allocate and) copy firingTableD1, firingTableD2 * This funcion is only called by allocateSNN_GPU. Therefore, only copying direction from host to device is required * * \param[in] netId the id of local network, which is the same as device (GPU) id * \param[in] dest pointer to runtime data desitnation * \param[in] allocateMem a flag indicates whether allocating memory space before copying * * \sa allocateSNN_GPU * \since v4.0 */ void SNN::copyAuxiliaryData(int netId, int lGrpId, RuntimeData* dest, cudaMemcpyKind kind, bool allocateMem) { checkAndSetGPUDevice(netId); checkDestSrcPtrs(dest, &managerRuntimeData, cudaMemcpyHostToDevice, allocateMem, ALL, 0); // check that the destination pointer is properly allocated.. assert(kind == cudaMemcpyHostToDevice); assert(networkConfigs[netId].numN > 0); if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->spikeGenBits, sizeof(int) * (networkConfigs[netId].numNSpikeGen / 32 + 1))); CUDA_CHECK_ERRORS(cudaMemset(dest->spikeGenBits, 0, sizeof(int) * (networkConfigs[netId].numNSpikeGen / 32 + 1))); // allocate the poisson neuron poissonFireRate if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->poissonFireRate, sizeof(float) * networkConfigs[netId].numNPois)); CUDA_CHECK_ERRORS(cudaMemset(dest->poissonFireRate, 0, sizeof(float) * networkConfigs[netId].numNPois)); // synaptic auxiliary data // I_set: a bit vector indicates which synapse got a spike if(allocateMem) { networkConfigs[netId].I_setLength = ceil(((networkConfigs[netId].maxNumPreSynN) / 32.0f)); CUDA_CHECK_ERRORS(cudaMallocPitch((void**)&dest->I_set, &networkConfigs[netId].I_setPitch, sizeof(int) * networkConfigs[netId].numNReg, networkConfigs[netId].I_setLength)); } assert(networkConfigs[netId].I_setPitch > 0 || networkConfigs[netId].maxNumPreSynN == 0); CUDA_CHECK_ERRORS(cudaMemset(dest->I_set, 0, networkConfigs[netId].I_setPitch * networkConfigs[netId].I_setLength)); // synSpikeTime: an array indicates the last time when a synapse got a spike if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->synSpikeTime, sizeof(int) * networkConfigs[netId].numPreSynNet)); CUDA_CHECK_ERRORS(cudaMemcpy(dest->synSpikeTime, managerRuntimeData.synSpikeTime, sizeof(int) * networkConfigs[netId].numPreSynNet, cudaMemcpyHostToDevice)); // neural auxiliary data // lastSpikeTime: an array indicates the last time of a neuron emitting a spike // neuron firing time if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->lastSpikeTime, sizeof(int) * networkConfigs[netId].numNAssigned)); CUDA_CHECK_ERRORS(cudaMemcpy(dest->lastSpikeTime, managerRuntimeData.lastSpikeTime, sizeof(int) * networkConfigs[netId].numNAssigned, cudaMemcpyHostToDevice)); // auxiliary data for recording spike count of each neuron copyNeuronSpikeCount(netId, lGrpId, dest, &managerRuntimeData, cudaMemcpyHostToDevice, true, 0); // quick lookup array for local group ids if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->grpIds, sizeof(short int) * networkConfigs[netId].numNAssigned)); CUDA_CHECK_ERRORS(cudaMemcpy(dest->grpIds, managerRuntimeData.grpIds, sizeof(short int) * networkConfigs[netId].numNAssigned, cudaMemcpyHostToDevice)); // quick lookup array for 
conn ids if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->connIdsPreIdx, sizeof(short int) * networkConfigs[netId].numPreSynNet)); CUDA_CHECK_ERRORS(cudaMemcpy(dest->connIdsPreIdx, managerRuntimeData.connIdsPreIdx, sizeof(short int) * networkConfigs[netId].numPreSynNet, cudaMemcpyHostToDevice)); // firing table if(allocateMem) { assert(dest->firingTableD1 == NULL); assert(dest->firingTableD2 == NULL); } // allocate 1ms firing table if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->firingTableD1, sizeof(int) * networkConfigs[netId].maxSpikesD1)); if (networkConfigs[netId].maxSpikesD1 > 0) CUDA_CHECK_ERRORS(cudaMemcpy(dest->firingTableD1, managerRuntimeData.firingTableD1, sizeof(int) * networkConfigs[netId].maxSpikesD1, cudaMemcpyHostToDevice)); // allocate 2+ms firing table if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->firingTableD2, sizeof(int) * networkConfigs[netId].maxSpikesD2)); if (networkConfigs[netId].maxSpikesD2 > 0) CUDA_CHECK_ERRORS(cudaMemcpy(dest->firingTableD2, managerRuntimeData.firingTableD2, sizeof(int) * networkConfigs[netId].maxSpikesD2, cudaMemcpyHostToDevice)); // allocate external 1ms firing table if(allocateMem) { void* devPtr; CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->extFiringTableD1, sizeof(int*) * networkConfigs[netId].numGroups)); CUDA_CHECK_ERRORS(cudaMemset(dest->extFiringTableD1, 0 /* NULL */, sizeof(int*) * networkConfigs[netId].numGroups)); for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) { if (groupConfigs[netId][lGrpId].hasExternalConnect) { CUDA_CHECK_ERRORS(cudaMalloc((void**)&devPtr, sizeof(int) * groupConfigs[netId][lGrpId].numN * NEURON_MAX_FIRING_RATE)); CUDA_CHECK_ERRORS(cudaMemset(devPtr, 0, sizeof(int) * groupConfigs[netId][lGrpId].numN * NEURON_MAX_FIRING_RATE)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->extFiringTableD1[lGrpId], &devPtr, sizeof(int*), cudaMemcpyHostToDevice)); } } } // allocate external 2+ms firing table if(allocateMem) { void* devPtr; CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->extFiringTableD2, sizeof(int*) * networkConfigs[netId].numGroups)); CUDA_CHECK_ERRORS(cudaMemset(dest->extFiringTableD2, 0 /* NULL */, sizeof(int*) * networkConfigs[netId].numGroups)); for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) { if (groupConfigs[netId][lGrpId].hasExternalConnect) { CUDA_CHECK_ERRORS(cudaMalloc((void**)&devPtr, sizeof(int) * groupConfigs[netId][lGrpId].numN * NEURON_MAX_FIRING_RATE)); CUDA_CHECK_ERRORS(cudaMemset(devPtr, 0, sizeof(int) * groupConfigs[netId][lGrpId].numN * NEURON_MAX_FIRING_RATE)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->extFiringTableD2[lGrpId], &devPtr, sizeof(int*), cudaMemcpyHostToDevice)); } } } // allocate external 1ms firing table index if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->extFiringTableEndIdxD1, sizeof(int) * networkConfigs[netId].numGroups)); CUDA_CHECK_ERRORS(cudaMemset(dest->extFiringTableEndIdxD1, 0, sizeof(int) * networkConfigs[netId].numGroups)); // allocate external 2+ms firing table index if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->extFiringTableEndIdxD2, sizeof(int) * networkConfigs[netId].numGroups)); CUDA_CHECK_ERRORS(cudaMemset(dest->extFiringTableEndIdxD2, 0, sizeof(int) * networkConfigs[netId].numGroups)); } void SNN::copyGrpIdsLookupArray(int netId, cudaMemcpyKind kind) { checkAndSetGPUDevice(netId); checkDestSrcPtrs(&managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, ALL, 0);// check that the destination pointer is properly allocated.. 
assert(kind == cudaMemcpyDeviceToHost); CUDA_CHECK_ERRORS(cudaMemcpy(managerRuntimeData.grpIds, runtimeData[netId].grpIds, sizeof(short int) * networkConfigs[netId].numNAssigned, cudaMemcpyDeviceToHost)); } void SNN::copyConnIdsLookupArray(int netId, cudaMemcpyKind kind) { checkAndSetGPUDevice(netId); checkDestSrcPtrs(&managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, ALL, 0);// check that the destination pointer is properly allocated.. assert(kind == cudaMemcpyDeviceToHost); CUDA_CHECK_ERRORS(cudaMemcpy(managerRuntimeData.connIdsPreIdx, runtimeData[netId].connIdsPreIdx, sizeof(short int) * networkConfigs[netId].numPreSynNet, cudaMemcpyDeviceToHost)); } void SNN::copyLastSpikeTime(int netId, cudaMemcpyKind kind) { checkAndSetGPUDevice(netId); checkDestSrcPtrs(&managerRuntimeData, &runtimeData[netId], cudaMemcpyDeviceToHost, false, ALL, 0); // check that the destination pointer is properly allocated.. assert(kind == cudaMemcpyDeviceToHost); CUDA_CHECK_ERRORS(cudaMemcpy(managerRuntimeData.lastSpikeTime, runtimeData[netId].lastSpikeTime, sizeof(int) * networkConfigs[netId].numN, cudaMemcpyDeviceToHost)); } // spikeGeneratorUpdate on GPUs.. void SNN::spikeGeneratorUpdate_GPU(int netId) { assert(runtimeData[netId].allocated); assert(runtimeData[netId].memType == GPU_MEM); checkAndSetGPUDevice(netId); // update the random number for poisson spike generator (spikes generated by rate) if ((networkConfigs[netId].numNPois > 0) && (runtimeData[netId].gpuRandGen != NULL)) { curandGenerateUniform(runtimeData[netId].gpuRandGen, runtimeData[netId].randNum, networkConfigs[netId].numNPois); } // Use spike generators (user-defined callback function) if (networkConfigs[netId].numNSpikeGen > 0) { assert(managerRuntimeData.spikeGenBits != NULL); // reset the bit status of the spikeGenBits... memset(managerRuntimeData.spikeGenBits, 0, sizeof(int) * (networkConfigs[netId].numNSpikeGen / 32 + 1)); // fill spikeGenBits from SpikeBuffer fillSpikeGenBits(netId); // copy the spikeGenBits from the manager to the GPU.. 
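		// Note: spikeGenBits packs one bit per spike-generator neuron, so the transfer size below is
		// (numNSpikeGen / 32 + 1) 32-bit words, matching the memset and fillSpikeGenBits() call above.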
CUDA_CHECK_ERRORS(cudaMemcpy(runtimeData[netId].spikeGenBits, managerRuntimeData.spikeGenBits, sizeof(int) * (networkConfigs[netId].numNSpikeGen / 32 + 1), cudaMemcpyHostToDevice)); } } void SNN::findFiring_GPU(int netId) { assert(runtimeData[netId].allocated); assert(runtimeData[netId].memType == GPU_MEM); checkAndSetGPUDevice(netId); kernel_findFiring<<<NUM_BLOCKS, NUM_THREADS>>>(simTimeMs, simTime); CUDA_GET_LAST_ERROR("findFiring kernel failed\n"); } void SNN::updateTimingTable_GPU(int netId) { assert(runtimeData[netId].allocated); assert(runtimeData[netId].memType == GPU_MEM); checkAndSetGPUDevice(netId); kernel_updateTimeTable<<<NUM_BLOCKS, NUM_THREADS>>>(simTimeMs); CUDA_GET_LAST_ERROR("timing Table update kernel failed\n"); } void SNN::doCurrentUpdateD2_GPU(int netId) { assert(runtimeData[netId].allocated); assert(runtimeData[netId].memType == GPU_MEM); checkAndSetGPUDevice(netId); if (networkConfigs[netId].maxDelay > 1) { kernel_doCurrentUpdateD2<<<NUM_BLOCKS, NUM_THREADS>>>(simTimeMs, simTimeSec, simTime); CUDA_GET_LAST_ERROR("Kernel execution failed"); } } void SNN::doCurrentUpdateD1_GPU(int netId) { assert(runtimeData[netId].allocated); assert(runtimeData[netId].memType == GPU_MEM); checkAndSetGPUDevice(netId); kernel_doCurrentUpdateD1<<<NUM_BLOCKS, NUM_THREADS>>>(simTimeMs, simTimeSec, simTime); CUDA_GET_LAST_ERROR("Kernel execution failed"); } void SNN::doSTPUpdateAndDecayCond_GPU(int netId) { assert(runtimeData[netId].memType == GPU_MEM); checkAndSetGPUDevice(netId); if (sim_with_stp || sim_with_conductances) { kernel_STPUpdateAndDecayConductances<<<NUM_BLOCKS, NUM_THREADS>>>(simTimeMs, simTimeSec, simTime); CUDA_GET_LAST_ERROR("STP update\n"); } } void SNN::initGPU(int netId) { checkAndSetGPUDevice(netId); assert(runtimeData[netId].allocated); kernel_initGPUMemory<<<NUM_BLOCKS, NUM_THREADS>>>(); CUDA_GET_LAST_ERROR("initGPUMemory kernel failed\n"); } void SNN::deleteRuntimeData_GPU(int netId) { assert(runtimeData[netId].memType == GPU_MEM); checkAndSetGPUDevice(netId); // cudaFree all device pointers CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].voltage) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].nextVoltage)); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].recovery) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].current) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].extCurrent) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].curSpike)); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].Npre) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].Npre_plastic) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].Npre_plasticInv) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].Npost) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].cumulativePost) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].cumulativePre) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].synSpikeTime) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].wt) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].wtChange) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].maxSynWt) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].nSpikeCnt) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].avgFiring) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].baseFiring) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].baseFiringInv) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].grpDA) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].grp5HT) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].grpACh) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].grpNE) ); CUDA_CHECK_ERRORS( 
cudaFree(runtimeData[netId].grpDABuffer) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].grp5HTBuffer) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].grpAChBuffer) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].grpNEBuffer) ); if (networkConfigs[netId].sim_with_nm) { CUDA_CHECK_ERRORS(cudaFree(runtimeData[netId].nVBuffer)); CUDA_CHECK_ERRORS(cudaFree(runtimeData[netId].nUBuffer)); CUDA_CHECK_ERRORS(cudaFree(runtimeData[netId].nIBuffer)); } CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].grpIds) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].Izh_a) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].Izh_b) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].Izh_c) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].Izh_d) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].Izh_C)); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].Izh_k)); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].Izh_vr)); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].Izh_vt)); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].Izh_vpeak)); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].gAMPA) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].lif_tau_m) ); //LIF parameters CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].lif_tau_ref) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].lif_tau_ref_c) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].lif_vTh) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].lif_vReset) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].lif_gain) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].lif_bias) ); if (sim_with_NMDA_rise) { CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].gNMDA_r) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].gNMDA_d) ); } else { CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].gNMDA) ); } CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].gGABAa) ); if (sim_with_GABAb_rise) { CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].gGABAb_r) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].gGABAb_d) ); } else { CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].gGABAb) ); } CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].stpu) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].stpx) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].connIdsPreIdx) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].groupIdInfo) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].neuronAllocation) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].postDelayInfo) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].postSynapticIds) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].preSynapticIds) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].I_set) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].poissonFireRate) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].lastSpikeTime) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].spikeGenBits) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].firingTableD2) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].firingTableD1) ); int** tempPtrs; tempPtrs = new int*[networkConfigs[netId].numGroups]; // fetch device memory address stored in extFiringTableD2 CUDA_CHECK_ERRORS( cudaMemcpy(tempPtrs, runtimeData[netId].extFiringTableD2, sizeof(int*) * networkConfigs[netId].numGroups, cudaMemcpyDeviceToHost) ); for (int i = 0; i < networkConfigs[netId].numGroups; i++) CUDA_CHECK_ERRORS( cudaFree(tempPtrs[i]) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].extFiringTableD2) ); // fetch device memory address stored in extFiringTableD1 CUDA_CHECK_ERRORS( cudaMemcpy(tempPtrs, runtimeData[netId].extFiringTableD1, sizeof(int*) * networkConfigs[netId].numGroups, 
cudaMemcpyDeviceToHost) ); for (int i = 0; i < networkConfigs[netId].numGroups; i++) CUDA_CHECK_ERRORS( cudaFree(tempPtrs[i]) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].extFiringTableD1) ); delete [] tempPtrs; CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].extFiringTableEndIdxD2) ); CUDA_CHECK_ERRORS( cudaFree(runtimeData[netId].extFiringTableEndIdxD1) ); // delete random numbr generator on GPU(s) // Note: RNG_rand48 objects allocate device memory if (runtimeData[netId].gpuRandGen != NULL) curandDestroyGenerator(runtimeData[netId].gpuRandGen); runtimeData[netId].gpuRandGen = NULL; if (runtimeData[netId].randNum != NULL) CUDA_CHECK_ERRORS(cudaFree(runtimeData[netId].randNum)); runtimeData[netId].randNum = NULL; } void SNN::globalStateUpdate_C_GPU(int netId) { assert(runtimeData[netId].memType == GPU_MEM); checkAndSetGPUDevice(netId); kernel_conductanceUpdate << <NUM_BLOCKS, NUM_THREADS >> > (simTimeMs, simTimeSec, simTime); CUDA_GET_LAST_ERROR("kernel_conductanceUpdate failed"); // use memset to reset I_set for debugging, resume it later //CUDA_CHECK_ERRORS(cudaMemset(runtimeData[netId].I_set, 0, networkConfigs[netId].I_setPitch * networkConfigs[netId].I_setLength)); } void SNN::globalStateUpdate_N_GPU(int netId) { assert(runtimeData[netId].memType == GPU_MEM); checkAndSetGPUDevice(netId); bool lastIteration = false; for (int j = 1; j <= networkConfigs[netId].simNumStepsPerMs; j++) { if (j == networkConfigs[netId].simNumStepsPerMs) lastIteration = true; // update all neuron state (i.e., voltage and recovery), including homeostasis kernel_neuronStateUpdate << <NUM_BLOCKS, NUM_THREADS >> > (simTimeMs, lastIteration); CUDA_GET_LAST_ERROR("Kernel execution failed"); // the above kernel should end with a syncthread statement to be on the safe side CUDA_CHECK_ERRORS(cudaMemcpy(&runtimeData[netId].voltage[0], &runtimeData[netId].nextVoltage[0], sizeof(float) * networkConfigs[netId].numNReg, cudaMemcpyDeviceToDevice)); } } void SNN::globalStateUpdate_G_GPU(int netId) { assert(runtimeData[netId].memType == GPU_MEM); checkAndSetGPUDevice(netId); // update all group state (i.e., concentration of neuronmodulators) // currently support 4 x 128 groups kernel_groupStateUpdate<<<4, NUM_THREADS>>>(simTimeMs); CUDA_GET_LAST_ERROR("Kernel execution failed"); } void SNN::assignPoissonFiringRate_GPU(int netId) { assert(runtimeData[netId].memType == GPU_MEM); checkAndSetGPUDevice(netId); for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroups; lGrpId++) { // given group of neurons belong to the poisson group.... 
if (groupConfigs[netId][lGrpId].isSpikeGenerator) { int lNId = groupConfigs[netId][lGrpId].lStartN; int gGrpId = groupConfigs[netId][lGrpId].gGrpId; PoissonRate* rate = groupConfigMDMap[gGrpId].ratePtr; // if spikeGenFunc group does not have a Poisson pointer, skip if (groupConfigMap[gGrpId].spikeGenFunc || rate == NULL) continue; assert(runtimeData[netId].poissonFireRate != NULL); if (rate->isOnGPU()) { // rates allocated on GPU CUDA_CHECK_ERRORS(cudaMemcpy(&runtimeData[netId].poissonFireRate[lNId - networkConfigs[netId].numNReg], rate->getRatePtrGPU(), sizeof(float) * rate->getNumNeurons(), cudaMemcpyDeviceToDevice)); }else { // rates allocated on CPU CUDA_CHECK_ERRORS(cudaMemcpy(&runtimeData[netId].poissonFireRate[lNId - networkConfigs[netId].numNReg], rate->getRatePtrCPU(), sizeof(float) * rate->getNumNeurons(), cudaMemcpyHostToDevice)); } } } } // Note: for temporarily use, might be merged into exchangeExternalSpike void SNN::clearExtFiringTable_GPU(int netId) { assert(runtimeData[netId].memType == GPU_MEM); checkAndSetGPUDevice(netId); CUDA_CHECK_ERRORS(cudaMemset(runtimeData[netId].extFiringTableEndIdxD1, 0, sizeof(int) * networkConfigs[netId].numGroups)); CUDA_CHECK_ERRORS(cudaMemset(runtimeData[netId].extFiringTableEndIdxD2, 0, sizeof(int) * networkConfigs[netId].numGroups)); } //void SNN::routeSpikes_GPU() { // int firingTableIdxD2, firingTableIdxD1; // int GtoLOffset; // // ToDo: route spikes using routing table. currently only exchange spikes between GPU0 and GPU1 // // GPU0 -> GPU1 // if (!groupPartitionLists[0].empty() && !groupPartitionLists[1].empty()) { // checkAndSetGPUDevice(0); // CUDA_CHECK_ERRORS( cudaMemcpy(managerRuntimeData.extFiringTableEndIdxD2, runtimeData[0].extFiringTableEndIdxD2, sizeof(int) * networkConfigs[0].numGroups, cudaMemcpyDeviceToHost)); // CUDA_CHECK_ERRORS( cudaMemcpy(managerRuntimeData.extFiringTableEndIdxD1, runtimeData[0].extFiringTableEndIdxD1, sizeof(int) * networkConfigs[0].numGroups, cudaMemcpyDeviceToHost)); // CUDA_CHECK_ERRORS( cudaMemcpy(managerRuntimeData.extFiringTableD2, runtimeData[0].extFiringTableD2, sizeof(int*) * networkConfigs[0].numGroups, cudaMemcpyDeviceToHost)); // CUDA_CHECK_ERRORS( cudaMemcpy(managerRuntimeData.extFiringTableD1, runtimeData[0].extFiringTableD1, sizeof(int*) * networkConfigs[0].numGroups, cudaMemcpyDeviceToHost)); // //KERNEL_DEBUG("GPU0 D1ex:%d/D2ex:%d", managerRuntimeData.extFiringTableEndIdxD1[0], managerRuntimeData.extFiringTableEndIdxD2[0]); // // checkAndSetGPUDevice(1); // CUDA_CHECK_ERRORS( cudaMemcpyFromSymbol(managerRuntimeData.timeTableD2, timeTableD2GPU, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, cudaMemcpyDeviceToHost)); // CUDA_CHECK_ERRORS( cudaMemcpyFromSymbol(managerRuntimeData.timeTableD1, timeTableD1GPU, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, cudaMemcpyDeviceToHost)); // firingTableIdxD2 = managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1]; // firingTableIdxD1 = managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1]; // //KERNEL_DEBUG("GPU1 D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2); // // for (int lGrpId = 0; lGrpId < networkConfigs[0].numGroups; lGrpId++) { // if (groupConfigs[0][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD2[lGrpId] > 0) { // CUDA_CHECK_ERRORS( cudaMemcpyPeer(runtimeData[1].firingTableD2 + firingTableIdxD2, 1, // managerRuntimeData.extFiringTableD2[lGrpId], 0, // sizeof(int) * managerRuntimeData.extFiringTableEndIdxD2[lGrpId])); // // for (std::list<GroupConfigMD>::iterator 
grpIt = groupPartitionLists[1].begin(); grpIt != groupPartitionLists[1].end(); grpIt++) { // if (grpIt->gGrpId == groupConfigs[0][lGrpId].gGrpId) // GtoLOffset = grpIt->GtoLOffset; // } // // kernel_convertExtSpikesD2<<<NUM_BLOCKS, NUM_THREADS>>>(firingTableIdxD2, // firingTableIdxD2 + managerRuntimeData.extFiringTableEndIdxD2[lGrpId], // GtoLOffset); // [StartIdx, EndIdx) // firingTableIdxD2 += managerRuntimeData.extFiringTableEndIdxD2[lGrpId]; // } // // if (groupConfigs[0][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD1[lGrpId] > 0) { // CUDA_CHECK_ERRORS( cudaMemcpyPeer(runtimeData[1].firingTableD1 + firingTableIdxD1, 1, // managerRuntimeData.extFiringTableD1[lGrpId], 0, // sizeof(int) * managerRuntimeData.extFiringTableEndIdxD1[lGrpId])); // // for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[1].begin(); grpIt != groupPartitionLists[1].end(); grpIt++) { // if (grpIt->gGrpId == groupConfigs[0][lGrpId].gGrpId) // GtoLOffset = grpIt->GtoLOffset; // } // // kernel_convertExtSpikesD1<<<NUM_BLOCKS, NUM_THREADS>>>(firingTableIdxD1, // firingTableIdxD1 + managerRuntimeData.extFiringTableEndIdxD1[lGrpId], // GtoLOffset); // [StartIdx, EndIdx) // firingTableIdxD1 += managerRuntimeData.extFiringTableEndIdxD1[lGrpId]; // // } // //KERNEL_DEBUG("GPU1 New D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2); // } // managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD2; // managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD1; // CUDA_CHECK_ERRORS( cudaMemcpyToSymbol(timeTableD2GPU, managerRuntimeData.timeTableD2, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, cudaMemcpyHostToDevice)); // CUDA_CHECK_ERRORS( cudaMemcpyToSymbol(timeTableD1GPU, managerRuntimeData.timeTableD1, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, cudaMemcpyHostToDevice)); // } // // // GPU1 -> GPU0 // if (!groupPartitionLists[1].empty() && !groupPartitionLists[0].empty()) { // checkAndSetGPUDevice(1); // CUDA_CHECK_ERRORS( cudaMemcpy(managerRuntimeData.extFiringTableEndIdxD2, runtimeData[1].extFiringTableEndIdxD2, sizeof(int) * networkConfigs[1].numGroups, cudaMemcpyDeviceToHost)); // CUDA_CHECK_ERRORS( cudaMemcpy(managerRuntimeData.extFiringTableEndIdxD1, runtimeData[1].extFiringTableEndIdxD1, sizeof(int) * networkConfigs[1].numGroups, cudaMemcpyDeviceToHost)); // CUDA_CHECK_ERRORS( cudaMemcpy(managerRuntimeData.extFiringTableD2, runtimeData[1].extFiringTableD2, sizeof(int*) * networkConfigs[1].numGroups, cudaMemcpyDeviceToHost)); // CUDA_CHECK_ERRORS( cudaMemcpy(managerRuntimeData.extFiringTableD1, runtimeData[1].extFiringTableD1, sizeof(int*) * networkConfigs[1].numGroups, cudaMemcpyDeviceToHost)); // //KERNEL_DEBUG("GPU1 D1ex:%d/D2ex:%d", managerRuntimeData.extFiringTableEndIdxD1[0], managerRuntimeData.extFiringTableEndIdxD2[0]); // // checkAndSetGPUDevice(0); // CUDA_CHECK_ERRORS( cudaMemcpyFromSymbol(managerRuntimeData.timeTableD2, timeTableD2GPU, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, cudaMemcpyDeviceToHost)); // CUDA_CHECK_ERRORS( cudaMemcpyFromSymbol(managerRuntimeData.timeTableD1, timeTableD1GPU, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, cudaMemcpyDeviceToHost)); // firingTableIdxD2 = managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1]; // firingTableIdxD1 = managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1]; // //KERNEL_DEBUG("GPU0 D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2); // // for (int lGrpId = 0; lGrpId < 
networkConfigs[1].numGroups; lGrpId++) { // if (groupConfigs[1][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD2[lGrpId] > 0) { // CUDA_CHECK_ERRORS( cudaMemcpyPeer(runtimeData[0].firingTableD2 + firingTableIdxD2, 0, // managerRuntimeData.extFiringTableD2[lGrpId], 1, // sizeof(int) * managerRuntimeData.extFiringTableEndIdxD2[lGrpId])); // // for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[0].begin(); grpIt != groupPartitionLists[0].end(); grpIt++) { // if (grpIt->gGrpId == groupConfigs[1][lGrpId].gGrpId) // GtoLOffset = grpIt->GtoLOffset; // } // // kernel_convertExtSpikesD2<<<NUM_BLOCKS, NUM_THREADS>>>(firingTableIdxD2, // firingTableIdxD2 + managerRuntimeData.extFiringTableEndIdxD2[lGrpId], // GtoLOffset); // [StartIdx, EndIdx) // firingTableIdxD2 += managerRuntimeData.extFiringTableEndIdxD2[lGrpId]; // } // // if (groupConfigs[1][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD1[lGrpId] > 0) { // CUDA_CHECK_ERRORS( cudaMemcpyPeer(runtimeData[0].firingTableD1 + firingTableIdxD1, 0, // managerRuntimeData.extFiringTableD1[lGrpId], 1, // sizeof(int) * managerRuntimeData.extFiringTableEndIdxD1[lGrpId])); // // for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[0].begin(); grpIt != groupPartitionLists[0].end(); grpIt++) { // if (grpIt->gGrpId == groupConfigs[1][lGrpId].gGrpId) // GtoLOffset = grpIt->GtoLOffset; // } // // kernel_convertExtSpikesD1<<<NUM_BLOCKS, NUM_THREADS>>>(firingTableIdxD1, // firingTableIdxD1 + managerRuntimeData.extFiringTableEndIdxD1[lGrpId], // GtoLOffset); // [StartIdx, EndIdx) // firingTableIdxD1 += managerRuntimeData.extFiringTableEndIdxD1[lGrpId]; // } // //KERNEL_DEBUG("GPU0 New D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2); // } // managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD2; // managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD1; // CUDA_CHECK_ERRORS( cudaMemcpyToSymbol(timeTableD2GPU, managerRuntimeData.timeTableD2, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, cudaMemcpyHostToDevice)); // CUDA_CHECK_ERRORS( cudaMemcpyToSymbol(timeTableD1GPU, managerRuntimeData.timeTableD1, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, cudaMemcpyHostToDevice)); // } // // // for (std::list<RoutingTableEntry>::iterator rteItr = spikeRoutingTable.begin(); rteItr != spikeRoutingTable.end(); rteItr++) { // int srcNetId = rteItr->srcNetId; // int destNetId = rteItr->destNetId; // assert(srcNetId < CPU_RUNTIME_BASE); // assert(destNetId < CPU_RUNTIME_BASE); // checkAndSetGPUDevice(srcNetId); // CUDA_CHECK_ERRORS( cudaMemcpy(managerRuntimeData.extFiringTableEndIdxD2, runtimeData[srcNetId].extFiringTableEndIdxD2, sizeof(int) * networkConfigs[srcNetId].numGroups, cudaMemcpyDeviceToHost)); // CUDA_CHECK_ERRORS( cudaMemcpy(managerRuntimeData.extFiringTableEndIdxD1, runtimeData[srcNetId].extFiringTableEndIdxD1, sizeof(int) * networkConfigs[srcNetId].numGroups, cudaMemcpyDeviceToHost)); // CUDA_CHECK_ERRORS( cudaMemcpy(managerRuntimeData.extFiringTableD2, runtimeData[srcNetId].extFiringTableD2, sizeof(int*) * networkConfigs[srcNetId].numGroups, cudaMemcpyDeviceToHost)); // CUDA_CHECK_ERRORS( cudaMemcpy(managerRuntimeData.extFiringTableD1, runtimeData[srcNetId].extFiringTableD1, sizeof(int*) * networkConfigs[srcNetId].numGroups, cudaMemcpyDeviceToHost)); // //KERNEL_DEBUG("GPU0 D1ex:%d/D2ex:%d", managerRuntimeData.extFiringTableEndIdxD1[0], managerRuntimeData.extFiringTableEndIdxD2[0]); // // 
checkAndSetGPUDevice(destNetId); // CUDA_CHECK_ERRORS( cudaMemcpyFromSymbol(managerRuntimeData.timeTableD2, timeTableD2GPU, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, cudaMemcpyDeviceToHost)); // CUDA_CHECK_ERRORS( cudaMemcpyFromSymbol(managerRuntimeData.timeTableD1, timeTableD1GPU, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, cudaMemcpyDeviceToHost)); // firingTableIdxD2 = managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1]; // firingTableIdxD1 = managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1]; // //KERNEL_DEBUG("GPU1 D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2); // // for (int lGrpId = 0; lGrpId < networkConfigs[srcNetId].numGroups; lGrpId++) { // if (groupConfigs[srcNetId][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD2[lGrpId] > 0) { // CUDA_CHECK_ERRORS( cudaMemcpyPeer(runtimeData[destNetId].firingTableD2 + firingTableIdxD2, destNetId, // managerRuntimeData.extFiringTableD2[lGrpId], srcNetId, // sizeof(int) * managerRuntimeData.extFiringTableEndIdxD2[lGrpId])); // // for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[destNetId].begin(); grpIt != groupPartitionLists[destNetId].end(); grpIt++) { // if (grpIt->gGrpId == groupConfigs[srcNetId][lGrpId].gGrpId) // GtoLOffset = grpIt->GtoLOffset; // } // // kernel_convertExtSpikesD2<<<NUM_BLOCKS, NUM_THREADS>>>(firingTableIdxD2, // firingTableIdxD2 + managerRuntimeData.extFiringTableEndIdxD2[lGrpId], // GtoLOffset); // [StartIdx, EndIdx) // firingTableIdxD2 += managerRuntimeData.extFiringTableEndIdxD2[lGrpId]; // } // // if (groupConfigs[srcNetId][lGrpId].hasExternalConnect && managerRuntimeData.extFiringTableEndIdxD1[lGrpId] > 0) { // CUDA_CHECK_ERRORS( cudaMemcpyPeer(runtimeData[destNetId].firingTableD1 + firingTableIdxD1, destNetId, // managerRuntimeData.extFiringTableD1[lGrpId], srcNetId, // sizeof(int) * managerRuntimeData.extFiringTableEndIdxD1[lGrpId])); // // for (std::list<GroupConfigMD>::iterator grpIt = groupPartitionLists[destNetId].begin(); grpIt != groupPartitionLists[destNetId].end(); grpIt++) { // if (grpIt->gGrpId == groupConfigs[srcNetId][lGrpId].gGrpId) // GtoLOffset = grpIt->GtoLOffset; // } // // kernel_convertExtSpikesD1<<<NUM_BLOCKS, NUM_THREADS>>>(firingTableIdxD1, // firingTableIdxD1 + managerRuntimeData.extFiringTableEndIdxD1[lGrpId], // GtoLOffset); // [StartIdx, EndIdx) // firingTableIdxD1 += managerRuntimeData.extFiringTableEndIdxD1[lGrpId]; // // } // //KERNEL_DEBUG("GPU1 New D1:%d/D2:%d", firingTableIdxD1, firingTableIdxD2); // } // managerRuntimeData.timeTableD2[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD2; // managerRuntimeData.timeTableD1[simTimeMs + glbNetworkConfig.maxDelay + 1] = firingTableIdxD1; // CUDA_CHECK_ERRORS( cudaMemcpyToSymbol(timeTableD2GPU, managerRuntimeData.timeTableD2, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, cudaMemcpyHostToDevice)); // CUDA_CHECK_ERRORS( cudaMemcpyToSymbol(timeTableD1GPU, managerRuntimeData.timeTableD1, sizeof(int)*(1000+glbNetworkConfig.maxDelay+1), 0, cudaMemcpyHostToDevice)); // } //} /*! * \brief This function is called every second by SNN::runNetwork(). It updates the firingTableD1(D2)GPU and * timeTableD1(D2)GPU by removing older firing information. 
*/ void SNN::shiftSpikeTables_F_GPU(int netId) { assert(runtimeData[netId].memType == GPU_MEM); checkAndSetGPUDevice(netId); kernel_shiftFiringTable<<<NUM_BLOCKS, NUM_THREADS>>>(); } void SNN::shiftSpikeTables_T_GPU(int netId) { assert(runtimeData[netId].memType == GPU_MEM); checkAndSetGPUDevice(netId); kernel_shiftTimeTable<<<NUM_BLOCKS, NUM_THREADS>>>(); } /* * \brief Update syanptic weights every 10ms, 100ms, or 1000ms * * */ void SNN::updateWeights_GPU(int netId) { assert(sim_in_testing == false); assert(sim_with_fixedwts == false); assert(runtimeData[netId].memType == GPU_MEM); checkAndSetGPUDevice(netId); kernel_updateWeights<<<NUM_BLOCKS, NUM_THREADS>>>(); } //__global__ void gpu_resetFiringInformation() { // if(threadIdx.x==0 && blockIdx.x==0) { // for(int i = 0; i < ROUNDED_TIMING_COUNT; i++) { // timeTableD2GPU[i] = 0; // timeTableD1GPU[i] = 0; // } // spikeCountD2SecGPU=0; // spikeCountD1SecGPU=0; // secD2fireCntTest=0; // secD1fireCntTest=0; // spikeCountD2GPU=0; // spikeCountD1GPU=0; // // //spikeCountAll1Sec=0;//assigned in fetchSpikeTables() // } // //} // //void SNN::resetFiringInformation_GPU() { // checkAndSetGPUDevice(); // // gpu_resetFiringInformation<<<NUM_BLOCKS,NUM_THREADS>>>(); //} /*! * \brief this function allocates device (GPU) memory sapce and copies external current to it * * This function: * (allocate and) copy extCurrent * * This funcion is called by copyNeuronState() and setExternalCurrent. Only host-to-divice copy is required * * \param[in] netId the id of a local network, which is the same as the device (GPU) id * \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied * \param[in] dest pointer to runtime data desitnation * \param[in] allocateMem a flag indicates whether allocating memory space before copying * * \sa allocateSNN_GPU fetchSTPState * \since v3.0 */ void SNN::copyExternalCurrent(int netId, int lGrpId, RuntimeData* dest, cudaMemcpyKind kind, bool allocateMem) { checkAndSetGPUDevice(netId); checkDestSrcPtrs(dest, &managerRuntimeData, cudaMemcpyHostToDevice, allocateMem, lGrpId, 0);// check that the destination pointer is properly allocated.. assert(kind == cudaMemcpyHostToDevice); int posN, lengthN; if (lGrpId == ALL) { posN = 0; lengthN = networkConfigs[netId].numNReg; } else { assert(lGrpId >= 0); posN = groupConfigs[netId][lGrpId].lStartN; lengthN = groupConfigs[netId][lGrpId].numN; } assert(lengthN >= 0 && lengthN <= networkConfigs[netId].numNReg); // assert NOT poisson neurons //KERNEL_DEBUG("copyExternalCurrent: lGrpId=%d, ptrPos=%d, length=%d, allocate=%s", lGrpId, posN, lengthN, allocateMem?"y":"n"); if(allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->extCurrent, sizeof(float) * lengthN)); CUDA_CHECK_ERRORS(cudaMemcpy(&(dest->extCurrent[posN]), &(managerRuntimeData.extCurrent[posN]), sizeof(float) * lengthN, cudaMemcpyHostToDevice)); } /*! 
* \brief This function fetch the spike count in all local networks and sum the up */ void SNN::copyNetworkSpikeCount(int netId, cudaMemcpyKind kind, unsigned int* spikeCountD1, unsigned int* spikeCountD2, unsigned int* spikeCountExtD1, unsigned int* spikeCountExtD2) { checkAndSetGPUDevice(netId); assert(kind == cudaMemcpyDeviceToHost); CUDA_CHECK_ERRORS(cudaMemcpyFromSymbol(spikeCountExtD2, spikeCountExtRxD2GPU, sizeof(int), 0, cudaMemcpyDeviceToHost)); CUDA_CHECK_ERRORS(cudaMemcpyFromSymbol(spikeCountExtD1, spikeCountExtRxD1GPU, sizeof(int), 0, cudaMemcpyDeviceToHost)); CUDA_CHECK_ERRORS(cudaMemcpyFromSymbol(spikeCountD2, spikeCountD2GPU, sizeof(int), 0, cudaMemcpyDeviceToHost)); CUDA_CHECK_ERRORS(cudaMemcpyFromSymbol(spikeCountD1, spikeCountD1GPU, sizeof(int), 0, cudaMemcpyDeviceToHost)); } /*! * \brief This function fetch spikeTables in the local network specified by netId * * \param[in] netId the id of local network of which timeTableD1(D2) and firingTableD1(D2) are copied to manager runtime data */ void SNN::copySpikeTables(int netId, cudaMemcpyKind kind) { unsigned int gpuSpikeCountD1Sec, gpuSpikeCountD2Sec, gpuSpikeCountLastSecLeftD2; checkAndSetGPUDevice(netId); assert(kind == cudaMemcpyDeviceToHost); CUDA_CHECK_ERRORS(cudaMemcpyFromSymbol(&gpuSpikeCountLastSecLeftD2, spikeCountLastSecLeftD2GPU, sizeof(int), 0, cudaMemcpyDeviceToHost)); CUDA_CHECK_ERRORS(cudaMemcpyFromSymbol(&gpuSpikeCountD2Sec, spikeCountD2SecGPU, sizeof(int), 0, cudaMemcpyDeviceToHost)); CUDA_CHECK_ERRORS(cudaMemcpyFromSymbol(&gpuSpikeCountD1Sec, spikeCountD1SecGPU, sizeof(int), 0, cudaMemcpyDeviceToHost)); CUDA_CHECK_ERRORS(cudaMemcpy(managerRuntimeData.firingTableD2, runtimeData[netId].firingTableD2, sizeof(int)*(gpuSpikeCountD2Sec + gpuSpikeCountLastSecLeftD2), cudaMemcpyDeviceToHost)); CUDA_CHECK_ERRORS(cudaMemcpy(managerRuntimeData.firingTableD1, runtimeData[netId].firingTableD1, sizeof(int)*gpuSpikeCountD1Sec, cudaMemcpyDeviceToHost)); CUDA_CHECK_ERRORS(cudaMemcpyFromSymbol(managerRuntimeData.timeTableD2, timeTableD2GPU, sizeof(int)*(1000 + glbNetworkConfig.maxDelay + 1), 0, cudaMemcpyDeviceToHost)); CUDA_CHECK_ERRORS(cudaMemcpyFromSymbol(managerRuntimeData.timeTableD1, timeTableD1GPU, sizeof(int)*(1000 + glbNetworkConfig.maxDelay + 1), 0, cudaMemcpyDeviceToHost)); } /*! * \brief This function fetch neuron state buffer in the local network specified by netId * * This function: * (allocate and) copy * * This funcion is called by copyNeuronState() * * \param[in] netId the id of a local network, which is the same as the device (GPU) id * \param[in] lGrpId the local group id in a local network, which specifiy the group(s) to be copied * \param[in] dest pointer to runtime data desitnation * \param[in] src pointer to runtime data source * \param[in] kind the direction of copy * \param[in] allocateMem a flag indicates whether allocating memory space before copying * * \sa copyNeuronState * \since v4.0 */ void SNN::copyNeuronStateBuffer(int netId, int lGrpId, RuntimeData* dest, RuntimeData* src, cudaMemcpyKind kind, bool allocateMem) { checkAndSetGPUDevice(netId); checkDestSrcPtrs(dest, src, kind, allocateMem, lGrpId, 0); // check that the destination pointer is properly allocated.. 
int ptrPos, length; if (lGrpId == ALL) { ptrPos = 0; length = networkConfigs[netId].numGroups * MAX_NEURON_MON_GRP_SZIE * 1000; } else { ptrPos = lGrpId * MAX_NEURON_MON_GRP_SZIE * 1000; length = MAX_NEURON_MON_GRP_SZIE * 1000; } assert(length <= networkConfigs[netId].numGroups * MAX_NEURON_MON_GRP_SZIE * 1000); assert(length > 0); // neuron information assert(src->nVBuffer != NULL); if (allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->nVBuffer, sizeof(float) * length)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->nVBuffer[ptrPos], &src->nVBuffer[ptrPos], sizeof(float) * length, kind)); assert(src->nUBuffer != NULL); if (allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->nUBuffer, sizeof(float) * length)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->nUBuffer[ptrPos], &src->nUBuffer[ptrPos], sizeof(float) * length, kind)); assert(src->nIBuffer != NULL); if (allocateMem) CUDA_CHECK_ERRORS(cudaMalloc((void**)&dest->nIBuffer, sizeof(float) * length)); CUDA_CHECK_ERRORS(cudaMemcpy(&dest->nIBuffer[ptrPos], &src->nIBuffer[ptrPos], sizeof(float) * length, kind)); } void SNN::copyTimeTable(int netId, cudaMemcpyKind kind) { assert(netId < CPU_RUNTIME_BASE); checkAndSetGPUDevice(netId); if (kind == cudaMemcpyDeviceToHost) { CUDA_CHECK_ERRORS(cudaMemcpyFromSymbol(managerRuntimeData.timeTableD2, timeTableD2GPU, sizeof(int)*(1000 + glbNetworkConfig.maxDelay + 1), 0, cudaMemcpyDeviceToHost)); CUDA_CHECK_ERRORS(cudaMemcpyFromSymbol(managerRuntimeData.timeTableD1, timeTableD1GPU, sizeof(int)*(1000 + glbNetworkConfig.maxDelay + 1), 0, cudaMemcpyDeviceToHost)); } else { // kind == cudaMemcpyHostToDevice CUDA_CHECK_ERRORS(cudaMemcpyToSymbol(timeTableD2GPU, managerRuntimeData.timeTableD2, sizeof(int)*(1000 + glbNetworkConfig.maxDelay + 1), 0, cudaMemcpyHostToDevice)); CUDA_CHECK_ERRORS(cudaMemcpyToSymbol(timeTableD1GPU, managerRuntimeData.timeTableD1, sizeof(int)*(1000 + glbNetworkConfig.maxDelay + 1), 0, cudaMemcpyHostToDevice)); } } void SNN::copyExtFiringTable(int netId, cudaMemcpyKind kind) { assert(netId < CPU_RUNTIME_BASE); checkAndSetGPUDevice(netId); CUDA_CHECK_ERRORS(cudaMemcpy(managerRuntimeData.extFiringTableEndIdxD2, runtimeData[netId].extFiringTableEndIdxD2, sizeof(int) * networkConfigs[netId].numGroups, kind)); CUDA_CHECK_ERRORS(cudaMemcpy(managerRuntimeData.extFiringTableEndIdxD1, runtimeData[netId].extFiringTableEndIdxD1, sizeof(int) * networkConfigs[netId].numGroups, kind)); CUDA_CHECK_ERRORS(cudaMemcpy(managerRuntimeData.extFiringTableD2, runtimeData[netId].extFiringTableD2, sizeof(int*) * networkConfigs[netId].numGroups, kind)); CUDA_CHECK_ERRORS(cudaMemcpy(managerRuntimeData.extFiringTableD1, runtimeData[netId].extFiringTableD1, sizeof(int*) * networkConfigs[netId].numGroups, kind)); //KERNEL_DEBUG("GPU0 D1ex:%d/D2ex:%d", managerRuntimeData.extFiringTableEndIdxD1[0], managerRuntimeData.extFiringTableEndIdxD2[0]); } int SNN::configGPUDevice() { int devCount, devMax; cudaDeviceProp deviceProp; CUDA_CHECK_ERRORS(cudaGetDeviceCount(&devCount)); KERNEL_INFO("CUDA devices Configuration:"); KERNEL_INFO(" - Number of CUDA devices = %9d", devCount); devMax = CUDA_GET_MAXGFLOP_DEVICE_ID(); KERNEL_INFO(" - CUDA device ID with max GFLOPs = %9d", devMax); for (int ithGPU = 0; ithGPU < devCount; ithGPU++) { CUDA_CHECK_ERRORS(cudaGetDeviceProperties(&deviceProp, ithGPU)); KERNEL_INFO(" + Use CUDA device[%1d] = %9s", ithGPU, deviceProp.name); KERNEL_INFO(" + CUDA Compute Capability (CC) = %2d.%d", deviceProp.major, deviceProp.minor); } if (deviceProp.major < 2) { // Unmark this when CC 1.3 is deprecated 
//KERNEL_ERROR("CARLsim does not support CUDA devices older than CC 2.0"); //exitSimulation(1); KERNEL_WARN("CUDA device with CC 1.3 will be deprecated in a future release"); } for (int ithGPU = 0; ithGPU < devCount; ithGPU++) { CUDA_CHECK_ERRORS(cudaSetDevice(ithGPU)); CUDA_DEVICE_RESET(); } if (devCount >= 2) { // try to setup P2P access if more than 2 GPUs are presented // FIXME: generalize the initialization for mulit-GPUs up to 4 or 8 // enable P2P access int canAccessPeer_0_1, canAccessPeer_1_0; cudaDeviceCanAccessPeer(&canAccessPeer_0_1, 0, 1); cudaDeviceCanAccessPeer(&canAccessPeer_1_0, 1, 0); // enable peer access between GPU0 and GPU1 if (canAccessPeer_0_1 & canAccessPeer_1_0) { cudaSetDevice(0); cudaDeviceEnablePeerAccess(1, 0); cudaSetDevice(1); cudaDeviceEnablePeerAccess(0, 0); KERNEL_INFO("* Peer Access is enabled"); } else { KERNEL_INFO("* Peer Access is not enabled"); } } return devCount; } void SNN::convertExtSpikesD2_GPU(int netId, int startIdx, int endIdx, int GtoLOffset) { checkAndSetGPUDevice(netId); kernel_convertExtSpikesD2<<<NUM_BLOCKS, NUM_THREADS>>>(startIdx, endIdx, GtoLOffset); // [StartIdx, EndIdx) } void SNN::convertExtSpikesD1_GPU(int netId, int startIdx, int endIdx, int GtoLOffset) { checkAndSetGPUDevice(netId); kernel_convertExtSpikesD1<<<NUM_BLOCKS, NUM_THREADS>>>(startIdx, endIdx, GtoLOffset); // [StartIdx, EndIdx) } void SNN::checkAndSetGPUDevice(int netId) { int currentDevice; cudaGetDevice(&currentDevice); assert(netId >= 0 && netId < numAvailableGPUs); if (currentDevice != netId) { //KERNEL_DEBUG("Change GPU context from GPU %d to GPU %d", currentDevice, netId); CUDA_CHECK_ERRORS(cudaSetDevice(netId)); } } // deprecated //void SNN::copyWeightsGPU(int nid, int src_grp) { // checkAndSetGPUDevice("copyWeightsGPU"); // // assert(nid < numNReg); // unsigned int cumId = managerRuntimeData.cumulativePre[nid]; // float* synWts = &(managerRuntimeData.wt[cumId]); // //TODO: NEEDED TO COMMENT THIS FOR CARLSIM 2.1-2.2 FILEMERGE -- KDC // // assert(cumId >= (nid-numNPois)); // //assert(cumId < numPreSynapses*networkConfigs[0].numN); // // CUDA_CHECK_ERRORS( cudaMemcpy( synWts, &runtimeData[0].wt[cumId], sizeof(float)*managerRuntimeData.Npre[nid], cudaMemcpyDeviceToHost)); //} // Allocates required memory and then initialize the GPU void SNN::allocateSNN_GPU(int netId) { checkAndSetGPUDevice(netId); // setup memory type of GPU runtime data runtimeData[netId].memType = GPU_MEM; // display some memory management info size_t avail, total, previous; float toMB = std::pow(1024.0f, 2); cudaMemGetInfo(&avail, &total); KERNEL_INFO("GPU Memory Management: (Total %2.3f MB)", (float)(total/toMB)); KERNEL_INFO("Data\t\t\tSize\t\tTotal Used\tTotal Available"); KERNEL_INFO("Init:\t\t\t%2.3f MB\t%2.3f MB\t%2.3f MB", (float)(total)/toMB, (float)((total - avail) / toMB), (float)(avail/toMB)); previous=avail; // allocate random number generator on GPU(s) if(runtimeData[netId].gpuRandGen == NULL) { curandCreateGenerator(&runtimeData[netId].gpuRandGen, CURAND_RNG_PSEUDO_DEFAULT); curandSetPseudoRandomGeneratorSeed(runtimeData[netId].gpuRandGen, randSeed_ + netId); } // allocate SNN::runtimeData[0].randNum for random number generators CUDA_CHECK_ERRORS(cudaMalloc((void **)&runtimeData[netId].randNum, networkConfigs[netId].numNPois * sizeof(float))); cudaMemGetInfo(&avail, &total); KERNEL_INFO("Random Gen:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB", (float)(previous - avail) / toMB, (float)((total - avail) / toMB), (float)(avail / toMB)); previous=avail; // initialize 
runtimeData[0].neuronAllocation, __device__ loadBufferCount, loadBufferSize allocateStaticLoad(netId, NUM_THREADS); allocateGroupId(netId); // this table is useful for quick evaluation of the position of fired neuron // given a sequence of bits denoting the firing.. // initialize __device__ quickSynIdTableGPU[256] initQuickSynIdTable(netId); cudaMemGetInfo(&avail, &total); KERNEL_INFO("Static Load:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB", (float)(previous - avail) / toMB, (float)((total - avail) / toMB), (float)(avail / toMB)); previous=avail; // initialize (copy from SNN) runtimeData[0].Npre, runtimeData[0].Npre_plastic, runtimeData[0].Npre_plasticInv, runtimeData[0].cumulativePre // initialize (copy from SNN) runtimeData[0].cumulativePost, runtimeData[0].Npost, runtimeData[0].postDelayInfo // initialize (copy from SNN) runtimeData[0].postSynapticIds, runtimeData[0].preSynapticIds copyPreConnectionInfo(netId, ALL, &runtimeData[netId], &managerRuntimeData, cudaMemcpyHostToDevice, true); copyPostConnectionInfo(netId, ALL, &runtimeData[netId], &managerRuntimeData, cudaMemcpyHostToDevice, true); cudaMemGetInfo(&avail, &total); KERNEL_INFO("Conn Info:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB", (float)(previous - avail) / toMB, (float)((total - avail) / toMB), (float)(avail / toMB)); previous=avail; // initialize (copy from SNN) runtimeData[0].wt, runtimeData[0].wtChange, runtimeData[0].maxSynWt copySynapseState(netId, &runtimeData[netId], &managerRuntimeData, cudaMemcpyHostToDevice, true); cudaMemGetInfo(&avail, &total); KERNEL_INFO("Syn State:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB", (float)(previous - avail) / toMB, (float)((total - avail) / toMB), (float)(avail / toMB)); previous=avail; // copy the neuron state information to the GPU.. // initialize (copy from managerRuntimeData) runtimeData[0].recovery, runtimeData[0].voltage, runtimeData[0].current // initialize (copy from managerRuntimeData) runtimeData[0].gGABAa, runtimeData[0].gGABAb, runtimeData[0].gAMPA, runtimeData[0].gNMDA // initialize (copy from SNN) runtimeData[0].Izh_a, runtimeData[0].Izh_b, runtimeData[0].Izh_c, runtimeData[0].Izh_d // initialize (copy form SNN) runtimeData[0].baseFiring, runtimeData[0].baseFiringInv // initialize (copy from SNN) runtimeData[0].n(V,U,I)Buffer[] copyNeuronState(netId, ALL, &runtimeData[netId], cudaMemcpyHostToDevice, true); // copy STP state, considered as neuron state if (sim_with_stp) { // initialize (copy from SNN) stpu, stpx copySTPState(netId, ALL, &runtimeData[netId], &managerRuntimeData, cudaMemcpyHostToDevice, true); } cudaMemGetInfo(&avail, &total); KERNEL_INFO("Neuron State:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB", (float)(previous - avail) / toMB, (float)((total - avail) / toMB), (float)(avail / toMB)); previous=avail; // initialize (copy from SNN) runtimeData[0].grpDA(5HT,ACh,NE) // initialize (copy from SNN) runtimeData[0].grpDA(5HT,ACh,NE)Buffer[] copyGroupState(netId, ALL, &runtimeData[netId], &managerRuntimeData, cudaMemcpyHostToDevice, true); cudaMemGetInfo(&avail, &total); KERNEL_INFO("Group State:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB", (float)(previous - avail) / toMB, (float)((total - avail) / toMB), (float)(avail / toMB)); previous=avail; // initialize (cudaMemset) runtimeData[0].I_set, runtimeData[0].poissonFireRate // initialize (copy from SNN) runtimeData[0].firingTableD1, runtimeData[0].firingTableD2 // initialize (cudaMalloc) runtimeData[0].spikeGenBits // initialize (copy from managerRuntimeData) runtimeData[0].nSpikeCnt, // initialize (copy from SNN) runtimeData[0].synSpikeTime, 
runtimeData[0].lastSpikeTime copyAuxiliaryData(netId, ALL, &runtimeData[netId], cudaMemcpyHostToDevice, true); cudaMemGetInfo(&avail, &total); KERNEL_INFO("Auxiliary Data:\t\t%2.3f MB\t%2.3f MB\t%2.3f MB\n\n", (float)(previous - avail) / toMB, (float)((total - avail) / toMB), (float)(avail / toMB)); previous = avail; // copy relevant pointers and network information to GPU CUDA_CHECK_ERRORS(cudaMemcpyToSymbol(runtimeDataGPU, &runtimeData[netId], sizeof(RuntimeData), 0, cudaMemcpyHostToDevice)); // copy data to from SNN:: to NetworkConfigRT SNN::networkConfigs[0] copyNetworkConfig(netId, cudaMemcpyHostToDevice); // FIXME: we can change the group properties such as STDP as the network is running. So, we need a way to updating the GPU when changes are made. // TODO: move mulSynFast, mulSynSlow to ConnectConfig structure // copy connection configs CUDA_CHECK_ERRORS(cudaMemcpyToSymbol(d_mulSynFast, mulSynFast, sizeof(float) * networkConfigs[netId].numConnections, 0, cudaMemcpyHostToDevice)); CUDA_CHECK_ERRORS(cudaMemcpyToSymbol(d_mulSynSlow, mulSynSlow, sizeof(float) * networkConfigs[netId].numConnections, 0, cudaMemcpyHostToDevice)); copyGroupConfigs(netId); KERNEL_DEBUG("Transfering group settings to GPU:"); for (int lGrpId = 0; lGrpId < networkConfigs[netId].numGroupsAssigned; lGrpId++) { KERNEL_DEBUG("Settings for Group %s:", groupConfigMap[groupConfigs[netId][lGrpId].gGrpId].grpName.c_str()); KERNEL_DEBUG("\tType: %d", (int)groupConfigs[netId][lGrpId].Type); KERNEL_DEBUG("\tNumN: %d", groupConfigs[netId][lGrpId].numN); KERNEL_DEBUG("\tM: %d", groupConfigs[netId][lGrpId].numPostSynapses); KERNEL_DEBUG("\tPreM: %d", groupConfigs[netId][lGrpId].numPreSynapses); KERNEL_DEBUG("\tspikeGenerator: %d", (int)groupConfigs[netId][lGrpId].isSpikeGenerator); KERNEL_DEBUG("\tFixedInputWts: %d", (int)groupConfigs[netId][lGrpId].FixedInputWts); KERNEL_DEBUG("\tMaxDelay: %d", (int)groupConfigs[netId][lGrpId].MaxDelay); KERNEL_DEBUG("\tWithSTDP: %d", (int)groupConfigs[netId][lGrpId].WithSTDP); if (groupConfigs[netId][lGrpId].WithSTDP) { KERNEL_DEBUG("\t\tE-STDP type: %s", stdpType_string[groupConfigs[netId][lGrpId].WithESTDPtype]); KERNEL_DEBUG("\t\tTAU_PLUS_INV_EXC: %f", groupConfigs[netId][lGrpId].TAU_PLUS_INV_EXC); KERNEL_DEBUG("\t\tTAU_MINUS_INV_EXC: %f", groupConfigs[netId][lGrpId].TAU_MINUS_INV_EXC); KERNEL_DEBUG("\t\tALPHA_PLUS_EXC: %f", groupConfigs[netId][lGrpId].ALPHA_PLUS_EXC); KERNEL_DEBUG("\t\tALPHA_MINUS_EXC: %f", groupConfigs[netId][lGrpId].ALPHA_MINUS_EXC); KERNEL_DEBUG("\t\tI-STDP type: %s", stdpType_string[groupConfigs[netId][lGrpId].WithISTDPtype]); KERNEL_DEBUG("\t\tTAU_PLUS_INV_INB: %f", groupConfigs[netId][lGrpId].TAU_PLUS_INV_INB); KERNEL_DEBUG("\t\tTAU_MINUS_INV_INB: %f", groupConfigs[netId][lGrpId].TAU_MINUS_INV_INB); KERNEL_DEBUG("\t\tALPHA_PLUS_INB: %f", groupConfigs[netId][lGrpId].ALPHA_PLUS_INB); KERNEL_DEBUG("\t\tALPHA_MINUS_INB: %f", groupConfigs[netId][lGrpId].ALPHA_MINUS_INB); KERNEL_DEBUG("\t\tLAMBDA: %f", groupConfigs[netId][lGrpId].LAMBDA); KERNEL_DEBUG("\t\tDELTA: %f", groupConfigs[netId][lGrpId].DELTA); KERNEL_DEBUG("\t\tBETA_LTP: %f", groupConfigs[netId][lGrpId].BETA_LTP); KERNEL_DEBUG("\t\tBETA_LTD: %f", groupConfigs[netId][lGrpId].BETA_LTD); } KERNEL_DEBUG("\tWithSTP: %d", (int)groupConfigs[netId][lGrpId].WithSTP); if (groupConfigs[netId][lGrpId].WithSTP) { KERNEL_DEBUG("\t\tSTP_U: %f", groupConfigs[netId][lGrpId].STP_U); // KERNEL_DEBUG("\t\tSTP_tD: %f",groupConfigs[netId][lGrpId].STP_tD); // KERNEL_DEBUG("\t\tSTP_tF: %f",groupConfigs[netId][lGrpId].STP_tF); } 
KERNEL_DEBUG("\tspikeGen: %s", groupConfigs[netId][lGrpId].isSpikeGenFunc ? "is Set" : "is not set "); } // allocation of gpu runtime data is done runtimeData[netId].allocated = true; // map the timing table to texture.. saves a lot of headache in using shared memory void* devPtr; size_t offset; CUDA_CHECK_ERRORS(cudaGetSymbolAddress(&devPtr, timeTableD2GPU)); CUDA_CHECK_ERRORS(cudaBindTexture(&offset, timeTableD2GPU_tex, devPtr, sizeof(int) * TIMING_COUNT)); offset = offset / sizeof(int); CUDA_CHECK_ERRORS(cudaGetSymbolAddress(&devPtr, timeTableD2GPU_tex_offset)); CUDA_CHECK_ERRORS(cudaMemcpy(devPtr, &offset, sizeof(int), cudaMemcpyHostToDevice)); CUDA_CHECK_ERRORS(cudaGetSymbolAddress(&devPtr, timeTableD1GPU)); CUDA_CHECK_ERRORS(cudaBindTexture(&offset, timeTableD1GPU_tex, devPtr, sizeof(int) * TIMING_COUNT)); offset = offset / sizeof(int); CUDA_CHECK_ERRORS(cudaGetSymbolAddress(&devPtr, timeTableD1GPU_tex_offset)); CUDA_CHECK_ERRORS(cudaMemcpy(devPtr, &offset, sizeof(int), cudaMemcpyHostToDevice)); initGPU(netId); }
84b8d5908bc3c246c59de5b19a7bba9d11e465a4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void rdiv_strided_double(int n, int xOffset, int yOffset, double *dx, double *dy, int incx, int incy, double *result) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
        if (i >= xOffset && i >= yOffset && i % incx == 0 && i % incy == 0)
            result[i] = dy[i] / dx[i]; // reverse division: result = y / x
    }
}
84b8d5908bc3c246c59de5b19a7bba9d11e465a4.cu
#include "includes.h" __global__ void rdiv_strided_double(int n,int xOffset,int yOffset, double *dx, double *dy,int incx,int incy,double *result) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { if(i >= xOffset && i >= yOffset && i % incx == 0 && i % incy == 0) result[i] = dx[i] - dy[i]; } }
9e9181302b526e6eba24050df28af159a3e363fc.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void bcnn_scales_kernel(float *output, float *biases, int n, int size) {
    int offset = blockIdx.x * blockDim.x + threadIdx.x;
    int filter = blockIdx.y;
    int batch = blockIdx.z;
    if (offset < size) {
        output[(batch * n + filter) * size + offset] *= biases[filter];
    }
}
9e9181302b526e6eba24050df28af159a3e363fc.cu
#include "includes.h" __global__ void bcnn_scales_kernel(float *output, float *biases, int n, int size) { int offset = blockIdx.x * blockDim.x + threadIdx.x; int filter = blockIdx.y; int batch = blockIdx.z; if (offset < size) { output[(batch * n + filter) * size + offset] *= biases[filter]; } }
5fb76e5f8b0871c958936f07cca6a79972a36df1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "reorganize.h" #include <stdio.h> __global__ void toMatriplex_kernel(float *dst, int dst_stride, const float* __restrict__ src, int src_stride, int N, int LS) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if (i < N && j < LS) { if (i==-1) { printf(" %d, mplex[%f] / lin[%f]\n", j, dst[i+j*dst_stride], src[j+i*src_stride]); } dst[i + j*dst_stride] = src[j + i*src_stride]; /*float diff = fabs((dst[i + j*dst_stride] - src[j + i*src_stride]));*/ /*if (diff > 1e-3) printf("%f\n", diff);*/ } } void toMatriplex_wrapper(hipStream_t& stream, GPlex<float> &dst, GPlex<float> &src, int N, int LS) { dim3 block(16, 8, 1); dim3 grid((N-1)/16 + 1, (LS-1)/8 +1, 1); hipLaunchKernelGGL(( toMatriplex_kernel) , dim3(grid), dim3(block), 0, stream, dst.ptr, dst.stride, src.ptr, src.stride, N, LS); } __global__ void reorganizeMs(float *msPar, size_t msPar_stride, float *full_posArray, float *msErr, size_t msErr_stride, float *full_errArray, int *full_hitIdx, int hi, int maxHits, int N, int HS, int HV, int Nhits) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if (i < N) { int hidx = full_hitIdx[i + hi*N]; if (j < HV) { /*float tmp1 = msPar[i + msPar_stride*j];*/ msPar[i + msPar_stride*j] = full_posArray[j + HV*(hidx + hi*maxHits)]; /*float tmp2 = msPar[i + msPar_stride*j];*/ /*if (i==0 && hi == 0) {*/ /*if (fabs(tmp1 - tmp2) > 1e-3) {*/ /*printf("i %d, j %d, old: %f, new %f\n", i, j, tmp1, tmp2);*/ /*}*/ /*}*/ } if (j < HS) { msErr[i + msErr_stride*j] = full_errArray[j + HS*(hidx + hi*maxHits)]; } } } void reorganizeMs_wrapper(hipStream_t& stream, GPlex<float>& msPar, float *full_posArray, GPlex<float>& msErr, float *full_errArray, int *full_hitIdx, int hi, int maxHits, int N, int hs, int hv, int Nhits) { dim3 block(16, 6, 1); dim3 grid((N-1)/16 + 1, (hs-1)/6 +1, 1); hipLaunchKernelGGL(( reorganizeMs) , dim3(grid), dim3(block), 0, stream, msPar.ptr, msPar.stride, full_posArray, msErr.ptr, msErr.stride, full_errArray, full_hitIdx, hi, maxHits, N, hs, hv, Nhits); }
5fb76e5f8b0871c958936f07cca6a79972a36df1.cu
#include "reorganize.h" #include <stdio.h> __global__ void toMatriplex_kernel(float *dst, int dst_stride, const float* __restrict__ src, int src_stride, int N, int LS) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if (i < N && j < LS) { if (i==-1) { printf(" %d, mplex[%f] / lin[%f]\n", j, dst[i+j*dst_stride], src[j+i*src_stride]); } dst[i + j*dst_stride] = src[j + i*src_stride]; /*float diff = fabs((dst[i + j*dst_stride] - src[j + i*src_stride]));*/ /*if (diff > 1e-3) printf("%f\n", diff);*/ } } void toMatriplex_wrapper(cudaStream_t& stream, GPlex<float> &dst, GPlex<float> &src, int N, int LS) { dim3 block(16, 8, 1); dim3 grid((N-1)/16 + 1, (LS-1)/8 +1, 1); toMatriplex_kernel <<<grid, block, 0, stream>>> (dst.ptr, dst.stride, src.ptr, src.stride, N, LS); } __global__ void reorganizeMs(float *msPar, size_t msPar_stride, float *full_posArray, float *msErr, size_t msErr_stride, float *full_errArray, int *full_hitIdx, int hi, int maxHits, int N, int HS, int HV, int Nhits) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if (i < N) { int hidx = full_hitIdx[i + hi*N]; if (j < HV) { /*float tmp1 = msPar[i + msPar_stride*j];*/ msPar[i + msPar_stride*j] = full_posArray[j + HV*(hidx + hi*maxHits)]; /*float tmp2 = msPar[i + msPar_stride*j];*/ /*if (i==0 && hi == 0) {*/ /*if (fabs(tmp1 - tmp2) > 1e-3) {*/ /*printf("i %d, j %d, old: %f, new %f\n", i, j, tmp1, tmp2);*/ /*}*/ /*}*/ } if (j < HS) { msErr[i + msErr_stride*j] = full_errArray[j + HS*(hidx + hi*maxHits)]; } } } void reorganizeMs_wrapper(cudaStream_t& stream, GPlex<float>& msPar, float *full_posArray, GPlex<float>& msErr, float *full_errArray, int *full_hitIdx, int hi, int maxHits, int N, int hs, int hv, int Nhits) { dim3 block(16, 6, 1); dim3 grid((N-1)/16 + 1, (hs-1)/6 +1, 1); reorganizeMs <<<grid, block, 0, stream>>> (msPar.ptr, msPar.stride, full_posArray, msErr.ptr, msErr.stride, full_errArray, full_hitIdx, hi, maxHits, N, hs, hv, Nhits); }
a56e7b45cfb8ae48a5c78e9a06a247278bdca6fd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void sga_right_weight_backward (const int n, const float *bottom_data, const float *top_data, const float *temp_diff, const int height, const int width, const int depth, const int wsize, float *filters_diff){ int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } int step = height * width; int base = index / step * step * depth + index % step; //up->down int fbase = index / step * step * wsize + index % step; // int row = index%step/width; int col = index % step % width; for (int i = 0; i < depth; i++) filters_diff[fbase] += temp_diff[base + i * step] * bottom_data[base + i * step]; if (col - 1 >= 0) { int location = fbase + step; for (int i = 0; i < depth; i++) filters_diff[location] += temp_diff[base + i * step] * top_data[base + i * step - 1]; location = fbase + 3 * step; filters_diff[location] += temp_diff[base] * bottom_data[base]; for (int i = 1; i < depth; i++) filters_diff[location] += temp_diff[base + i * step] * top_data[base + (i - 1) * step - 1]; location = fbase + 4 * step; filters_diff[location] += temp_diff[base + (depth - 1) * step] * bottom_data[base + (depth - 1) * step]; for (int i = 0; i < depth - 1; i++) filters_diff[location] += temp_diff[base + i * step] * top_data[base + (i + 1) * step - 1]; } /* else{ //int location = fbase + step; for(int i=0; i<depth; i++){ float temp = temp_diff[base+i*step]*bottom_data[base+i*step]; filters_diff[fbase + step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step]; filters_diff[fbase + 3*step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step]; filters_diff[fbase + 4*step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step]; } // filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step]; // location = fbase + 3*step; // for(int i=0; i<depth; i++) // filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step]; // // location = fbase + 4*step; // for(int i=0; i<depth; i++) // filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step]; }*/ if (col - 2 >= 0) { int location = fbase + 2 * step; for (int i = 0; i < depth; i++) filters_diff[location] += temp_diff[base + i * step] * top_data[base + i * step - 2]; } /* else{ int location = fbase + 2*step; for(int i=0; i<depth; i++) filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step]; } */ }
a56e7b45cfb8ae48a5c78e9a06a247278bdca6fd.cu
#include "includes.h" __global__ void sga_right_weight_backward (const int n, const float *bottom_data, const float *top_data, const float *temp_diff, const int height, const int width, const int depth, const int wsize, float *filters_diff){ int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } int step = height * width; int base = index / step * step * depth + index % step; //up->down int fbase = index / step * step * wsize + index % step; // int row = index%step/width; int col = index % step % width; for (int i = 0; i < depth; i++) filters_diff[fbase] += temp_diff[base + i * step] * bottom_data[base + i * step]; if (col - 1 >= 0) { int location = fbase + step; for (int i = 0; i < depth; i++) filters_diff[location] += temp_diff[base + i * step] * top_data[base + i * step - 1]; location = fbase + 3 * step; filters_diff[location] += temp_diff[base] * bottom_data[base]; for (int i = 1; i < depth; i++) filters_diff[location] += temp_diff[base + i * step] * top_data[base + (i - 1) * step - 1]; location = fbase + 4 * step; filters_diff[location] += temp_diff[base + (depth - 1) * step] * bottom_data[base + (depth - 1) * step]; for (int i = 0; i < depth - 1; i++) filters_diff[location] += temp_diff[base + i * step] * top_data[base + (i + 1) * step - 1]; } /* else{ //int location = fbase + step; for(int i=0; i<depth; i++){ float temp = temp_diff[base+i*step]*bottom_data[base+i*step]; filters_diff[fbase + step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step]; filters_diff[fbase + 3*step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step]; filters_diff[fbase + 4*step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step]; } // filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step]; // location = fbase + 3*step; // for(int i=0; i<depth; i++) // filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step]; // // location = fbase + 4*step; // for(int i=0; i<depth; i++) // filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step]; }*/ if (col - 2 >= 0) { int location = fbase + 2 * step; for (int i = 0; i < depth; i++) filters_diff[location] += temp_diff[base + i * step] * top_data[base + i * step - 2]; } /* else{ int location = fbase + 2*step; for(int i=0; i<depth; i++) filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step]; } */ }
e35aa627f837dd104b04fa0fcb414ccd87d55aa5.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * This experimental software is provided AS IS.
 * Feel free to use/modify/distribute,
 * If used, please retain this disclaimer and cite
 * "GPUfs: Integrating a file system with GPUs",
 * M Silberstein,B Ford,I Keidar,E Witchel
 * ASPLOS13, March 2013, Houston,USA
 */
#ifndef ringbuf_gpumem
#define ringbuf_gpumem

#include <hip/hip_runtime.h>
#include <string.h>
#include "util.cu.h"

struct ringbuf_metadata_t{
    volatile uint _head;
    volatile uint _tail;
    uint _size;
};

template<typename T>
struct ringbuf{
    volatile T *gpu_vals;          // data slots live in GPU memory
    ringbuf_metadata_t* cpu_meta;  // head/tail/size live in mapped CPU memory
};

template<typename T>
__host__ void rb_init_cpu(ringbuf<T>** rb_cpu, struct ringbuf<T>** rb_gpu, uint num_elem)
{
    *rb_cpu=(ringbuf<T>*)malloc(sizeof(ringbuf<T>));
    CUDA_SAFE_CALL(hipMalloc((void**)&(*rb_cpu)->gpu_vals,(sizeof(T)*num_elem))); // values are in GPU

    ringbuf_metadata_t* rbm=(ringbuf_metadata_t*)malloc(sizeof(ringbuf_metadata_t)); // metadata in CPU
    rbm->_size=num_elem;
    rbm->_head=rbm->_tail=0;
    (*rb_cpu)->cpu_meta=rbm;
    /** host init complete **/

    ringbuf<T> rb_h_gpu; // initializer for GPU
    // metadata in CPU shared with GPU
    CUDA_SAFE_CALL(hipHostRegister(rbm,sizeof(ringbuf_metadata_t),hipHostRegisterMapped));
    CUDA_SAFE_CALL(hipHostGetDevicePointer((void**)&rb_h_gpu.cpu_meta,(void*)rbm,0));
    rb_h_gpu.gpu_vals=(*rb_cpu)->gpu_vals;

    CUDA_SAFE_CALL(hipMalloc((void**)rb_gpu,sizeof(ringbuf<T>))); // create GPU object
    // copy initialized rb_gpu to gpu memory
    CUDA_SAFE_CALL(hipMemcpy(*rb_gpu,&rb_h_gpu,sizeof(ringbuf<T>),hipMemcpyHostToDevice));
}

template<typename T>
__host__ void rb_free_cpu(ringbuf<T>* rb_cpu, ringbuf<T>* rb_gpu){
    CUDA_SAFE_CALL(hipHostUnregister(rb_cpu->cpu_meta));
    CUDA_SAFE_CALL(hipFree(rb_gpu));
    CUDA_SAFE_CALL(hipFree((void*)rb_cpu->gpu_vals));
    free(rb_cpu->cpu_meta);
    free(rb_cpu);
}

template<typename T>
__device__ __host__ bool rb_empty(struct ringbuf<T>* r){
    return (r->cpu_meta->_tail==r->cpu_meta->_head);
}

template<typename T>
__device__ __host__ bool rb_full(struct ringbuf<T>* r){
    return ((r->cpu_meta->_head+1)%r->cpu_meta->_size)==r->cpu_meta->_tail;
}

template<typename T>
__host__ void memcpy_on_pop_cpu(T* cpu_val, volatile const T* gpu_val, hipStream_t& s){
    CUDA_SAFE_CALL(hipMemcpyAsync(cpu_val,(const void*)gpu_val, sizeof(T),hipMemcpyDeviceToHost,s));
    CUDA_SAFE_CALL(hipStreamSynchronize(s)); // this call must be synced otherwise the buffer cannot be used!
}

template<typename T>
__host__ bool rb_pop_cpu(struct ringbuf<T>* r, T* newval, hipStream_t& s){
    if (rb_empty(r)) return false;

    memcpy_on_pop_cpu(newval,r->gpu_vals + r->cpu_meta->_tail,s);
    r->cpu_meta->_tail=(r->cpu_meta->_tail+1)%r->cpu_meta->_size;
    __sync_synchronize();
    return true;
}

// inefficient implementation
template<typename T>
__device__ void memcpy_on_pop_gpu(T* dst, volatile const T* src)
{
#warning "memcpy_on_pop_gpu using inefficient implementation. Use specialization instead"
    memcpy_block(dst,src,sizeof(T));
}

template<typename T>
__device__ bool rb_pop_gpu(struct ringbuf<T>* r, T* newval){
    __shared__ bool is_empty;
    BEGIN_SINGLE_THREAD
    is_empty=rb_empty(r);
    END_SINGLE_THREAD
    if(is_empty) return false;

    memcpy_on_pop_gpu(newval,r->gpu_vals + r->cpu_meta->_tail);
    r->cpu_meta->_tail=(r->cpu_meta->_tail+1)%r->cpu_meta->_size;
    __threadfence_system();
    return true;
}

template<typename T>
void memcpy_on_push_cpu(volatile T* gpu_val, const T* cpu_val, hipStream_t s)
{
    CUDA_SAFE_CALL(hipMemcpyAsync((void*)gpu_val,cpu_val, sizeof(T),hipMemcpyHostToDevice,s));
    CUDA_SAFE_CALL(hipStreamSynchronize(s)); // must be synchronized!
}

template<typename T>
bool __host__ rb_push_cpu(struct ringbuf<T>* r, const T* val, hipStream_t s)
{
    if (rb_full(r)) return false;

    memcpy_on_push_cpu(r->gpu_vals + r->cpu_meta->_head,val,s);
    __sync_synchronize();
    r->cpu_meta->_head=(r->cpu_meta->_head+1)%r->cpu_meta->_size;
    __sync_synchronize();
    return true;
}

template<typename T>
__device__ void memcpy_on_push_gpu(volatile T* dst, const T* src){
#warning "memcpy_on_push_gpu using inefficient implementation. Use specialization instead"
    memcpy_block(dst,src,sizeof(T));
}

template<typename T>
bool __device__ rb_push_gpu(struct ringbuf<T>* r, const T* val){
    __shared__ bool is_full;
    BEGIN_SINGLE_THREAD
    is_full=rb_full(r);
    END_SINGLE_THREAD
    if (is_full) return false;

    memcpy_on_push_gpu(r->gpu_vals + r->cpu_meta->_head,val);
    __threadfence();
    r->cpu_meta->_head=(r->cpu_meta->_head+1)%r->cpu_meta->_size;
    __threadfence_system();
    return true;
}
#endif
e35aa627f837dd104b04fa0fcb414ccd87d55aa5.cu
/* * This expermental software is provided AS IS. * Feel free to use/modify/distribute, * If used, please retain this disclaimer and cite * "GPUfs: Integrating a file system with GPUs", * M Silberstein,B Ford,I Keidar,E Witchel * ASPLOS13, March 2013, Houston,USA */ #ifndef ringbuf_gpumem #define ringbuf_gpumem #include <cuda.h> #include <cuda_runtime.h> #include <string.h> #include "util.cu.h" struct ringbuf_metadata_t{ volatile uint _head; volatile uint _tail; uint _size; }; template<typename T> struct ringbuf{ volatile T *gpu_vals; ringbuf_metadata_t* cpu_meta; }; template<typename T> __host__ void rb_init_cpu(ringbuf<T>** rb_cpu, struct ringbuf<T>** rb_gpu, uint num_elem) { *rb_cpu=(ringbuf<T>*)malloc(sizeof(ringbuf<T>)); CUDA_SAFE_CALL(cudaMalloc(&(*rb_cpu)->gpu_vals,(sizeof(T)*num_elem))); // values are in GPU ringbuf_metadata_t* rbm=(ringbuf_metadata_t*)malloc(sizeof(ringbuf_metadata_t)); // metadata in CPU rbm->_size=num_elem; rbm->_head=rbm->_tail=0; (*rb_cpu)->cpu_meta=rbm; /** host init complete **/ ringbuf<T> rb_h_gpu; // initializer for GPU // metadata in CPU shared with GPU CUDA_SAFE_CALL(cudaHostRegister(rbm,sizeof(ringbuf_metdata_t),cudaHostRegisterMapped)); CUDA_SAFE_CALL(cudaHostGetDevicePointer((void**)&rb_h_gpu.cpu_meta,(void*)rbm,0)); rb_h_gpu.gpu_vals=(*rb_cpu)->gpu_vals; CUDA_SAFE_CALL(rb_gpu,sizeof(ringbuf<T>)); // create GPU object // copy initalized rb_gpu to gpu memory CUDA_SAFE_CALL(cudaMemcpy(*rb_gpu,&rb_h_gpu,sizeof(ringbuf<T>),cudaHostToDevice)); } template<typename T> __host__ void rb_free_cpu(ringbuf<T>* rb_cpu, ringbuf<T>* rb_gpu){ CUDA_SAFE_CALL(cudaHostUnregister(rb_cpu->cpu_meta)); CUDA_SAFE_CALL(cudaFree(rb_gpu)); CUDA_SAFE_CALL(cudaFree(rb_cpu->gpu_vals)); free(rb_cpu); } template<typename T> __device__ __host__ bool rb_empty(struct ringbuf<T>* r){ return (r->cpu_meta->_tail==r->cpu_meta->_head); } template<typename T>__device__ __host__ bool rb_full(struct ringbuf<T>* r){ return ((r->cpu_meta->_head+1)%r->cpu_meta->_size)==r->cpu_meta->_tail; } template<typename T> __host__ void memcpy_on_pop_cpu(T* cpu_val, volatile const T* gpu_val, cudaStream_t& s){ CUDA_SAFE_CALL(cudaMemcpyAsync(cpu_val,gpu_val, sizeof(T),cudaMemcpyDeviceToHost,s)); CUDA_SAFE_CALL(cudaStreamSynchronize(s)); // this call must be synced otherwise the buffer cannot be used! } template<typename T> __host__ bool rb_pop_cpu(struct ringbuf<T>* r, T* newval, cudaStream_t& s){ if (rb_empty(r)) return false; memcpy_on_pop_cpu(newval,r->gpu_vals[r->_tail],s); r->cpu_meta->_tail=(r->cpu_meta->_tail+1)%r->cpu_meta_size; __sync_synchronize(); return true; } // inefficient implementation template<typename T> __device__ void memcpy_on_pop_gpu(T* dst, volatile const T* src) { #warning "memcpy_on_pop_gpu using inefficient implementation. Use specialization instead" memcpy_block(dst,src,sizeof(T)); } template<typename T> __device__ bool rb_pop_gpu(struct ringbuf<T>* r, T* newval){ __shared__ bool is_empty; BEGIN_SINGLE_THREAD is_empty=rb_empty(r); END_SINGLE_THREAD if(is_empty) return false; memcpy_on_pop_gpu(newval,r->gpu_vals[r->cpu_meta->_tail]); r->cpu_meta->_tail=(r->cpu_meta->_tail+1)%r->cpu_meta->_size; __threadfence_system(); return true; } template<typename T> void memcpy_on_push_cpu(volatile T* gpu_val, const T* cpu_val, cudaStream_t s) { CUDA_SAFE_CALL(cudaMemcpyAsync(gpu_val,cpu_val, sizeof(T),cudaMemcpyHostToDevice,s)); CUDA_SAFE_CALL(cudaStreamSynchronize(s)); // must be synchronized! 
} template<typename T> bool __host__ rb_push_cpu(struct ringbuf<T>* r, const T* val, cudaStream_t& s) { if (rb_full(r)) return false; memcpy_on_push_cpu(&r->gpu_vals[r->cpu_meta->_head],val,s); __sync_synchronize(); r->cpu_meta->_head=(r->cpu_meta->_head+1)%r->cpu_meta->_size; __sync_synchronize(); return true; } template<typename T> __device__ void memcpy_on_push_gpu(volatile T* dst, const T* src){ #warning "memcpy_on_push_gpu using inefficient implementation. Use specialization instead" memcpy_block(dst,src,sizeof(T)); } template<typename T> bool __device__ rb_push_gpu(struct ringbuf<T>* r, const T* val){ __shared__ bool is_full; BEGIN_SINGLE_THREAD is_full=rb_full(r); END_SINGLE_THREAD if (is_full) return false; memcpy_on_push_gpu(&r->gpu_vals[r->cpu_meta->_head],val); __threadfence(); r->cpu_meta->_head=(r->cpu_meta->_head+1)%r->cpu_meta->_size; __threadfence_system(); return true; } #endif
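//
// --- Illustrative sketch (not part of the original header) ------------------
// rb_push_gpu above relies on __threadfence_system() so the host never
// observes the advanced _head before the slot contents it guards. The minimal
// self-contained program below shows that same write/fence/flag handshake on
// a two-int mailbox in mapped pinned memory. It is a sketch, not GPUfs code:
// error checking is omitted, some configurations need
// cudaSetDeviceFlags(cudaDeviceMapHost) before the allocation, and the host
// spin loop assumes the driver flushes the kernel launch promptly.
#include <cstdio>
#include <cuda_runtime.h>

// Device-side producer: write the payload, fence system-wide, then raise the
// flag -- the same ordering rb_push_gpu enforces between a slot and _head.
__global__ void produce(volatile int *buf, int v)
{
    buf[0] = v;               // payload
    __threadfence_system();   // make the payload visible to the host first
    buf[1] = 1;               // ready flag
}

int main(void)
{
    int *h_buf = NULL;        // [0] = payload, [1] = ready flag
    cudaHostAlloc((void **)&h_buf, 2 * sizeof(int), cudaHostAllocMapped);
    h_buf[0] = 0;
    h_buf[1] = 0;

    int *d_buf = NULL;
    cudaHostGetDevicePointer((void **)&d_buf, (void *)h_buf, 0);

    produce<<<1, 1>>>((volatile int *)d_buf, 42);

    // Host-side consumer: once the flag flips, the fence in the kernel
    // guarantees the payload write is already visible.
    while (((volatile int *)h_buf)[1] == 0) { /* busy wait */ }
    printf("payload = %d\n", h_buf[0]);

    cudaDeviceSynchronize();
    cudaFreeHost(h_buf);
    return 0;
}
// -----------------------------------------------------------------------------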
69ea12e333f82c93eb41e6d4beec2604e917a4ac.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stddef.h> #include <stdint.h> #include "model_gpu_utils.h" #include "ten_tusscher_2004_epi_Test_final.h" extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) { print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n"); // execution configuration const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t size = num_volumes*sizeof(real); check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ)); check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t))); hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, num_volumes); check_cuda_error( hipPeekAtLastError() ); hipDeviceSynchronize(); return pitch_h; } extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) { // execution configuration const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t stim_currents_size = sizeof(real)*num_cells_to_solve; size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve; real *stims_currents_device; check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size)); check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice)); //the array cells to solve is passed when we are using and adapative mesh uint32_t *cells_to_solve_device = NULL; if(cells_to_solve != NULL) { check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size)); check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice)); } hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps); check_cuda_error( hipPeekAtLastError() ); check_cuda_error(hipFree(stims_currents_device)); if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device)); } __global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes) { // Thread ID int threadID = blockDim.x * blockIdx.x + threadIdx.x; if(threadID < num_volumes) { // *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt // *((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M // *((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H // *((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J // *((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1 // *((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2 // *((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs // *((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S // *((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R // *((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D // *((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F // *((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa // *((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G // *((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai // *((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR // *((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai // *((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////initial condition for AP 20 //S2-1 real 
sv11[]={-86.7787928226268,0.00123339508649700,0.784831144233936,0.784673023102172,0.000169405106163081,0.487281523786458,0.00289654265697758,0.999998418745548,1.86681673058670e-08,1.83872100639159e-05,0.999777546403090,1.00731261455043,0.999997755681027,4.00467125306598e-05,0.953040239833913,9.39175391367938,139.965667493392}; //////////////////// //S2-2 //real sv11[]={-86.6775309540028,0.00126031074107193,0.782379594133090,0.782216749001106,0.000172068343086772,0.486227463562957,0.00291750746806204,0.999998383839518,1.89860165324306e-08,1.86371442934849e-05,0.999771183306077,1.00730952275387,0.999997729764813,4.01181567168462e-05,0.661435383223664,9.89216406636310,139.601234209998}; ////////////////////////////////////////////////// //S3-1 //real sv11[]={-86.6902768323595,0.00125688376225555,0.782690257165761,0.782547892596001,0.000171750048746746,0.486360170563085,0.00291485827479809,0.999998387931464,1.89456679295569e-08,1.86054940017131e-05,0.999770742626069,1.00724037170339,0.999997113579370,4.17567836043613e-05,0.472458747863693,10.1478189383772,139.471917130272}; //////////////// //S3-2 //real sv11[]={-86.5236591284772,0.00130241284471985,0.778613483022969,0.778472769811598,0.000175875277625194,0.484626058693879,0.00294965177778795,0.999998333317616,1.94791112184908e-08,1.90234417053386e-05,0.999779558473224,1.00713872511970,0.999995965310622,4.41551215458988e-05,0.567040008888733,10.2464162625462,139.303734550690}; *((real * )((char *) sv + pitch * 0) + threadID) =sv11[0]; // V; millivolt *((real * )((char *) sv + pitch * 1) + threadID) =sv11[1]; //M *((real * )((char *) sv + pitch * 2) + threadID) =sv11[2]; //H *((real * )((char *) sv + pitch * 3) + threadID) = sv11[3]; //J *((real * )((char *) sv + pitch * 4) + threadID) =sv11[4]; //Xr1 *((real * )((char *) sv + pitch * 5) + threadID) =sv11[5]; //Xr2 *((real * )((char *) sv + pitch * 6) + threadID) = sv11[6]; //Xs *((real * )((char *) sv + pitch * 7) + threadID) =sv11[7]; //S *((real * )((char *) sv + pitch * 8) + threadID) =sv11[8]; //R *((real * )((char *) sv + pitch * 9) + threadID) =sv11[9]; //D *((real * )((char *) sv + pitch * 10) + threadID) =sv11[10]; //F *((real * )((char *) sv + pitch * 11) + threadID) =sv11[11]; //FCa *((real * )((char *) sv + pitch * 12) + threadID) =sv11[12]; //G *((real * )((char *) sv + pitch * 13) + threadID) = sv11[13]; //Cai *((real * )((char *) sv + pitch * 14) + threadID) =sv11[14]; //CaSR *((real * )((char *) sv + pitch * 15) + threadID) = sv11[15]; //Nai *((real * )((char *) sv + pitch * 16) + threadID) = sv11[16]; //Ki } } // Solving the model for each cell in the tissue matrix ni x nj __global__ void solve_gpu(real dt, real *sv, real* stim_currents, uint32_t *cells_to_solve, uint32_t num_cells_to_solve, int num_steps) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; int sv_id; // Each thread solves one cell model if(threadID < num_cells_to_solve) { if(cells_to_solve) sv_id = cells_to_solve[threadID]; else sv_id = threadID; real rDY[NEQ]; for (int n = 0; n < num_steps; ++n) { RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt); *((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id); for(int i = 0; i < NEQ; i++) { *((real*)((char*)sv + pitch * i) + sv_id) = rDY[i]; } } } } inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) { // State variables real svolt = *((real*)((char*)sv + pitch * 0) + threadID_); real sm = *((real*)((char*)sv + pitch * 1) + threadID_); real sh = *((real*)((char*)sv + pitch * 2) + threadID_); real 
sj = *((real*)((char*)sv + pitch * 3) + threadID_); real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_); real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_); real sxs = *((real*)((char*)sv + pitch * 6) + threadID_); real ss = *((real*)((char*)sv + pitch * 7) + threadID_); real sr = *((real*)((char*)sv + pitch * 8) + threadID_); real sd = *((real*)((char*)sv + pitch * 9) + threadID_); real sf = *((real*)((char*)sv + pitch * 10) + threadID_); real sfca = *((real*)((char*)sv + pitch * 11) + threadID_); real sg = *((real*)((char*)sv + pitch * 12) + threadID_); real Cai = *((real*)((char*)sv + pitch * 13) + threadID_); real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_); real Nai = *((real*)((char*)sv + pitch * 15) + threadID_); real Ki = *((real*)((char*)sv + pitch * 16) + threadID_); ///S2-1 real parameters []={13.7730247891532,0.000208550376791424,0.000166345602997405,0.000314427207496467,0.272150547490643,0.206045798160674,0.134878222351137,2.91860118931279,0.0222099400341836,2.12194476134155,1099.53480175178,0.000604923870766662,0.118384383617544,0.0193733747777405,0.00390066599158743,2.21704721596155e-05}; ///// //S2-2 //real parameters []={13.9645635317638,0.000234559273515713,0.000158508496150117,0.000387718953473422,0.271550011299244,0.171313643894679,0.148132634408518,3.52429749186627,0.0163232963007063,1.80625170161156,1099.99984094905,0.000508428591582056,0.426315288126368,0.0193610246251599,0.00342305438925442,2.79133840240607e-05}; ///S3-1: //real parameters []={14.2265776064284,0.000280045021984329,0.000123702304592752,0.000251556675811958,0.224623739779267,0.145045477736859,0.132102752427711,4.42712254301024,0.0156948843567210,1.61691730440283,1100,0.000520888772463349,0.258756467150201,0.0191544497099730,0.00137164828832637,4.52996729499983e-05}; ////// ///S3-2: //real parameters []={14.2751110459407,0.000197490405913840,0.000138093676576538,0.000459611951400222,0.248312214169369,0.146550920650185,0.141336894566835,4.51002424199619,0.0147942147525980,1.60874334855823,1098.91591518736,0.000497071049372500,0.357179450926053,0.0190817376935230,0.00515881032161095,3.63348608264117e-05}; real GNa=parameters[0]; real GbNa=parameters[1]; real GCaL=parameters[2]; real GbCa=parameters[3]; real Gto=parameters[4]; real Gkr=parameters[5]; real Gks=parameters[6]; real GK1=parameters[7]; real GpK=parameters[8]; real knak=parameters[9]; real knaca=parameters[10]; real Vmaxup=parameters[11]; real GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; /// real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr /// real Gkr=0.096; //Parameters for Iks real pKNa=0.03; ///#ifdef EPI /// real Gks=0.245; ///#endif ///#ifdef ENDO /// real Gks=0.245; ///#endif ///#ifdef MCELL /// real Gks=0.062; ///#endif //Parameters for Ik1 /// real GK1=5.405; //Parameters for Ito ///#ifdef EPI /// real Gto=0.294; ///#endif ///#ifdef ENDO /// real Gto=0.073; ///#endif ///#ifdef MCELL /// real Gto=0.294; ///#endif //Parameters for INa /// real GNa=14.838; //Parameters for IbNa /// real GbNa=0.00029; //Parameters for INaK 
real KmK=1.0; real KmNa=40.0; /// real knak=1.362; //Parameters for ICaL /// real GCaL=0.000175; //Parameters for IbCa /// real GbCa=0.000592; //Parameters for INaCa /// real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa /// real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; /// real GpK=0.0146; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; /// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; ///Ileak=0.00008f*(CaSR-Cai); Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); 
Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; #ifdef EPI R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif #ifdef ENDO R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+28)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.; #endif #ifdef MCELL R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
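//
// --- Illustrative sketch (not part of the original model code) --------------
// Every state-variable access above uses the same pitched-memory arithmetic:
// variable i of cell `threadID` lives at *((real*)((char*)sv + pitch*i) + threadID),
// i.e. one row per state variable with a row stride of `pitch` bytes as
// returned by hipMallocPitch. The standalone program below isolates just that
// layout with made-up sizes (NVARS, NCELLS) and its own constant symbol
// (pitch_c) so it does not collide with the model's `pitch`. It is written
// against the CUDA runtime for brevity; the HIP calls above are one-to-one renames.
#include <cstdio>
#include <cuda_runtime.h>

#define NVARS  4    // stand-in for NEQ
#define NCELLS 8    // stand-in for num_volumes

__constant__ size_t pitch_c;   // row stride in bytes, set from the host

// Row = state variable, column = cell, rows separated by pitch_c bytes --
// the same address arithmetic as kernel_set_model_inital_conditions/RHS_gpu.
__global__ void fill(float *sv, int ncells)
{
    int cell = blockDim.x * blockIdx.x + threadIdx.x;
    if (cell < ncells)
        for (int v = 0; v < NVARS; ++v)
            *((float *)((char *)sv + v * pitch_c) + cell) = 100.0f * v + cell;
}

int main(void)
{
    float *d_sv = NULL;
    size_t pitch_h = 0;
    cudaMallocPitch((void **)&d_sv, &pitch_h, NCELLS * sizeof(float), NVARS);
    cudaMemcpyToSymbol(pitch_c, &pitch_h, sizeof(size_t));

    fill<<<1, NCELLS>>>(d_sv, NCELLS);

    float h_sv[NVARS][NCELLS];
    cudaMemcpy2D(h_sv, NCELLS * sizeof(float), d_sv, pitch_h,
                 NCELLS * sizeof(float), NVARS, cudaMemcpyDeviceToHost);
    printf("var 2, cell 3 = %g (expect 203)\n", h_sv[2][3]);

    cudaFree(d_sv);
    return 0;
}
// -----------------------------------------------------------------------------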
69ea12e333f82c93eb41e6d4beec2604e917a4ac.cu
#include <stddef.h> #include <stdint.h> #include "model_gpu_utils.h" #include "ten_tusscher_2004_epi_Test_final.h" extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) { print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n"); // execution configuration const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t size = num_volumes*sizeof(real); check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ)); check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t))); kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, num_volumes); check_cuda_error( cudaPeekAtLastError() ); cudaDeviceSynchronize(); return pitch_h; } extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) { // execution configuration const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t stim_currents_size = sizeof(real)*num_cells_to_solve; size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve; real *stims_currents_device; check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size)); check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice)); //the array cells to solve is passed when we are using and adapative mesh uint32_t *cells_to_solve_device = NULL; if(cells_to_solve != NULL) { check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size)); check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice)); } solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps); check_cuda_error( cudaPeekAtLastError() ); check_cuda_error(cudaFree(stims_currents_device)); if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device)); } __global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes) { // Thread ID int threadID = blockDim.x * blockIdx.x + threadIdx.x; if(threadID < num_volumes) { // *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt // *((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M // *((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H // *((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J // *((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1 // *((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2 // *((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs // *((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S // *((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R // *((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D // *((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F // *((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa // *((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G // *((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai // *((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR // *((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai // *((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////initial condition for AP 20 //S2-1 real 
sv11[]={-86.7787928226268,0.00123339508649700,0.784831144233936,0.784673023102172,0.000169405106163081,0.487281523786458,0.00289654265697758,0.999998418745548,1.86681673058670e-08,1.83872100639159e-05,0.999777546403090,1.00731261455043,0.999997755681027,4.00467125306598e-05,0.953040239833913,9.39175391367938,139.965667493392}; //////////////////// //S2-2 //real sv11[]={-86.6775309540028,0.00126031074107193,0.782379594133090,0.782216749001106,0.000172068343086772,0.486227463562957,0.00291750746806204,0.999998383839518,1.89860165324306e-08,1.86371442934849e-05,0.999771183306077,1.00730952275387,0.999997729764813,4.01181567168462e-05,0.661435383223664,9.89216406636310,139.601234209998}; ////////////////////////////////////////////////// //S3-1 //real sv11[]={-86.6902768323595,0.00125688376225555,0.782690257165761,0.782547892596001,0.000171750048746746,0.486360170563085,0.00291485827479809,0.999998387931464,1.89456679295569e-08,1.86054940017131e-05,0.999770742626069,1.00724037170339,0.999997113579370,4.17567836043613e-05,0.472458747863693,10.1478189383772,139.471917130272}; //////////////// //S3-2 //real sv11[]={-86.5236591284772,0.00130241284471985,0.778613483022969,0.778472769811598,0.000175875277625194,0.484626058693879,0.00294965177778795,0.999998333317616,1.94791112184908e-08,1.90234417053386e-05,0.999779558473224,1.00713872511970,0.999995965310622,4.41551215458988e-05,0.567040008888733,10.2464162625462,139.303734550690}; *((real * )((char *) sv + pitch * 0) + threadID) =sv11[0]; // V; millivolt *((real * )((char *) sv + pitch * 1) + threadID) =sv11[1]; //M *((real * )((char *) sv + pitch * 2) + threadID) =sv11[2]; //H *((real * )((char *) sv + pitch * 3) + threadID) = sv11[3]; //J *((real * )((char *) sv + pitch * 4) + threadID) =sv11[4]; //Xr1 *((real * )((char *) sv + pitch * 5) + threadID) =sv11[5]; //Xr2 *((real * )((char *) sv + pitch * 6) + threadID) = sv11[6]; //Xs *((real * )((char *) sv + pitch * 7) + threadID) =sv11[7]; //S *((real * )((char *) sv + pitch * 8) + threadID) =sv11[8]; //R *((real * )((char *) sv + pitch * 9) + threadID) =sv11[9]; //D *((real * )((char *) sv + pitch * 10) + threadID) =sv11[10]; //F *((real * )((char *) sv + pitch * 11) + threadID) =sv11[11]; //FCa *((real * )((char *) sv + pitch * 12) + threadID) =sv11[12]; //G *((real * )((char *) sv + pitch * 13) + threadID) = sv11[13]; //Cai *((real * )((char *) sv + pitch * 14) + threadID) =sv11[14]; //CaSR *((real * )((char *) sv + pitch * 15) + threadID) = sv11[15]; //Nai *((real * )((char *) sv + pitch * 16) + threadID) = sv11[16]; //Ki } } // Solving the model for each cell in the tissue matrix ni x nj __global__ void solve_gpu(real dt, real *sv, real* stim_currents, uint32_t *cells_to_solve, uint32_t num_cells_to_solve, int num_steps) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; int sv_id; // Each thread solves one cell model if(threadID < num_cells_to_solve) { if(cells_to_solve) sv_id = cells_to_solve[threadID]; else sv_id = threadID; real rDY[NEQ]; for (int n = 0; n < num_steps; ++n) { RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt); *((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id); for(int i = 0; i < NEQ; i++) { *((real*)((char*)sv + pitch * i) + sv_id) = rDY[i]; } } } } inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) { // State variables real svolt = *((real*)((char*)sv + pitch * 0) + threadID_); real sm = *((real*)((char*)sv + pitch * 1) + threadID_); real sh = *((real*)((char*)sv + pitch * 2) + threadID_); real 
sj = *((real*)((char*)sv + pitch * 3) + threadID_); real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_); real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_); real sxs = *((real*)((char*)sv + pitch * 6) + threadID_); real ss = *((real*)((char*)sv + pitch * 7) + threadID_); real sr = *((real*)((char*)sv + pitch * 8) + threadID_); real sd = *((real*)((char*)sv + pitch * 9) + threadID_); real sf = *((real*)((char*)sv + pitch * 10) + threadID_); real sfca = *((real*)((char*)sv + pitch * 11) + threadID_); real sg = *((real*)((char*)sv + pitch * 12) + threadID_); real Cai = *((real*)((char*)sv + pitch * 13) + threadID_); real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_); real Nai = *((real*)((char*)sv + pitch * 15) + threadID_); real Ki = *((real*)((char*)sv + pitch * 16) + threadID_); ///S2-1 real parameters []={13.7730247891532,0.000208550376791424,0.000166345602997405,0.000314427207496467,0.272150547490643,0.206045798160674,0.134878222351137,2.91860118931279,0.0222099400341836,2.12194476134155,1099.53480175178,0.000604923870766662,0.118384383617544,0.0193733747777405,0.00390066599158743,2.21704721596155e-05}; ///// //S2-2 //real parameters []={13.9645635317638,0.000234559273515713,0.000158508496150117,0.000387718953473422,0.271550011299244,0.171313643894679,0.148132634408518,3.52429749186627,0.0163232963007063,1.80625170161156,1099.99984094905,0.000508428591582056,0.426315288126368,0.0193610246251599,0.00342305438925442,2.79133840240607e-05}; ///S3-1: //real parameters []={14.2265776064284,0.000280045021984329,0.000123702304592752,0.000251556675811958,0.224623739779267,0.145045477736859,0.132102752427711,4.42712254301024,0.0156948843567210,1.61691730440283,1100,0.000520888772463349,0.258756467150201,0.0191544497099730,0.00137164828832637,4.52996729499983e-05}; ////// ///S3-2: //real parameters []={14.2751110459407,0.000197490405913840,0.000138093676576538,0.000459611951400222,0.248312214169369,0.146550920650185,0.141336894566835,4.51002424199619,0.0147942147525980,1.60874334855823,1098.91591518736,0.000497071049372500,0.357179450926053,0.0190817376935230,0.00515881032161095,3.63348608264117e-05}; real GNa=parameters[0]; real GbNa=parameters[1]; real GCaL=parameters[2]; real GbCa=parameters[3]; real Gto=parameters[4]; real Gkr=parameters[5]; real Gks=parameters[6]; real GK1=parameters[7]; real GpK=parameters[8]; real knak=parameters[9]; real knaca=parameters[10]; real Vmaxup=parameters[11]; real GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; /// real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr /// real Gkr=0.096; //Parameters for Iks real pKNa=0.03; ///#ifdef EPI /// real Gks=0.245; ///#endif ///#ifdef ENDO /// real Gks=0.245; ///#endif ///#ifdef MCELL /// real Gks=0.062; ///#endif //Parameters for Ik1 /// real GK1=5.405; //Parameters for Ito ///#ifdef EPI /// real Gto=0.294; ///#endif ///#ifdef ENDO /// real Gto=0.073; ///#endif ///#ifdef MCELL /// real Gto=0.294; ///#endif //Parameters for INa /// real GNa=14.838; //Parameters for IbNa /// real GbNa=0.00029; //Parameters for INaK 
real KmK=1.0; real KmNa=40.0; /// real knak=1.362; //Parameters for ICaL /// real GCaL=0.000175; //Parameters for IbCa /// real GbCa=0.000592; //Parameters for INaCa /// real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa /// real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; /// real GpK=0.0146; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; /// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; ///Ileak=0.00008f*(CaSR-Cai); Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); 
Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; #ifdef EPI R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif #ifdef ENDO R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+28)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.; #endif #ifdef MCELL R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
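//
// --- Illustrative sketch (not part of the original model code) --------------
// The gate updates in RHS_gpu all have the form
//   y_new = Y_INF - (Y_INF - y_old) * exp(-dt / TAU_Y),
// which is the exact solution of dy/dt = (Y_INF - y)/TAU_Y over one step with
// Y_INF and TAU_Y frozen (a Rush-Larsen-style update), so the gates stay
// bounded for any dt. The toy host program below contrasts that with plain
// forward Euler for a single gate; the constants are made up, not model values.
#include <cstdio>
#include <math.h>

int main(void)
{
    const double y_inf = 0.8, tau = 5.0, dt = 0.02;
    double y_exact = 0.1, y_euler = 0.1;

    for (int n = 0; n < 1000; ++n) {
        y_exact = y_inf - (y_inf - y_exact) * exp(-dt / tau);   // as in RHS_gpu
        y_euler = y_euler + dt * (y_inf - y_euler) / tau;       // forward Euler
    }
    // Both approach y_inf; only the exponential form is stable for arbitrary dt.
    printf("exact-step: %f  euler: %f  target: %f\n", y_exact, y_euler, y_inf);
    return 0;
}
// -----------------------------------------------------------------------------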
7e7c1671b880e9d677c54565b455b897e25bc72c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Purpose: Calculate definite integral using trapezoidal rule. * * Input: a, b, n * Output: Estimate of integral from a to b of f(x) * using n trapezoids. * * Compile: gcc -g -Wall -o trap trap.c -fopenmp * Usage: ./trap * * Note: The function f(x) is hardwired. * */ #ifdef _WIN32 # define NOMINMAX #endif // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <float.h> #include <time.h> // includes, project #include <cutil.h> // includes, kernels #include <trap_kernel.cu> //Err void check_fo_error(char *msg); //Globals for time float cpu_time; float gpu_slow; float gpu_fast; //Timer for timing time measured in time units unsigned int timer; #define LEFT_ENDPOINT 5 #define RIGHT_ENDPOINT 1000 //#define NUM_TRAPEZOIDS 100000 //MOVED TO _kernel.cu file float compute_on_device(float , float , int, float ,int); extern "C" float compute_gold(float , float , int, float ); int main(void) { int n = NUM_TRAPEZOIDS; float a = LEFT_ENDPOINT; float b = RIGHT_ENDPOINT; float h = (b-a)/(float)n; // Height of each trapezoid printf("The height of the trapezoid is %f \n", h); printf("Number of trapezoids: %d\n",n); //Start timer...NOW~!~!~!~! cutCreateTimer(&timer); cutStartTimer(timer); float reference = compute_gold(a, b, n, h); printf("Reference solution computed on the CPU = %f \n", reference); //Stop timer cutStopTimer(timer); cpu_time = 1e-3 * cutGetTimerValue(timer); float gpu_result_slow = compute_on_device(a, b, n, h, 1); printf(" Solution computed on the GPU (slow) = %f \n", gpu_result_slow); float slow_per_diff = 100*(fabs(gpu_result_slow -reference)/reference); printf(" Percent difference computed on the GPU (slow) = %f \n", slow_per_diff ); float gpu_result_fast = compute_on_device(a, b, n, h, 0); printf(" Solution computed on the GPU (fast) = %f \n", gpu_result_fast); float fast_per_diff = 100*(fabs(gpu_result_fast -reference)/reference); printf(" Percent difference computed on the GPU (fast) = %f \n", fast_per_diff ); //Speedup inffffffooo printf("== Speedup Info == \n"); printf("CPU Run time: %0.10f s. \n", cpu_time); printf("GPU (slow) Run time: %0.10f s. \n", gpu_slow); printf("GPU (fast) Run time: %0.10f s. \n", gpu_fast); float gpu_slow_speedup = cpu_time/gpu_slow; float gpu_fast_speedup = cpu_time/gpu_fast; printf("GPU (slow) Speedup: %0.10f\n", gpu_slow_speedup); printf("GPU (fast) Speedup: %0.10f\n",gpu_fast_speedup ); //For copy paste //printf("%d %f %f %f %f\n", NUM_TRAPEZOIDS,gpu_fast_speedup,gpu_slow_speedup,fast_per_diff,slow_per_diff); } /* Complete this function to perform the trapezoidal rule on the GPU. */ float compute_on_device(float a, float b, int n, float h, int do_slow) { //Allocate space on gpu //Room for the sum float * sum_on_gpu; int result_size = 1 * sizeof(float); hipMalloc(&sum_on_gpu, result_size); //Calculate kernel params //How many threads per block? int threads_per_block = THREADS_PER_BLOCK; //How many calculations? //Defined by for (k = 1; k <= n-1; k++) //Example n=5, k=1,2,3,4 so num calcs total = n-1 int total_calcs = n-1; //Each thread will take a single calculation int calcs_per_thread = 1; //How many threads to use? int num_threads = ceil((float)total_calcs / (float)calcs_per_thread); //How many blocks? 
int num_blocks = ceil((float)num_threads / (float)threads_per_block); dim3 thread_block(threads_per_block, 1, 1); dim3 grid(num_blocks,1); //Call kernel //do_slow indicates if this should execute the slow or fast kernel if(do_slow) { printf("== Slow Kernel==\n"); //Also allocate space for dumb storage of each threads result float * gpu_thread_results; //All threads (useful or not) will calculate a result to avoid //warp divergence...even though this is so slow it won't matter hipMalloc(&gpu_thread_results, num_blocks*threads_per_block * sizeof(float)); //Start timer...NOW~!~!~!~! cutCreateTimer(&timer); cutStartTimer(timer); //Slow kernel hipLaunchKernelGGL(( trap_kernel_slow), dim3(grid), dim3(thread_block), 0, 0, a, b, n, h,gpu_thread_results); //Sync hipDeviceSynchronize(); check_fo_error("trap_kernel_slow FAILURE"); //Summing slow kernel //REALLY slow...only one thread does summing //Obviously will be made better in optimized version dim3 thread_block_slow_sum(1, 1, 1); dim3 grid_slow_sum(1,1); hipLaunchKernelGGL(( trap_kernel_slow_sum), dim3(grid_slow_sum), dim3(thread_block_slow_sum), 0, 0, sum_on_gpu, a, b, n, h, gpu_thread_results); //Sync at end hipDeviceSynchronize(); check_fo_error("trap_kernel_slow_sum FAILURE"); //Stop timer cutStopTimer(timer); gpu_slow = 1e-3 * cutGetTimerValue(timer); } else { printf("== Fast Kernel==\n"); //Each thread does one iteration, placing value in shared mem //One thread from each block sums shared mem and writes to global //Threads in block will each take one iteration //One thread form block will sum all threads results //Same one thread will write into global memory float * gpu_block_results; hipMalloc(&gpu_block_results, num_blocks * sizeof(float)); //Start timer...NOW~!~!~!~! cutCreateTimer(&timer); cutStartTimer(timer); //Fast kernel hipLaunchKernelGGL(( trap_kernel_fast), dim3(grid), dim3(thread_block), 0, 0, n,a,h,gpu_block_results); //Sync hipDeviceSynchronize(); check_fo_error("trap_kernel_fast FAILURE"); //DEBUG////// /*float result = 0; hipMemcpy(&result, gpu_block_results, result_size, hipMemcpyDeviceToHost); printf("sum_on_gpu before fast sum: %f\n", result); float tmp_result = (result + ((f(a) + f(b))/2.0))*h; printf("result before fast sum: %f\n", tmp_result); ////////////////////// */ //Use another kernel to sum block results in global mem //There are now 'num_blocks' floats in global memory int global_mem_floats = num_blocks; //To sync summation and use only shared memory we can only have //one thread block num_blocks = 1; //threads per block threads_per_block = THREADS_PER_BLOCK; num_threads = threads_per_block * num_blocks; //Use tree based approach //First level of tree is reading from global mem //Want second level to be in shared mem (get out of global as // soon as possible) /* |-----x------| |g g g g| g g g g| g g g g| <<< Single block s s s <<< condensing/summing into shared mem */ //How many floats can fit in shared mem? 
int shared_mem_per_block_size = 16384; int max_floats_per_block = shared_mem_per_block_size / sizeof(float); //Each thread sums some global floats into shared mem int global_floats_per_thread = ceil((float)global_mem_floats/(float)num_threads); dim3 thread_block_fast_sum(threads_per_block, 1, 1); dim3 grid_fast_sum(num_blocks,1); //Fast sum kernel hipLaunchKernelGGL(( trap_kernel_fast_sum), dim3(grid_fast_sum), dim3(thread_block_fast_sum), 0, 0, a,b,h,sum_on_gpu, global_floats_per_thread, global_mem_floats, gpu_block_results); //Sync hipDeviceSynchronize(); check_fo_error("trap_kernel_fast_sum FAILURE"); //Stop timer cutStopTimer(timer); gpu_fast = 1e-3 * cutGetTimerValue(timer); printf(" max_floats_per_block: %d\n",max_floats_per_block); printf(" global_mem_floats: %d\n",global_mem_floats); printf(" global_floats_per_thread: %d\n",global_floats_per_thread); } printf(" Threads per block: %d\n",threads_per_block); printf(" Number of blocks: %d\n",num_blocks); int actual_threads = threads_per_block*num_blocks; printf(" Number of threads: %d\n",actual_threads); //Copy back the result float result = 0; hipMemcpy(&result, sum_on_gpu, result_size, hipMemcpyDeviceToHost); //Return result return result; } //Error helper void check_fo_error(char *msg){ hipError_t err = hipGetLastError(); if(hipSuccess != err){ printf("CUDA ERROR: %s (%s). \n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } }
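//
// --- Illustrative sketch (not part of the original file) --------------------
// The kernels launched above parallelize the composite trapezoidal rule:
//   integral ~= h * ( (f(a) + f(b)) / 2 + sum_{k=1..n-1} f(a + k*h) ),
// which is what compute_gold presumably evaluates serially. The host-only
// reference below spells that sum out; the integrand f(x) is a stand-in,
// since the real one is "hardwired" in trap_kernel.cu and not shown here,
// and doubles are used only to keep the round-off of ~1e5 additions small.
#include <cstdio>

static double f(double x) { return x * x; }          // stand-in integrand

static double trap_host(double a, double b, int n)
{
    double h = (b - a) / (double)n;                  // height of each trapezoid
    double sum = (f(a) + f(b)) / 2.0;
    for (int k = 1; k <= n - 1; ++k)                 // same bounds as the comment in compute_on_device
        sum += f(a + k * h);
    return sum * h;
}

int main(void)
{
    // Same endpoints as LEFT_ENDPOINT / RIGHT_ENDPOINT; the trapezoid count is illustrative.
    double approx = trap_host(5.0, 1000.0, 100000);
    double exact  = (1000.0 * 1000.0 * 1000.0 - 5.0 * 5.0 * 5.0) / 3.0;   // analytic integral of x^2
    printf("trapezoidal: %.4f   analytic: %.4f\n", approx, exact);
    return 0;
}
// -----------------------------------------------------------------------------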
7e7c1671b880e9d677c54565b455b897e25bc72c.cu
/* Purpose: Calculate definite integral using trapezoidal rule. * * Input: a, b, n * Output: Estimate of integral from a to b of f(x) * using n trapezoids. * * Compile: gcc -g -Wall -o trap trap.c -fopenmp * Usage: ./trap * * Note: The function f(x) is hardwired. * */ #ifdef _WIN32 # define NOMINMAX #endif // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <float.h> #include <time.h> // includes, project #include <cutil.h> // includes, kernels #include <trap_kernel.cu> //Err void check_fo_error(char *msg); //Globals for time float cpu_time; float gpu_slow; float gpu_fast; //Timer for timing time measured in time units unsigned int timer; #define LEFT_ENDPOINT 5 #define RIGHT_ENDPOINT 1000 //#define NUM_TRAPEZOIDS 100000 //MOVED TO _kernel.cu file float compute_on_device(float , float , int, float ,int); extern "C" float compute_gold(float , float , int, float ); int main(void) { int n = NUM_TRAPEZOIDS; float a = LEFT_ENDPOINT; float b = RIGHT_ENDPOINT; float h = (b-a)/(float)n; // Height of each trapezoid printf("The height of the trapezoid is %f \n", h); printf("Number of trapezoids: %d\n",n); //Start timer...NOW~!~!~!~! cutCreateTimer(&timer); cutStartTimer(timer); float reference = compute_gold(a, b, n, h); printf("Reference solution computed on the CPU = %f \n", reference); //Stop timer cutStopTimer(timer); cpu_time = 1e-3 * cutGetTimerValue(timer); float gpu_result_slow = compute_on_device(a, b, n, h, 1); printf(" Solution computed on the GPU (slow) = %f \n", gpu_result_slow); float slow_per_diff = 100*(fabs(gpu_result_slow -reference)/reference); printf(" Percent difference computed on the GPU (slow) = %f \n", slow_per_diff ); float gpu_result_fast = compute_on_device(a, b, n, h, 0); printf(" Solution computed on the GPU (fast) = %f \n", gpu_result_fast); float fast_per_diff = 100*(fabs(gpu_result_fast -reference)/reference); printf(" Percent difference computed on the GPU (fast) = %f \n", fast_per_diff ); //Speedup inffffffooo printf("== Speedup Info == \n"); printf("CPU Run time: %0.10f s. \n", cpu_time); printf("GPU (slow) Run time: %0.10f s. \n", gpu_slow); printf("GPU (fast) Run time: %0.10f s. \n", gpu_fast); float gpu_slow_speedup = cpu_time/gpu_slow; float gpu_fast_speedup = cpu_time/gpu_fast; printf("GPU (slow) Speedup: %0.10f\n", gpu_slow_speedup); printf("GPU (fast) Speedup: %0.10f\n",gpu_fast_speedup ); //For copy paste //printf("%d %f %f %f %f\n", NUM_TRAPEZOIDS,gpu_fast_speedup,gpu_slow_speedup,fast_per_diff,slow_per_diff); } /* Complete this function to perform the trapezoidal rule on the GPU. */ float compute_on_device(float a, float b, int n, float h, int do_slow) { //Allocate space on gpu //Room for the sum float * sum_on_gpu; int result_size = 1 * sizeof(float); cudaMalloc(&sum_on_gpu, result_size); //Calculate kernel params //How many threads per block? int threads_per_block = THREADS_PER_BLOCK; //How many calculations? //Defined by for (k = 1; k <= n-1; k++) //Example n=5, k=1,2,3,4 so num calcs total = n-1 int total_calcs = n-1; //Each thread will take a single calculation int calcs_per_thread = 1; //How many threads to use? int num_threads = ceil((float)total_calcs / (float)calcs_per_thread); //How many blocks? 
int num_blocks = ceil((float)num_threads / (float)threads_per_block); dim3 thread_block(threads_per_block, 1, 1); dim3 grid(num_blocks,1); //Call kernel //do_slow indicates if this should execute the slow or fast kernel if(do_slow) { printf("== Slow Kernel==\n"); //Also allocate space for dumb storage of each threads result float * gpu_thread_results; //All threads (useful or not) will calculate a result to avoid //warp divergence...even though this is so slow it won't matter cudaMalloc(&gpu_thread_results, num_blocks*threads_per_block * sizeof(float)); //Start timer...NOW~!~!~!~! cutCreateTimer(&timer); cutStartTimer(timer); //Slow kernel trap_kernel_slow<<<grid, thread_block>>>(a, b, n, h,gpu_thread_results); //Sync cudaThreadSynchronize(); check_fo_error("trap_kernel_slow FAILURE"); //Summing slow kernel //REALLY slow...only one thread does summing //Obviously will be made better in optimized version dim3 thread_block_slow_sum(1, 1, 1); dim3 grid_slow_sum(1,1); trap_kernel_slow_sum<<<grid_slow_sum, thread_block_slow_sum>>>(sum_on_gpu, a, b, n, h, gpu_thread_results); //Sync at end cudaThreadSynchronize(); check_fo_error("trap_kernel_slow_sum FAILURE"); //Stop timer cutStopTimer(timer); gpu_slow = 1e-3 * cutGetTimerValue(timer); } else { printf("== Fast Kernel==\n"); //Each thread does one iteration, placing value in shared mem //One thread from each block sums shared mem and writes to global //Threads in block will each take one iteration //One thread form block will sum all threads results //Same one thread will write into global memory float * gpu_block_results; cudaMalloc(&gpu_block_results, num_blocks * sizeof(float)); //Start timer...NOW~!~!~!~! cutCreateTimer(&timer); cutStartTimer(timer); //Fast kernel trap_kernel_fast<<<grid, thread_block>>>(n,a,h,gpu_block_results); //Sync cudaThreadSynchronize(); check_fo_error("trap_kernel_fast FAILURE"); //DEBUG////// /*float result = 0; cudaMemcpy(&result, gpu_block_results, result_size, cudaMemcpyDeviceToHost); printf("sum_on_gpu before fast sum: %f\n", result); float tmp_result = (result + ((f(a) + f(b))/2.0))*h; printf("result before fast sum: %f\n", tmp_result); ////////////////////// */ //Use another kernel to sum block results in global mem //There are now 'num_blocks' floats in global memory int global_mem_floats = num_blocks; //To sync summation and use only shared memory we can only have //one thread block num_blocks = 1; //threads per block threads_per_block = THREADS_PER_BLOCK; num_threads = threads_per_block * num_blocks; //Use tree based approach //First level of tree is reading from global mem //Want second level to be in shared mem (get out of global as // soon as possible) /* |-----x------| |g g g g| g g g g| g g g g| <<< Single block s s s <<< condensing/summing into shared mem */ //How many floats can fit in shared mem? 
int shared_mem_per_block_size = 16384; int max_floats_per_block = shared_mem_per_block_size / sizeof(float); //Each thread sums some global floats into shared mem int global_floats_per_thread = ceil((float)global_mem_floats/(float)num_threads); dim3 thread_block_fast_sum(threads_per_block, 1, 1); dim3 grid_fast_sum(num_blocks,1); //Fast sum kernel trap_kernel_fast_sum<<<grid_fast_sum, thread_block_fast_sum>>>(a,b,h,sum_on_gpu, global_floats_per_thread, global_mem_floats, gpu_block_results); //Sync cudaThreadSynchronize(); check_fo_error("trap_kernel_fast_sum FAILURE"); //Stop timer cutStopTimer(timer); gpu_fast = 1e-3 * cutGetTimerValue(timer); printf(" max_floats_per_block: %d\n",max_floats_per_block); printf(" global_mem_floats: %d\n",global_mem_floats); printf(" global_floats_per_thread: %d\n",global_floats_per_thread); } printf(" Threads per block: %d\n",threads_per_block); printf(" Number of blocks: %d\n",num_blocks); int actual_threads = threads_per_block*num_blocks; printf(" Number of threads: %d\n",actual_threads); //Copy back the result float result = 0; cudaMemcpy(&result, sum_on_gpu, result_size, cudaMemcpyDeviceToHost); //Return result return result; } //Error helper void check_fo_error(char *msg){ cudaError_t err = cudaGetLastError(); if(cudaSuccess != err){ printf("CUDA ERROR: %s (%s). \n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } }
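//
// --- Illustrative sketch (not part of the original file) --------------------
// The "fast" path above describes the classic two-stage pattern: each thread
// writes its contribution into shared memory, the block reduces it with a
// tree, and one thread per block writes a partial sum to global memory that a
// second kernel (or the host) finishes off. trap_kernel_fast/_fast_sum live
// in trap_kernel.cu and are not shown here, so the generic block reduction
// below is an editor's sketch of that technique, with an illustrative TPB and
// a host-side final pass instead of a second kernel.
#include <cstdio>
#include <cuda_runtime.h>

#define TPB 256   // stand-in for THREADS_PER_BLOCK (must be a power of two here)

__global__ void block_sum(const float *in, float *block_out, int n)
{
    __shared__ float s[TPB];
    int tid = threadIdx.x;
    int i = blockIdx.x * blockDim.x + threadIdx.x;

    s[tid] = (i < n) ? in[i] : 0.0f;              // load, padding with 0 past the end
    __syncthreads();

    // Halve the number of active threads each step (tree reduction in shared mem).
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (tid < stride) s[tid] += s[tid + stride];
        __syncthreads();
    }
    if (tid == 0) block_out[blockIdx.x] = s[0];   // one partial sum per block
}

int main(void)
{
    const int n = 10000;
    float *h_in = new float[n];
    for (int i = 0; i < n; ++i) h_in[i] = 1.0f;   // so the expected sum is n

    int blocks = (n + TPB - 1) / TPB;
    float *d_in = NULL, *d_out = NULL;
    cudaMalloc((void **)&d_in,  n * sizeof(float));
    cudaMalloc((void **)&d_out, blocks * sizeof(float));
    cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);

    block_sum<<<blocks, TPB>>>(d_in, d_out, n);

    float *h_out = new float[blocks];
    cudaMemcpy(h_out, d_out, blocks * sizeof(float), cudaMemcpyDeviceToHost);
    float total = 0.0f;
    for (int b = 0; b < blocks; ++b) total += h_out[b];   // final pass on the host
    printf("sum = %f (expected %d)\n", total, n);

    cudaFree(d_in); cudaFree(d_out);
    delete[] h_in; delete[] h_out;
    return 0;
}
// -----------------------------------------------------------------------------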
2ed02c9619db88126b6bc28853e0b913e6b8493d.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <cmath> #include "hip/device_functions.h" #include "rocblas.h" #include <hip/hip_runtime.h> #define MAX_NGL 5 #define TPB_1 96 #define TPB_2 32 using namespace std; //------- compute rhs kernel ---------------------------------------------------------- // ----- new version with additional ranks of threads at the nqs level and atomic operations __global__ void compute_rhs_cuda(float * rhs, float * q, float * u, float * v, float * ksi_x, float * ksi_y, float * eta_x, float * eta_y, float * jac, float * psi, float * psi_ksi, float * psi_eta, int nelem, int npts, int nqs){ int l_tid = threadIdx.x; int l_block = blockDim.x; int tid = threadIdx.x+blockIdx.x*blockDim.x; int k = threadIdx.y; if(tid<nelem){ if(k<npts){ float wq; float e_x; float e_y; float n_x; float n_y; float u_k; float v_k; float q_k; float h_k; float h_e; float h_n; float dhdx_k; float dhdy_k; __shared__ float rhs_s[TPB_1*MAX_NGL*MAX_NGL]; //cooperatively load rhs for the block into shared memory // rhs[nelem*k+tid] = 0.0; rhs_s[l_block*k+l_tid] = 0.0; __syncthreads(); int dof = k*nelem+tid; wq = jac[dof]; e_x = ksi_x[dof]; e_y = ksi_y[dof]; n_x = eta_x[dof]; n_y = eta_y[dof]; u_k = 0.0; v_k = 0.0; q_k = 0.0; for(int i=0;i<npts;i++){ h_k = psi[k*npts+i]; u_k = u_k +h_k*u[i*nelem+tid]; v_k = v_k+h_k*v[i*nelem+tid]; q_k = q_k+h_k*q[i*nelem+tid]; }//if(int i... for(int i=0;i<npts;i++){ h_e = psi_ksi[k*npts+i]; h_n = psi_eta[k*npts+i]; dhdx_k=h_e*e_x+h_n*n_x; dhdy_k=h_e*e_y+h_n*n_y; //atomicAdd(&rhs[i*nelem+tid],wq*q_k*(dhdx_k*u_k+dhdy_k*v_k)); atomicAdd(&rhs_s[i*l_block+l_tid],wq*q_k*(dhdx_k*u_k+dhdy_k*v_k)); }//for(int i=... __syncthreads(); //cooperatively load rhs_s back to global memory rhs[k*nelem+tid]=rhs_s[k*l_block+l_tid]; } }//if(tid<nelem) } //-------------- compute flux kernel --------------------------------------------------- __global__ void compute_flux_cuda(float * rhs, float * q, float * u, float * v, int * psideh, float * nx, float * ny, float * jac_side, float * psi, int nside, int ngl, int nq, int * imapl, int * imapr,int nelem){ //each thread updates a side int is = threadIdx.x + blockIdx.x*blockDim.x; //int l_is = threadIdx.x; if(is<nside){ int iel = psideh[2*nside+is];//element on rhs of side //float old; if(iel != -6){ //some local variables //int nqs = nq*nq; //int ilocl,il,jl,kl,ier,ilocr,ir,jr,kr; int ilocl, ilocr, ier, i_tmp,j_tmp,k_tmp; float wq, nxl,nyl,nxr,nyr,qlq_k, qrq_k,u_k,v_k,ul_k, vl_k, ur_k,vr_k; float unl, unr, claml, clamr, clam;// fxl, fyl, fxr, fyr //float flux_ql; //float flux_qr; float flux_q; float diss_q,h_i; float ql[MAX_NGL]; float qr[MAX_NGL]; float ul[MAX_NGL]; float vl[MAX_NGL]; ilocl = psideh[(1-1)*nside+is]; //just being explicit about the indexing for(int l=0;l<ngl;l++){ //get pointers i_tmp = imapl[l*(4*2)+(1-1)*4+(ilocl-1)]; j_tmp = imapl[l*(4*2)+(2-1)*4+(ilocl-1)]; k_tmp = (j_tmp-1)*ngl+i_tmp-1; //kl is now zero-based pointer //left element ql[l] = q[k_tmp*nelem+(iel-1)]; ul[l] = u[k_tmp*nelem+(iel-1)]; vl[l] = v[k_tmp*nelem+(iel-1)]; }//for(int l=0... //store right side variables ier = psideh[(4-1)*nside + is]; if(ier != 0){ ilocr = psideh[(2-1)*nside+is]; for(int l = 0;l<ngl;l++){ i_tmp = imapr[l*(4*2)+(1-1)*4+(ilocr-1)]; j_tmp = imapr[l*(4*2)+(2-1)*4+(ilocr-1)]; k_tmp = (j_tmp-1)*ngl+i_tmp-1;//kr is now zero-based pointer qr[l] = q[k_tmp*nelem+(ier-1)]; }//for(int l=0... }//if(ier !=0... 
//do gauss-lobatto integration for(int l=0;l<nq;l++){ wq = jac_side[l*nside+is]; //store normal vectors nxl = nx[l*nside+is]; nyl = ny[l*nside+is]; nxr = -nxl; nyr = -nyl; //interpolate onto quad points qlq_k = 0.; qrq_k = 0.; u_k = 0.; v_k = 0.; for(int i=0;i<ngl;i++){ qlq_k = qlq_k+psi[l*ngl+i]*ql[i]; qrq_k = qrq_k+psi[l*ngl+i]*qr[i]; u_k = u_k + psi[l*ngl+i]*ul[i]; v_k = v_k+psi[l*ngl+i]*vl[i]; }//for(int i=0... ul_k = u_k; vl_k = v_k; ur_k = u_k; vr_k = v_k; //compute Rusanov flux constant unl = nxl*ul_k + nyl*vl_k; unr = nxl*ur_k+nyl*vr_k; claml=fabs(unl); clamr=fabs(unr); //clam = max(claml,clamr); if(claml > clamr){ clam = claml; }else{ clam = clamr; } // //flux variables // fxl = qlq_k*ul_k; // fyl = qlq_k*vl_k; // fxr = qrq_k*ur_k; // fyr = qrq_k*vr_k; // //normal flux component // flux_ql = nxl*fxl+nyl*fyl; // flux_qr = nxr*fxr+nyr*fyr; // flux_q = flux_ql-flux_qr; flux_q = nxl*qlq_k*ul_k+nyl*qlq_k*vl_k-(nxr*qrq_k*ur_k+nyr*qrq_k*vr_k); //dissipation term diss_q = clam*(qrq_k-qlq_k); //construct Rusanov flux flux_q = 0.5*(flux_q-diss_q); // //loop through side interpolation points // cout << "side = " << is+1 << endl; for(int i=0;i<ngl;i++){ h_i = psi[i*ngl+l]; //left side i_tmp=imapl[i*(4*2)+(1-1)*4+(ilocl-1)]; j_tmp=imapl[i*(4*2)+(2-1)*4+(ilocl-1)]; k_tmp = (j_tmp-1)*ngl+i_tmp-1; // cout << "ngl = " << i+1 << ", incr = " << // -wq*h_i*flux_q << endl; //rhs[kl*nelem+iel-1]-=wq*h_i*flux_q; atomicAdd(&rhs[k_tmp*nelem+iel-1],-wq*h_i*flux_q); //right side if(ier > 0){ i_tmp = imapr[i*(4*2)+(1-1)*4 +(ilocr-1)]; j_tmp = imapr[i*(4*2)+(2-1)*4 +(ilocr-1)]; k_tmp = (j_tmp-1)*ngl+i_tmp - 1; // cout << "right side, ngl = " << i+1 << // ", incr = " << wq*h_i*flux_q << endl; //rhs[kr*nelem+ier-1]+=wq*h_i*flux_q; atomicAdd(&rhs[k_tmp*nelem+ier-1],wq*h_i*flux_q); }//if(ier... }//for(int i... }//for(int l... }//skip sides with parallel BCs }//if(is<nside... } //------------------------------------------------------------------------------------- //---- apply inverse mass-matrix kernel ----------------------------------------------- __global__ void apply_invM_cuda(float * rhs, float * invM, int nelem, int npts){ int l_dof= threadIdx.x; int blk_sz = blockDim.x; int ie = threadIdx.x + blockIdx.x*blockDim.x; int l = threadIdx.y; //which of npts rows this thread processes if(ie<nelem){ if(l<npts){ //all threads "should" satisfy this...not sure though //all threads collaboratively load rhs into shared memory __shared__ float rhs_s[TPB_2*MAX_NGL*MAX_NGL]; rhs_s[blk_sz*l+l_dof] = rhs[l*nelem+ie]; __syncthreads(); float tmpRHS = 0.0; int invM_dof; int rhs_dof; //for(int m=0;m<npts;m++){ //invM_dof = ie*npts*npts+m*npts+l; invM_dof = ie*npts*npts+l*npts+l; //rhs_dof = m*nelem+ie; //rhs_dof = m*blk_sz+l_dof; rhs_dof=l*blk_sz+l_dof; tmpRHS+=invM[invM_dof]*rhs_s[rhs_dof]; //} __syncthreads();//make sure all l-threads are done using //this row of the rhs rhs[l*nelem+ie] = tmpRHS; }//doing on row of the invM against one row of rhs... 
}//only do the right number of elements } // ------------------------------------------------------------------------------------ __global__ void interstage_update(float * q0, float * q1, float *qp, float *rhs, float a0, float a1, float dtt,int nelem, int npts){ int ie = threadIdx.x+blockIdx.x*blockDim.x; int l = threadIdx.y; if(ie<nelem){ if(l<npts){ int dof = l*nelem+ie; qp[dof]=a0*q0[dof]+a1*q1[dof]+dtt*rhs[dof]; } } } void print_q(float * q_d, float * q, int nelem, int npts){ //copy q_d to q hipMemcpy(q,q_d,nelem*npts*sizeof(float),hipMemcpyDeviceToHost); //now print out q for(int e = 0;e<nelem;e++){ cout << endl; for(int l=0;l<npts;l++){ cout << q[l*nelem+e]; if(l<(npts-1)){ cout << ", "; } } } cout << endl; } //-------- main driver function ------------------------------------------------------- extern "C" void dg_advection_2d_ntp_cuda_(int* ntime_ptr, float * dt_ptr, int* kstages_ptr, float * q0, float * u0, float * v0, float * psi, float * ksi2d_x, float * ksi2d_y, float * eta2d_x, float * eta2d_y, float * jac2d, float * psi2d, float * psi2d_ksi, float * psi2d_eta, int * intma2d, int * psideh, float * jac_side, int * imapl, int * imapr, float * nx, float * ny, int* nelem_ptr, int* npts_ptr, int* nqs_ptr, int* ngl_ptr, int* nq_ptr, float * Mmatrix_inv, int* nside_ptr){ int ntime, kstages, nelem, npts, nqs, ngl, nq, nside; ntime = *ntime_ptr; kstages = *kstages_ptr; nelem = *nelem_ptr; npts = *npts_ptr; nqs = *nqs_ptr; ngl = *ngl_ptr; nq = *nq_ptr; nside = *nside_ptr; float dt, a0, a1,beta,dtt; const int time_step_rep_freq = 100; dt = *dt_ptr; float * rhs = new float[npts*nelem]; float * invm = new float[npts*npts*nelem]; float * q_print = new float[npts*nelem]; for(int e=0;e<nelem;e++){ for(int i=0;i<npts;i++){ for(int j=0;j<npts;j++){ invm[e*npts*npts+i*npts+j]=Mmatrix_inv[i*nelem*npts+j*nelem+e]; } } } //declare GPU variables float * rhs_d; float * qp_d; float * q1_d; float * q0_d; float * u0_d; float * v0_d; float * ksi2d_x_d; float * ksi2d_y_d; float * eta2d_x_d; float * eta2d_y_d; float * jac2d_d; float * psi2d_d; float * psi2d_ksi_d; float * psi2d_eta_d; int * psideh_d; float * nx_d; float * ny_d; float * jac_side_d; float * psi_d; int * imapl_d; int * imapr_d; float * invm_d; hipblasStatus_t stat; hipblasHandle_t handle; stat = hipblasCreate(&handle); if(stat!=HIPBLAS_STATUS_SUCCESS){ cout << "CUBLAS initialization failure!" 
<< endl; return; } //allocate space on GPU hipMalloc((void**)&rhs_d,(nelem*npts)*sizeof(float)); hipMalloc((void**)&qp_d,(nelem*npts)*sizeof(float)); hipMalloc((void**)&q1_d,(nelem*npts)*sizeof(float)); hipMalloc((void**)&q0_d,(nelem*npts)*sizeof(float)); hipMalloc((void**)&u0_d,(nelem*npts)*sizeof(float)); hipMalloc((void**)&v0_d,(nelem*npts)*sizeof(float)); hipMalloc((void**)&ksi2d_x_d,(nelem*nqs)*sizeof(float)); hipMalloc((void**)&ksi2d_y_d,(nelem*nqs)*sizeof(float)); hipMalloc((void**)&eta2d_x_d,(nelem*nqs)*sizeof(float)); hipMalloc((void**)&eta2d_y_d,(nelem*nqs)*sizeof(float)); hipMalloc((void**)&jac2d_d,(nelem*nqs)*sizeof(float)); hipMalloc((void**)&psi2d_d,(npts*nqs)*sizeof(float)); hipMalloc((void**)&psi2d_ksi_d,(npts*nqs)*sizeof(float)); hipMalloc((void**)&psi2d_eta_d,(npts*nqs)*sizeof(float)); hipMalloc((void**)&psideh_d,(nside*4)*sizeof(int)); hipMalloc((void**)&nx_d,(nside*nq)*sizeof(float)); hipMalloc((void**)&ny_d,(nside*nq)*sizeof(float)); hipMalloc((void**)&jac_side_d,(nside*nq)*sizeof(float)); hipMalloc((void**)&psi_d,(ngl*nq)*sizeof(float)); hipMalloc((void**)&imapl_d,(4*2*ngl)*sizeof(int)); hipMalloc((void**)&imapr_d,(4*2*ngl)*sizeof(int)); hipMalloc((void**)&invm_d,(nelem*npts*npts)*sizeof(float)); //transfer data to the GPU hipMemcpy(rhs_d,rhs,nelem*npts*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(qp_d,q0,nelem*npts*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(q1_d,q0,nelem*npts*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(q0_d,q0,nelem*npts*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(u0_d,u0,nelem*npts*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(v0_d,v0,nelem*npts*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(ksi2d_x_d,ksi2d_x,nelem*nqs*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(ksi2d_y_d,ksi2d_y,nelem*nqs*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(eta2d_x_d,eta2d_x,nelem*nqs*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(eta2d_y_d,eta2d_y,nelem*nqs*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(jac2d_d,jac2d,nelem*nqs*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(psi2d_d,psi2d,npts*nqs*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(psi2d_ksi_d,psi2d_ksi,npts*nqs*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(psi2d_eta_d,psi2d_eta,npts*nqs*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(psideh_d,psideh,nside*4*sizeof(int),hipMemcpyHostToDevice); hipMemcpy(nx_d,nx,nside*nq*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(ny_d,ny,nside*nq*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(jac_side_d,jac_side,nside*nq*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(psi_d,psi,ngl*nq*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(imapl_d,imapl,4*2*ngl*sizeof(int),hipMemcpyHostToDevice); hipMemcpy(imapr_d,imapr,4*2*ngl*sizeof(int),hipMemcpyHostToDevice); hipMemcpy(invm_d,invm,nelem*npts*npts*sizeof(float),hipMemcpyHostToDevice); //establish thread launch configuration //dim3 dimBlock1(TPB_1,npts,1); dim3 dimBlock1(TPB_1,1,1); dim3 dimBlock_rhs(TPB_1,npts,1); dim3 dimGrid1((nelem+TPB_1-1)/TPB_1,1,1);//for rhs, inv mass matrix dim3 dimGrid2((nside+TPB_1-1)/TPB_1,1,1);//for computing/applying flux dim3 dimBlock2(TPB_2,npts,1);//for inverting mass matrix dim3 dimGrid3((nelem+TPB_2-1)/TPB_2,1,1);//for inverting mass matrix dim3 dimBlock4(TPB_2,npts,1); dim3 dimGrid4((nelem+TPB_2-1)/TPB_2,1,1);//for inter-stage updates hipFuncSetCacheConfig(compute_flux_cuda,hipFuncCachePreferL1); //commence time-stepping // cout << "About to commence time stepping, q0_d = " << endl; // print_q(q0_d,q_print,nelem,npts); for(int itime = 0; itime<ntime;itime++){ 
//outpu progress if((itime+1)%time_step_rep_freq == 0){ cout << "Commencing time step " << itime+1 << endl; } //Explicit RK stages for(int ik=1;ik<=kstages;ik++){ switch (kstages) { case 2: switch (ik) { case 1: a0 = 1.0; a1 = 0.0; beta = 1.0; break; case 2: a0 = 0.5; a1 = 0.5; beta = 0.5; }//case (2) switch (ik) break; case 3: switch (ik) { case 1: a0 = 1.0; a1 = 0.0; beta = 1.0; break; case 2: a0 = 3.0/4.0; a1 = 1.0/4.0; beta = 1.0/4.0; break; case 3: a0 = 1.0/3.0; a1 = 2.0/3.0; beta = 2.0/3.0; }//switch(ik) }//switch (kstages) dtt = dt*beta; //compute rhs hipLaunchKernelGGL(( compute_rhs_cuda), dim3(dimGrid1),dim3(dimBlock_rhs), 0, 0, rhs_d,qp_d,u0_d,v0_d,ksi2d_x_d, ksi2d_y_d,eta2d_x_d,eta2d_y_d, jac2d_d,psi2d_d,psi2d_ksi_d, psi2d_eta_d,nelem,npts,nqs); // cout << "After compute_rhs, rhs_d = "<< endl; // print_q(rhs_d,q_print,nelem,npts); //compute/apply flux hipLaunchKernelGGL(( compute_flux_cuda), dim3(dimGrid2),dim3(dimBlock1), 0, 0, rhs_d,qp_d,u0_d,v0_d,psideh_d,nx_d,ny_d, jac_side_d,psi_d,nside,ngl,nq,imapl_d, imapr_d,nelem); // cout << "After compute_flux, rhs_d = " << endl; // print_q(rhs_d,q_print,nelem,npts); //apply inverse mass matrix (possibly re-structure inverse mass matrix to allow using cublas/cusparse) hipLaunchKernelGGL(( apply_invM_cuda), dim3(dimGrid3),dim3(dimBlock2), 0, 0, rhs_d,invm_d,nelem,npts); // cout << "After applying inverse mass matrix, rhs_d = " << endl; // print_q(rhs_d,q_print,nelem,npts); // cout << "Before inter-stage update, q0_d = " << endl; // print_q(q0_d,q_print,nelem,npts); // cout << "Before inter-stage update, q1_d = " << endl; // print_q(q1_d,q_print,nelem,npts); // cout << "Before inter-stage update, qp_d = " << endl; // print_q(qp_d,q_print,nelem,npts); // cout << "dtt = " << dtt; // cout << "a0 = " << a0; // cout << "a1 = " << a1; //perform inter-stage updates //qp_d = a0*q0_d + a1*q1_d + dtt*rhs_d stat=hipblasScopy(handle,nelem*npts,rhs_d,1,qp_d,1);//qp_d=rhs_d stat=hipblasSscal(handle,nelem*npts,&dtt,qp_d,1);//qp_d=dtt*qp_d stat=hipblasSaxpy(handle,nelem*npts,&a1,q1_d,1,qp_d,1); //qp_d = qp_d+a1*q1_d stat=hipblasSaxpy(handle,nelem*npts,&a0,q0_d,1,qp_d,1);//qp_d = qp_d+a0*q0_d // interstage_update<<<dimGrid4,dimBlock4>>>(q0_d,q1_d,qp_d,rhs_d,a0,a1,dtt,nelem,npts); // cout << "After inter-stage update, qp_d = " << endl; // print_q(qp_d,q_print,nelem,npts); //q1_d = qp_d stat=hipblasScopy(handle,nelem*npts,qp_d,1,q1_d,1); }//for(int ik... //q0_d = qp_d stat=hipblasScopy(handle,nelem*npts,qp_d,1,q0_d,1); // cout << "After all stages, q0_d = " << endl; // print_q(q0_d,q_print,nelem,npts); } //end time-stepping section, transfer needed output data back from GPU hipMemcpy(q0,q0_d,nelem*npts*sizeof(float),hipMemcpyDeviceToHost); //de-allocate memory from GPU hipblasDestroy(handle); hipFree(rhs_d); hipFree(qp_d); hipFree(q1_d); hipFree(q0_d); hipFree(u0_d); hipFree(v0_d); hipFree(ksi2d_x_d); hipFree(ksi2d_y_d); hipFree(eta2d_x_d); hipFree(eta2d_y_d); hipFree(jac2d_d); hipFree(psi2d_d); hipFree(psi2d_ksi_d); hipFree(psi2d_eta_d); hipFree(psideh_d); hipFree(nx_d); hipFree(ny_d); hipFree(jac_side_d); hipFree(imapl_d); hipFree(imapr_d); hipFree(invm_d); //de-allocate any local memory delete [] rhs; delete [] invm; delete [] q_print; }
2ed02c9619db88126b6bc28853e0b913e6b8493d.cu
#include <iostream> #include <cmath> #include "device_functions.h" #include "cublas_v2.h" #include <cuda_runtime.h> #define MAX_NGL 5 #define TPB_1 96 #define TPB_2 32 using namespace std; //------- compute rhs kernel ---------------------------------------------------------- // ----- new version with additional ranks of threads at the nqs level and atomic operations __global__ void compute_rhs_cuda(float * rhs, float * q, float * u, float * v, float * ksi_x, float * ksi_y, float * eta_x, float * eta_y, float * jac, float * psi, float * psi_ksi, float * psi_eta, int nelem, int npts, int nqs){ int l_tid = threadIdx.x; int l_block = blockDim.x; int tid = threadIdx.x+blockIdx.x*blockDim.x; int k = threadIdx.y; if(tid<nelem){ if(k<npts){ float wq; float e_x; float e_y; float n_x; float n_y; float u_k; float v_k; float q_k; float h_k; float h_e; float h_n; float dhdx_k; float dhdy_k; __shared__ float rhs_s[TPB_1*MAX_NGL*MAX_NGL]; //cooperatively load rhs for the block into shared memory // rhs[nelem*k+tid] = 0.0; rhs_s[l_block*k+l_tid] = 0.0; __syncthreads(); int dof = k*nelem+tid; wq = jac[dof]; e_x = ksi_x[dof]; e_y = ksi_y[dof]; n_x = eta_x[dof]; n_y = eta_y[dof]; u_k = 0.0; v_k = 0.0; q_k = 0.0; for(int i=0;i<npts;i++){ h_k = psi[k*npts+i]; u_k = u_k +h_k*u[i*nelem+tid]; v_k = v_k+h_k*v[i*nelem+tid]; q_k = q_k+h_k*q[i*nelem+tid]; }//if(int i... for(int i=0;i<npts;i++){ h_e = psi_ksi[k*npts+i]; h_n = psi_eta[k*npts+i]; dhdx_k=h_e*e_x+h_n*n_x; dhdy_k=h_e*e_y+h_n*n_y; //atomicAdd(&rhs[i*nelem+tid],wq*q_k*(dhdx_k*u_k+dhdy_k*v_k)); atomicAdd(&rhs_s[i*l_block+l_tid],wq*q_k*(dhdx_k*u_k+dhdy_k*v_k)); }//for(int i=... __syncthreads(); //cooperatively load rhs_s back to global memory rhs[k*nelem+tid]=rhs_s[k*l_block+l_tid]; } }//if(tid<nelem) } //-------------- compute flux kernel --------------------------------------------------- __global__ void compute_flux_cuda(float * rhs, float * q, float * u, float * v, int * psideh, float * nx, float * ny, float * jac_side, float * psi, int nside, int ngl, int nq, int * imapl, int * imapr,int nelem){ //each thread updates a side int is = threadIdx.x + blockIdx.x*blockDim.x; //int l_is = threadIdx.x; if(is<nside){ int iel = psideh[2*nside+is];//element on rhs of side //float old; if(iel != -6){ //some local variables //int nqs = nq*nq; //int ilocl,il,jl,kl,ier,ilocr,ir,jr,kr; int ilocl, ilocr, ier, i_tmp,j_tmp,k_tmp; float wq, nxl,nyl,nxr,nyr,qlq_k, qrq_k,u_k,v_k,ul_k, vl_k, ur_k,vr_k; float unl, unr, claml, clamr, clam;// fxl, fyl, fxr, fyr //float flux_ql; //float flux_qr; float flux_q; float diss_q,h_i; float ql[MAX_NGL]; float qr[MAX_NGL]; float ul[MAX_NGL]; float vl[MAX_NGL]; ilocl = psideh[(1-1)*nside+is]; //just being explicit about the indexing for(int l=0;l<ngl;l++){ //get pointers i_tmp = imapl[l*(4*2)+(1-1)*4+(ilocl-1)]; j_tmp = imapl[l*(4*2)+(2-1)*4+(ilocl-1)]; k_tmp = (j_tmp-1)*ngl+i_tmp-1; //kl is now zero-based pointer //left element ql[l] = q[k_tmp*nelem+(iel-1)]; ul[l] = u[k_tmp*nelem+(iel-1)]; vl[l] = v[k_tmp*nelem+(iel-1)]; }//for(int l=0... //store right side variables ier = psideh[(4-1)*nside + is]; if(ier != 0){ ilocr = psideh[(2-1)*nside+is]; for(int l = 0;l<ngl;l++){ i_tmp = imapr[l*(4*2)+(1-1)*4+(ilocr-1)]; j_tmp = imapr[l*(4*2)+(2-1)*4+(ilocr-1)]; k_tmp = (j_tmp-1)*ngl+i_tmp-1;//kr is now zero-based pointer qr[l] = q[k_tmp*nelem+(ier-1)]; }//for(int l=0... }//if(ier !=0... 
//do gauss-lobatto integration for(int l=0;l<nq;l++){ wq = jac_side[l*nside+is]; //store normal vectors nxl = nx[l*nside+is]; nyl = ny[l*nside+is]; nxr = -nxl; nyr = -nyl; //interpolate onto quad points qlq_k = 0.; qrq_k = 0.; u_k = 0.; v_k = 0.; for(int i=0;i<ngl;i++){ qlq_k = qlq_k+psi[l*ngl+i]*ql[i]; qrq_k = qrq_k+psi[l*ngl+i]*qr[i]; u_k = u_k + psi[l*ngl+i]*ul[i]; v_k = v_k+psi[l*ngl+i]*vl[i]; }//for(int i=0... ul_k = u_k; vl_k = v_k; ur_k = u_k; vr_k = v_k; //compute Rusanov flux constant unl = nxl*ul_k + nyl*vl_k; unr = nxl*ur_k+nyl*vr_k; claml=fabs(unl); clamr=fabs(unr); //clam = max(claml,clamr); if(claml > clamr){ clam = claml; }else{ clam = clamr; } // //flux variables // fxl = qlq_k*ul_k; // fyl = qlq_k*vl_k; // fxr = qrq_k*ur_k; // fyr = qrq_k*vr_k; // //normal flux component // flux_ql = nxl*fxl+nyl*fyl; // flux_qr = nxr*fxr+nyr*fyr; // flux_q = flux_ql-flux_qr; flux_q = nxl*qlq_k*ul_k+nyl*qlq_k*vl_k-(nxr*qrq_k*ur_k+nyr*qrq_k*vr_k); //dissipation term diss_q = clam*(qrq_k-qlq_k); //construct Rusanov flux flux_q = 0.5*(flux_q-diss_q); // //loop through side interpolation points // cout << "side = " << is+1 << endl; for(int i=0;i<ngl;i++){ h_i = psi[i*ngl+l]; //left side i_tmp=imapl[i*(4*2)+(1-1)*4+(ilocl-1)]; j_tmp=imapl[i*(4*2)+(2-1)*4+(ilocl-1)]; k_tmp = (j_tmp-1)*ngl+i_tmp-1; // cout << "ngl = " << i+1 << ", incr = " << // -wq*h_i*flux_q << endl; //rhs[kl*nelem+iel-1]-=wq*h_i*flux_q; atomicAdd(&rhs[k_tmp*nelem+iel-1],-wq*h_i*flux_q); //right side if(ier > 0){ i_tmp = imapr[i*(4*2)+(1-1)*4 +(ilocr-1)]; j_tmp = imapr[i*(4*2)+(2-1)*4 +(ilocr-1)]; k_tmp = (j_tmp-1)*ngl+i_tmp - 1; // cout << "right side, ngl = " << i+1 << // ", incr = " << wq*h_i*flux_q << endl; //rhs[kr*nelem+ier-1]+=wq*h_i*flux_q; atomicAdd(&rhs[k_tmp*nelem+ier-1],wq*h_i*flux_q); }//if(ier... }//for(int i... }//for(int l... }//skip sides with parallel BCs }//if(is<nside... } //------------------------------------------------------------------------------------- //---- apply inverse mass-matrix kernel ----------------------------------------------- __global__ void apply_invM_cuda(float * rhs, float * invM, int nelem, int npts){ int l_dof= threadIdx.x; int blk_sz = blockDim.x; int ie = threadIdx.x + blockIdx.x*blockDim.x; int l = threadIdx.y; //which of npts rows this thread processes if(ie<nelem){ if(l<npts){ //all threads "should" satisfy this...not sure though //all threads collaboratively load rhs into shared memory __shared__ float rhs_s[TPB_2*MAX_NGL*MAX_NGL]; rhs_s[blk_sz*l+l_dof] = rhs[l*nelem+ie]; __syncthreads(); float tmpRHS = 0.0; int invM_dof; int rhs_dof; //for(int m=0;m<npts;m++){ //invM_dof = ie*npts*npts+m*npts+l; invM_dof = ie*npts*npts+l*npts+l; //rhs_dof = m*nelem+ie; //rhs_dof = m*blk_sz+l_dof; rhs_dof=l*blk_sz+l_dof; tmpRHS+=invM[invM_dof]*rhs_s[rhs_dof]; //} __syncthreads();//make sure all l-threads are done using //this row of the rhs rhs[l*nelem+ie] = tmpRHS; }//doing on row of the invM against one row of rhs... 
}//only do the right number of elements } // ------------------------------------------------------------------------------------ __global__ void interstage_update(float * q0, float * q1, float *qp, float *rhs, float a0, float a1, float dtt,int nelem, int npts){ int ie = threadIdx.x+blockIdx.x*blockDim.x; int l = threadIdx.y; if(ie<nelem){ if(l<npts){ int dof = l*nelem+ie; qp[dof]=a0*q0[dof]+a1*q1[dof]+dtt*rhs[dof]; } } } void print_q(float * q_d, float * q, int nelem, int npts){ //copy q_d to q cudaMemcpy(q,q_d,nelem*npts*sizeof(float),cudaMemcpyDeviceToHost); //now print out q for(int e = 0;e<nelem;e++){ cout << endl; for(int l=0;l<npts;l++){ cout << q[l*nelem+e]; if(l<(npts-1)){ cout << ", "; } } } cout << endl; } //-------- main driver function ------------------------------------------------------- extern "C" void dg_advection_2d_ntp_cuda_(int* ntime_ptr, float * dt_ptr, int* kstages_ptr, float * q0, float * u0, float * v0, float * psi, float * ksi2d_x, float * ksi2d_y, float * eta2d_x, float * eta2d_y, float * jac2d, float * psi2d, float * psi2d_ksi, float * psi2d_eta, int * intma2d, int * psideh, float * jac_side, int * imapl, int * imapr, float * nx, float * ny, int* nelem_ptr, int* npts_ptr, int* nqs_ptr, int* ngl_ptr, int* nq_ptr, float * Mmatrix_inv, int* nside_ptr){ int ntime, kstages, nelem, npts, nqs, ngl, nq, nside; ntime = *ntime_ptr; kstages = *kstages_ptr; nelem = *nelem_ptr; npts = *npts_ptr; nqs = *nqs_ptr; ngl = *ngl_ptr; nq = *nq_ptr; nside = *nside_ptr; float dt, a0, a1,beta,dtt; const int time_step_rep_freq = 100; dt = *dt_ptr; float * rhs = new float[npts*nelem]; float * invm = new float[npts*npts*nelem]; float * q_print = new float[npts*nelem]; for(int e=0;e<nelem;e++){ for(int i=0;i<npts;i++){ for(int j=0;j<npts;j++){ invm[e*npts*npts+i*npts+j]=Mmatrix_inv[i*nelem*npts+j*nelem+e]; } } } //declare GPU variables float * rhs_d; float * qp_d; float * q1_d; float * q0_d; float * u0_d; float * v0_d; float * ksi2d_x_d; float * ksi2d_y_d; float * eta2d_x_d; float * eta2d_y_d; float * jac2d_d; float * psi2d_d; float * psi2d_ksi_d; float * psi2d_eta_d; int * psideh_d; float * nx_d; float * ny_d; float * jac_side_d; float * psi_d; int * imapl_d; int * imapr_d; float * invm_d; cublasStatus_t stat; cublasHandle_t handle; stat = cublasCreate(&handle); if(stat!=CUBLAS_STATUS_SUCCESS){ cout << "CUBLAS initialization failure!" 
<< endl; return; } //allocate space on GPU cudaMalloc((void**)&rhs_d,(nelem*npts)*sizeof(float)); cudaMalloc((void**)&qp_d,(nelem*npts)*sizeof(float)); cudaMalloc((void**)&q1_d,(nelem*npts)*sizeof(float)); cudaMalloc((void**)&q0_d,(nelem*npts)*sizeof(float)); cudaMalloc((void**)&u0_d,(nelem*npts)*sizeof(float)); cudaMalloc((void**)&v0_d,(nelem*npts)*sizeof(float)); cudaMalloc((void**)&ksi2d_x_d,(nelem*nqs)*sizeof(float)); cudaMalloc((void**)&ksi2d_y_d,(nelem*nqs)*sizeof(float)); cudaMalloc((void**)&eta2d_x_d,(nelem*nqs)*sizeof(float)); cudaMalloc((void**)&eta2d_y_d,(nelem*nqs)*sizeof(float)); cudaMalloc((void**)&jac2d_d,(nelem*nqs)*sizeof(float)); cudaMalloc((void**)&psi2d_d,(npts*nqs)*sizeof(float)); cudaMalloc((void**)&psi2d_ksi_d,(npts*nqs)*sizeof(float)); cudaMalloc((void**)&psi2d_eta_d,(npts*nqs)*sizeof(float)); cudaMalloc((void**)&psideh_d,(nside*4)*sizeof(int)); cudaMalloc((void**)&nx_d,(nside*nq)*sizeof(float)); cudaMalloc((void**)&ny_d,(nside*nq)*sizeof(float)); cudaMalloc((void**)&jac_side_d,(nside*nq)*sizeof(float)); cudaMalloc((void**)&psi_d,(ngl*nq)*sizeof(float)); cudaMalloc((void**)&imapl_d,(4*2*ngl)*sizeof(int)); cudaMalloc((void**)&imapr_d,(4*2*ngl)*sizeof(int)); cudaMalloc((void**)&invm_d,(nelem*npts*npts)*sizeof(float)); //transfer data to the GPU cudaMemcpy(rhs_d,rhs,nelem*npts*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(qp_d,q0,nelem*npts*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(q1_d,q0,nelem*npts*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(q0_d,q0,nelem*npts*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(u0_d,u0,nelem*npts*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(v0_d,v0,nelem*npts*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(ksi2d_x_d,ksi2d_x,nelem*nqs*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(ksi2d_y_d,ksi2d_y,nelem*nqs*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(eta2d_x_d,eta2d_x,nelem*nqs*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(eta2d_y_d,eta2d_y,nelem*nqs*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(jac2d_d,jac2d,nelem*nqs*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(psi2d_d,psi2d,npts*nqs*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(psi2d_ksi_d,psi2d_ksi,npts*nqs*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(psi2d_eta_d,psi2d_eta,npts*nqs*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(psideh_d,psideh,nside*4*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(nx_d,nx,nside*nq*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(ny_d,ny,nside*nq*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(jac_side_d,jac_side,nside*nq*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(psi_d,psi,ngl*nq*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(imapl_d,imapl,4*2*ngl*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(imapr_d,imapr,4*2*ngl*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(invm_d,invm,nelem*npts*npts*sizeof(float),cudaMemcpyHostToDevice); //establish thread launch configuration //dim3 dimBlock1(TPB_1,npts,1); dim3 dimBlock1(TPB_1,1,1); dim3 dimBlock_rhs(TPB_1,npts,1); dim3 dimGrid1((nelem+TPB_1-1)/TPB_1,1,1);//for rhs, inv mass matrix dim3 dimGrid2((nside+TPB_1-1)/TPB_1,1,1);//for computing/applying flux dim3 dimBlock2(TPB_2,npts,1);//for inverting mass matrix dim3 dimGrid3((nelem+TPB_2-1)/TPB_2,1,1);//for inverting mass matrix dim3 dimBlock4(TPB_2,npts,1); dim3 dimGrid4((nelem+TPB_2-1)/TPB_2,1,1);//for inter-stage updates cudaFuncSetCacheConfig(compute_flux_cuda,cudaFuncCachePreferL1); //commence time-stepping // cout << "About to commence time stepping, q0_d = " << endl; // 
print_q(q0_d,q_print,nelem,npts); for(int itime = 0; itime<ntime;itime++){ //outpu progress if((itime+1)%time_step_rep_freq == 0){ cout << "Commencing time step " << itime+1 << endl; } //Explicit RK stages for(int ik=1;ik<=kstages;ik++){ switch (kstages) { case 2: switch (ik) { case 1: a0 = 1.0; a1 = 0.0; beta = 1.0; break; case 2: a0 = 0.5; a1 = 0.5; beta = 0.5; }//case (2) switch (ik) break; case 3: switch (ik) { case 1: a0 = 1.0; a1 = 0.0; beta = 1.0; break; case 2: a0 = 3.0/4.0; a1 = 1.0/4.0; beta = 1.0/4.0; break; case 3: a0 = 1.0/3.0; a1 = 2.0/3.0; beta = 2.0/3.0; }//switch(ik) }//switch (kstages) dtt = dt*beta; //compute rhs compute_rhs_cuda<<<dimGrid1,dimBlock_rhs>>>(rhs_d,qp_d,u0_d,v0_d,ksi2d_x_d, ksi2d_y_d,eta2d_x_d,eta2d_y_d, jac2d_d,psi2d_d,psi2d_ksi_d, psi2d_eta_d,nelem,npts,nqs); // cout << "After compute_rhs, rhs_d = "<< endl; // print_q(rhs_d,q_print,nelem,npts); //compute/apply flux compute_flux_cuda<<<dimGrid2,dimBlock1>>>(rhs_d,qp_d,u0_d,v0_d,psideh_d,nx_d,ny_d, jac_side_d,psi_d,nside,ngl,nq,imapl_d, imapr_d,nelem); // cout << "After compute_flux, rhs_d = " << endl; // print_q(rhs_d,q_print,nelem,npts); //apply inverse mass matrix (possibly re-structure inverse mass matrix to allow using cublas/cusparse) apply_invM_cuda<<<dimGrid3,dimBlock2>>>(rhs_d,invm_d,nelem,npts); // cout << "After applying inverse mass matrix, rhs_d = " << endl; // print_q(rhs_d,q_print,nelem,npts); // cout << "Before inter-stage update, q0_d = " << endl; // print_q(q0_d,q_print,nelem,npts); // cout << "Before inter-stage update, q1_d = " << endl; // print_q(q1_d,q_print,nelem,npts); // cout << "Before inter-stage update, qp_d = " << endl; // print_q(qp_d,q_print,nelem,npts); // cout << "dtt = " << dtt; // cout << "a0 = " << a0; // cout << "a1 = " << a1; //perform inter-stage updates //qp_d = a0*q0_d + a1*q1_d + dtt*rhs_d stat=cublasScopy(handle,nelem*npts,rhs_d,1,qp_d,1);//qp_d=rhs_d stat=cublasSscal(handle,nelem*npts,&dtt,qp_d,1);//qp_d=dtt*qp_d stat=cublasSaxpy(handle,nelem*npts,&a1,q1_d,1,qp_d,1); //qp_d = qp_d+a1*q1_d stat=cublasSaxpy(handle,nelem*npts,&a0,q0_d,1,qp_d,1);//qp_d = qp_d+a0*q0_d // interstage_update<<<dimGrid4,dimBlock4>>>(q0_d,q1_d,qp_d,rhs_d,a0,a1,dtt,nelem,npts); // cout << "After inter-stage update, qp_d = " << endl; // print_q(qp_d,q_print,nelem,npts); //q1_d = qp_d stat=cublasScopy(handle,nelem*npts,qp_d,1,q1_d,1); }//for(int ik... //q0_d = qp_d stat=cublasScopy(handle,nelem*npts,qp_d,1,q0_d,1); // cout << "After all stages, q0_d = " << endl; // print_q(q0_d,q_print,nelem,npts); } //end time-stepping section, transfer needed output data back from GPU cudaMemcpy(q0,q0_d,nelem*npts*sizeof(float),cudaMemcpyDeviceToHost); //de-allocate memory from GPU cublasDestroy(handle); cudaFree(rhs_d); cudaFree(qp_d); cudaFree(q1_d); cudaFree(q0_d); cudaFree(u0_d); cudaFree(v0_d); cudaFree(ksi2d_x_d); cudaFree(ksi2d_y_d); cudaFree(eta2d_x_d); cudaFree(eta2d_y_d); cudaFree(jac2d_d); cudaFree(psi2d_d); cudaFree(psi2d_ksi_d); cudaFree(psi2d_eta_d); cudaFree(psideh_d); cudaFree(nx_d); cudaFree(ny_d); cudaFree(jac_side_d); cudaFree(imapl_d); cudaFree(imapr_d); cudaFree(invm_d); //de-allocate any local memory delete [] rhs; delete [] invm; delete [] q_print; }
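// The flux arithmetic inlined in compute_flux_cuda above is the standard Rusanov
// (local Lax-Friedrichs) flux for scalar advection: half the sum of the two normal
// fluxes minus half the maximum normal speed times the state jump. A minimal sketch of
// the same computation as a standalone device helper, assuming (nx, ny) is the outward
// normal of the left element as in the kernel (helper name is illustrative only):
__device__ __forceinline__ float rusanov_flux(float ql, float qr,
                                              float ul, float vl,
                                              float ur, float vr,
                                              float nx, float ny)
{
    float unl  = nx*ul + ny*vl;                 // normal velocity, left state
    float unr  = nx*ur + ny*vr;                 // normal velocity, right state
    float lam  = fmaxf(fabsf(unl), fabsf(unr)); // max wave speed across the side
    float favg = 0.5f*(ql*unl + qr*unr);        // average of the two normal fluxes
    return favg - 0.5f*lam*(qr - ql);           // centered flux minus dissipation
}
// This is exactly the quantity the kernel accumulates into rhs with the quadrature
// weight wq and basis value h_i, subtracted on the left element and added on the right.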
637a463db9424dd41afb420ff4d8b8c9ad9ff1fe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <hipcub/hipcub.hpp> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/sequence_ops.h" #include "caffe2/core/operator.h" #include "caffe2/core/tensor.h" namespace caffe2 { namespace { template <typename T> __global__ void AddPaddingKernel( const T* in, int block_size, int lengths_size, int outer_size, const int32_t* lengths_prefix_sum, const T* padding_start_ptr, int start_padding_width_blocks, const T* padding_end_ptr, int end_padding_width_blocks, T* out, int32_t* lengths_out) { int element_idx = blockIdx.x; int prior_padding = element_idx * (start_padding_width_blocks + end_padding_width_blocks); int out_start_idx = element_idx == 0 ? 0 : lengths_prefix_sum[element_idx - 1] + prior_padding; int len_blocks; int in_start_idx; if (lengths_prefix_sum) { len_blocks = lengths_prefix_sum[element_idx] - (element_idx == 0 ? 0 : lengths_prefix_sum[element_idx - 1]); in_start_idx = lengths_prefix_sum[element_idx] - len_blocks; } else { // Only one element, use the outer size CUDA_KERNEL_ASSERT(lengths_size == 1); len_blocks = outer_size; in_start_idx = 0; } out_start_idx *= block_size; in_start_idx *= block_size; int len = len_blocks * block_size; int start_padding_width = start_padding_width_blocks * block_size; int end_padding_width = end_padding_width_blocks * block_size; // start pad T* out_ptr = out + out_start_idx; for (int i = threadIdx.x; i < start_padding_width; i += blockDim.x) { T fill = padding_start_ptr ? padding_start_ptr[i % block_size] : T(0); out_ptr[i] = fill; } // payload for (int i = threadIdx.x; i < len; i += blockDim.x) { out_ptr[i + start_padding_width] = in[in_start_idx + i]; } // end pad for (int i = threadIdx.x; i < end_padding_width; i += blockDim.x) { T fill = padding_end_ptr ? padding_end_ptr[i % block_size] : T(0); out_ptr[i + start_padding_width + len] = fill; } // update the lengths if (threadIdx.x == 0 && lengths_out != nullptr) { lengths_out[element_idx] = len_blocks + start_padding_width_blocks + end_padding_width_blocks; } } template <typename T> __global__ void RemovePaddingKernel( const T* in, int block_size, int lengths_size, int outer_size, const int32_t* lengths_prefix_sum, int start_padding_width_blocks, int end_padding_width_blocks, T* out, int32_t* lengths_out) { int element_idx = blockIdx.x; int prior_padding = element_idx * (start_padding_width_blocks + end_padding_width_blocks); int out_start_idx = element_idx == 0 ? 0 : lengths_prefix_sum[element_idx - 1] - prior_padding; int len_blocks; int in_start_idx; if (lengths_prefix_sum) { len_blocks = lengths_prefix_sum[element_idx] - (element_idx == 0 ? 
0 : lengths_prefix_sum[element_idx - 1]); in_start_idx = lengths_prefix_sum[element_idx] - len_blocks; } else { // Only one element, use the outer size CUDA_KERNEL_ASSERT(lengths_size == 1); len_blocks = outer_size; in_start_idx = 0; } out_start_idx *= block_size; in_start_idx *= block_size; int len = len_blocks * block_size; int start_padding_width = start_padding_width_blocks * block_size; // payload T* out_ptr = out + out_start_idx; for (int i = threadIdx.x; i < len; i += blockDim.x) { out_ptr[in_start_idx + i] = in[i + start_padding_width]; } // update the lengths if (threadIdx.x == 0 && lengths_out != nullptr) { lengths_out[element_idx] = len_blocks - (start_padding_width_blocks + end_padding_width_blocks); } } template <bool Inclusive = true> void lengths_prefix_sum( const int32_t* lengths, int32_t num_items, Tensor* prefix_buffer, Tensor* prefix_sum, CUDAContext* context) { // Retrieve buffer size size_t temp_storage_bytes = 0; prefix_sum->Resize(num_items); if (Inclusive) { hipcub::DeviceScan::InclusiveSum( NULL, temp_storage_bytes, lengths, prefix_sum->template mutable_data<int32_t>(), num_items, context->cuda_stream()); } else { hipcub::DeviceScan::ExclusiveSum( NULL, temp_storage_bytes, lengths, prefix_sum->template mutable_data<int32_t>(), num_items, context->cuda_stream()); } // Allocate temporary storage auto buffer_size = (temp_storage_bytes + sizeof(int32_t)) / sizeof(int32_t); prefix_buffer->Resize(buffer_size); void* d_temp_storage = static_cast<void*>(prefix_buffer->template mutable_data<int32_t>()); if (Inclusive) { hipcub::DeviceScan::InclusiveSum( d_temp_storage, temp_storage_bytes, lengths, prefix_sum->template mutable_data<int32_t>(), num_items, context->cuda_stream()); } else { hipcub::DeviceScan::ExclusiveSum( d_temp_storage, temp_storage_bytes, lengths, prefix_sum->template mutable_data<int32_t>(), num_items, context->cuda_stream()); } } } // namespace template <> template <typename T> bool AddPaddingOp<CUDAContext>::MakePadding( const T* in_ptr, T* out_ptr, const int32_t* lengths_ptr, int32_t lengths_size, int32_t outer_size, const T* padding_start_ptr, const T* padding_end_ptr, int64_t block_size) { // Step 1: compute prefix sum over the lengths -- unless // there were no lengths given, i.e there is only one segment const int32_t* lengths_prefix_sum_ptr = nullptr; if (lengths_ptr != nullptr) { lengths_prefix_sum( lengths_ptr, lengths_size, &lengths_prefix_sum_buffer_, &lengths_prefix_sum_, &context_); lengths_prefix_sum_ptr = lengths_prefix_sum_.data<int32_t>(); } int32_t* lengths_out_ptr = nullptr; if (OutputSize() > 1) { auto* lengths_out = Output(1, {lengths_size}, at::dtype<int32_t>()); lengths_out_ptr = lengths_out->template mutable_data<int32_t>(); } if (lengths_size == 0) { return true; } // Compute the padding using the accumulated lengths hipLaunchKernelGGL(( AddPaddingKernel<T>) , dim3(lengths_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), in_ptr, block_size, lengths_size, outer_size, lengths_prefix_sum_ptr, padding_start_ptr, startPaddingWidth_, padding_end_ptr, endPaddingWidth_, out_ptr, lengths_out_ptr); return true; } REGISTER_CUDA_OPERATOR(AddPadding, AddPaddingOp<CUDAContext>); template <> template <typename T> bool RemovePaddingOp<CUDAContext>::DoRunWithType() { const auto& in = Input(0); CAFFE_ENFORCE_GE(in.dim(), 1); const int32_t outer_size = in.sizes()[0]; const auto block_size = std::accumulate( in.sizes().begin() + 1, in.sizes().end(), 1, std::multiplies<int64_t>()); // if no lengths is provided, assume it is a single 
full-span entry const int32_t* lengths_ptr = nullptr; int32_t lengths_size = 1; if (InputSize() > 1) { const auto& lengths = Input(1); lengths_ptr = lengths.data<int32_t>(); lengths_size = lengths.size(); } auto out_dims = in.sizes().vec(); out_dims[0] -= (startPaddingWidth_ + endPaddingWidth_) * lengths_size; auto* out = Output(0, out_dims, at::dtype<T>()); const auto* in_ptr = in.template data<T>(); auto* out_ptr = out->template mutable_data<T>(); // Step 1: compute prefix sum over the (padded) lengths -- unless // there were no lengths given, i.e there is only one segment const int32_t* lengths_prefix_sum_ptr = nullptr; if (lengths_ptr != nullptr) { lengths_prefix_sum( lengths_ptr, lengths_size, &lengths_prefix_sum_buffer_, &lengths_prefix_sum_, &context_); lengths_prefix_sum_ptr = lengths_prefix_sum_.data<int32_t>(); } int32_t* lengths_out_ptr = nullptr; if (OutputSize() > 1) { auto* lengths_out = Output(1, {lengths_size}, at::dtype<int32_t>()); lengths_out_ptr = lengths_out->template mutable_data<int32_t>(); } if (lengths_size == 0) { return true; } // Compute the padding using the accumulated lengths hipLaunchKernelGGL(( RemovePaddingKernel<T>) , dim3(lengths_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), in_ptr, block_size, lengths_size, outer_size, lengths_prefix_sum_ptr, startPaddingWidth_, endPaddingWidth_, out_ptr, lengths_out_ptr); return true; } template <typename T> __global__ void gather_padding_kernel( const int K, const int N, const int Y0Width, const int Y1Width, const T* X, const int* I, const int* L, T* Y0, T* Y1) { typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage y0_tmp; __shared__ typename BlockReduce::TempStorage y1_tmp; for (int i = blockIdx.x; i < N; i += gridDim.x) { T sum_1 = T(0); T sum_2 = T(0); for (int j = threadIdx.x; j < K * Y0Width; j += blockDim.x) { const int j1 = j / Y0Width; const int j2 = j % Y0Width; const int idx1 = N * (L[j1] + j2); sum_1 += X[idx1 + i]; } for (int j = threadIdx.x; j < K * Y1Width; j += blockDim.x) { const int j1 = j / Y1Width; const int j2 = j % Y1Width; const int idx1 = N * L[j1]; const int idx2 = idx1 + N * (I[j1] - Y1Width + j2); sum_2 += X[idx2 + i]; } sum_1 = BlockReduce(y0_tmp).Reduce(sum_1, hipcub::Sum()); sum_2 = BlockReduce(y1_tmp).Reduce(sum_2, hipcub::Sum()); if (threadIdx.x == 0) { Y0[i] = sum_1; Y0 != Y1 ? Y1[i] = sum_2 : Y0[i] = sum_1 + sum_2; } __syncthreads(); } } template <> template <typename T> void GatherPaddingOp<CUDAContext>::GatherPadding( const int outer_size, const int lengths_size, const int block_size, const int pad_width, const T* in_ptr, const int* lengths_ptr, T* padding_start_ptr, T* padding_end_ptr) { if (lengths_size > 0) { lengths_prefix_sum<false>( lengths_ptr, lengths_size, &lengths_prefix_sum_buffer_, &lengths_prefix_sum_, &context_); hipLaunchKernelGGL(( gather_padding_kernel<T>) , dim3(min(block_size, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), lengths_size, block_size, startPaddingWidth_, endPaddingWidth_, in_ptr, lengths_ptr, lengths_prefix_sum_.template data<int>(), padding_start_ptr, padding_end_ptr); } } REGISTER_CUDA_OPERATOR(RemovePadding, RemovePaddingOp<CUDAContext>); REGISTER_CUDA_OPERATOR(GatherPadding, GatherPaddingOp<CUDAContext>); } // namespace caffe2
637a463db9424dd41afb420ff4d8b8c9ad9ff1fe.cu
#include <cub/cub.cuh> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/sequence_ops.h" #include "caffe2/core/operator.h" #include "caffe2/core/tensor.h" namespace caffe2 { namespace { template <typename T> __global__ void AddPaddingKernel( const T* in, int block_size, int lengths_size, int outer_size, const int32_t* lengths_prefix_sum, const T* padding_start_ptr, int start_padding_width_blocks, const T* padding_end_ptr, int end_padding_width_blocks, T* out, int32_t* lengths_out) { int element_idx = blockIdx.x; int prior_padding = element_idx * (start_padding_width_blocks + end_padding_width_blocks); int out_start_idx = element_idx == 0 ? 0 : lengths_prefix_sum[element_idx - 1] + prior_padding; int len_blocks; int in_start_idx; if (lengths_prefix_sum) { len_blocks = lengths_prefix_sum[element_idx] - (element_idx == 0 ? 0 : lengths_prefix_sum[element_idx - 1]); in_start_idx = lengths_prefix_sum[element_idx] - len_blocks; } else { // Only one element, use the outer size CUDA_KERNEL_ASSERT(lengths_size == 1); len_blocks = outer_size; in_start_idx = 0; } out_start_idx *= block_size; in_start_idx *= block_size; int len = len_blocks * block_size; int start_padding_width = start_padding_width_blocks * block_size; int end_padding_width = end_padding_width_blocks * block_size; // start pad T* out_ptr = out + out_start_idx; for (int i = threadIdx.x; i < start_padding_width; i += blockDim.x) { T fill = padding_start_ptr ? padding_start_ptr[i % block_size] : T(0); out_ptr[i] = fill; } // payload for (int i = threadIdx.x; i < len; i += blockDim.x) { out_ptr[i + start_padding_width] = in[in_start_idx + i]; } // end pad for (int i = threadIdx.x; i < end_padding_width; i += blockDim.x) { T fill = padding_end_ptr ? padding_end_ptr[i % block_size] : T(0); out_ptr[i + start_padding_width + len] = fill; } // update the lengths if (threadIdx.x == 0 && lengths_out != nullptr) { lengths_out[element_idx] = len_blocks + start_padding_width_blocks + end_padding_width_blocks; } } template <typename T> __global__ void RemovePaddingKernel( const T* in, int block_size, int lengths_size, int outer_size, const int32_t* lengths_prefix_sum, int start_padding_width_blocks, int end_padding_width_blocks, T* out, int32_t* lengths_out) { int element_idx = blockIdx.x; int prior_padding = element_idx * (start_padding_width_blocks + end_padding_width_blocks); int out_start_idx = element_idx == 0 ? 0 : lengths_prefix_sum[element_idx - 1] - prior_padding; int len_blocks; int in_start_idx; if (lengths_prefix_sum) { len_blocks = lengths_prefix_sum[element_idx] - (element_idx == 0 ? 
0 : lengths_prefix_sum[element_idx - 1]); in_start_idx = lengths_prefix_sum[element_idx] - len_blocks; } else { // Only one element, use the outer size CUDA_KERNEL_ASSERT(lengths_size == 1); len_blocks = outer_size; in_start_idx = 0; } out_start_idx *= block_size; in_start_idx *= block_size; int len = len_blocks * block_size; int start_padding_width = start_padding_width_blocks * block_size; // payload T* out_ptr = out + out_start_idx; for (int i = threadIdx.x; i < len; i += blockDim.x) { out_ptr[in_start_idx + i] = in[i + start_padding_width]; } // update the lengths if (threadIdx.x == 0 && lengths_out != nullptr) { lengths_out[element_idx] = len_blocks - (start_padding_width_blocks + end_padding_width_blocks); } } template <bool Inclusive = true> void lengths_prefix_sum( const int32_t* lengths, int32_t num_items, Tensor* prefix_buffer, Tensor* prefix_sum, CUDAContext* context) { // Retrieve buffer size size_t temp_storage_bytes = 0; prefix_sum->Resize(num_items); if (Inclusive) { cub::DeviceScan::InclusiveSum( NULL, temp_storage_bytes, lengths, prefix_sum->template mutable_data<int32_t>(), num_items, context->cuda_stream()); } else { cub::DeviceScan::ExclusiveSum( NULL, temp_storage_bytes, lengths, prefix_sum->template mutable_data<int32_t>(), num_items, context->cuda_stream()); } // Allocate temporary storage auto buffer_size = (temp_storage_bytes + sizeof(int32_t)) / sizeof(int32_t); prefix_buffer->Resize(buffer_size); void* d_temp_storage = static_cast<void*>(prefix_buffer->template mutable_data<int32_t>()); if (Inclusive) { cub::DeviceScan::InclusiveSum( d_temp_storage, temp_storage_bytes, lengths, prefix_sum->template mutable_data<int32_t>(), num_items, context->cuda_stream()); } else { cub::DeviceScan::ExclusiveSum( d_temp_storage, temp_storage_bytes, lengths, prefix_sum->template mutable_data<int32_t>(), num_items, context->cuda_stream()); } } } // namespace template <> template <typename T> bool AddPaddingOp<CUDAContext>::MakePadding( const T* in_ptr, T* out_ptr, const int32_t* lengths_ptr, int32_t lengths_size, int32_t outer_size, const T* padding_start_ptr, const T* padding_end_ptr, int64_t block_size) { // Step 1: compute prefix sum over the lengths -- unless // there were no lengths given, i.e there is only one segment const int32_t* lengths_prefix_sum_ptr = nullptr; if (lengths_ptr != nullptr) { lengths_prefix_sum( lengths_ptr, lengths_size, &lengths_prefix_sum_buffer_, &lengths_prefix_sum_, &context_); lengths_prefix_sum_ptr = lengths_prefix_sum_.data<int32_t>(); } int32_t* lengths_out_ptr = nullptr; if (OutputSize() > 1) { auto* lengths_out = Output(1, {lengths_size}, at::dtype<int32_t>()); lengths_out_ptr = lengths_out->template mutable_data<int32_t>(); } if (lengths_size == 0) { return true; } // Compute the padding using the accumulated lengths AddPaddingKernel<T> <<<lengths_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( in_ptr, block_size, lengths_size, outer_size, lengths_prefix_sum_ptr, padding_start_ptr, startPaddingWidth_, padding_end_ptr, endPaddingWidth_, out_ptr, lengths_out_ptr); return true; } REGISTER_CUDA_OPERATOR(AddPadding, AddPaddingOp<CUDAContext>); template <> template <typename T> bool RemovePaddingOp<CUDAContext>::DoRunWithType() { const auto& in = Input(0); CAFFE_ENFORCE_GE(in.dim(), 1); const int32_t outer_size = in.sizes()[0]; const auto block_size = std::accumulate( in.sizes().begin() + 1, in.sizes().end(), 1, std::multiplies<int64_t>()); // if no lengths is provided, assume it is a single full-span entry const int32_t* lengths_ptr = 
nullptr; int32_t lengths_size = 1; if (InputSize() > 1) { const auto& lengths = Input(1); lengths_ptr = lengths.data<int32_t>(); lengths_size = lengths.size(); } auto out_dims = in.sizes().vec(); out_dims[0] -= (startPaddingWidth_ + endPaddingWidth_) * lengths_size; auto* out = Output(0, out_dims, at::dtype<T>()); const auto* in_ptr = in.template data<T>(); auto* out_ptr = out->template mutable_data<T>(); // Step 1: compute prefix sum over the (padded) lengths -- unless // there were no lengths given, i.e there is only one segment const int32_t* lengths_prefix_sum_ptr = nullptr; if (lengths_ptr != nullptr) { lengths_prefix_sum( lengths_ptr, lengths_size, &lengths_prefix_sum_buffer_, &lengths_prefix_sum_, &context_); lengths_prefix_sum_ptr = lengths_prefix_sum_.data<int32_t>(); } int32_t* lengths_out_ptr = nullptr; if (OutputSize() > 1) { auto* lengths_out = Output(1, {lengths_size}, at::dtype<int32_t>()); lengths_out_ptr = lengths_out->template mutable_data<int32_t>(); } if (lengths_size == 0) { return true; } // Compute the padding using the accumulated lengths RemovePaddingKernel<T> <<<lengths_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( in_ptr, block_size, lengths_size, outer_size, lengths_prefix_sum_ptr, startPaddingWidth_, endPaddingWidth_, out_ptr, lengths_out_ptr); return true; } template <typename T> __global__ void gather_padding_kernel( const int K, const int N, const int Y0Width, const int Y1Width, const T* X, const int* I, const int* L, T* Y0, T* Y1) { typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage y0_tmp; __shared__ typename BlockReduce::TempStorage y1_tmp; for (int i = blockIdx.x; i < N; i += gridDim.x) { T sum_1 = T(0); T sum_2 = T(0); for (int j = threadIdx.x; j < K * Y0Width; j += blockDim.x) { const int j1 = j / Y0Width; const int j2 = j % Y0Width; const int idx1 = N * (L[j1] + j2); sum_1 += X[idx1 + i]; } for (int j = threadIdx.x; j < K * Y1Width; j += blockDim.x) { const int j1 = j / Y1Width; const int j2 = j % Y1Width; const int idx1 = N * L[j1]; const int idx2 = idx1 + N * (I[j1] - Y1Width + j2); sum_2 += X[idx2 + i]; } sum_1 = BlockReduce(y0_tmp).Reduce(sum_1, cub::Sum()); sum_2 = BlockReduce(y1_tmp).Reduce(sum_2, cub::Sum()); if (threadIdx.x == 0) { Y0[i] = sum_1; Y0 != Y1 ? Y1[i] = sum_2 : Y0[i] = sum_1 + sum_2; } __syncthreads(); } } template <> template <typename T> void GatherPaddingOp<CUDAContext>::GatherPadding( const int outer_size, const int lengths_size, const int block_size, const int pad_width, const T* in_ptr, const int* lengths_ptr, T* padding_start_ptr, T* padding_end_ptr) { if (lengths_size > 0) { lengths_prefix_sum<false>( lengths_ptr, lengths_size, &lengths_prefix_sum_buffer_, &lengths_prefix_sum_, &context_); gather_padding_kernel<T> <<<min(block_size, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( lengths_size, block_size, startPaddingWidth_, endPaddingWidth_, in_ptr, lengths_ptr, lengths_prefix_sum_.template data<int>(), padding_start_ptr, padding_end_ptr); } } REGISTER_CUDA_OPERATOR(RemovePadding, RemovePaddingOp<CUDAContext>); REGISTER_CUDA_OPERATOR(GatherPadding, GatherPaddingOp<CUDAContext>); } // namespace caffe2
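// lengths_prefix_sum above follows the usual CUB two-pass idiom: call DeviceScan with a
// null temp-storage pointer so it only reports the workspace size, allocate that many
// bytes, then call it again with the same arguments to run the scan. A minimal
// self-contained sketch of the same idiom, using plain cudaMalloc instead of the caffe2
// allocator (function name is illustrative, error checking omitted):
#include <cub/cub.cuh>
#include <cuda_runtime.h>

void inclusive_prefix_sum(const int32_t* d_in, int32_t* d_out, int num_items,
                          cudaStream_t stream)
{
    void*  d_temp     = nullptr;
    size_t temp_bytes = 0;
    // Pass 1: d_temp == nullptr, so CUB only writes the required size into temp_bytes.
    cub::DeviceScan::InclusiveSum(d_temp, temp_bytes, d_in, d_out, num_items, stream);
    cudaMalloc(&d_temp, temp_bytes);
    // Pass 2: same call with real storage -- this actually launches the scan.
    cub::DeviceScan::InclusiveSum(d_temp, temp_bytes, d_in, d_out, num_items, stream);
    cudaFree(d_temp);
}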
5e755fdd1e766597725f845ac8d8bd092e9e2d5f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<stdlib.h> typedef struct { unsigned char red,green,blue; } PPMPixel; typedef struct { int x, y; PPMPixel *data; } PPMImage; #define CREATOR "COMP3231" #define RGB_COMPONENT_COLOR 255 static PPMImage *readPPM(const char *filename) { char buff[16]; PPMImage *img; FILE *fp; int c, rgb_comp_color; //open PPM file for reading fp = fopen(filename, "rb"); if (!fp) { fprintf(stderr, "Unable to open file '%s'\n", filename); exit(1); } //read image format if (!fgets(buff, sizeof(buff), fp)) { perror(filename); exit(1); } //check the image format if (buff[0] != 'P' || buff[1] != '6') { fprintf(stderr, "Invalid image format (must be 'P6')\n"); exit(1); } //alloc memory form image img = (PPMImage *)malloc(sizeof(PPMImage)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } //check for comments c = getc(fp); while (c == '#') { while (getc(fp) != '\n') ; c = getc(fp); } ungetc(c, fp); //read image size information if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) { fprintf(stderr, "Invalid image size (error loading '%s')\n", filename); exit(1); } //read rgb component if (fscanf(fp, "%d", &rgb_comp_color) != 1) { fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename); exit(1); } //check rgb component depth if (rgb_comp_color!= RGB_COMPONENT_COLOR) { fprintf(stderr, "'%s' does not have 8-bits components\n", filename); exit(1); } while (fgetc(fp) != '\n') ; //memory allocation for pixel data img->data = (PPMPixel*)malloc(img->x * img->y * sizeof(PPMPixel)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } //read pixel data from file if (fread(img->data, 3 * img->x, img->y, fp) != img->y) { fprintf(stderr, "Error loading image '%s'\n", filename); exit(1); } fclose(fp); return img; } void writePPM(const char *filename, PPMImage *img) { FILE *fp; //open file for output fp = fopen(filename, "wb"); if (!fp) { fprintf(stderr, "Unable to open file '%s'\n", filename); exit(1); } //write the header file //image format fprintf(fp, "P6\n"); //comments fprintf(fp, "# Created by %s\n",CREATOR); //image size fprintf(fp, "%d %d\n",img->x,img->y); // rgb component depth fprintf(fp, "%d\n",RGB_COMPONENT_COLOR); // pixel data fwrite(img->data, 3 * img->x, img->y, fp); fclose(fp); } __global__ void blur_kernel() { //kernel code } void your_gaussian_blur_func(PPMImage *img) { //host code } int main(){ PPMImage *image; image = readPPM("input.ppm"); your_gaussian_blur_func(image); writePPM("output.ppm",image); }
5e755fdd1e766597725f845ac8d8bd092e9e2d5f.cu
#include<stdio.h> #include<stdlib.h> typedef struct { unsigned char red,green,blue; } PPMPixel; typedef struct { int x, y; PPMPixel *data; } PPMImage; #define CREATOR "COMP3231" #define RGB_COMPONENT_COLOR 255 static PPMImage *readPPM(const char *filename) { char buff[16]; PPMImage *img; FILE *fp; int c, rgb_comp_color; //open PPM file for reading fp = fopen(filename, "rb"); if (!fp) { fprintf(stderr, "Unable to open file '%s'\n", filename); exit(1); } //read image format if (!fgets(buff, sizeof(buff), fp)) { perror(filename); exit(1); } //check the image format if (buff[0] != 'P' || buff[1] != '6') { fprintf(stderr, "Invalid image format (must be 'P6')\n"); exit(1); } //alloc memory form image img = (PPMImage *)malloc(sizeof(PPMImage)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } //check for comments c = getc(fp); while (c == '#') { while (getc(fp) != '\n') ; c = getc(fp); } ungetc(c, fp); //read image size information if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) { fprintf(stderr, "Invalid image size (error loading '%s')\n", filename); exit(1); } //read rgb component if (fscanf(fp, "%d", &rgb_comp_color) != 1) { fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename); exit(1); } //check rgb component depth if (rgb_comp_color!= RGB_COMPONENT_COLOR) { fprintf(stderr, "'%s' does not have 8-bits components\n", filename); exit(1); } while (fgetc(fp) != '\n') ; //memory allocation for pixel data img->data = (PPMPixel*)malloc(img->x * img->y * sizeof(PPMPixel)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } //read pixel data from file if (fread(img->data, 3 * img->x, img->y, fp) != img->y) { fprintf(stderr, "Error loading image '%s'\n", filename); exit(1); } fclose(fp); return img; } void writePPM(const char *filename, PPMImage *img) { FILE *fp; //open file for output fp = fopen(filename, "wb"); if (!fp) { fprintf(stderr, "Unable to open file '%s'\n", filename); exit(1); } //write the header file //image format fprintf(fp, "P6\n"); //comments fprintf(fp, "# Created by %s\n",CREATOR); //image size fprintf(fp, "%d %d\n",img->x,img->y); // rgb component depth fprintf(fp, "%d\n",RGB_COMPONENT_COLOR); // pixel data fwrite(img->data, 3 * img->x, img->y, fp); fclose(fp); } __global__ void blur_kernel() { //kernel code } void your_gaussian_blur_func(PPMImage *img) { //host code } int main(){ PPMImage *image; image = readPPM("input.ppm"); your_gaussian_blur_func(image); writePPM("output.ppm",image); }
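// blur_kernel and your_gaussian_blur_func above are intentionally left as stubs. As an
// illustration only (not the assignment's reference solution), a naive
// one-thread-per-pixel 3x3 Gaussian blur over the PPMPixel layout defined above might
// look like the sketch below; the kernel name, weights, and launch shape are assumptions.
__global__ void blur3x3_kernel(const PPMPixel* in, PPMPixel* out, int w, int h)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= w || y >= h) return;
    const int wgt[3][3] = { {1,2,1}, {2,4,2}, {1,2,1} };   // 3x3 Gaussian, weights sum to 16
    int r = 0, g = 0, b = 0;
    for (int dy = -1; dy <= 1; ++dy) {
        for (int dx = -1; dx <= 1; ++dx) {
            int xs = min(max(x + dx, 0), w - 1);           // clamp at the image borders
            int ys = min(max(y + dy, 0), h - 1);
            int k  = wgt[dy + 1][dx + 1];
            r += k * in[ys * w + xs].red;
            g += k * in[ys * w + xs].green;
            b += k * in[ys * w + xs].blue;
        }
    }
    out[y * w + x].red   = (unsigned char)(r / 16);
    out[y * w + x].green = (unsigned char)(g / 16);
    out[y * w + x].blue  = (unsigned char)(b / 16);
}
// The host side would cudaMalloc two img->x * img->y * sizeof(PPMPixel) buffers, copy
// img->data in, launch with 16x16 thread blocks covering the image, and copy back.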
2d13bcb89549e633ed6f3a7566828454137a366d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "sgemv.cuh" #include "util.cuh" #include "cutens.h" template <const int DIM_X, const int DIM_Y, const int TS> void sgemv_template(const int m, const int n, const float * __restrict__ A, int lda, const float * __restrict__ x, float * __restrict__ y) { dim3 grid(CEIL(m, TS), 1); dim3 block(DIM_X, DIM_Y); hipLaunchKernelGGL(( ker_sgemv <DIM_X, DIM_Y, TS>) , dim3(grid), dim3(block), 0, 0, m, n, A, lda, x, y); } void sgemv (cuftens *a, cuftens *b, cuftens *c) { const int M=a->M, N=a->N; cuASSERT(b->M==1 && b->N==N, "err: sgemv shape\n"); sgemv_template <256, 1, 256> (M, N, a->data, N, b->data, c->data); }
2d13bcb89549e633ed6f3a7566828454137a366d.cu
#include "sgemv.cuh" #include "util.cuh" #include "cutens.h" template <const int DIM_X, const int DIM_Y, const int TS> void sgemv_template(const int m, const int n, const float * __restrict__ A, int lda, const float * __restrict__ x, float * __restrict__ y) { dim3 grid(CEIL(m, TS), 1); dim3 block(DIM_X, DIM_Y); ker_sgemv <DIM_X, DIM_Y, TS> <<<grid, block>>> (m, n, A, lda, x, y); } void sgemv (cuftens *a, cuftens *b, cuftens *c) { const int M=a->M, N=a->N; cuASSERT(b->M==1 && b->N==N, "err: sgemv shape\n"); sgemv_template <256, 1, 256> (M, N, a->data, N, b->data, c->data); }
7b5308d45e771b2dba3a7ce4dc0a6df6a293a3ad.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <paddle/fluid/platform/device_context.h> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/operators/sum_op.h" #include "paddle/fluid/platform/float16.h" namespace plat = paddle::platform; namespace paddle { namespace operators { #define CEIL_DIV(x, y) (((x) + (y)-1) / (y)) using LoDTensor = framework::LoDTensor; template <class T> __global__ void Sum2CUDAKernel(const T *in_0, const T *in_1, T *out, int64_t N) { int id = blockIdx.x * blockDim.x + threadIdx.x; while (id < N) { out[id] = in_0[id] + in_1[id]; id += blockDim.x * gridDim.x; } } template <class T> __global__ void SumArrayCUDAKernel(T **in, T *out, int64_t N, size_t in_size, bool read_dst) { int id = blockIdx.x * blockDim.x + threadIdx.x; while (id < N) { T total(read_dst ? out[id] : static_cast<T>(0)); for (int i = 0; i < in_size; ++i) { const T *tmp = in[i]; if (tmp) { total += tmp[id]; } } out[id] = total; id += blockDim.x * gridDim.x; } } template <class T> __global__ void SumSelectedRowsCUDAKernel(T **sr_in_out, int64_t N, size_t rows) { int id = blockIdx.x * blockDim.x + threadIdx.x; while (id < N) { for (int i = 0; i < 2 * rows; i += 2) { const T *tmp = sr_in_out[i]; T *tmp_out = sr_in_out[i + 1]; if (tmp && tmp_out) { tmp_out[id] += tmp[id]; } } id += blockDim.x * gridDim.x; } } template <class T> void SumToLoDTensor(const framework::ExecutionContext &context) { auto in_vars = context.MultiInputVar("X"); const size_t in_num = in_vars.size(); constexpr size_t theory_sm_threads = 1024; auto &dev_ctx = context.template device_context<platform::CUDADeviceContext>(); auto stream = dev_ctx.stream(); auto max_threads = dev_ctx.GetMaxPhysicalThreadCount(); auto sm_count = max_threads / theory_sm_threads; size_t tile_size = 0; dim3 grids; dim3 blocks; auto ComputeKernelParameter = [&](size_t length) { if (length >= max_threads) tile_size = 1024; else if (length < max_threads && length > sm_count * 128) tile_size = 512; else if (length <= sm_count * 128) tile_size = 256; grids = dim3(CEIL_DIV(length, tile_size), 1, 1); blocks = dim3(tile_size, 1, 1); }; auto *out = context.Output<LoDTensor>("Out"); bool in_place = in_vars[0] == context.OutputVar("Out"); if (!in_place) { auto *out_ptr = out->mutable_data<T>(context.GetPlace()); if (in_num >= 1 && in_vars[0]->IsType<framework::LoDTensor>()) { auto &in_0_tensor = in_vars[0]->Get<framework::LoDTensor>(); if (in_0_tensor.numel() > 0) { in_place = (in_0_tensor.data<T>() == out_ptr); } } } // Sum of two tensors if (in_num == 2 && in_vars[0]->IsType<framework::LoDTensor>() && in_vars[1]->IsType<framework::LoDTensor>()) { auto &in_0 = in_vars[0]->Get<framework::LoDTensor>(); auto &in_1 = in_vars[1]->Get<framework::LoDTensor>(); int64_t length_0 = in_0.numel(); int64_t length_1 = in_1.numel(); if (length_0 && length_1 && in_0.IsInitialized() && 
in_1.IsInitialized()) { auto result = EigenVector<T>::Flatten(*out); auto &place = *dev_ctx.eigen_device(); auto in_0_e = EigenVector<T>::Flatten(in_0); auto in_1_e = EigenVector<T>::Flatten(in_1); result.device(place) = in_0_e + in_1_e; } else if (length_0 && in_0.IsInitialized()) { auto result = EigenVector<T>::Flatten(*out); auto &place = *dev_ctx.eigen_device(); result.device(place) = EigenVector<T>::Flatten(in_0); } else if (length_1 && in_1.IsInitialized()) { auto result = EigenVector<T>::Flatten(*out); auto &place = *dev_ctx.eigen_device(); result.device(place) = EigenVector<T>::Flatten(in_1); } return; } int start = in_place ? 1 : 0; if (!in_place) { phi::funcs::SetConstant<platform::CUDADeviceContext, T> constant_functor; constant_functor( context.template device_context<platform::CUDADeviceContext>(), out, static_cast<T>(0)); } std::vector<const T *> in_data; std::vector<int> selectrow_index; int64_t lod_length = 0; bool dst_write = false; for (int i = start; i < in_num; ++i) { if (in_vars[i]->IsType<framework::LoDTensor>()) { auto &in_i = in_vars[i]->Get<framework::LoDTensor>(); lod_length = in_i.numel(); if (lod_length && in_i.IsInitialized()) { in_data.emplace_back(in_i.data<T>()); } } else if (in_vars[i]->IsType<phi::SelectedRows>()) { selectrow_index.push_back(i); } } // compute select rows seperately. if (!selectrow_index.empty()) { std::vector<const T *> sr_in_out_data; size_t rows = 0; int64_t length = 0; for (auto index : selectrow_index) { auto &sr = in_vars[index]->Get<phi::SelectedRows>(); auto &sr_value = sr.value(); auto &sr_rows = sr.rows(); auto row_numel = sr_value.numel() / sr_rows.size(); auto out_dims = out->dims(); PADDLE_ENFORCE_EQ(sr.height(), out_dims[0], platform::errors::InvalidArgument( "The table height of input must be same as output, " "but received input height is %d" ", output height is %d", sr.height(), out_dims[0])); PADDLE_ENFORCE_EQ(row_numel, out->numel() / sr.height(), platform::errors::InvalidArgument( "The table width of input must be same as output, " "but received input width is %d" ", output width is %d", row_numel, out->numel() / sr.height())); auto *sr_data = sr_value.data<T>(); auto *sr_out_data = out->data<T>(); rows += sr_rows.size(); length = row_numel; for (size_t i = 0; i < sr_rows.size(); ++i) { sr_in_out_data.emplace_back(&sr_data[i * row_numel]); sr_in_out_data.emplace_back(&sr_out_data[sr_rows[i] * row_numel]); } } if (!sr_in_out_data.empty()) { auto tmp_sr_in_out_array = memory::Alloc(dev_ctx, sr_in_out_data.size() * sizeof(T *)); memory::Copy(dev_ctx.GetPlace(), tmp_sr_in_out_array->ptr(), platform::CPUPlace(), reinterpret_cast<void *>(sr_in_out_data.data()), sr_in_out_data.size() * sizeof(T *), dev_ctx.stream()); T **sr_in_out_array_data = reinterpret_cast<T **>(tmp_sr_in_out_array->ptr()); ComputeKernelParameter(length); hipLaunchKernelGGL(( SumSelectedRowsCUDAKernel<T>), dim3(grids), dim3(blocks), 0, stream, sr_in_out_array_data, length, rows); dst_write = true; } } // if indata not null, merge into one kernel call. 
if (!in_data.empty()) { auto tmp_in_array = memory::Alloc(dev_ctx, in_data.size() * sizeof(T *)); memory::Copy(dev_ctx.GetPlace(), tmp_in_array->ptr(), platform::CPUPlace(), reinterpret_cast<void *>(in_data.data()), in_data.size() * sizeof(T *), dev_ctx.stream()); T **in_array_data = reinterpret_cast<T **>(tmp_in_array->ptr()); ComputeKernelParameter(lod_length); hipLaunchKernelGGL(( SumArrayCUDAKernel<T>), dim3(grids), dim3(blocks), 0, stream, in_array_data, out->data<T>(), lod_length, in_data.size(), dst_write | in_place); } } template <typename T> class SumKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto out_var = context.OutputVar("Out"); if (out_var->IsType<framework::LoDTensor>()) { SumToLoDTensor<T>(context); } else if (out_var->IsType<phi::SelectedRows>()) { SelectedRowsCompute<platform::CUDADeviceContext, T>(context); } else if (out_var->IsType<framework::LoDTensorArray>()) { LodTensorArrayCompute<platform::CUDADeviceContext, T>(context); } else { PADDLE_THROW(platform::errors::InvalidArgument( "Expected type of Ouput(out) must be Tensor, SelectedRows or " "LodTensorArray. But got " "unsupport type: %s.", framework::ToTypeName(out_var->Type()))); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( sum, ops::SumKernel<paddle::platform::CUDADeviceContext, float>, ops::SumKernel<paddle::platform::CUDADeviceContext, double>, ops::SumKernel<paddle::platform::CUDADeviceContext, int>, ops::SumKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::SumKernel<paddle::platform::CUDADeviceContext, plat::float16>, ops::SumKernel<paddle::platform::CUDADeviceContext, plat::bfloat16>);
7b5308d45e771b2dba3a7ce4dc0a6df6a293a3ad.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <paddle/fluid/platform/device_context.h> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/operators/sum_op.h" #include "paddle/fluid/platform/float16.h" namespace plat = paddle::platform; namespace paddle { namespace operators { #define CEIL_DIV(x, y) (((x) + (y)-1) / (y)) using LoDTensor = framework::LoDTensor; template <class T> __global__ void Sum2CUDAKernel(const T *in_0, const T *in_1, T *out, int64_t N) { int id = blockIdx.x * blockDim.x + threadIdx.x; while (id < N) { out[id] = in_0[id] + in_1[id]; id += blockDim.x * gridDim.x; } } template <class T> __global__ void SumArrayCUDAKernel(T **in, T *out, int64_t N, size_t in_size, bool read_dst) { int id = blockIdx.x * blockDim.x + threadIdx.x; while (id < N) { T total(read_dst ? out[id] : static_cast<T>(0)); for (int i = 0; i < in_size; ++i) { const T *tmp = in[i]; if (tmp) { total += tmp[id]; } } out[id] = total; id += blockDim.x * gridDim.x; } } template <class T> __global__ void SumSelectedRowsCUDAKernel(T **sr_in_out, int64_t N, size_t rows) { int id = blockIdx.x * blockDim.x + threadIdx.x; while (id < N) { for (int i = 0; i < 2 * rows; i += 2) { const T *tmp = sr_in_out[i]; T *tmp_out = sr_in_out[i + 1]; if (tmp && tmp_out) { tmp_out[id] += tmp[id]; } } id += blockDim.x * gridDim.x; } } template <class T> void SumToLoDTensor(const framework::ExecutionContext &context) { auto in_vars = context.MultiInputVar("X"); const size_t in_num = in_vars.size(); constexpr size_t theory_sm_threads = 1024; auto &dev_ctx = context.template device_context<platform::CUDADeviceContext>(); auto stream = dev_ctx.stream(); auto max_threads = dev_ctx.GetMaxPhysicalThreadCount(); auto sm_count = max_threads / theory_sm_threads; size_t tile_size = 0; dim3 grids; dim3 blocks; auto ComputeKernelParameter = [&](size_t length) { if (length >= max_threads) tile_size = 1024; else if (length < max_threads && length > sm_count * 128) tile_size = 512; else if (length <= sm_count * 128) tile_size = 256; grids = dim3(CEIL_DIV(length, tile_size), 1, 1); blocks = dim3(tile_size, 1, 1); }; auto *out = context.Output<LoDTensor>("Out"); bool in_place = in_vars[0] == context.OutputVar("Out"); if (!in_place) { auto *out_ptr = out->mutable_data<T>(context.GetPlace()); if (in_num >= 1 && in_vars[0]->IsType<framework::LoDTensor>()) { auto &in_0_tensor = in_vars[0]->Get<framework::LoDTensor>(); if (in_0_tensor.numel() > 0) { in_place = (in_0_tensor.data<T>() == out_ptr); } } } // Sum of two tensors if (in_num == 2 && in_vars[0]->IsType<framework::LoDTensor>() && in_vars[1]->IsType<framework::LoDTensor>()) { auto &in_0 = in_vars[0]->Get<framework::LoDTensor>(); auto &in_1 = in_vars[1]->Get<framework::LoDTensor>(); int64_t length_0 = in_0.numel(); int64_t length_1 = in_1.numel(); if (length_0 && length_1 && in_0.IsInitialized() && in_1.IsInitialized()) { auto result = EigenVector<T>::Flatten(*out); auto &place = 
*dev_ctx.eigen_device(); auto in_0_e = EigenVector<T>::Flatten(in_0); auto in_1_e = EigenVector<T>::Flatten(in_1); result.device(place) = in_0_e + in_1_e; } else if (length_0 && in_0.IsInitialized()) { auto result = EigenVector<T>::Flatten(*out); auto &place = *dev_ctx.eigen_device(); result.device(place) = EigenVector<T>::Flatten(in_0); } else if (length_1 && in_1.IsInitialized()) { auto result = EigenVector<T>::Flatten(*out); auto &place = *dev_ctx.eigen_device(); result.device(place) = EigenVector<T>::Flatten(in_1); } return; } int start = in_place ? 1 : 0; if (!in_place) { phi::funcs::SetConstant<platform::CUDADeviceContext, T> constant_functor; constant_functor( context.template device_context<platform::CUDADeviceContext>(), out, static_cast<T>(0)); } std::vector<const T *> in_data; std::vector<int> selectrow_index; int64_t lod_length = 0; bool dst_write = false; for (int i = start; i < in_num; ++i) { if (in_vars[i]->IsType<framework::LoDTensor>()) { auto &in_i = in_vars[i]->Get<framework::LoDTensor>(); lod_length = in_i.numel(); if (lod_length && in_i.IsInitialized()) { in_data.emplace_back(in_i.data<T>()); } } else if (in_vars[i]->IsType<phi::SelectedRows>()) { selectrow_index.push_back(i); } } // compute select rows seperately. if (!selectrow_index.empty()) { std::vector<const T *> sr_in_out_data; size_t rows = 0; int64_t length = 0; for (auto index : selectrow_index) { auto &sr = in_vars[index]->Get<phi::SelectedRows>(); auto &sr_value = sr.value(); auto &sr_rows = sr.rows(); auto row_numel = sr_value.numel() / sr_rows.size(); auto out_dims = out->dims(); PADDLE_ENFORCE_EQ(sr.height(), out_dims[0], platform::errors::InvalidArgument( "The table height of input must be same as output, " "but received input height is %d" ", output height is %d", sr.height(), out_dims[0])); PADDLE_ENFORCE_EQ(row_numel, out->numel() / sr.height(), platform::errors::InvalidArgument( "The table width of input must be same as output, " "but received input width is %d" ", output width is %d", row_numel, out->numel() / sr.height())); auto *sr_data = sr_value.data<T>(); auto *sr_out_data = out->data<T>(); rows += sr_rows.size(); length = row_numel; for (size_t i = 0; i < sr_rows.size(); ++i) { sr_in_out_data.emplace_back(&sr_data[i * row_numel]); sr_in_out_data.emplace_back(&sr_out_data[sr_rows[i] * row_numel]); } } if (!sr_in_out_data.empty()) { auto tmp_sr_in_out_array = memory::Alloc(dev_ctx, sr_in_out_data.size() * sizeof(T *)); memory::Copy(dev_ctx.GetPlace(), tmp_sr_in_out_array->ptr(), platform::CPUPlace(), reinterpret_cast<void *>(sr_in_out_data.data()), sr_in_out_data.size() * sizeof(T *), dev_ctx.stream()); T **sr_in_out_array_data = reinterpret_cast<T **>(tmp_sr_in_out_array->ptr()); ComputeKernelParameter(length); SumSelectedRowsCUDAKernel<T><<<grids, blocks, 0, stream>>>( sr_in_out_array_data, length, rows); dst_write = true; } } // if indata not null, merge into one kernel call. 
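// ComputeKernelParameter (defined above) sizes the launch from the element
// count: tile_size is 1024 threads per block when length >= max_threads, 512
// when max_threads > length > sm_count * 128, and 256 otherwise, with
// CEIL_DIV(length, tile_size) one-dimensional blocks. As a hypothetical
// illustration only: a device reporting max_threads = 65536 gives
// sm_count = 64, so length = 10000 selects tile_size = 512 and a grid of
// CEIL_DIV(10000, 512) = 20 blocks.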
if (!in_data.empty()) { auto tmp_in_array = memory::Alloc(dev_ctx, in_data.size() * sizeof(T *)); memory::Copy(dev_ctx.GetPlace(), tmp_in_array->ptr(), platform::CPUPlace(), reinterpret_cast<void *>(in_data.data()), in_data.size() * sizeof(T *), dev_ctx.stream()); T **in_array_data = reinterpret_cast<T **>(tmp_in_array->ptr()); ComputeKernelParameter(lod_length); SumArrayCUDAKernel<T><<<grids, blocks, 0, stream>>>( in_array_data, out->data<T>(), lod_length, in_data.size(), dst_write | in_place); } } template <typename T> class SumKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto out_var = context.OutputVar("Out"); if (out_var->IsType<framework::LoDTensor>()) { SumToLoDTensor<T>(context); } else if (out_var->IsType<phi::SelectedRows>()) { SelectedRowsCompute<platform::CUDADeviceContext, T>(context); } else if (out_var->IsType<framework::LoDTensorArray>()) { LodTensorArrayCompute<platform::CUDADeviceContext, T>(context); } else { PADDLE_THROW(platform::errors::InvalidArgument( "Expected type of Ouput(out) must be Tensor, SelectedRows or " "LodTensorArray. But got " "unsupport type: %s.", framework::ToTypeName(out_var->Type()))); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( sum, ops::SumKernel<paddle::platform::CUDADeviceContext, float>, ops::SumKernel<paddle::platform::CUDADeviceContext, double>, ops::SumKernel<paddle::platform::CUDADeviceContext, int>, ops::SumKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::SumKernel<paddle::platform::CUDADeviceContext, plat::float16>, ops::SumKernel<paddle::platform::CUDADeviceContext, plat::bfloat16>);
ee483e366a312c11eac34b7c9586d41ed39b71a7.hip
// !!! This is a file automatically generated by hipify!!! #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <iostream> #include <cstring> #include <climits> #include <iomanip> #include <sys/time.h> #include <hip/hip_runtime.h> using namespace std; __global__ void transpose(float *matrix, int row, int col, float *result) { int i = blockIdx.x; if(i < row){ for (int j = 0; j < col; ++j) { result[j * row + i] = matrix[i * col + j]; } } } __global__ void matrixMult(float *left, int leftRow, int leftCol, float *right, int rightRow, int rightCol, float *result, int resultRow, int resultCol) { //#pragma omp parallel for float temp = 0; int i = blockIdx.y*blockDim.y+threadIdx.y; int j = blockIdx.x*blockDim.x+threadIdx.x; if ((i < leftRow) && (j < rightCol)){ //for (int i = 0; i < leftRow; ++i) { //for (int j = 0; j < rightCol; ++j) { // you need k calcs for each entry for (int k = 0; k < leftCol; ++k) { temp += left[i * leftCol + k] * right[k * rightCol + j]; } result[i * resultCol + j] = temp; } //} //} } void initializeArray(float *arr, int row, int col) { // #pragma omp parallel for for (int i = 0; i < row * col; ++i) { arr[i] = 0; } } void printMatrix(float *matrix, int row, int col) { cout.precision(6); for (int i = 0; i < row; ++i) { for (int j = 0; j < col; j++){ cout << setw(13) << matrix[i * col + j]; } cout << endl; } cout << endl; } float gpuquad(float *a, int n, float *u) { dim3 dimBlock(32, 32); float *d__a; hipMalloc((void **)&d__a, n*n*sizeof(float)); hipMemcpy(d__a, a, n*n*sizeof(float), hipMemcpyHostToDevice); //cout << "A" << endl; //printMatrix(a, n, n); float *d__u; hipMalloc((void **)&d__u, n*sizeof(float)); hipMemcpy(d__u, u, n*sizeof(float), hipMemcpyHostToDevice); //cout << "U" << endl; //printMatrix(u,n,1); dim3 dimGridUt(1,n); //float h__Ut[n]; //initializeArray(h__Ut, 1, n); float *d__Ut; hipMalloc((void **)&d__Ut, n*sizeof(float)); //hipMemcpy(d__Ut, h__Ut, n*sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( transpose), dim3(dimGridUt), dim3(dimBlock), 0, 0, d__u, 1, n, d__Ut); // transpose pass in row and col of original matrix //hipMemcpy(h__Ut, d__Ut, n*sizeof(float), hipMemcpyDeviceToHost); //cout << "Ut" <<endl; //printMatrix(h__Ut, 1, n); dim3 dimGridN1((n + dimBlock.x - 1) / dimBlock.x,(1 + dimBlock.y - 1) / dimBlock.y); //float h__Ut_a[n]; //initializeArray(h__Ut_a, 1, n); float *d__Ut_a; hipMalloc((void **)&d__Ut_a, n*sizeof(float)); //hipMemcpy(d__Ut_a, h__Ut_a, n*sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( matrixMult), dim3(dimGridN1), dim3(dimBlock), 0, 0, d__Ut, 1, n, d__a, n, n, d__Ut_a, 1, n); //hipMemcpy(h__Ut_a, d__Ut_a, n*sizeof(float), hipMemcpyDeviceToHost); //cout << "Ut * A" <<endl; //printMatrix(h__Ut_a, 1, n); dim3 dimGrid11((1 + dimBlock.x - 1) / dimBlock.x,(1 + dimBlock.y - 1) / dimBlock.y); float h__Ut_a_U[1]; //h__Ut_a_U[0] = 0; float *d__Ut_a_U; hipMalloc((void **)&d__Ut_a_U, sizeof(float)); //hipMemcpy(d__Ut_a_U, h__Ut_a_U, sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( matrixMult), dim3(dimGrid11), dim3(dimBlock), 0, 0, d__Ut_a, 1, n, d__u, n, 1, d__Ut_a_U, 1, 1); hipMemcpy(h__Ut_a_U, d__Ut_a_U, sizeof(float), hipMemcpyDeviceToHost); //cout << "Ut * A * U" <<endl; //printMatrix(h__Ut_a_U, 1, 1); return h__Ut_a_U[0]; } /* int main() { float a[] = {1,2,2,2,3, 2,1,5,5,2, 2,5,1,5,2, 2,5,5,1,2, 3,2,2,2,1}; int n = 5; float u[] = {1,2,3,4,5}; cout << gpuquad(a, n, u) << endl; gpuquad(a, n, u); return 0; } */
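// gpuquad evaluates the quadratic form u^T * A * u in three launches: u
// (treated as n x 1) is transposed to u^T (1 x n), u^T is multiplied by A to
// give a 1 x n row vector, and that row is multiplied by u to give the 1 x 1
// result copied back to the host. For the 5 x 5 example in the commented-out
// main above, the hand-computed value is u^T * A * u = 561
// (A * u = {34, 49, 45, 41, 26}, dotted with u = {1, 2, 3, 4, 5}).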
ee483e366a312c11eac34b7c9586d41ed39b71a7.cu
#include <omp.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <iostream> #include <cstring> #include <climits> #include <iomanip> #include <sys/time.h> #include <cuda.h> using namespace std; __global__ void transpose(float *matrix, int row, int col, float *result) { int i = blockIdx.x; if(i < row){ for (int j = 0; j < col; ++j) { result[j * row + i] = matrix[i * col + j]; } } } __global__ void matrixMult(float *left, int leftRow, int leftCol, float *right, int rightRow, int rightCol, float *result, int resultRow, int resultCol) { //#pragma omp parallel for float temp = 0; int i = blockIdx.y*blockDim.y+threadIdx.y; int j = blockIdx.x*blockDim.x+threadIdx.x; if ((i < leftRow) && (j < rightCol)){ //for (int i = 0; i < leftRow; ++i) { //for (int j = 0; j < rightCol; ++j) { // you need k calcs for each entry for (int k = 0; k < leftCol; ++k) { temp += left[i * leftCol + k] * right[k * rightCol + j]; } result[i * resultCol + j] = temp; } //} //} } void initializeArray(float *arr, int row, int col) { // #pragma omp parallel for for (int i = 0; i < row * col; ++i) { arr[i] = 0; } } void printMatrix(float *matrix, int row, int col) { cout.precision(6); for (int i = 0; i < row; ++i) { for (int j = 0; j < col; j++){ cout << setw(13) << matrix[i * col + j]; } cout << endl; } cout << endl; } float gpuquad(float *a, int n, float *u) { dim3 dimBlock(32, 32); float *d__a; cudaMalloc((void **)&d__a, n*n*sizeof(float)); cudaMemcpy(d__a, a, n*n*sizeof(float), cudaMemcpyHostToDevice); //cout << "A" << endl; //printMatrix(a, n, n); float *d__u; cudaMalloc((void **)&d__u, n*sizeof(float)); cudaMemcpy(d__u, u, n*sizeof(float), cudaMemcpyHostToDevice); //cout << "U" << endl; //printMatrix(u,n,1); dim3 dimGridUt(1,n); //float h__Ut[n]; //initializeArray(h__Ut, 1, n); float *d__Ut; cudaMalloc((void **)&d__Ut, n*sizeof(float)); //cudaMemcpy(d__Ut, h__Ut, n*sizeof(float), cudaMemcpyHostToDevice); transpose<<<dimGridUt, dimBlock>>>(d__u, 1, n, d__Ut); // transpose pass in row and col of original matrix //cudaMemcpy(h__Ut, d__Ut, n*sizeof(float), cudaMemcpyDeviceToHost); //cout << "Ut" <<endl; //printMatrix(h__Ut, 1, n); dim3 dimGridN1((n + dimBlock.x - 1) / dimBlock.x,(1 + dimBlock.y - 1) / dimBlock.y); //float h__Ut_a[n]; //initializeArray(h__Ut_a, 1, n); float *d__Ut_a; cudaMalloc((void **)&d__Ut_a, n*sizeof(float)); //cudaMemcpy(d__Ut_a, h__Ut_a, n*sizeof(float), cudaMemcpyHostToDevice); matrixMult<<<dimGridN1, dimBlock>>>(d__Ut, 1, n, d__a, n, n, d__Ut_a, 1, n); //cudaMemcpy(h__Ut_a, d__Ut_a, n*sizeof(float), cudaMemcpyDeviceToHost); //cout << "Ut * A" <<endl; //printMatrix(h__Ut_a, 1, n); dim3 dimGrid11((1 + dimBlock.x - 1) / dimBlock.x,(1 + dimBlock.y - 1) / dimBlock.y); float h__Ut_a_U[1]; //h__Ut_a_U[0] = 0; float *d__Ut_a_U; cudaMalloc((void **)&d__Ut_a_U, sizeof(float)); //cudaMemcpy(d__Ut_a_U, h__Ut_a_U, sizeof(float), cudaMemcpyHostToDevice); matrixMult<<<dimGrid11, dimBlock>>>(d__Ut_a, 1, n, d__u, n, 1, d__Ut_a_U, 1, 1); cudaMemcpy(h__Ut_a_U, d__Ut_a_U, sizeof(float), cudaMemcpyDeviceToHost); //cout << "Ut * A * U" <<endl; //printMatrix(h__Ut_a_U, 1, 1); return h__Ut_a_U[0]; } /* int main() { float a[] = {1,2,2,2,3, 2,1,5,5,2, 2,5,1,5,2, 2,5,5,1,2, 3,2,2,2,1}; int n = 5; float u[] = {1,2,3,4,5}; cout << gpuquad(a, n, u) << endl; gpuquad(a, n, u); return 0; } */
6626f72d116a7eeef7ab6a321fa1e57bc99f87ab.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <iostream> void init(int *a, int N) { int i; for (i = 0; i < N; ++i) { a[i] = i; } } __global__ void doubleElements(int *a, int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (int i = idx; i < N + stride; i += stride) { a[i] *= 2; } } bool checkElementsAreDoubled(int *a, int N) { int i; for (i = 0; i < N; ++i) { if (a[i] != i*2) return false; } return true; } int main() { /* * Add error handling to this source code to learn what errors * exist, and then correct them. Googling error messages may be * of service if actions for resolving them are not clear to you. */ int N = 10000; int *a; size_t size = N * sizeof(int); //hipError_t err; //err = hipMallocManaged(&a, size); hipMallocManaged(&a, size); /* if (err != hipSuccess) { printf("Here is the error: %s", hipGetErrorString(err)); } printf(err); */ init(a, N); size_t threads_per_block = 1024; //size_t threads_per_block = 2048; size_t number_of_blocks = 32; hipLaunchKernelGGL(( doubleElements), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, a, N); // Catch errors for both the kernel launch above and any errors that occur during the asynchronous `doubleElements` kernel execution. hipError_t syncError = hipGetLastError(); hipError_t asyncError = hipDeviceSynchronize(); if (syncError != hipSuccess) printf("Here is an error: %s\n", hipGetErrorString(syncError)); // invalid configuration argument if (asyncError != hipSuccess) printf("Here is an error: %s\n", hipGetErrorString(asyncError)); // no error bool areDoubled = checkElementsAreDoubled(a, N); printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE"); hipFree(a); }
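// The two checks above separate the two ways a launch can fail:
// hipGetLastError() reports synchronous launch errors (for example an invalid
// configuration such as the commented-out 2048 threads per block), while the
// status returned by hipDeviceSynchronize() surfaces errors raised while the
// kernel itself was running asynchronously.
//
// A minimal sketch of wrapping the same checks in a helper (assuming only the
// HIP runtime calls already used above):
//
// inline void checkHip(hipError_t e, const char *what) {
//   if (e != hipSuccess)
//     printf("%s failed: %s\n", what, hipGetErrorString(e));
// }
// // usage: checkHip(hipGetLastError(), "launch");
// //        checkHip(hipDeviceSynchronize(), "kernel execution");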
6626f72d116a7eeef7ab6a321fa1e57bc99f87ab.cu
#include <stdio.h> #include <iostream> void init(int *a, int N) { int i; for (i = 0; i < N; ++i) { a[i] = i; } } __global__ void doubleElements(int *a, int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (int i = idx; i < N + stride; i += stride) { a[i] *= 2; } } bool checkElementsAreDoubled(int *a, int N) { int i; for (i = 0; i < N; ++i) { if (a[i] != i*2) return false; } return true; } int main() { /* * Add error handling to this source code to learn what errors * exist, and then correct them. Googling error messages may be * of service if actions for resolving them are not clear to you. */ int N = 10000; int *a; size_t size = N * sizeof(int); //cudaError_t err; //err = cudaMallocManaged(&a, size); cudaMallocManaged(&a, size); /* if (err != cudaSuccess) { printf("Here is the error: %s", cudaGetErrorString(err)); } printf(err); */ init(a, N); size_t threads_per_block = 1024; //size_t threads_per_block = 2048; size_t number_of_blocks = 32; doubleElements<<<number_of_blocks, threads_per_block>>>(a, N); // Catch errors for both the kernel launch above and any errors that occur during the asynchronous `doubleElements` kernel execution. cudaError_t syncError = cudaGetLastError(); cudaError_t asyncError = cudaDeviceSynchronize(); if (syncError != cudaSuccess) printf("Here is an error: %s\n", cudaGetErrorString(syncError)); // invalid configuration argument if (asyncError != cudaSuccess) printf("Here is an error: %s\n", cudaGetErrorString(asyncError)); // no error bool areDoubled = checkElementsAreDoubled(a, N); printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE"); cudaFree(a); }
8aaecba60231cb15ba9cfed2deb786de7f11a815.hip
// !!! This is a file automatically generated by hipify!!! // Solve the Laplace equation on a 2D lattice with boundary conditions. // // compile with the following command: // // (for GTX970) // nvcc -arch=compute_52 -code=sm_52,sm_52 -O3 -m64 -o laplace laplace.cu // // (for GTX1060) // nvcc -arch=compute_61 -code=sm_61,sm_61 -O3 -m64 -o laplace laplace.cu // Includes #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> // field variables float* h_new; // host field vectors float* h_old; float* h_C; // result of diff*diff of each block float* g_new; float* d_new; // device field vectors float* d_old; float* d_C; int MAX=1000000; // maximum iterations double eps=1.0e-10; // stopping criterion __align__(8) texture<float> texOld; // declare the texture __align__(8) texture<float> texNew; __global__ void laplacian(float* phi_old, float* phi_new, float* C, bool flag) { extern __shared__ float cache[]; float t, l, c, r, b; // top, left, center, right, bottom float diff; int site, ym1, xm1, xp1, yp1; int Nx = blockDim.x*gridDim.x; // number of site in x direction int Ny = blockDim.y*gridDim.y; // number of site in y direction int x = blockDim.x*blockIdx.x + threadIdx.x; int y = blockDim.y*blockIdx.y + threadIdx.y; int cacheIndex = threadIdx.x + threadIdx.y*blockDim.x; site = x + y*Nx; if((x == 0) || (x == Nx-1) || (y == 0) || (y == Ny-1) ) { // do nothing on the boundaries } else { xm1 = site - 1; // x-1 xp1 = site + 1; // x+1 ym1 = site - Nx; // y-1 yp1 = site + Nx; // y+1 if(flag) { b = tex1Dfetch(texOld, ym1); // read d_old via texOld l = tex1Dfetch(texOld, xm1); c = tex1Dfetch(texOld, site); r = tex1Dfetch(texOld, xp1); t = tex1Dfetch(texOld, yp1); phi_new[site] = 0.25*(b+l+r+t); diff = phi_new[site]-c; } else { b = tex1Dfetch(texNew, ym1); // read d_new via texNew l = tex1Dfetch(texNew, xm1); c = tex1Dfetch(texNew, site); r = tex1Dfetch(texNew, xp1); t = tex1Dfetch(texNew, yp1); phi_old[site] = 0.25*(b+l+r+t); diff = phi_old[site]-c; } } // each thread saves its error estimate to the shared memory cache[cacheIndex]=diff*diff; __syncthreads(); // parallel reduction in each block int ib = blockDim.x*blockDim.y/2; while (ib != 0) { if(cacheIndex < ib) cache[cacheIndex] += cache[cacheIndex + ib]; __syncthreads(); ib /=2; } // save the partial sum of each block to C int blockIndex = blockIdx.x + gridDim.x*blockIdx.y; if(cacheIndex == 0) C[blockIndex] = cache[0]; } int main(void) { int gid; // GPU_ID int iter; volatile bool flag; // to toggle between *_new and *_old float gputime; float gputime_tot; double flops; double error; printf("Enter the GPU ID (0/1): "); scanf("%d",&gid); printf("%d\n",gid); // Error code to check return values for CUDA calls hipError_t err = hipSuccess; err = hipSetDevice(gid); if (err != hipSuccess) { printf("!!! Cannot select GPU with device ID = %d\n", gid); exit(1); } printf("Select GPU with device ID = %d\n", gid); hipSetDevice(gid); int Nx, Ny; // lattice size int tx, ty; // block size, threads (tx, ty) per block int bx, by; // grid size, block (bx, by) per grid int N; // total number of site int size; // size of the array h_old, h_new int sb; // size of the array h_C; int sm; // size of shared memory float Intime; float Outime; // create the timer hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // Write all the result to file. 
FILE *output; output = fopen("Output_GPUTex_site512.txt", "a"); fprintf(output, "LatticeSize BlockSize GPUTexInput GPUTexerror GPUTexiteration GPUTexonly GPUTexflop GPUTexoutput GPUTextotal\n"); fclose(output); Nx = 512; Ny = 512; for(int n = 2; n <= 5; n = n+1){ tx = pow(2, n); ty = tx; dim3 threads(tx,ty); bx = Nx / tx; by = Ny / ty; dim3 blocks(bx,by); // Allocate field vector h_phi in host memory N = Nx * Ny; size = N * sizeof(float); sb = bx * by * sizeof(float); h_old = (float*)malloc(size); h_new = (float*)malloc(size); g_new = (float*)malloc(size); h_C = (float*)malloc(sb); // Initialize the array to 0 memset(h_old, 0, size); memset(h_new, 0, size); // Initialize the field vector with boundary conditions for(int x = 0; x < Nx; x = x+1) { // phi = +1 on top h_new[x + Nx * (Ny - 1)] = 1.0; h_old[x + Nx * (Ny - 1)] = 1.0; // phi = +5 in bottom h_new[x] = 5.0; h_old[x] = 5.0; } for(int y = 0; y < Ny; y = y+1){ //phi = -1 in left h_new[Nx * y] = -1.0; h_old[Nx * y] = -1.0; //phi = -2 in right h_new[(Nx - 1) + Nx * y] = -2.0; h_old[(Nx - 1) + Nx * y] = -2.0; } FILE *out1; // save initial configuration in phi_initial.dat out1 = fopen("phi_initial_GPUTex_site512.dat","w"); for(int j=Ny-1;j>-1;j--) { for(int i=0; i<Nx; i++) { fprintf(out1,"%.2e ",h_new[i+j*Nx]); } fprintf(out1,"\n"); } fclose(out1); printf("\n"); // start the timer hipEventRecord(start,0); // Allocate vectors in device memory hipMalloc((void**)&d_new, size); hipMalloc((void**)&d_old, size); hipMalloc((void**)&d_C, sb); hipBindTexture(NULL, texOld, d_old, size); // bind the texture to already existed variable on hipBindTexture(NULL, texNew, d_new, size); // device memory // Copy vectors from host memory to device memory hipMemcpy(d_new, h_new, size, hipMemcpyHostToDevice); hipMemcpy(d_old, h_old, size, hipMemcpyHostToDevice); // stop the timer hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime( &Intime, start, stop); printf("Input time for GPU: %f (ms) \n",Intime); // start the timer hipEventRecord(start,0); error = 10*eps; // any value bigger than eps is OK iter = 0; // counter for iterations flag = true; sm = tx * ty * sizeof(float); // size of the shared memory in each block while ( (error > eps) && (iter < MAX) ) { hipLaunchKernelGGL(( laplacian), dim3(blocks),dim3(threads),sm, 0, d_old, d_new, d_C, flag); hipMemcpy(h_C, d_C, sb, hipMemcpyDeviceToHost); error = 0.0; for(int i=0; i<bx*by; i++) { error = error + h_C[i]; } error = sqrt(error); // printf("error = %.15e\n",error); // printf("iteration = %d\n",iter); iter++; flag = !flag; } printf("error (GPU) = %.15e\n",error); printf("total iterations (GPU) = %d\n",iter); // stop the timer hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime( &gputime, start, stop); printf("Processing time for GPU: %f (ms) \n",gputime); flops = 7.0*(Nx-2)*(Ny-2)*iter; printf("GPU Gflops: %f\n",flops/(1000000.0*gputime)); // Copy result from device memory to host memory // start the timer hipEventRecord(start,0); // Because after the iteration, d_new and d_old are basically the same. 
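// More precisely: `flag` swaps the roles of d_old and d_new on every iteration,
// so the latest field may sit in either array. Because the loop above only
// exits once `error` (the l2 norm of the last update, assembled from the
// per-block partial sums in d_C) drops below eps, or MAX iterations are
// exhausted, the two arrays agree to within that tolerance on convergence and
// copying d_new alone is sufficient.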
hipMemcpy(g_new, d_new, size, hipMemcpyDeviceToHost); hipFree(d_new); hipFree(d_old); hipFree(d_C); // stop the timer hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime( &Outime, start, stop); printf("Output time for GPU: %f (ms) \n",Outime); gputime_tot = Intime + gputime + Outime; printf("Total time for GPU: %f (ms) \n",gputime_tot); fflush(stdout); FILE *outg; // save GPU solution in phi_GPU.dat outg = fopen("phi_GPUTex_site512.dat","w"); for(int j=Ny-1;j>-1;j--) { for(int i=0; i<Nx; i++) { fprintf(outg,"%.2e ",g_new[i+j*Nx]); } fprintf(outg,"\n"); } fclose(outg); // Write all the output to file output = fopen("Output_GPUTex_site512.txt", "a"); fprintf(output, "%d %d %f %f %d %f %f %f %f\n", Nx, tx, Intime, error, iter, gputime, flops/(1000000.0*gputime), Outime, gputime_tot); fclose(output); printf("\n"); printf("Finish computing lattice size : %d, block size : %d\n", Nx, tx); free(h_new); free(h_old); free(g_new); free(h_C); } // destroy the timer hipEventDestroy(start); hipEventDestroy(stop); hipDeviceReset(); }
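// Note on the printed rate: flops = 7.0*(Nx-2)*(Ny-2)*iter charges seven
// floating-point operations to every interior site per sweep (the four-point
// average, the update difference, and the squared difference fed into the
// convergence reduction together account for roughly that many). Since gputime
// is measured in milliseconds, flops/(1000000.0*gputime) equals flops divided
// by 1e9 times the elapsed seconds, i.e. the printed number is Gflop/s.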
8aaecba60231cb15ba9cfed2deb786de7f11a815.cu
// Solve the Laplace equation on a 2D lattice with boundary conditions. // // compile with the following command: // // (for GTX970) // nvcc -arch=compute_52 -code=sm_52,sm_52 -O3 -m64 -o laplace laplace.cu // // (for GTX1060) // nvcc -arch=compute_61 -code=sm_61,sm_61 -O3 -m64 -o laplace laplace.cu // Includes #include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> // field variables float* h_new; // host field vectors float* h_old; float* h_C; // result of diff*diff of each block float* g_new; float* d_new; // device field vectors float* d_old; float* d_C; int MAX=1000000; // maximum iterations double eps=1.0e-10; // stopping criterion __align__(8) texture<float> texOld; // declare the texture __align__(8) texture<float> texNew; __global__ void laplacian(float* phi_old, float* phi_new, float* C, bool flag) { extern __shared__ float cache[]; float t, l, c, r, b; // top, left, center, right, bottom float diff; int site, ym1, xm1, xp1, yp1; int Nx = blockDim.x*gridDim.x; // number of site in x direction int Ny = blockDim.y*gridDim.y; // number of site in y direction int x = blockDim.x*blockIdx.x + threadIdx.x; int y = blockDim.y*blockIdx.y + threadIdx.y; int cacheIndex = threadIdx.x + threadIdx.y*blockDim.x; site = x + y*Nx; if((x == 0) || (x == Nx-1) || (y == 0) || (y == Ny-1) ) { // do nothing on the boundaries } else { xm1 = site - 1; // x-1 xp1 = site + 1; // x+1 ym1 = site - Nx; // y-1 yp1 = site + Nx; // y+1 if(flag) { b = tex1Dfetch(texOld, ym1); // read d_old via texOld l = tex1Dfetch(texOld, xm1); c = tex1Dfetch(texOld, site); r = tex1Dfetch(texOld, xp1); t = tex1Dfetch(texOld, yp1); phi_new[site] = 0.25*(b+l+r+t); diff = phi_new[site]-c; } else { b = tex1Dfetch(texNew, ym1); // read d_new via texNew l = tex1Dfetch(texNew, xm1); c = tex1Dfetch(texNew, site); r = tex1Dfetch(texNew, xp1); t = tex1Dfetch(texNew, yp1); phi_old[site] = 0.25*(b+l+r+t); diff = phi_old[site]-c; } } // each thread saves its error estimate to the shared memory cache[cacheIndex]=diff*diff; __syncthreads(); // parallel reduction in each block int ib = blockDim.x*blockDim.y/2; while (ib != 0) { if(cacheIndex < ib) cache[cacheIndex] += cache[cacheIndex + ib]; __syncthreads(); ib /=2; } // save the partial sum of each block to C int blockIndex = blockIdx.x + gridDim.x*blockIdx.y; if(cacheIndex == 0) C[blockIndex] = cache[0]; } int main(void) { int gid; // GPU_ID int iter; volatile bool flag; // to toggle between *_new and *_old float gputime; float gputime_tot; double flops; double error; printf("Enter the GPU ID (0/1): "); scanf("%d",&gid); printf("%d\n",gid); // Error code to check return values for CUDA calls cudaError_t err = cudaSuccess; err = cudaSetDevice(gid); if (err != cudaSuccess) { printf("!!! Cannot select GPU with device ID = %d\n", gid); exit(1); } printf("Select GPU with device ID = %d\n", gid); cudaSetDevice(gid); int Nx, Ny; // lattice size int tx, ty; // block size, threads (tx, ty) per block int bx, by; // grid size, block (bx, by) per grid int N; // total number of site int size; // size of the array h_old, h_new int sb; // size of the array h_C; int sm; // size of shared memory float Intime; float Outime; // create the timer cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // Write all the result to file. 
FILE *output; output = fopen("Output_GPUTex_site512.txt", "a"); fprintf(output, "LatticeSize BlockSize GPUTexInput GPUTexerror GPUTexiteration GPUTexonly GPUTexflop GPUTexoutput GPUTextotal\n"); fclose(output); Nx = 512; Ny = 512; for(int n = 2; n <= 5; n = n+1){ tx = pow(2, n); ty = tx; dim3 threads(tx,ty); bx = Nx / tx; by = Ny / ty; dim3 blocks(bx,by); // Allocate field vector h_phi in host memory N = Nx * Ny; size = N * sizeof(float); sb = bx * by * sizeof(float); h_old = (float*)malloc(size); h_new = (float*)malloc(size); g_new = (float*)malloc(size); h_C = (float*)malloc(sb); // Initialize the array to 0 memset(h_old, 0, size); memset(h_new, 0, size); // Initialize the field vector with boundary conditions for(int x = 0; x < Nx; x = x+1) { // phi = +1 on top h_new[x + Nx * (Ny - 1)] = 1.0; h_old[x + Nx * (Ny - 1)] = 1.0; // phi = +5 in bottom h_new[x] = 5.0; h_old[x] = 5.0; } for(int y = 0; y < Ny; y = y+1){ //phi = -1 in left h_new[Nx * y] = -1.0; h_old[Nx * y] = -1.0; //phi = -2 in right h_new[(Nx - 1) + Nx * y] = -2.0; h_old[(Nx - 1) + Nx * y] = -2.0; } FILE *out1; // save initial configuration in phi_initial.dat out1 = fopen("phi_initial_GPUTex_site512.dat","w"); for(int j=Ny-1;j>-1;j--) { for(int i=0; i<Nx; i++) { fprintf(out1,"%.2e ",h_new[i+j*Nx]); } fprintf(out1,"\n"); } fclose(out1); printf("\n"); // start the timer cudaEventRecord(start,0); // Allocate vectors in device memory cudaMalloc((void**)&d_new, size); cudaMalloc((void**)&d_old, size); cudaMalloc((void**)&d_C, sb); cudaBindTexture(NULL, texOld, d_old, size); // bind the texture to already existed variable on cudaBindTexture(NULL, texNew, d_new, size); // device memory // Copy vectors from host memory to device memory cudaMemcpy(d_new, h_new, size, cudaMemcpyHostToDevice); cudaMemcpy(d_old, h_old, size, cudaMemcpyHostToDevice); // stop the timer cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime( &Intime, start, stop); printf("Input time for GPU: %f (ms) \n",Intime); // start the timer cudaEventRecord(start,0); error = 10*eps; // any value bigger than eps is OK iter = 0; // counter for iterations flag = true; sm = tx * ty * sizeof(float); // size of the shared memory in each block while ( (error > eps) && (iter < MAX) ) { laplacian<<<blocks,threads,sm>>>(d_old, d_new, d_C, flag); cudaMemcpy(h_C, d_C, sb, cudaMemcpyDeviceToHost); error = 0.0; for(int i=0; i<bx*by; i++) { error = error + h_C[i]; } error = sqrt(error); // printf("error = %.15e\n",error); // printf("iteration = %d\n",iter); iter++; flag = !flag; } printf("error (GPU) = %.15e\n",error); printf("total iterations (GPU) = %d\n",iter); // stop the timer cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime( &gputime, start, stop); printf("Processing time for GPU: %f (ms) \n",gputime); flops = 7.0*(Nx-2)*(Ny-2)*iter; printf("GPU Gflops: %f\n",flops/(1000000.0*gputime)); // Copy result from device memory to host memory // start the timer cudaEventRecord(start,0); // Because after the iteration, d_new and d_old are basically the same. 
cudaMemcpy(g_new, d_new, size, cudaMemcpyDeviceToHost); cudaFree(d_new); cudaFree(d_old); cudaFree(d_C); // stop the timer cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime( &Outime, start, stop); printf("Output time for GPU: %f (ms) \n",Outime); gputime_tot = Intime + gputime + Outime; printf("Total time for GPU: %f (ms) \n",gputime_tot); fflush(stdout); FILE *outg; // save GPU solution in phi_GPU.dat outg = fopen("phi_GPUTex_site512.dat","w"); for(int j=Ny-1;j>-1;j--) { for(int i=0; i<Nx; i++) { fprintf(outg,"%.2e ",g_new[i+j*Nx]); } fprintf(outg,"\n"); } fclose(outg); // Write all the output to file output = fopen("Output_GPUTex_site512.txt", "a"); fprintf(output, "%d %d %f %f %d %f %f %f %f\n", Nx, tx, Intime, error, iter, gputime, flops/(1000000.0*gputime), Outime, gputime_tot); fclose(output); printf("\n"); printf("Finish computing lattice size : %d, block size : %d\n", Nx, tx); free(h_new); free(h_old); free(g_new); free(h_C); } // destroy the timer cudaEventDestroy(start); cudaEventDestroy(stop); cudaDeviceReset(); }
000c1bd45ebb4b814801f9a7eaa542ac40ce357c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "sub.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *a = NULL; hipMalloc(&a, XSIZE*YSIZE); float *b = NULL; hipMalloc(&b, XSIZE*YSIZE); float *c = NULL; hipMalloc(&c, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( sub), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( sub), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( sub), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
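// Shape handling in the loop above: iXSIZE and iYSIZE are bumped up to the next
// multiples of BLOCKX and BLOCKY, so gridBlock always tiles the full matrix
// (e.g. XSIZE = 240 with BLOCKX = 32 rounds up to 256, giving 8 blocks along
// that dimension). Each configuration is warmed up with 10 untimed launches and
// then timed over 1000 back-to-back launches with no device synchronization
// inside the timed region, so the printed microsecond count is the wall time of
// the whole 1000-launch loop rather than a per-launch average.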
000c1bd45ebb4b814801f9a7eaa542ac40ce357c.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "sub.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *a = NULL; cudaMalloc(&a, XSIZE*YSIZE); float *b = NULL; cudaMalloc(&b, XSIZE*YSIZE); float *c = NULL; cudaMalloc(&c, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); sub<<<gridBlock,threadBlock>>>(a,b,c); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { sub<<<gridBlock,threadBlock>>>(a,b,c); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { sub<<<gridBlock,threadBlock>>>(a,b,c); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
b3b75e53b13ac067a733e408c14e6fb5257ed915.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2013 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* Matrix multiplication: C = A * B. * Device code. */ #ifndef _MATRIXMUL_KERNEL_H_ #define _MATRIXMUL_KERNEL_H_ #include <stdio.h> #define CHECK_BANK_CONFLICTS 0 #if CHECK_BANK_CONFLICTS #define AS(i, j) cutilBankChecker(((float*)&As[0][0]), (block_size * i + j)) #define BS(i, j) cutilBankChecker(((float*)&Bs[0][0]), (block_size * i + j)) #else #define AS(i, j) As[i][j] #define BS(i, j) Bs[i][j] #endif //////////////////////////////////////////////////////////////////////////////// //! Matrix multiplication on the device: C = A * B //! wA is A's width and wB is B's width //////////////////////////////////////////////////////////////////////////////// template <int block_size, typename size_type> __device__ void matrixMul(float *C, float *A, float *B, size_type wA, size_type wB) { // Block index size_type bx = blockIdx.x; size_type by = blockIdx.y; // Thread index size_type tx = threadIdx.x; size_type ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block size_type aBegin = wA * block_size * by; // Index of the last sub-matrix of A processed by the block size_type aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A size_type aStep = block_size; // Index of the first sub-matrix of B processed by the block size_type bBegin = block_size * bx; // Step size used to iterate through the sub-matrices of B size_type bStep = block_size * wB; // Csub is used to store the element of the block sub-matrix // that is computed by the thread float Csub = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (size_type a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ float As[block_size][block_size]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ float Bs[block_size][block_size]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix AS(ty, tx) = A[a + wA * ty + tx]; BS(ty, tx) = B[b + wB * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll for (size_type k = 0; k < block_size; ++k) Csub += AS(ty, k) * BS(k, tx); // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element size_type c = wB * block_size * by + block_size * bx; C[c + wB * ty + tx] = Csub; } // C wrappers around our template kernel extern "C" __global__ void matrixMul_bs16_32bit(float *C, float *A, float *B, int wA, int wB) { matrixMul<16, int>(C, A, B, wA, wB); } extern "C" __global__ void matrixMul_bs16_64bit(float *C, float *A, float *B, size_t wA, size_t wB) { matrixMul<16, size_t>(C, A, B, wA, wB); } extern "C" __global__ void matrixMul_bs32_32bit(float 
*C, float *A, float *B, int wA, int wB) { matrixMul<32, int>(C, A, B, wA, wB); } extern "C" __global__ void matrixMul_bs32_64bit(float *C, float *A, float *B, size_t wA, size_t wB) { matrixMul<32, size_t>(C, A, B, wA, wB); } #endif // #ifndef _MATRIXMUL_KERNEL_H_
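// A minimal host-side launch sketch for one of the wrappers above (hA is a
// hypothetical variable holding the height of A; hA, wA and wB are assumed to
// be multiples of 16, since the kernel performs no bounds checks):
//
// dim3 threads(16, 16);
// dim3 grid(wB / 16, hA / 16);
// hipLaunchKernelGGL(matrixMul_bs16_32bit, grid, threads, 0, 0,
//                    d_C, d_A, d_B, wA, wB);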
b3b75e53b13ac067a733e408c14e6fb5257ed915.cu
/* * Copyright 1993-2013 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* Matrix multiplication: C = A * B. * Device code. */ #ifndef _MATRIXMUL_KERNEL_H_ #define _MATRIXMUL_KERNEL_H_ #include <stdio.h> #define CHECK_BANK_CONFLICTS 0 #if CHECK_BANK_CONFLICTS #define AS(i, j) cutilBankChecker(((float*)&As[0][0]), (block_size * i + j)) #define BS(i, j) cutilBankChecker(((float*)&Bs[0][0]), (block_size * i + j)) #else #define AS(i, j) As[i][j] #define BS(i, j) Bs[i][j] #endif //////////////////////////////////////////////////////////////////////////////// //! Matrix multiplication on the device: C = A * B //! wA is A's width and wB is B's width //////////////////////////////////////////////////////////////////////////////// template <int block_size, typename size_type> __device__ void matrixMul(float *C, float *A, float *B, size_type wA, size_type wB) { // Block index size_type bx = blockIdx.x; size_type by = blockIdx.y; // Thread index size_type tx = threadIdx.x; size_type ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block size_type aBegin = wA * block_size * by; // Index of the last sub-matrix of A processed by the block size_type aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A size_type aStep = block_size; // Index of the first sub-matrix of B processed by the block size_type bBegin = block_size * bx; // Step size used to iterate through the sub-matrices of B size_type bStep = block_size * wB; // Csub is used to store the element of the block sub-matrix // that is computed by the thread float Csub = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (size_type a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ float As[block_size][block_size]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ float Bs[block_size][block_size]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix AS(ty, tx) = A[a + wA * ty + tx]; BS(ty, tx) = B[b + wB * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll for (size_type k = 0; k < block_size; ++k) Csub += AS(ty, k) * BS(k, tx); // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element size_type c = wB * block_size * by + block_size * bx; C[c + wB * ty + tx] = Csub; } // C wrappers around our template kernel extern "C" __global__ void matrixMul_bs16_32bit(float *C, float *A, float *B, int wA, int wB) { matrixMul<16, int>(C, A, B, wA, wB); } extern "C" __global__ void matrixMul_bs16_64bit(float *C, float *A, float *B, size_t wA, size_t wB) { matrixMul<16, size_t>(C, A, B, wA, wB); } extern "C" __global__ void matrixMul_bs32_32bit(float *C, float *A, float *B, int wA, int wB) { matrixMul<32, int>(C, A, B, wA, wB); } extern 
"C" __global__ void matrixMul_bs32_64bit(float *C, float *A, float *B, size_t wA, size_t wB) { matrixMul<32, size_t>(C, A, B, wA, wB); } #endif // #ifndef _MATRIXMUL_KERNEL_H_
deb194dbdaa5e6f703b1dfc566737d31882394f2.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <ATen/ExpandUtils.h> #include <ATen/InitialTensorOptions.h> #include <ATen/NativeFunctions.h> #include <ATen/SparseCsrTensorImpl.h> #include <ATen/SparseCsrTensorUtils.h> #include <ATen/SparseTensorUtils.h> #include <ATen/WrapDimUtilsMulti.h> #include <ATen/native/BinaryOps.h> #include <ATen/native/Resize.h> #include <algorithm> #include <hip/hip_runtime.h> #include <type_traits> #include <THH/THHThrustAllocator.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPUtils.h> #include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h> #include <ATen/native/sparse/hip/SparseBlasLegacy.h> #include <ATen/native/sparse/hip/SparseHIPBlas.h> #include <ATen/native/sparse/hip/SparseHIPTensorMath.cuh> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/for_each.h> #include <thrust/sequence.h> namespace at { namespace native { namespace { template <typename input_t, typename output_t> __global__ void convert_indices_from_coo_to_csr_cuda_kernel(output_t* data_out, const input_t* data_in, const int64_t size, const int64_t numel) { int64_t tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid == 0) { for (int64_t i = 0; i <= data_in[0]; i++) data_out[i] = static_cast<output_t>(0); } else if (tid < numel) { for (int64_t i = data_in[tid - 1]; i < data_in[tid]; i++) data_out[i + 1] = static_cast<output_t>(tid); } else if (tid == numel) { for (int64_t i = data_in[numel - 1] + 1; i < size + 1; i++) data_out[i] = static_cast<output_t>(numel); } } template <typename input_t, typename output_t> void convert_indices_from_coo_to_csr_cuda(const Tensor& result, const Tensor& input, const int64_t size) { int64_t numel = input.numel(); const input_t* data_in = input.data_ptr<input_t>(); output_t* data_out = result.data_ptr<output_t>(); if (numel == 0) { result.zero_(); return; } // Run (numel + 1) threads... int64_t THREADS = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; int64_t BLOCKS = (numel + THREADS) / THREADS; at::hip::HIPStreamMasqueradingAsCUDA stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( convert_indices_from_coo_to_csr_cuda_kernel), dim3(BLOCKS), dim3(THREADS), 0, stream, data_out, data_in, size, numel); C10_HIP_KERNEL_LAUNCH_CHECK(); } } // namespace using namespace at::sparse_csr; // certain utiliy functions are usable from sparse COO. using namespace at::sparse; Tensor& addmm_out_sparse_csr_dense_cuda( const Tensor& self, const SparseCsrTensor& sparse, const Tensor& dense, const Scalar& beta, const Scalar& alpha, Tensor& r) { TORCH_INTERNAL_ASSERT(sparse.is_sparse_csr()); Tensor t = *expand_size(self, {sparse.size(0), dense.size(1)}, "addmm_out_sparse_csr"); TORCH_CHECK(t.is_cuda(), "Expected all tensors to be on the same device. addmm expected 't' to be CUDA tensor"); TORCH_CHECK( r.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'out' to be CUDA tensor, but got CPU tensor"); TORCH_CHECK( sparse.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'mat1' to be a CUDA tensor, but got a CPU tensor"); TORCH_CHECK( dense.is_cuda(), "Expected all tensors to be on the same device. 
addmm: expected 'mat2' to be a CUDA tensor, but got a CPU tensor"); TORCH_CHECK( sparse.dim() == 2, "addmm: 2-D matrices expected, got ", sparse.dim(), "D tensor"); TORCH_CHECK( dense.dim() == 2, "addmm: 2-D matrices expected, got ", dense.dim(), "D tensor"); TORCH_CHECK( r.is_contiguous(), "out argument must be contiguous, but got: ", r.suggest_memory_format()); // mxk * kxn = mxn int64_t m = sparse.size(0); int64_t k = sparse.size(1); int64_t n = dense.size(1); TORCH_CHECK( dense.size(0) == k, "addmm: Expected dense matrix (dense) size(0)=", k, ", got ", dense.size(0)); resize_output(r, {m, n}); int64_t nnz = sparse._nnz(); if (nnz == 0) { at::mul_out(r, t, at::scalar_tensor(beta, r.options())); return r; } // TODO: Check if hipsparseSpMM can use 64-bit indices // https://docs.nvidia.com/cuda/cusparse/index.html auto col_indices = sparse.col_indices().to(at::kInt); auto crow_indices = sparse.crow_indices().to(at::kInt); auto values = sparse.values(); s_addmm_out_csr_sparse_dense_cuda_worker(nnz, m, n, k, r, beta, t, alpha, crow_indices, col_indices, values, dense); return r; } Tensor& add_out_dense_sparse_csr_cuda( Tensor& output, const Tensor& dense, const SparseCsrTensor& src, const Scalar& alpha) { TORCH_INTERNAL_ASSERT(dense.layout() == kStrided); TORCH_INTERNAL_ASSERT(src.is_sparse_csr()); TORCH_INTERNAL_ASSERT(dense.is_cuda()); TORCH_CHECK( output.is_contiguous(), "out argument must be contiguous, but got: ", output.suggest_memory_format()); TORCH_CHECK( output.is_cuda(), "add: expected 'out' to be CUDA tensor, but got tensor on device: ", output.device()); TORCH_CHECK( src.is_cuda(), "add: expected 'other' to be a CUDA tensor, but got tensor on device: ", src.device()); TORCH_CHECK( dense.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ", dense.sizes(), " while other has size ", src.sizes(), " (FYI: dense-sparse addition does not currently support broadcasting)"); auto commonDtype = promoteTypes(dense.scalar_type(), src.scalar_type()); TORCH_CHECK( canCast(commonDtype, output.scalar_type()), "Can't convert result type ", commonDtype, " to output ", output.scalar_type(), " in add operation"); Tensor src_values = src.values(); Tensor src_crow_indices = src.crow_indices(); Tensor src_col_indices = src.col_indices(); resize_output(output, dense.sizes()); Tensor resultBuffer = output; Tensor valuesBuffer = src_values.to(commonDtype); if (output.scalar_type() != commonDtype) { resultBuffer = dense.to(commonDtype); } else if (!is_same_tensor(output, dense)) { resultBuffer.copy_(dense); } AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( kHalf, kBool, kBFloat16, commonDtype, "add_out_op2_sparse_csr", [&valuesBuffer, &resultBuffer, &alpha, &src_crow_indices, &src_col_indices]() { AT_DISPATCH_INDEX_TYPES( src_crow_indices.scalar_type(), "csr_add_out_crow_indices", [&valuesBuffer, &resultBuffer, &alpha, &src_crow_indices, &src_col_indices]() { scalar_t* values_accessor = valuesBuffer.data_ptr<scalar_t>(); scalar_t* out_ptr = resultBuffer.data_ptr<scalar_t>(); scalar_t cast_value = alpha.to<scalar_t>(); index_t* crow_indices_accessor = src_crow_indices.data_ptr<index_t>(); index_t* col_indices_accessor = src_col_indices.data_ptr<index_t>(); int64_t out_storage_offset = resultBuffer.storage_offset(); auto out_strides = resultBuffer.strides(); int64_t out_strides0 = out_strides[0]; int64_t out_strides1 = out_strides[1]; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto allocator = 
THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::hip::par(allocator).on(stream); // Note that this could be wildly imbalanced if the sparsity pattern varies a lot between rows. thrust::for_each( policy, thrust::make_counting_iterator(int64_t(0)), thrust::make_counting_iterator(int64_t(src_crow_indices.size(0) - 1)), [values_accessor, crow_indices_accessor, col_indices_accessor, out_ptr, out_storage_offset, out_strides0, cast_value, out_strides1 ]__device__(int64_t irow) { index_t start_index = crow_indices_accessor[irow]; index_t end_index = crow_indices_accessor[irow + 1]; for (index_t i = start_index; i < end_index; ++i) { auto icol = col_indices_accessor[i]; auto index = out_storage_offset + irow * out_strides0 + icol * out_strides1; out_ptr[index] += cast_value * values_accessor[i]; } }); }); }); if (output.scalar_type() != commonDtype) { output.copy_(resultBuffer); } return output; } Tensor& add_out_sparse_csr_cuda( const Tensor& self, const SparseCsrTensor& other, const Scalar& alpha, SparseCsrTensor& out) { if (self.layout() == kStrided) { return add_out_dense_sparse_csr_cuda(out, self, other, alpha); } else { TORCH_CHECK( false, "NotImplementedError: Addition of sparse CSR tensors is not yet implemented.") } return out; } TORCH_IMPL_FUNC(_convert_indices_from_coo_to_csr_structured_cuda) ( const Tensor& input, const int64_t size, const bool out_int32, const Tensor& result ) { if (out_int32) { AT_DISPATCH_INTEGRAL_TYPES(input.scalar_type(), "convert_indices_from_coo_to_csr_cuda", [&] { convert_indices_from_coo_to_csr_cuda<scalar_t, int>(result, input, size); }); } else { AT_DISPATCH_INTEGRAL_TYPES(input.scalar_type(), "convert_indices_from_coo_to_csr_cuda", [&] { convert_indices_from_coo_to_csr_cuda<scalar_t, int64_t>(result, input, size); }); } } } // namespace native } // namespace at
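// How convert_indices_from_coo_to_csr_cuda_kernel divides the work: thread tid
// (1 <= tid < numel) writes the compressed row pointers for every row boundary
// between input entries tid-1 and tid, thread 0 fills the leading pointers
// through the first occupied row, and thread numel closes out the trailing
// rows, which is why the grid is sized for at least numel + 1 threads. For
// example, sorted COO row indices {0, 0, 1, 3} with size = 4 produce
// crow_indices {0, 2, 3, 3, 4}.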
deb194dbdaa5e6f703b1dfc566737d31882394f2.cu
#include <ATen/ATen.h> #include <ATen/ExpandUtils.h> #include <ATen/InitialTensorOptions.h> #include <ATen/NativeFunctions.h> #include <ATen/SparseCsrTensorImpl.h> #include <ATen/SparseCsrTensorUtils.h> #include <ATen/SparseTensorUtils.h> #include <ATen/WrapDimUtilsMulti.h> #include <ATen/native/BinaryOps.h> #include <ATen/native/Resize.h> #include <algorithm> #include <cuda_runtime.h> #include <type_traits> #include <THC/THCThrustAllocator.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAUtils.h> #include <c10/cuda/CUDACachingAllocator.h> #include <ATen/native/sparse/cuda/SparseBlasLegacy.h> #include <ATen/native/sparse/cuda/SparseCUDABlas.h> #include <ATen/native/sparse/cuda/SparseCUDATensorMath.cuh> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/for_each.h> #include <thrust/sequence.h> namespace at { namespace native { namespace { template <typename input_t, typename output_t> __global__ void convert_indices_from_coo_to_csr_cuda_kernel(output_t* data_out, const input_t* data_in, const int64_t size, const int64_t numel) { int64_t tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid == 0) { for (int64_t i = 0; i <= data_in[0]; i++) data_out[i] = static_cast<output_t>(0); } else if (tid < numel) { for (int64_t i = data_in[tid - 1]; i < data_in[tid]; i++) data_out[i + 1] = static_cast<output_t>(tid); } else if (tid == numel) { for (int64_t i = data_in[numel - 1] + 1; i < size + 1; i++) data_out[i] = static_cast<output_t>(numel); } } template <typename input_t, typename output_t> void convert_indices_from_coo_to_csr_cuda(const Tensor& result, const Tensor& input, const int64_t size) { int64_t numel = input.numel(); const input_t* data_in = input.data_ptr<input_t>(); output_t* data_out = result.data_ptr<output_t>(); if (numel == 0) { result.zero_(); return; } // Run (numel + 1) threads... int64_t THREADS = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; int64_t BLOCKS = (numel + THREADS) / THREADS; at::cuda::CUDAStream stream = at::cuda::getCurrentCUDAStream(); convert_indices_from_coo_to_csr_cuda_kernel<<<BLOCKS, THREADS, 0, stream>>>(data_out, data_in, size, numel); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } // namespace using namespace at::sparse_csr; // certain utiliy functions are usable from sparse COO. using namespace at::sparse; Tensor& addmm_out_sparse_csr_dense_cuda( const Tensor& self, const SparseCsrTensor& sparse, const Tensor& dense, const Scalar& beta, const Scalar& alpha, Tensor& r) { TORCH_INTERNAL_ASSERT(sparse.is_sparse_csr()); Tensor t = *expand_size(self, {sparse.size(0), dense.size(1)}, "addmm_out_sparse_csr"); TORCH_CHECK(t.is_cuda(), "Expected all tensors to be on the same device. addmm expected 't' to be CUDA tensor"); TORCH_CHECK( r.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'out' to be CUDA tensor, but got CPU tensor"); TORCH_CHECK( sparse.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'mat1' to be a CUDA tensor, but got a CPU tensor"); TORCH_CHECK( dense.is_cuda(), "Expected all tensors to be on the same device. 
addmm: expected 'mat2' to be a CUDA tensor, but got a CPU tensor"); TORCH_CHECK( sparse.dim() == 2, "addmm: 2-D matrices expected, got ", sparse.dim(), "D tensor"); TORCH_CHECK( dense.dim() == 2, "addmm: 2-D matrices expected, got ", dense.dim(), "D tensor"); TORCH_CHECK( r.is_contiguous(), "out argument must be contiguous, but got: ", r.suggest_memory_format()); // mxk * kxn = mxn int64_t m = sparse.size(0); int64_t k = sparse.size(1); int64_t n = dense.size(1); TORCH_CHECK( dense.size(0) == k, "addmm: Expected dense matrix (dense) size(0)=", k, ", got ", dense.size(0)); resize_output(r, {m, n}); int64_t nnz = sparse._nnz(); if (nnz == 0) { at::mul_out(r, t, at::scalar_tensor(beta, r.options())); return r; } // TODO: Check if cusparseSpMM can use 64-bit indices // https://docs.nvidia.com/cuda/cusparse/index.html auto col_indices = sparse.col_indices().to(at::kInt); auto crow_indices = sparse.crow_indices().to(at::kInt); auto values = sparse.values(); s_addmm_out_csr_sparse_dense_cuda_worker(nnz, m, n, k, r, beta, t, alpha, crow_indices, col_indices, values, dense); return r; } Tensor& add_out_dense_sparse_csr_cuda( Tensor& output, const Tensor& dense, const SparseCsrTensor& src, const Scalar& alpha) { TORCH_INTERNAL_ASSERT(dense.layout() == kStrided); TORCH_INTERNAL_ASSERT(src.is_sparse_csr()); TORCH_INTERNAL_ASSERT(dense.is_cuda()); TORCH_CHECK( output.is_contiguous(), "out argument must be contiguous, but got: ", output.suggest_memory_format()); TORCH_CHECK( output.is_cuda(), "add: expected 'out' to be CUDA tensor, but got tensor on device: ", output.device()); TORCH_CHECK( src.is_cuda(), "add: expected 'other' to be a CUDA tensor, but got tensor on device: ", src.device()); TORCH_CHECK( dense.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ", dense.sizes(), " while other has size ", src.sizes(), " (FYI: dense-sparse addition does not currently support broadcasting)"); auto commonDtype = promoteTypes(dense.scalar_type(), src.scalar_type()); TORCH_CHECK( canCast(commonDtype, output.scalar_type()), "Can't convert result type ", commonDtype, " to output ", output.scalar_type(), " in add operation"); Tensor src_values = src.values(); Tensor src_crow_indices = src.crow_indices(); Tensor src_col_indices = src.col_indices(); resize_output(output, dense.sizes()); Tensor resultBuffer = output; Tensor valuesBuffer = src_values.to(commonDtype); if (output.scalar_type() != commonDtype) { resultBuffer = dense.to(commonDtype); } else if (!is_same_tensor(output, dense)) { resultBuffer.copy_(dense); } AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( kHalf, kBool, kBFloat16, commonDtype, "add_out_op2_sparse_csr", [&valuesBuffer, &resultBuffer, &alpha, &src_crow_indices, &src_col_indices]() { AT_DISPATCH_INDEX_TYPES( src_crow_indices.scalar_type(), "csr_add_out_crow_indices", [&valuesBuffer, &resultBuffer, &alpha, &src_crow_indices, &src_col_indices]() { scalar_t* values_accessor = valuesBuffer.data_ptr<scalar_t>(); scalar_t* out_ptr = resultBuffer.data_ptr<scalar_t>(); scalar_t cast_value = alpha.to<scalar_t>(); index_t* crow_indices_accessor = src_crow_indices.data_ptr<index_t>(); index_t* col_indices_accessor = src_col_indices.data_ptr<index_t>(); int64_t out_storage_offset = resultBuffer.storage_offset(); auto out_strides = resultBuffer.strides(); int64_t out_strides0 = out_strides[0]; int64_t out_strides1 = out_strides[1]; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy 
= thrust::cuda::par(allocator).on(stream); // Note that this could be wildly imbalanced if the sparsity pattern varies a lot between rows. thrust::for_each( policy, thrust::make_counting_iterator(int64_t(0)), thrust::make_counting_iterator(int64_t(src_crow_indices.size(0) - 1)), [values_accessor, crow_indices_accessor, col_indices_accessor, out_ptr, out_storage_offset, out_strides0, cast_value, out_strides1 ]__device__(int64_t irow) { index_t start_index = crow_indices_accessor[irow]; index_t end_index = crow_indices_accessor[irow + 1]; for (index_t i = start_index; i < end_index; ++i) { auto icol = col_indices_accessor[i]; auto index = out_storage_offset + irow * out_strides0 + icol * out_strides1; out_ptr[index] += cast_value * values_accessor[i]; } }); }); }); if (output.scalar_type() != commonDtype) { output.copy_(resultBuffer); } return output; } Tensor& add_out_sparse_csr_cuda( const Tensor& self, const SparseCsrTensor& other, const Scalar& alpha, SparseCsrTensor& out) { if (self.layout() == kStrided) { return add_out_dense_sparse_csr_cuda(out, self, other, alpha); } else { TORCH_CHECK( false, "NotImplementedError: Addition of sparse CSR tensors is not yet implemented.") } return out; } TORCH_IMPL_FUNC(_convert_indices_from_coo_to_csr_structured_cuda) ( const Tensor& input, const int64_t size, const bool out_int32, const Tensor& result ) { if (out_int32) { AT_DISPATCH_INTEGRAL_TYPES(input.scalar_type(), "convert_indices_from_coo_to_csr_cuda", [&] { convert_indices_from_coo_to_csr_cuda<scalar_t, int>(result, input, size); }); } else { AT_DISPATCH_INTEGRAL_TYPES(input.scalar_type(), "convert_indices_from_coo_to_csr_cuda", [&] { convert_indices_from_coo_to_csr_cuda<scalar_t, int64_t>(result, input, size); }); } } } // namespace native } // namespace at
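The convert_indices_from_coo_to_csr_cuda_kernel above gives each thread one position in the sorted COO row-index array and has it write the CSR row-pointer entries that begin at that position. A small standalone sketch of the same indexing scheme, with demo names, a single block, and no error checking, plus a tiny worked example:

// Sketch: sorted COO row indices -> CSR row pointers, one thread per input index.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void coo_to_csr_demo(int* crow, const int* rows, int nrows, int nnz) {
  int tid = blockDim.x * blockIdx.x + threadIdx.x;
  if (tid == 0) {
    for (int i = 0; i <= rows[0]; i++) crow[i] = 0;                      // leading empty rows
  } else if (tid < nnz) {
    for (int i = rows[tid - 1]; i < rows[tid]; i++) crow[i + 1] = tid;   // row boundaries
  } else if (tid == nnz) {
    for (int i = rows[nnz - 1] + 1; i <= nrows; i++) crow[i] = nnz;      // trailing empty rows
  }
}

int main() {
  const int nnz = 5, nrows = 4;
  int h_rows[nnz] = {0, 0, 1, 3, 3};   // sorted COO row indices
  int h_crow[nrows + 1];
  int *d_rows, *d_crow;
  cudaMalloc(&d_rows, nnz * sizeof(int));
  cudaMalloc(&d_crow, (nrows + 1) * sizeof(int));
  cudaMemcpy(d_rows, h_rows, nnz * sizeof(int), cudaMemcpyHostToDevice);
  coo_to_csr_demo<<<1, nnz + 1>>>(d_crow, d_rows, nrows, nnz);
  cudaMemcpy(h_crow, d_crow, (nrows + 1) * sizeof(int), cudaMemcpyDeviceToHost);
  for (int i = 0; i <= nrows; i++) printf("%d ", h_crow[i]);   // expected: 0 2 3 3 5
  printf("\n");
  cudaFree(d_rows);
  cudaFree(d_crow);
  return 0;
}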
162ce4ec668f325934fdca4c230545e8bce7c2d5.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>

__global__ void saxpy(int n, float a, float* x, float* y)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n)
    y[i] = a * x[i] + y[i];
}

int main(void)
{
  int N = 1 << 20;
  float *x, *y, *d_x, *d_y;

  x = (float*) malloc(N * sizeof(float));
  y = (float*) malloc(N * sizeof(float));

  hipMalloc(&d_x, N * sizeof(float));
  hipMalloc(&d_y, N * sizeof(float));

  for (int i = 0; i < N; ++i)
  {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }

  hipMemcpy(d_x, x, N * sizeof(float), hipMemcpyHostToDevice);
  hipMemcpy(d_y, y, N * sizeof(float), hipMemcpyHostToDevice);

  // y = 2*x + y over all N elements
  hipLaunchKernelGGL(saxpy, dim3((N + 255) / 256), dim3(256), 0, 0, N, 2.0f, d_x, d_y);

  hipMemcpy(y, d_y, N * sizeof(float), hipMemcpyDeviceToHost);

  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = fmaxf(maxError, fabsf(y[i] - 5.0f));  // float variants; abs()/max() would truncate to int
  printf("Max error: %f\n", maxError);

  hipFree(d_x);
  hipFree(d_y);
  free(x);
  free(y);
}
162ce4ec668f325934fdca4c230545e8bce7c2d5.cu
#include <stdio.h>
#include <math.h>

__global__ void saxpy(int n, float a, float* x, float* y)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n)
    y[i] = a * x[i] + y[i];
}

int main(void)
{
  int N = 1 << 20;
  float *x, *y, *d_x, *d_y;

  x = (float*) malloc(N * sizeof(float));
  y = (float*) malloc(N * sizeof(float));

  cudaMalloc(&d_x, N * sizeof(float));
  cudaMalloc(&d_y, N * sizeof(float));

  for (int i = 0; i < N; ++i)
  {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }

  cudaMemcpy(d_x, x, N * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_y, y, N * sizeof(float), cudaMemcpyHostToDevice);

  // y = 2*x + y over all N elements
  saxpy<<<(N + 255) / 256, 256>>>(N, 2.0f, d_x, d_y);

  cudaMemcpy(y, d_y, N * sizeof(float), cudaMemcpyDeviceToHost);

  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = fmaxf(maxError, fabsf(y[i] - 5.0f));  // float variants; abs()/max() would truncate to int
  printf("Max error: %f\n", maxError);

  cudaFree(d_x);
  cudaFree(d_y);
  free(x);
  free(y);
}
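The saxpy pair above, in the usual introductory style, checks no API or launch errors. A hedged sketch of the same launch geometry with basic error checking; the CHECK macro and the cudaMemset initialization are illustrative additions, not part of the original sample:

// Sketch: saxpy launch wrapped with simple runtime-API and launch error checks.
#include <cstdio>
#include <cuda_runtime.h>

#define CHECK(call)                                                       \
  do {                                                                    \
    cudaError_t err_ = (call);                                            \
    if (err_ != cudaSuccess) {                                            \
      printf("CUDA error %s at %s:%d\n", cudaGetErrorString(err_),        \
             __FILE__, __LINE__);                                         \
      return 1;   /* only valid because the macro is used inside main */  \
    }                                                                     \
  } while (0)

__global__ void saxpy(int n, float a, const float* x, float* y) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) y[i] = a * x[i] + y[i];
}

int main() {
  const int N = 1 << 20;
  float *d_x = nullptr, *d_y = nullptr;
  CHECK(cudaMalloc(&d_x, N * sizeof(float)));
  CHECK(cudaMalloc(&d_y, N * sizeof(float)));
  CHECK(cudaMemset(d_x, 0, N * sizeof(float)));
  CHECK(cudaMemset(d_y, 0, N * sizeof(float)));
  saxpy<<<(N + 255) / 256, 256>>>(N, 2.0f, d_x, d_y);
  CHECK(cudaGetLastError());        // catches launch-configuration errors
  CHECK(cudaDeviceSynchronize());   // catches asynchronous execution errors
  CHECK(cudaFree(d_x));
  CHECK(cudaFree(d_y));
  printf("saxpy launch OK\n");
  return 0;
}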
b07db293d432c08d21959cd731675dadf003ea21.hip
// !!! This is a file automatically generated by hipify!!! #include<cuda.h> #include<stdio.h> #include<cuda_runtime.h> #include <hip/hip_runtime_api.h> #define NSTREAM 8 void matricMul(int *A, int *B, int *C, int size) { for (int col = 0; col < size; col++) { for (int row = 0; row < size; row++) { int outidx = col * size + row; for (int idx = 0; idx < size; idx++) { C[outidx] += A[col*size + idx] * B[idx*size + row]; } } } } void matrixMulCheck(int *C_cpu, int *C_gpu, int size) { bool ResultFlag = true; // Print the result for (int i = 0; i < size; i++) { if (C_cpu[i] != C_gpu[i]) { ResultFlag = false; printf("Error: C_cpu[%d] = %d; C_gpu[%d] = %d;\n", i, C_cpu[i], i, C_gpu[i]); break; } } if (ResultFlag == true) printf("Matrix Multiplication OK!\n"); else printf("Matrix Multiplication Error!\n"); } __global__ void matrixMulDepth(int *A, int *B, int *C, int size) { int tid, tx, ty; tx = threadIdx.x + blockDim.x * blockIdx.x; ty = threadIdx.y + blockDim.y * blockIdx.y; tid = size * ty + tx; int Aval = 0; int Bval = 0; int Cval = 0; for (int i = 0; i < size; i++) { Aval = A[ty * size + i]; Bval = B[i * size + tx]; Cval += Aval * Bval; } C[tid] = Cval; } int main() { int nx = 1600; int ny = 1600; int dimx = 32; int dimy = 16; dim3 block(dimx, dimy); // Block dimension 32x16 dim3 grid((nx+block.x-1)/block.x, (ny/NSTREAM+block.y-1)/block.y); char *iname = "CUDA_DEVICE_MAX_CONNECTIONS"; setenv(iname, "4", 1); int MatrixSize = nx * ny; int BufferSize = MatrixSize * sizeof(int); int iElem = MatrixSize / NSTREAM; int ibytes = iElem * sizeof(int); int *h_A, *h_B, *h_C; int *C_cpu; // Create streams hipStream_t *stream = (hipStream_t*)malloc(NSTREAM*sizeof(hipStream_t)); for (int i = 0; i < NSTREAM; i++) { hipStreamCreate(&stream[i]); } // Host memory allocation hipHostMalloc((void**)&h_A, BufferSize, hipHostMallocDefault); hipHostMalloc((void**)&h_B, BufferSize, hipHostMallocDefault); hipHostMalloc((void**)&h_C, BufferSize, hipHostMallocDefault); hipHostMalloc((void**)&C_cpu, BufferSize, hipHostMallocDefault); // Data input for (int i = 0; i < MatrixSize; i++) { h_A[i] = i % 100; h_B[i] = i % 100; h_C[i] = 0; C_cpu[i] = 0; } int *d_A, *d_B, *d_C; // Device memory allocation hipMalloc((void**)&d_A, BufferSize); hipMalloc((void**)&d_B, BufferSize); hipMalloc((void**)&d_C, BufferSize); hipHostGetDevicePointer((void**)&d_B, (void*)h_B, 0); for (int i = 0; i < NSTREAM; i++) { int ioffset = i * iElem; hipMemcpyAsync(&d_A[ioffset], &h_A[ioffset], ibytes, hipMemcpyHostToDevice, stream[i]); hipLaunchKernelGGL(( matrixMulDepth), dim3(grid), dim3(block), 0, stream[i], &d_A[ioffset], d_B, &d_C[ioffset], nx); hipMemcpyAsync(&h_C[ioffset], &d_C[ioffset], ibytes, hipMemcpyDeviceToHost, stream[i]); hipStreamSynchronize(stream[i]); } hipProfilerStop(); // Check result matricMul(h_A, h_B, C_cpu, nx); matrixMulCheck(C_cpu, h_C, nx); // Free device memory hipFree(d_A); hipFree(d_B); hipFree(d_C); // Free host memory free(h_A); free(h_B); free(h_C); free(C_cpu); return 0; }
b07db293d432c08d21959cd731675dadf003ea21.cu
#include<cuda.h> #include<stdio.h> #include<cuda_runtime.h> #include <cuda_profiler_api.h> #define NSTREAM 8 void matricMul(int *A, int *B, int *C, int size) { for (int col = 0; col < size; col++) { for (int row = 0; row < size; row++) { int outidx = col * size + row; for (int idx = 0; idx < size; idx++) { C[outidx] += A[col*size + idx] * B[idx*size + row]; } } } } void matrixMulCheck(int *C_cpu, int *C_gpu, int size) { bool ResultFlag = true; // Print the result for (int i = 0; i < size; i++) { if (C_cpu[i] != C_gpu[i]) { ResultFlag = false; printf("Error: C_cpu[%d] = %d; C_gpu[%d] = %d;\n", i, C_cpu[i], i, C_gpu[i]); break; } } if (ResultFlag == true) printf("Matrix Multiplication OK!\n"); else printf("Matrix Multiplication Error!\n"); } __global__ void matrixMulDepth(int *A, int *B, int *C, int size) { int tid, tx, ty; tx = threadIdx.x + blockDim.x * blockIdx.x; ty = threadIdx.y + blockDim.y * blockIdx.y; tid = size * ty + tx; int Aval = 0; int Bval = 0; int Cval = 0; for (int i = 0; i < size; i++) { Aval = A[ty * size + i]; Bval = B[i * size + tx]; Cval += Aval * Bval; } C[tid] = Cval; } int main() { int nx = 1600; int ny = 1600; int dimx = 32; int dimy = 16; dim3 block(dimx, dimy); // Block dimension 32x16 dim3 grid((nx+block.x-1)/block.x, (ny/NSTREAM+block.y-1)/block.y); char *iname = "CUDA_DEVICE_MAX_CONNECTIONS"; setenv(iname, "4", 1); int MatrixSize = nx * ny; int BufferSize = MatrixSize * sizeof(int); int iElem = MatrixSize / NSTREAM; int ibytes = iElem * sizeof(int); int *h_A, *h_B, *h_C; int *C_cpu; // Create streams cudaStream_t *stream = (cudaStream_t*)malloc(NSTREAM*sizeof(cudaStream_t)); for (int i = 0; i < NSTREAM; i++) { cudaStreamCreate(&stream[i]); } // Host memory allocation cudaHostAlloc((void**)&h_A, BufferSize, cudaHostAllocDefault); cudaHostAlloc((void**)&h_B, BufferSize, cudaHostAllocDefault); cudaHostAlloc((void**)&h_C, BufferSize, cudaHostAllocDefault); cudaHostAlloc((void**)&C_cpu, BufferSize, cudaHostAllocDefault); // Data input for (int i = 0; i < MatrixSize; i++) { h_A[i] = i % 100; h_B[i] = i % 100; h_C[i] = 0; C_cpu[i] = 0; } int *d_A, *d_B, *d_C; // Device memory allocation cudaMalloc((void**)&d_A, BufferSize); cudaMalloc((void**)&d_B, BufferSize); cudaMalloc((void**)&d_C, BufferSize); cudaHostGetDevicePointer((void**)&d_B, (void*)h_B, 0); for (int i = 0; i < NSTREAM; i++) { int ioffset = i * iElem; cudaMemcpyAsync(&d_A[ioffset], &h_A[ioffset], ibytes, cudaMemcpyHostToDevice, stream[i]); matrixMulDepth<<<grid, block, 0, stream[i]>>>(&d_A[ioffset], d_B, &d_C[ioffset], nx); cudaMemcpyAsync(&h_C[ioffset], &d_C[ioffset], ibytes, cudaMemcpyDeviceToHost, stream[i]); cudaStreamSynchronize(stream[i]); } cudaProfilerStop(); // Check result matricMul(h_A, h_B, C_cpu, nx); matrixMulCheck(C_cpu, h_C, nx); // Free device memory cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); // Free host memory free(h_A); free(h_B); free(h_C); free(C_cpu); return 0; }
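The streamed matrix-multiply pair above issues its H2D copy, kernel, and D2H copy per stream, but releases the cudaHostAlloc'd host buffers with plain free(), which is not the matching deallocator. A minimal sketch of the same depth-first stream pattern with pinned memory paired to cudaFreeHost; the kernel and sizes here are placeholders, not the original matrixMulDepth:

// Sketch: per-stream copy/compute/copy with pinned host memory and matching frees.
#include <cstdio>
#include <cstring>
#include <cuda_runtime.h>

__global__ void scale2(const int* in, int* out, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = in[i] * 2;
}

int main() {
  const int NSTREAM = 8, N = 1 << 20, chunk = N / NSTREAM;
  int *h_in, *h_out, *d_in, *d_out;
  cudaHostAlloc((void**)&h_in,  N * sizeof(int), cudaHostAllocDefault);   // pinned
  cudaHostAlloc((void**)&h_out, N * sizeof(int), cudaHostAllocDefault);   // pinned
  memset(h_in, 0, N * sizeof(int));
  cudaMalloc((void**)&d_in,  N * sizeof(int));
  cudaMalloc((void**)&d_out, N * sizeof(int));

  cudaStream_t stream[NSTREAM];
  for (int i = 0; i < NSTREAM; i++) cudaStreamCreate(&stream[i]);

  for (int i = 0; i < NSTREAM; i++) {   // each stream: copy in, compute, copy out
    int off = i * chunk;
    cudaMemcpyAsync(&d_in[off], &h_in[off], chunk * sizeof(int),
                    cudaMemcpyHostToDevice, stream[i]);
    scale2<<<(chunk + 255) / 256, 256, 0, stream[i]>>>(&d_in[off], &d_out[off], chunk);
    cudaMemcpyAsync(&h_out[off], &d_out[off], chunk * sizeof(int),
                    cudaMemcpyDeviceToHost, stream[i]);
  }
  cudaDeviceSynchronize();              // wait for all streams once, not inside the loop
  printf("h_out[0] = %d\n", h_out[0]);  // 0, since h_in was zeroed

  for (int i = 0; i < NSTREAM; i++) cudaStreamDestroy(stream[i]);
  cudaFree(d_in);
  cudaFree(d_out);
  cudaFreeHost(h_in);    // matching deallocator for cudaHostAlloc
  cudaFreeHost(h_out);
  return 0;
}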
6dcfc559a02b957864bd122828ec9c3731f2a6bf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma, created on 28.11.2018 // #include <ops/specials_cuda.h> ////////////////////////////////////////////////////////////////////////// template <typename X, typename Y> __global__ void bitonicArbitraryStepKernelKey(void *vx, Nd4jLong const* xShapeInfo, void *vy, Nd4jLong const* yShapeInfo, int window, int length, int reverse, bool descending) { auto x = static_cast<X*>(vx); auto y = static_cast<Y*>(vy); int tid = threadIdx.x + blockDim.x * blockIdx.x; int half = window>>1; __shared__ Nd4jLong xLength; if (threadIdx.x == 0) { xLength = shape::length(xShapeInfo); } __syncthreads(); //for (int i = 0; i < length; i+= window) /* if window == 4; iterations will be: 0; 4; 8; 12; 16; 20 if gridDim = 3; on first iteration we'll have: 0; 4; 8; on second iteration we'll have: 0 + (3 * 4) = 12; 4 + (3 * 4) = 16; 8 + (3 * 4) = 20 */ int firstPosition; int firstStep; int secondPosition; int secondStep; int WARP_SIZE = 32; int numWarps = (gridDim.x * blockDim.x) / 32; int warpId = tid / WARP_SIZE; int warpIdx = tid % WARP_SIZE; if (half >= 128) { firstPosition = blockIdx.x * window; firstStep = gridDim.x * window; secondPosition = threadIdx.x; secondStep = blockDim.x; } else if (half >= 32) { firstPosition = warpId * window; firstStep = numWarps * window; secondPosition = warpIdx; secondStep = WARP_SIZE; } else { firstPosition = tid * window; firstStep = blockDim.x * gridDim.x * window; secondPosition = 0; secondStep = 1; } for (int i = firstPosition; i < length; i += firstStep) { for (int j = secondPosition; j < half; j += secondStep) { int it = (reverse) ? 
i + j + half : i + window - j - 1; int ij = i+j; if (it < length && ij < length ) { int posIT = shape::getIndexOffset(it, xShapeInfo); int posIJ = shape::getIndexOffset(ij, xShapeInfo); X v0 = x[posIJ]; X v1 = x[posIT]; if(!descending == (v0 > v1)) { x[posIJ] = v1; x[posIT] = v0; Y ytemp = y[posIJ]; y[posIJ] = y[posIT]; y[posIT] = ytemp; } } } } } ////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void execBitonicArbitraryStepKernel(void *vx, Nd4jLong const* xShapeInfo, int window, int length, int reverse, bool descending) { auto x = static_cast<T*>(vx); int tid = threadIdx.x + blockDim.x * blockIdx.x; int half = window>>1; __shared__ T *shmem; __shared__ Nd4jLong xLength; if (threadIdx.x == 0) { extern __shared__ unsigned char shrd[]; shmem = (T *) shrd; xLength = shape::length(xShapeInfo); } __syncthreads(); //for (int i = 0; i < length; i+= window) /* if window == 4; iterations will be: 0; 4; 8; 12; 16; 20 if gridDim = 3; on first iteration we'll have: 0; 4; 8; on second iteration we'll have: 0 + (3 * 4) = 12; 4 + (3 * 4) = 16; 8 + (3 * 4) = 20 */ int firstPosition; int firstStep; int secondPosition; int secondStep; int WARP_SIZE = 32; int numWarps = (gridDim.x * blockDim.x) / 32; int warpId = tid / WARP_SIZE; int warpIdx = tid % WARP_SIZE; if (half >= 128) { firstPosition = blockIdx.x * window; firstStep = gridDim.x * window; secondPosition = threadIdx.x; secondStep = blockDim.x; } else if (half >= 32) { firstPosition = warpId * window; firstStep = numWarps * window; secondPosition = warpIdx; secondStep = WARP_SIZE; } else { firstPosition = tid * window; firstStep = blockDim.x * gridDim.x * window; secondPosition = 0; secondStep = 1; } for (int i = firstPosition; i < length; i += firstStep) { for (int j = secondPosition; j < half; j += secondStep) { int it = (reverse) ? 
i + j + half : i + window - j - 1; int ij = i+j; if (it < length && ij < length ) { int posIT = shape::getIndexOffset(it, xShapeInfo); int posIJ = shape::getIndexOffset(ij, xShapeInfo); shmem[threadIdx.x] = x[posIJ]; shmem[threadIdx.x + blockDim.x] = x[posIT]; if(!descending == (shmem[threadIdx.x] > shmem[threadIdx.x + blockDim.x])) { x[posIJ] = shmem[threadIdx.x + blockDim.x]; x[posIT] = shmem[threadIdx.x]; } } } } } ////////////////////////////////////////////////////////////////////////// template<typename T> __host__ void bitonicArbitraryStepGeneric(dim3 &launchDims, hipStream_t *stream, void *vx, Nd4jLong const* xShapeInfo, int window, int length, int reverse, bool descending) { hipLaunchKernelGGL(( execBitonicArbitraryStepKernel<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, vx, xShapeInfo, window, length, reverse, descending); } template <typename X, typename Y> __host__ void bitonicArbitraryStepGenericKey(dim3 &launchDims, hipStream_t *stream, void *vx, Nd4jLong const* xShapeInfo, void *vy, Nd4jLong const* yShapeInfo, int window, int length, int reverse, bool descending) { hipLaunchKernelGGL(( bitonicArbitraryStepKernelKey<X,Y>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, vx, xShapeInfo, vy, yShapeInfo, window, length, reverse, descending); } BUILD_SINGLE_TEMPLATE(template void ND4J_LOCAL bitonicArbitraryStepGeneric, (dim3 &launchDims, hipStream_t *stream, void *vx, Nd4jLong const* xShapeInfo, int window, int length, int reverse, bool descending), LIBND4J_TYPES); BUILD_DOUBLE_TEMPLATE(template void ND4J_LOCAL bitonicArbitraryStepGenericKey, (dim3 &launchDims, hipStream_t *stream, void *vx, Nd4jLong const* xShapeInfo, void *vy, Nd4jLong const* yShapeInfo, int window, int length, int reverse, bool descending), LIBND4J_TYPES, LIBND4J_TYPES);
6dcfc559a02b957864bd122828ec9c3731f2a6bf.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma, created on 28.11.2018 // #include <ops/specials_cuda.h> ////////////////////////////////////////////////////////////////////////// template <typename X, typename Y> __global__ void bitonicArbitraryStepKernelKey(void *vx, Nd4jLong const* xShapeInfo, void *vy, Nd4jLong const* yShapeInfo, int window, int length, int reverse, bool descending) { auto x = static_cast<X*>(vx); auto y = static_cast<Y*>(vy); int tid = threadIdx.x + blockDim.x * blockIdx.x; int half = window>>1; __shared__ Nd4jLong xLength; if (threadIdx.x == 0) { xLength = shape::length(xShapeInfo); } __syncthreads(); //for (int i = 0; i < length; i+= window) /* if window == 4; iterations will be: 0; 4; 8; 12; 16; 20 if gridDim = 3; on first iteration we'll have: 0; 4; 8; on second iteration we'll have: 0 + (3 * 4) = 12; 4 + (3 * 4) = 16; 8 + (3 * 4) = 20 */ int firstPosition; int firstStep; int secondPosition; int secondStep; int WARP_SIZE = 32; int numWarps = (gridDim.x * blockDim.x) / 32; int warpId = tid / WARP_SIZE; int warpIdx = tid % WARP_SIZE; if (half >= 128) { firstPosition = blockIdx.x * window; firstStep = gridDim.x * window; secondPosition = threadIdx.x; secondStep = blockDim.x; } else if (half >= 32) { firstPosition = warpId * window; firstStep = numWarps * window; secondPosition = warpIdx; secondStep = WARP_SIZE; } else { firstPosition = tid * window; firstStep = blockDim.x * gridDim.x * window; secondPosition = 0; secondStep = 1; } for (int i = firstPosition; i < length; i += firstStep) { for (int j = secondPosition; j < half; j += secondStep) { int it = (reverse) ? 
i + j + half : i + window - j - 1; int ij = i+j; if (it < length && ij < length ) { int posIT = shape::getIndexOffset(it, xShapeInfo); int posIJ = shape::getIndexOffset(ij, xShapeInfo); X v0 = x[posIJ]; X v1 = x[posIT]; if(!descending == (v0 > v1)) { x[posIJ] = v1; x[posIT] = v0; Y ytemp = y[posIJ]; y[posIJ] = y[posIT]; y[posIT] = ytemp; } } } } } ////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void execBitonicArbitraryStepKernel(void *vx, Nd4jLong const* xShapeInfo, int window, int length, int reverse, bool descending) { auto x = static_cast<T*>(vx); int tid = threadIdx.x + blockDim.x * blockIdx.x; int half = window>>1; __shared__ T *shmem; __shared__ Nd4jLong xLength; if (threadIdx.x == 0) { extern __shared__ unsigned char shrd[]; shmem = (T *) shrd; xLength = shape::length(xShapeInfo); } __syncthreads(); //for (int i = 0; i < length; i+= window) /* if window == 4; iterations will be: 0; 4; 8; 12; 16; 20 if gridDim = 3; on first iteration we'll have: 0; 4; 8; on second iteration we'll have: 0 + (3 * 4) = 12; 4 + (3 * 4) = 16; 8 + (3 * 4) = 20 */ int firstPosition; int firstStep; int secondPosition; int secondStep; int WARP_SIZE = 32; int numWarps = (gridDim.x * blockDim.x) / 32; int warpId = tid / WARP_SIZE; int warpIdx = tid % WARP_SIZE; if (half >= 128) { firstPosition = blockIdx.x * window; firstStep = gridDim.x * window; secondPosition = threadIdx.x; secondStep = blockDim.x; } else if (half >= 32) { firstPosition = warpId * window; firstStep = numWarps * window; secondPosition = warpIdx; secondStep = WARP_SIZE; } else { firstPosition = tid * window; firstStep = blockDim.x * gridDim.x * window; secondPosition = 0; secondStep = 1; } for (int i = firstPosition; i < length; i += firstStep) { for (int j = secondPosition; j < half; j += secondStep) { int it = (reverse) ? 
i + j + half : i + window - j - 1; int ij = i+j; if (it < length && ij < length ) { int posIT = shape::getIndexOffset(it, xShapeInfo); int posIJ = shape::getIndexOffset(ij, xShapeInfo); shmem[threadIdx.x] = x[posIJ]; shmem[threadIdx.x + blockDim.x] = x[posIT]; if(!descending == (shmem[threadIdx.x] > shmem[threadIdx.x + blockDim.x])) { x[posIJ] = shmem[threadIdx.x + blockDim.x]; x[posIT] = shmem[threadIdx.x]; } } } } } ////////////////////////////////////////////////////////////////////////// template<typename T> __host__ void bitonicArbitraryStepGeneric(dim3 &launchDims, cudaStream_t *stream, void *vx, Nd4jLong const* xShapeInfo, int window, int length, int reverse, bool descending) { execBitonicArbitraryStepKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(vx, xShapeInfo, window, length, reverse, descending); } template <typename X, typename Y> __host__ void bitonicArbitraryStepGenericKey(dim3 &launchDims, cudaStream_t *stream, void *vx, Nd4jLong const* xShapeInfo, void *vy, Nd4jLong const* yShapeInfo, int window, int length, int reverse, bool descending) { bitonicArbitraryStepKernelKey<X,Y><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, window, length, reverse, descending); } BUILD_SINGLE_TEMPLATE(template void ND4J_LOCAL bitonicArbitraryStepGeneric, (dim3 &launchDims, cudaStream_t *stream, void *vx, Nd4jLong const* xShapeInfo, int window, int length, int reverse, bool descending), LIBND4J_TYPES); BUILD_DOUBLE_TEMPLATE(template void ND4J_LOCAL bitonicArbitraryStepGenericKey, (dim3 &launchDims, cudaStream_t *stream, void *vx, Nd4jLong const* xShapeInfo, void *vy, Nd4jLong const* yShapeInfo, int window, int length, int reverse, bool descending), LIBND4J_TYPES, LIBND4J_TYPES);
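In both bitonic listings above, each (i, j) pair maps to a compare/exchange between ij = i + j and it = reverse ? i + j + half : i + window - j - 1. A small host-only enumeration of those pairs for one window size makes the pattern visible (the values are illustrative only):

// Sketch: which element pairs one bitonic "arbitrary step" compares, per window.
#include <cstdio>

int main() {
  const int length = 16, window = 8, half = window / 2;
  for (int reverse = 0; reverse <= 1; reverse++) {
    printf("reverse=%d:", reverse);
    for (int i = 0; i < length; i += window)
      for (int j = 0; j < half; j++) {
        int ij = i + j;
        int it = reverse ? i + j + half : i + window - j - 1;   // mirrored vs. shifted partner
        if (it < length && ij < length) printf(" (%d,%d)", ij, it);
      }
    printf("\n");
  }
  // reverse=0: (0,7)(1,6)(2,5)(3,4)(8,15)... ; reverse=1: (0,4)(1,5)(2,6)(3,7)(8,12)...
  return 0;
}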
2e26b31117c67e733be033df29165ab943b39194.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "hl_base.h" #include "hl_top_k.h" #include "hl_sparse.ph" #include "paddle/utils/Logging.h" // using namespace hppl; struct Pair { __device__ __forceinline__ Pair() {} __device__ __forceinline__ Pair(real value, int id) : v_(value), id_(id) {} __device__ __forceinline__ void set(real value, int id) { v_ = value; id_ = id; } __device__ __forceinline__ void operator=(const Pair& in) { v_ = in.v_; id_ = in.id_; } __device__ __forceinline__ bool operator<(const real value) const { return (v_ < value); } __device__ __forceinline__ bool operator<(const Pair& in) const { return (v_ < in.v_) || ((v_ == in.v_) && (id_ > in.id_)); } __device__ __forceinline__ bool operator>(const Pair& in) const { return (v_ > in.v_) || ((v_ == in.v_) && (id_ < in.id_)); } real v_; int id_; }; __device__ __forceinline__ void addTo(Pair topK[], const Pair &p, int beamSize) { for (int k = beamSize - 2; k >= 0; k--) { if (topK[k] < p) { topK[k + 1] = topK[k]; } else { topK[k + 1] = p; return; } } topK[0] = p; } template<int beamSize> __device__ __forceinline__ void addTo(Pair topK[], const Pair &p) { for (int k = beamSize - 2; k >= 0; k--) { if (topK[k] < p) { topK[k + 1] = topK[k]; } else { topK[k + 1] = p; return; } } topK[0] = p; } template<int blockSize> __device__ __forceinline__ void getTopK(Pair topK[], real *src, int idx, int dim, int beamSize) { while (idx < dim) { if (topK[beamSize - 1] < src[idx]) { Pair tmp(src[idx], idx); addTo(topK, tmp, beamSize); } idx += blockSize; } } template<int blockSize> __device__ __forceinline__ void getTopK(Pair topK[], real *src, int idx, int dim, const Pair& max, int beamSize) { while (idx < dim) { if (topK[beamSize - 1] < src[idx]) { Pair tmp(src[idx], idx); if (tmp < max) { addTo(topK, tmp, beamSize); } } idx += blockSize; } } template<int blockSize> __device__ __forceinline__ void getTopK(Pair topK[], real *val, int *col, int idx, int dim, int beamSize) { while (idx < dim) { if (topK[beamSize - 1] < val[idx]) { Pair tmp(val[idx], col[idx]); addTo(topK, tmp, beamSize); } idx += blockSize; } } template<int blockSize> __device__ __forceinline__ void getTopK(Pair topK[], real *val, int *col, int idx, int dim, const Pair& max, int beamSize) { while (idx < dim) { if (topK[beamSize - 1] < val[idx]) { Pair tmp(val[idx], col[idx]); if (tmp < max) { addTo(topK, tmp, beamSize); } } idx += blockSize; } } template<int maxLength, int blockSize> __device__ __forceinline__ void threadGetTopK(Pair topK[], int& beam, int beamSize, real* src, bool& firstStep, bool& isEmpty, Pair& max, int dim, const int tid) { if (beam > 0) { int length = beam < beamSize ? 
beam : beamSize; if (firstStep) { firstStep = false; getTopK<blockSize>(topK, src, tid, dim, length); } else { for (int k = 0; k < maxLength; k++) { if (k < maxLength - beam) { topK[k] = topK[k + beam]; } else { topK[k].set(-HL_FLOAT_MAX, -1); } } if (!isEmpty) { getTopK<blockSize>(topK + maxLength - beam, src, tid, dim, max, length); } } max = topK[maxLength - 1]; if (max.id_ == -1) isEmpty = true; beam = 0; } } template<int maxLength, int blockSize> __device__ __forceinline__ void threadGetTopK(Pair topK[], int& beam, int beamSize, real* val, int* col, bool& firstStep, bool& isEmpty, Pair& max, int dim, const int tid) { if (beam > 0) { int length = beam < beamSize ? beam : beamSize; if (firstStep) { firstStep = false; getTopK<blockSize>(topK, val, col, tid, dim, length); } else { for (int k = 0; k < maxLength; k++) { if (k < maxLength - beam) { topK[k] = topK[k + beam]; } else { topK[k].set(-HL_FLOAT_MAX, -1); } } if (!isEmpty) { getTopK<blockSize>(topK + maxLength - beam, val, col, tid, dim, max, length); } } max = topK[maxLength - 1]; if (max.id_ == -1) isEmpty = true; beam = 0; } } template<int maxLength, int blockSize> __device__ __forceinline__ void blockReduce(Pair* shTopK, int* maxId, Pair topK[], real** topVal, int** topIds, int& beam, int& beamSize, const int tid, const int warp) { while (true) { __syncthreads(); if (tid < blockSize / 2) { if (shTopK[tid] < shTopK[tid + blockSize / 2]) { maxId[tid] = tid + blockSize / 2; } else { maxId[tid] = tid; } } __syncthreads(); for (int stride = blockSize / 4; stride > 0; stride = stride/2) { if (tid < stride) { if (shTopK[maxId[tid]] < shTopK[maxId[tid + stride]]) { maxId[tid] = maxId[tid + stride]; } } __syncthreads(); } __syncthreads(); if (tid == 0) { **topVal = shTopK[maxId[0]].v_; **topIds = shTopK[maxId[0]].id_; (*topVal)++; (*topIds)++; } if (tid == maxId[0]) beam++; if (--beamSize == 0) break; __syncthreads(); if (tid == maxId[0]) { if (beam < maxLength) { shTopK[tid] = topK[beam]; } } if (maxId[0] / 32 == warp) { if (__shfl(beam, (maxId[0]) % 32, 32) == maxLength) break; } } } /** * Each block compute one sample. * In a block: * 1. every thread get top maxLength value; * 2. merge to shTopK, block reduce and get max value; * 3. go to the second setp, until one thread's topK value is null; * 4. go to the first setp, until get the topK value. 
*/ template<int maxLength, int blockSize> __global__ void KeMatrixTopK(real* topVal, int ldv, int * topIds, real* src, int lds, int dim, int beamSize) { __shared__ Pair shTopK[blockSize]; __shared__ int maxId[blockSize / 2]; const int tid = threadIdx.x; const int warp = threadIdx.x / 32; src += blockIdx.x * lds; topVal += blockIdx.x * ldv; topIds += blockIdx.x * beamSize; Pair topK[maxLength]; // NOLINT int beam = maxLength; Pair max; bool isEmpty = false; bool firstStep = true; for (int k = 0; k < maxLength; k++) { topK[k].set(-HL_FLOAT_MAX, -1); } while (beamSize) { threadGetTopK<maxLength, blockSize> (topK, beam, beamSize, src, firstStep, isEmpty, max, dim, tid); shTopK[tid] = topK[0]; blockReduce<maxLength, blockSize> (shTopK, maxId, topK, &topVal, &topIds, beam, beamSize, tid, warp); } } template<int maxLength, int blockSize> __global__ void KeSMatrixTopK(real* topVal, int ldv, int * topIds, real* val, int* row, int* col, int beamSize) { __shared__ Pair shTopK[blockSize]; __shared__ int maxId[blockSize / 2]; const int tid = threadIdx.x; const int warp = threadIdx.x / 32; topVal += blockIdx.x * ldv; topIds += blockIdx.x * beamSize; Pair topK[maxLength]; // NOLINT int beam = maxLength; Pair max; bool isEmpty = false; bool firstStep = true; int start = row[blockIdx.x]; int end = row[blockIdx.x + 1]; int dim = end - start; val += start; col += start; if (beamSize > dim) { // if the number of values to sort are less than the output size, // use -1 to indicate the end of valid sorted values. if (tid == 0) { topIds[dim] = -1; } beamSize = dim; } for (int k = 0; k < maxLength; k++) { topK[k].set(-HL_FLOAT_MAX, -1); } while (beamSize) { threadGetTopK<maxLength, blockSize> (topK, beam, beamSize, val, col, firstStep, isEmpty, max, dim, tid); shTopK[tid] = topK[0]; blockReduce<maxLength, blockSize> (shTopK, maxId, topK, &topVal, &topIds, beam, beamSize, tid, warp); } } void hl_matrix_top_k(real* topVal, int ldv, int * topIds, real* src, int lds, int dim, int beamSize, int numSamples) { CHECK_NOTNULL(topVal); CHECK_NOTNULL(topIds); CHECK_NOTNULL(src); if (beamSize > dim) beamSize = dim; dim3 threads(256, 1); dim3 grid(numSamples, 1); hipLaunchKernelGGL(( KeMatrixTopK<5, 256>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT , topVal, ldv, topIds, src, lds, dim, beamSize); CHECK_SYNC("hl_matrix_top_k failed"); } void hl_sparse_matrix_top_k(real* topVal, int ldv, int * topIds, hl_sparse_matrix_s src, int beamSize, int numSamples) { CHECK_NOTNULL(topVal); CHECK_NOTNULL(topIds); CHECK_NOTNULL(src); CHECK_EQ(src->format, HL_SPARSE_CSR) <<"sparse matrix format error!"; hl_csr_matrix csr = (hl_csr_matrix)src->matrix; if (csr->csr_val == NULL || csr->csr_row == NULL || csr->csr_col == NULL) { LOG(FATAL) << "parameter src is null!"; } dim3 threads(256, 1); dim3 grid(numSamples, 1); hipLaunchKernelGGL(( KeSMatrixTopK<5, 256>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT , topVal, ldv, topIds, csr->csr_val, csr->csr_row, csr->csr_col, beamSize); CHECK_SYNC("hl_sparse_matrix_top_k failed"); }
2e26b31117c67e733be033df29165ab943b39194.cu
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "hl_base.h" #include "hl_top_k.h" #include "hl_sparse.ph" #include "paddle/utils/Logging.h" // using namespace hppl; struct Pair { __device__ __forceinline__ Pair() {} __device__ __forceinline__ Pair(real value, int id) : v_(value), id_(id) {} __device__ __forceinline__ void set(real value, int id) { v_ = value; id_ = id; } __device__ __forceinline__ void operator=(const Pair& in) { v_ = in.v_; id_ = in.id_; } __device__ __forceinline__ bool operator<(const real value) const { return (v_ < value); } __device__ __forceinline__ bool operator<(const Pair& in) const { return (v_ < in.v_) || ((v_ == in.v_) && (id_ > in.id_)); } __device__ __forceinline__ bool operator>(const Pair& in) const { return (v_ > in.v_) || ((v_ == in.v_) && (id_ < in.id_)); } real v_; int id_; }; __device__ __forceinline__ void addTo(Pair topK[], const Pair &p, int beamSize) { for (int k = beamSize - 2; k >= 0; k--) { if (topK[k] < p) { topK[k + 1] = topK[k]; } else { topK[k + 1] = p; return; } } topK[0] = p; } template<int beamSize> __device__ __forceinline__ void addTo(Pair topK[], const Pair &p) { for (int k = beamSize - 2; k >= 0; k--) { if (topK[k] < p) { topK[k + 1] = topK[k]; } else { topK[k + 1] = p; return; } } topK[0] = p; } template<int blockSize> __device__ __forceinline__ void getTopK(Pair topK[], real *src, int idx, int dim, int beamSize) { while (idx < dim) { if (topK[beamSize - 1] < src[idx]) { Pair tmp(src[idx], idx); addTo(topK, tmp, beamSize); } idx += blockSize; } } template<int blockSize> __device__ __forceinline__ void getTopK(Pair topK[], real *src, int idx, int dim, const Pair& max, int beamSize) { while (idx < dim) { if (topK[beamSize - 1] < src[idx]) { Pair tmp(src[idx], idx); if (tmp < max) { addTo(topK, tmp, beamSize); } } idx += blockSize; } } template<int blockSize> __device__ __forceinline__ void getTopK(Pair topK[], real *val, int *col, int idx, int dim, int beamSize) { while (idx < dim) { if (topK[beamSize - 1] < val[idx]) { Pair tmp(val[idx], col[idx]); addTo(topK, tmp, beamSize); } idx += blockSize; } } template<int blockSize> __device__ __forceinline__ void getTopK(Pair topK[], real *val, int *col, int idx, int dim, const Pair& max, int beamSize) { while (idx < dim) { if (topK[beamSize - 1] < val[idx]) { Pair tmp(val[idx], col[idx]); if (tmp < max) { addTo(topK, tmp, beamSize); } } idx += blockSize; } } template<int maxLength, int blockSize> __device__ __forceinline__ void threadGetTopK(Pair topK[], int& beam, int beamSize, real* src, bool& firstStep, bool& isEmpty, Pair& max, int dim, const int tid) { if (beam > 0) { int length = beam < beamSize ? 
beam : beamSize; if (firstStep) { firstStep = false; getTopK<blockSize>(topK, src, tid, dim, length); } else { for (int k = 0; k < maxLength; k++) { if (k < maxLength - beam) { topK[k] = topK[k + beam]; } else { topK[k].set(-HL_FLOAT_MAX, -1); } } if (!isEmpty) { getTopK<blockSize>(topK + maxLength - beam, src, tid, dim, max, length); } } max = topK[maxLength - 1]; if (max.id_ == -1) isEmpty = true; beam = 0; } } template<int maxLength, int blockSize> __device__ __forceinline__ void threadGetTopK(Pair topK[], int& beam, int beamSize, real* val, int* col, bool& firstStep, bool& isEmpty, Pair& max, int dim, const int tid) { if (beam > 0) { int length = beam < beamSize ? beam : beamSize; if (firstStep) { firstStep = false; getTopK<blockSize>(topK, val, col, tid, dim, length); } else { for (int k = 0; k < maxLength; k++) { if (k < maxLength - beam) { topK[k] = topK[k + beam]; } else { topK[k].set(-HL_FLOAT_MAX, -1); } } if (!isEmpty) { getTopK<blockSize>(topK + maxLength - beam, val, col, tid, dim, max, length); } } max = topK[maxLength - 1]; if (max.id_ == -1) isEmpty = true; beam = 0; } } template<int maxLength, int blockSize> __device__ __forceinline__ void blockReduce(Pair* shTopK, int* maxId, Pair topK[], real** topVal, int** topIds, int& beam, int& beamSize, const int tid, const int warp) { while (true) { __syncthreads(); if (tid < blockSize / 2) { if (shTopK[tid] < shTopK[tid + blockSize / 2]) { maxId[tid] = tid + blockSize / 2; } else { maxId[tid] = tid; } } __syncthreads(); for (int stride = blockSize / 4; stride > 0; stride = stride/2) { if (tid < stride) { if (shTopK[maxId[tid]] < shTopK[maxId[tid + stride]]) { maxId[tid] = maxId[tid + stride]; } } __syncthreads(); } __syncthreads(); if (tid == 0) { **topVal = shTopK[maxId[0]].v_; **topIds = shTopK[maxId[0]].id_; (*topVal)++; (*topIds)++; } if (tid == maxId[0]) beam++; if (--beamSize == 0) break; __syncthreads(); if (tid == maxId[0]) { if (beam < maxLength) { shTopK[tid] = topK[beam]; } } if (maxId[0] / 32 == warp) { if (__shfl(beam, (maxId[0]) % 32, 32) == maxLength) break; } } } /** * Each block compute one sample. * In a block: * 1. every thread get top maxLength value; * 2. merge to shTopK, block reduce and get max value; * 3. go to the second setp, until one thread's topK value is null; * 4. go to the first setp, until get the topK value. 
*/ template<int maxLength, int blockSize> __global__ void KeMatrixTopK(real* topVal, int ldv, int * topIds, real* src, int lds, int dim, int beamSize) { __shared__ Pair shTopK[blockSize]; __shared__ int maxId[blockSize / 2]; const int tid = threadIdx.x; const int warp = threadIdx.x / 32; src += blockIdx.x * lds; topVal += blockIdx.x * ldv; topIds += blockIdx.x * beamSize; Pair topK[maxLength]; // NOLINT int beam = maxLength; Pair max; bool isEmpty = false; bool firstStep = true; for (int k = 0; k < maxLength; k++) { topK[k].set(-HL_FLOAT_MAX, -1); } while (beamSize) { threadGetTopK<maxLength, blockSize> (topK, beam, beamSize, src, firstStep, isEmpty, max, dim, tid); shTopK[tid] = topK[0]; blockReduce<maxLength, blockSize> (shTopK, maxId, topK, &topVal, &topIds, beam, beamSize, tid, warp); } } template<int maxLength, int blockSize> __global__ void KeSMatrixTopK(real* topVal, int ldv, int * topIds, real* val, int* row, int* col, int beamSize) { __shared__ Pair shTopK[blockSize]; __shared__ int maxId[blockSize / 2]; const int tid = threadIdx.x; const int warp = threadIdx.x / 32; topVal += blockIdx.x * ldv; topIds += blockIdx.x * beamSize; Pair topK[maxLength]; // NOLINT int beam = maxLength; Pair max; bool isEmpty = false; bool firstStep = true; int start = row[blockIdx.x]; int end = row[blockIdx.x + 1]; int dim = end - start; val += start; col += start; if (beamSize > dim) { // if the number of values to sort are less than the output size, // use -1 to indicate the end of valid sorted values. if (tid == 0) { topIds[dim] = -1; } beamSize = dim; } for (int k = 0; k < maxLength; k++) { topK[k].set(-HL_FLOAT_MAX, -1); } while (beamSize) { threadGetTopK<maxLength, blockSize> (topK, beam, beamSize, val, col, firstStep, isEmpty, max, dim, tid); shTopK[tid] = topK[0]; blockReduce<maxLength, blockSize> (shTopK, maxId, topK, &topVal, &topIds, beam, beamSize, tid, warp); } } void hl_matrix_top_k(real* topVal, int ldv, int * topIds, real* src, int lds, int dim, int beamSize, int numSamples) { CHECK_NOTNULL(topVal); CHECK_NOTNULL(topIds); CHECK_NOTNULL(src); if (beamSize > dim) beamSize = dim; dim3 threads(256, 1); dim3 grid(numSamples, 1); KeMatrixTopK<5, 256><<< grid, threads, 0, STREAM_DEFAULT >>> (topVal, ldv, topIds, src, lds, dim, beamSize); CHECK_SYNC("hl_matrix_top_k failed"); } void hl_sparse_matrix_top_k(real* topVal, int ldv, int * topIds, hl_sparse_matrix_s src, int beamSize, int numSamples) { CHECK_NOTNULL(topVal); CHECK_NOTNULL(topIds); CHECK_NOTNULL(src); CHECK_EQ(src->format, HL_SPARSE_CSR) <<"sparse matrix format error!"; hl_csr_matrix csr = (hl_csr_matrix)src->matrix; if (csr->csr_val == NULL || csr->csr_row == NULL || csr->csr_col == NULL) { LOG(FATAL) << "parameter src is null!"; } dim3 threads(256, 1); dim3 grid(numSamples, 1); KeSMatrixTopK<5, 256><<< grid, threads, 0, STREAM_DEFAULT >>> (topVal, ldv, topIds, csr->csr_val, csr->csr_row, csr->csr_col, beamSize); CHECK_SYNC("hl_sparse_matrix_top_k failed"); }
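The top-k kernels above keep a per-thread beam sorted in descending order via addTo(). A host-only sketch of that insertion, simplified to compare values only (the device version's operator< also breaks ties on the index):

// Sketch: insertion into a small descending-sorted top-k buffer, as in addTo() above.
#include <cstdio>

struct Pair { float v; int id; };

void addTo(Pair topK[], Pair p, int beamSize) {
  for (int k = beamSize - 2; k >= 0; k--) {
    if (topK[k].v < p.v) topK[k + 1] = topK[k];   // shift smaller entries down
    else { topK[k + 1] = p; return; }             // insert just below a larger one
  }
  topK[0] = p;                                    // p is the new maximum
}

int main() {
  const int beam = 3;
  Pair topK[beam] = {{-1e30f, -1}, {-1e30f, -1}, {-1e30f, -1}};
  float vals[] = {0.4f, 0.9f, 0.1f, 0.7f};
  for (int i = 0; i < 4; i++)
    if (topK[beam - 1].v < vals[i]) addTo(topK, {vals[i], i}, beam);
  for (int k = 0; k < beam; k++) printf("(%.1f, %d) ", topK[k].v, topK[k].id);
  printf("\n");   // expected: (0.9, 1) (0.7, 3) (0.4, 0)
  return 0;
}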
0b1627324529027bed28b7ee7380de52c8dff878.hip
// !!! This is a file automatically generated by hipify!!!
extern "C" {
#include "fractol.h"
}
#include "fractol.cuh"

__host__ int main(int argc, char **argv)
{
  c_cntrl *cntrl;

  if (!(cntrl = (c_cntrl *)malloc(sizeof(c_cntrl))))
    return (0);
  mlx(cntrl);
  if (args_parse(argc, argv, cntrl) < 0)
    exit(0);
  hipMalloc((int **)&cntrl->d_data_ptr, sizeof(int) * NumberOfPixel);
  draw_call(cntrl);
  key_mouse_control(cntrl);
  mlx_loop(cntrl->mlx);
  return (0);
}
0b1627324529027bed28b7ee7380de52c8dff878.cu
extern "C"{ #include "fractol.h" } #include "fractol.cuh" __host__ int main(int argc, char **argv) { c_cntrl *cntrl; if (!(cntrl = (c_cntrl *)malloc(sizeof(c_cntrl)))) return (0); mlx(cntrl); if (args_parse(argc, argv, cntrl) < 0) exit(0); cudaMalloc((int **)&cntrl->d_data_ptr, sizeof(int) *NumberOfPixel); draw_call(cntrl); key_mouse_control(cntrl); mlx_loop(cntrl->mlx); return (0); }
4801398792c271e3a0e2223bbf69ae1c57a24118.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <thrust/tuple.h> #include <io/parquet/parquet_gpu.hpp> #include <io/utilities/block_utils.cuh> #include <rmm/cuda_stream_view.hpp> namespace cudf { namespace io { namespace parquet { namespace gpu { // Minimal thrift implementation for parsing page headers // https://github.com/apache/thrift/blob/master/doc/specs/thrift-compact-protocol.md static const __device__ __constant__ uint8_t g_list2struct[16] = {0, 1, 2, ST_FLD_BYTE, ST_FLD_DOUBLE, 5, ST_FLD_I16, 7, ST_FLD_I32, 9, ST_FLD_I64, ST_FLD_BINARY, ST_FLD_STRUCT, ST_FLD_MAP, ST_FLD_SET, ST_FLD_LIST}; struct byte_stream_s { const uint8_t *cur; const uint8_t *end; const uint8_t *base; // Parsed symbols PageType page_type; PageInfo page; ColumnChunkDesc ck; }; /** * @brief Get current byte from the byte stream * * @param[in] bs Byte stream * * @return Current byte pointed to by the byte stream */ inline __device__ unsigned int getb(byte_stream_s *bs) { return (bs->cur < bs->end) ? *bs->cur++ : 0; } inline __device__ void skip_bytes(byte_stream_s *bs, size_t bytecnt) { bytecnt = min(bytecnt, (size_t)(bs->end - bs->cur)); bs->cur += bytecnt; } /** * @brief Decode unsigned integer from a byte stream using VarInt encoding * * Concatenate least significant 7 bits of each byte to form a 32 bit * integer. Most significant bit of each byte indicates if more bytes * are to be used to form the number. * * @param[in] bs Byte stream * * @return Decoded 32 bit integer */ __device__ uint32_t get_u32(byte_stream_s *bs) { uint32_t v = 0, l = 0, c; do { c = getb(bs); v |= (c & 0x7f) << l; l += 7; } while (c & 0x80); return v; } /** * @brief Decode signed integer from a byte stream using zigzag encoding * * The number n encountered in a byte stream translates to * -1^(n%2) * ceil(n/2), with the exception of 0 which remains the same. * i.e. 0, 1, 2, 3, 4, 5 etc convert to 0, -1, 1, -2, 2 respectively. 
* * @param[in] bs Byte stream * * @return Decoded 32 bit integer */ inline __device__ int32_t get_i32(byte_stream_s *bs) { uint32_t u = get_u32(bs); return (int32_t)((u >> 1u) ^ -(int32_t)(u & 1)); } __device__ void skip_struct_field(byte_stream_s *bs, int field_type) { int struct_depth = 0; int rep_cnt = 0; do { if (rep_cnt != 0) { rep_cnt--; } else if (struct_depth != 0) { unsigned int c; do { c = getb(bs); if (!c) --struct_depth; } while (!c && struct_depth); if (!struct_depth) break; field_type = c & 0xf; if (!(c & 0xf0)) get_i32(bs); } switch (field_type) { case ST_FLD_TRUE: case ST_FLD_FALSE: break; case ST_FLD_I16: case ST_FLD_I32: case ST_FLD_I64: get_u32(bs); break; case ST_FLD_BYTE: skip_bytes(bs, 1); break; case ST_FLD_DOUBLE: skip_bytes(bs, 8); break; case ST_FLD_BINARY: skip_bytes(bs, get_u32(bs)); break; case ST_FLD_LIST: case ST_FLD_SET: { // NOTE: skipping a list of lists is not handled auto const c = getb(bs); int n = c >> 4; if (n == 0xf) n = get_u32(bs); field_type = g_list2struct[c & 0xf]; if (field_type == ST_FLD_STRUCT) struct_depth += n; else rep_cnt = n; } break; case ST_FLD_STRUCT: struct_depth++; break; } } while (rep_cnt || struct_depth); } /** * @brief Functor to set value to 32 bit integer read from byte stream * * @return True if field type is not int32 */ struct ParquetFieldInt32 { int field; int32_t &val; __device__ ParquetFieldInt32(int f, int32_t &v) : field(f), val(v) {} inline __device__ bool operator()(byte_stream_s *bs, int field_type) { val = get_i32(bs); return (field_type != ST_FLD_I32); } }; /** * @brief Functor to set value to enum read from byte stream * * @return True if field type is not int32 */ template <typename Enum> struct ParquetFieldEnum { int field; Enum &val; __device__ ParquetFieldEnum(int f, Enum &v) : field(f), val(v) {} inline __device__ bool operator()(byte_stream_s *bs, int field_type) { val = static_cast<Enum>(get_i32(bs)); return (field_type != ST_FLD_I32); } }; /** * @brief Functor to run operator on byte stream * * @return True if field type is not struct type or if the calling operator * fails */ template <typename Operator> struct ParquetFieldStruct { int field; Operator op; __device__ ParquetFieldStruct(int f) : field(f) {} inline __device__ bool operator()(byte_stream_s *bs, int field_type) { return ((field_type != ST_FLD_STRUCT) || !op(bs)); } }; /** * @brief Functor to run an operator * * The purpose of this functor is to replace a switch case. If the field in * the argument is equal to the field specified in any element of the tuple * of operators then it is run with the byte stream and field type arguments. * * If the field does not match any of the functors then skip_struct_field is * called over the byte stream. * * @return Return value of the selected operator or false if no operator * matched the field value */ template <int index> struct FunctionSwitchImpl { template <typename... Operator> static inline __device__ bool run(byte_stream_s *bs, int field_type, const int &field, thrust::tuple<Operator...> &ops) { if (field == thrust::get<index>(ops).field) { return thrust::get<index>(ops)(bs, field_type); } else { return FunctionSwitchImpl<index - 1>::run(bs, field_type, field, ops); } } }; template <> struct FunctionSwitchImpl<0> { template <typename... 
Operator> static inline __device__ bool run(byte_stream_s *bs, int field_type, const int &field, thrust::tuple<Operator...> &ops) { if (field == thrust::get<0>(ops).field) { return thrust::get<0>(ops)(bs, field_type); } else { skip_struct_field(bs, field_type); return false; } } }; /** * @brief Function to parse page header based on the tuple of functors provided * * Bytes are read from the byte stream and the field delta and field type are * matched up against user supplied reading functors. If they match then the * corresponding values are written to references pointed to by the functors. * * @return Returns false if an unexpected field is encountered while reading * byte stream. Otherwise true is returned. */ template <typename... Operator> inline __device__ bool parse_header(thrust::tuple<Operator...> &op, byte_stream_s *bs) { constexpr int index = thrust::tuple_size<thrust::tuple<Operator...>>::value - 1; int field = 0; while (true) { auto const current_byte = getb(bs); if (!current_byte) break; int const field_delta = current_byte >> 4; int const field_type = current_byte & 0xf; field = field_delta ? field + field_delta : get_i32(bs); bool exit_function = FunctionSwitchImpl<index>::run(bs, field_type, field, op); if (exit_function) { return false; } } return true; } struct gpuParseDataPageHeader { __device__ bool operator()(byte_stream_s *bs) { auto op = thrust::make_tuple(ParquetFieldInt32(1, bs->page.num_input_values), ParquetFieldEnum<Encoding>(2, bs->page.encoding), ParquetFieldEnum<Encoding>(3, bs->page.definition_level_encoding), ParquetFieldEnum<Encoding>(4, bs->page.repetition_level_encoding)); return parse_header(op, bs); } }; struct gpuParseDictionaryPageHeader { __device__ bool operator()(byte_stream_s *bs) { auto op = thrust::make_tuple(ParquetFieldInt32(1, bs->page.num_input_values), ParquetFieldEnum<Encoding>(2, bs->page.encoding)); return parse_header(op, bs); } }; struct gpuParseDataPageHeaderV2 { __device__ bool operator()(byte_stream_s *bs) { auto op = thrust::make_tuple(ParquetFieldInt32(1, bs->page.num_input_values), ParquetFieldInt32(3, bs->page.num_rows), ParquetFieldEnum<Encoding>(4, bs->page.encoding), ParquetFieldEnum<Encoding>(5, bs->page.definition_level_encoding), ParquetFieldEnum<Encoding>(6, bs->page.repetition_level_encoding)); return parse_header(op, bs); } }; struct gpuParsePageHeader { __device__ bool operator()(byte_stream_s *bs) { auto op = thrust::make_tuple(ParquetFieldEnum<PageType>(1, bs->page_type), ParquetFieldInt32(2, bs->page.uncompressed_page_size), ParquetFieldInt32(3, bs->page.compressed_page_size), ParquetFieldStruct<gpuParseDataPageHeader>(5), ParquetFieldStruct<gpuParseDictionaryPageHeader>(7), ParquetFieldStruct<gpuParseDataPageHeaderV2>(8)); return parse_header(op, bs); } }; /** * @brief Kernel for outputting page headers from the specified column chunks * * @param[in] chunks List of column chunks * @param[in] num_chunks Number of column chunks */ // blockDim {128,1,1} extern "C" __global__ void __launch_bounds__(128) gpuDecodePageHeaders(ColumnChunkDesc *chunks, int32_t num_chunks) { gpuParsePageHeader parse_page_header; __shared__ byte_stream_s bs_g[4]; int lane_id = threadIdx.x % 32; int chunk = (blockIdx.x * 4) + (threadIdx.x / 32); byte_stream_s *const bs = &bs_g[threadIdx.x / 32]; if (chunk < num_chunks and lane_id == 0) bs->ck = chunks[chunk]; __syncthreads(); if (chunk < num_chunks) { size_t num_values, values_found; uint32_t data_page_count = 0; uint32_t dictionary_page_count = 0; int32_t max_num_pages; int32_t 
num_dict_pages = bs->ck.num_dict_pages; PageInfo *page_info; if (!lane_id) { bs->base = bs->cur = bs->ck.compressed_data; bs->end = bs->base + bs->ck.compressed_size; bs->page.chunk_idx = chunk; bs->page.src_col_schema = bs->ck.src_col_schema; // this computation is only valid for flat schemas. for nested schemas, // they will be recomputed in the preprocess step by examining repetition and // definition levels bs->page.chunk_row = 0; bs->page.num_rows = 0; } num_values = bs->ck.num_values; page_info = bs->ck.page_info; num_dict_pages = bs->ck.num_dict_pages; max_num_pages = (page_info) ? bs->ck.max_num_pages : 0; values_found = 0; __syncwarp(); while (values_found < num_values && bs->cur < bs->end) { int index_out = -1; if (lane_id == 0) { // this computation is only valid for flat schemas. for nested schemas, // they will be recomputed in the preprocess step by examining repetition and // definition levels bs->page.chunk_row += bs->page.num_rows; bs->page.num_rows = 0; if (parse_page_header(bs) && bs->page.compressed_page_size >= 0) { switch (bs->page_type) { case PageType::DATA_PAGE: // this computation is only valid for flat schemas. for nested schemas, // they will be recomputed in the preprocess step by examining repetition and // definition levels bs->page.num_rows = bs->page.num_input_values; case PageType::DATA_PAGE_V2: index_out = num_dict_pages + data_page_count; data_page_count++; bs->page.flags = 0; values_found += bs->page.num_input_values; break; case PageType::DICTIONARY_PAGE: index_out = dictionary_page_count; dictionary_page_count++; bs->page.flags = PAGEINFO_FLAGS_DICTIONARY; break; default: index_out = -1; break; } bs->page.page_data = const_cast<uint8_t *>(bs->cur); bs->cur += bs->page.compressed_page_size; } else { bs->cur = bs->end; } } index_out = shuffle(index_out); if (index_out >= 0 && index_out < max_num_pages && lane_id == 0) page_info[index_out] = bs->page; num_values = shuffle(num_values); __syncwarp(); } if (lane_id == 0) { chunks[chunk].num_data_pages = data_page_count; chunks[chunk].num_dict_pages = dictionary_page_count; } } } /** * @brief Kernel for building dictionary index for the specified column chunks * * This function builds an index to point to each dictionary entry * (string format is 4-byte little-endian string length followed by character * data). The index is a 32-bit integer which contains the offset of each string * relative to the beginning of the dictionary page data. 
* * @param[in] chunks List of column chunks * @param[in] num_chunks Number of column chunks */ // blockDim {128,1,1} extern "C" __global__ void __launch_bounds__(128) gpuBuildStringDictionaryIndex(ColumnChunkDesc *chunks, int32_t num_chunks) { __shared__ ColumnChunkDesc chunk_g[4]; int lane_id = threadIdx.x % 32; int chunk = (blockIdx.x * 4) + (threadIdx.x / 32); ColumnChunkDesc *const ck = &chunk_g[threadIdx.x / 32]; if (chunk < num_chunks and lane_id == 0) *ck = chunks[chunk]; __syncthreads(); if (chunk >= num_chunks) { return; } if (!lane_id && ck->num_dict_pages > 0 && ck->str_dict_index) { // Data type to describe a string nvstrdesc_s *dict_index = ck->str_dict_index; const uint8_t *dict = ck->page_info[0].page_data; int dict_size = ck->page_info[0].uncompressed_page_size; int num_entries = ck->page_info[0].num_input_values; int pos = 0, cur = 0; for (int i = 0; i < num_entries; i++) { int len = 0; if (cur + 4 <= dict_size) { len = dict[cur + 0] | (dict[cur + 1] << 8) | (dict[cur + 2] << 16) | (dict[cur + 3] << 24); if (len >= 0 && cur + 4 + len <= dict_size) { pos = cur; cur = cur + 4 + len; } else { cur = dict_size; } } // TODO: Could store 8 entries in shared mem, then do a single warp-wide store dict_index[i].ptr = reinterpret_cast<const char *>(dict + pos + 4); dict_index[i].count = len; } } } void __host__ DecodePageHeaders(ColumnChunkDesc *chunks, int32_t num_chunks, rmm::cuda_stream_view stream) { dim3 dim_block(128, 1); dim3 dim_grid((num_chunks + 3) >> 2, 1); // 1 chunk per warp, 4 warps per block hipLaunchKernelGGL(( gpuDecodePageHeaders), dim3(dim_grid), dim3(dim_block), 0, stream.value(), chunks, num_chunks); } void __host__ BuildStringDictionaryIndex(ColumnChunkDesc *chunks, int32_t num_chunks, rmm::cuda_stream_view stream) { dim3 dim_block(128, 1); dim3 dim_grid((num_chunks + 3) >> 2, 1); // 1 chunk per warp, 4 warps per block hipLaunchKernelGGL(( gpuBuildStringDictionaryIndex), dim3(dim_grid), dim3(dim_block), 0, stream.value(), chunks, num_chunks); } } // namespace gpu } // namespace parquet } // namespace io } // namespace cudf
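The page-header parser above reads the thrift compact protocol: get_u32 decodes a base-128 varint and get_i32 undoes zigzag encoding on top of it. A host-only sketch of both decoders with one small worked buffer (names and buffer contents are illustrative, not taken from a real Parquet file):

// Sketch: varint and zigzag decoding as used by the page-header parser above.
#include <cstdio>
#include <cstdint>

static const uint8_t* cur;

uint32_t get_u32() {                 // little-endian base-128 varint
  uint32_t v = 0, l = 0, c;
  do { c = *cur++; v |= (c & 0x7f) << l; l += 7; } while (c & 0x80);
  return v;
}

int32_t get_i32() {                  // zigzag: 0,1,2,3,... -> 0,-1,1,-2,...
  uint32_t u = get_u32();
  return (int32_t)((u >> 1u) ^ -(int32_t)(u & 1));
}

int main() {
  // 0x96 0x01 is the varint for 150; 0x03 zigzag-decodes to -2.
  const uint8_t buf[] = {0x96, 0x01, 0x03};
  cur = buf;
  printf("varint: %u\n", get_u32());   // 150
  printf("zigzag: %d\n", get_i32());   // -2
  return 0;
}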
4801398792c271e3a0e2223bbf69ae1c57a24118.cu
/* * Copyright (c) 2018-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <thrust/tuple.h> #include <io/parquet/parquet_gpu.hpp> #include <io/utilities/block_utils.cuh> #include <rmm/cuda_stream_view.hpp> namespace cudf { namespace io { namespace parquet { namespace gpu { // Minimal thrift implementation for parsing page headers // https://github.com/apache/thrift/blob/master/doc/specs/thrift-compact-protocol.md static const __device__ __constant__ uint8_t g_list2struct[16] = {0, 1, 2, ST_FLD_BYTE, ST_FLD_DOUBLE, 5, ST_FLD_I16, 7, ST_FLD_I32, 9, ST_FLD_I64, ST_FLD_BINARY, ST_FLD_STRUCT, ST_FLD_MAP, ST_FLD_SET, ST_FLD_LIST}; struct byte_stream_s { const uint8_t *cur; const uint8_t *end; const uint8_t *base; // Parsed symbols PageType page_type; PageInfo page; ColumnChunkDesc ck; }; /** * @brief Get current byte from the byte stream * * @param[in] bs Byte stream * * @return Current byte pointed to by the byte stream */ inline __device__ unsigned int getb(byte_stream_s *bs) { return (bs->cur < bs->end) ? *bs->cur++ : 0; } inline __device__ void skip_bytes(byte_stream_s *bs, size_t bytecnt) { bytecnt = min(bytecnt, (size_t)(bs->end - bs->cur)); bs->cur += bytecnt; } /** * @brief Decode unsigned integer from a byte stream using VarInt encoding * * Concatenate least significant 7 bits of each byte to form a 32 bit * integer. Most significant bit of each byte indicates if more bytes * are to be used to form the number. * * @param[in] bs Byte stream * * @return Decoded 32 bit integer */ __device__ uint32_t get_u32(byte_stream_s *bs) { uint32_t v = 0, l = 0, c; do { c = getb(bs); v |= (c & 0x7f) << l; l += 7; } while (c & 0x80); return v; } /** * @brief Decode signed integer from a byte stream using zigzag encoding * * The number n encountered in a byte stream translates to * -1^(n%2) * ceil(n/2), with the exception of 0 which remains the same. * i.e. 0, 1, 2, 3, 4, 5 etc convert to 0, -1, 1, -2, 2 respectively. 
* * @param[in] bs Byte stream * * @return Decoded 32 bit integer */ inline __device__ int32_t get_i32(byte_stream_s *bs) { uint32_t u = get_u32(bs); return (int32_t)((u >> 1u) ^ -(int32_t)(u & 1)); } __device__ void skip_struct_field(byte_stream_s *bs, int field_type) { int struct_depth = 0; int rep_cnt = 0; do { if (rep_cnt != 0) { rep_cnt--; } else if (struct_depth != 0) { unsigned int c; do { c = getb(bs); if (!c) --struct_depth; } while (!c && struct_depth); if (!struct_depth) break; field_type = c & 0xf; if (!(c & 0xf0)) get_i32(bs); } switch (field_type) { case ST_FLD_TRUE: case ST_FLD_FALSE: break; case ST_FLD_I16: case ST_FLD_I32: case ST_FLD_I64: get_u32(bs); break; case ST_FLD_BYTE: skip_bytes(bs, 1); break; case ST_FLD_DOUBLE: skip_bytes(bs, 8); break; case ST_FLD_BINARY: skip_bytes(bs, get_u32(bs)); break; case ST_FLD_LIST: case ST_FLD_SET: { // NOTE: skipping a list of lists is not handled auto const c = getb(bs); int n = c >> 4; if (n == 0xf) n = get_u32(bs); field_type = g_list2struct[c & 0xf]; if (field_type == ST_FLD_STRUCT) struct_depth += n; else rep_cnt = n; } break; case ST_FLD_STRUCT: struct_depth++; break; } } while (rep_cnt || struct_depth); } /** * @brief Functor to set value to 32 bit integer read from byte stream * * @return True if field type is not int32 */ struct ParquetFieldInt32 { int field; int32_t &val; __device__ ParquetFieldInt32(int f, int32_t &v) : field(f), val(v) {} inline __device__ bool operator()(byte_stream_s *bs, int field_type) { val = get_i32(bs); return (field_type != ST_FLD_I32); } }; /** * @brief Functor to set value to enum read from byte stream * * @return True if field type is not int32 */ template <typename Enum> struct ParquetFieldEnum { int field; Enum &val; __device__ ParquetFieldEnum(int f, Enum &v) : field(f), val(v) {} inline __device__ bool operator()(byte_stream_s *bs, int field_type) { val = static_cast<Enum>(get_i32(bs)); return (field_type != ST_FLD_I32); } }; /** * @brief Functor to run operator on byte stream * * @return True if field type is not struct type or if the calling operator * fails */ template <typename Operator> struct ParquetFieldStruct { int field; Operator op; __device__ ParquetFieldStruct(int f) : field(f) {} inline __device__ bool operator()(byte_stream_s *bs, int field_type) { return ((field_type != ST_FLD_STRUCT) || !op(bs)); } }; /** * @brief Functor to run an operator * * The purpose of this functor is to replace a switch case. If the field in * the argument is equal to the field specified in any element of the tuple * of operators then it is run with the byte stream and field type arguments. * * If the field does not match any of the functors then skip_struct_field is * called over the byte stream. * * @return Return value of the selected operator or false if no operator * matched the field value */ template <int index> struct FunctionSwitchImpl { template <typename... Operator> static inline __device__ bool run(byte_stream_s *bs, int field_type, const int &field, thrust::tuple<Operator...> &ops) { if (field == thrust::get<index>(ops).field) { return thrust::get<index>(ops)(bs, field_type); } else { return FunctionSwitchImpl<index - 1>::run(bs, field_type, field, ops); } } }; template <> struct FunctionSwitchImpl<0> { template <typename... 
Operator> static inline __device__ bool run(byte_stream_s *bs, int field_type, const int &field, thrust::tuple<Operator...> &ops) { if (field == thrust::get<0>(ops).field) { return thrust::get<0>(ops)(bs, field_type); } else { skip_struct_field(bs, field_type); return false; } } }; /** * @brief Function to parse page header based on the tuple of functors provided * * Bytes are read from the byte stream and the field delta and field type are * matched up against user supplied reading functors. If they match then the * corresponding values are written to references pointed to by the functors. * * @return Returns false if an unexpected field is encountered while reading * byte stream. Otherwise true is returned. */ template <typename... Operator> inline __device__ bool parse_header(thrust::tuple<Operator...> &op, byte_stream_s *bs) { constexpr int index = thrust::tuple_size<thrust::tuple<Operator...>>::value - 1; int field = 0; while (true) { auto const current_byte = getb(bs); if (!current_byte) break; int const field_delta = current_byte >> 4; int const field_type = current_byte & 0xf; field = field_delta ? field + field_delta : get_i32(bs); bool exit_function = FunctionSwitchImpl<index>::run(bs, field_type, field, op); if (exit_function) { return false; } } return true; } struct gpuParseDataPageHeader { __device__ bool operator()(byte_stream_s *bs) { auto op = thrust::make_tuple(ParquetFieldInt32(1, bs->page.num_input_values), ParquetFieldEnum<Encoding>(2, bs->page.encoding), ParquetFieldEnum<Encoding>(3, bs->page.definition_level_encoding), ParquetFieldEnum<Encoding>(4, bs->page.repetition_level_encoding)); return parse_header(op, bs); } }; struct gpuParseDictionaryPageHeader { __device__ bool operator()(byte_stream_s *bs) { auto op = thrust::make_tuple(ParquetFieldInt32(1, bs->page.num_input_values), ParquetFieldEnum<Encoding>(2, bs->page.encoding)); return parse_header(op, bs); } }; struct gpuParseDataPageHeaderV2 { __device__ bool operator()(byte_stream_s *bs) { auto op = thrust::make_tuple(ParquetFieldInt32(1, bs->page.num_input_values), ParquetFieldInt32(3, bs->page.num_rows), ParquetFieldEnum<Encoding>(4, bs->page.encoding), ParquetFieldEnum<Encoding>(5, bs->page.definition_level_encoding), ParquetFieldEnum<Encoding>(6, bs->page.repetition_level_encoding)); return parse_header(op, bs); } }; struct gpuParsePageHeader { __device__ bool operator()(byte_stream_s *bs) { auto op = thrust::make_tuple(ParquetFieldEnum<PageType>(1, bs->page_type), ParquetFieldInt32(2, bs->page.uncompressed_page_size), ParquetFieldInt32(3, bs->page.compressed_page_size), ParquetFieldStruct<gpuParseDataPageHeader>(5), ParquetFieldStruct<gpuParseDictionaryPageHeader>(7), ParquetFieldStruct<gpuParseDataPageHeaderV2>(8)); return parse_header(op, bs); } }; /** * @brief Kernel for outputting page headers from the specified column chunks * * @param[in] chunks List of column chunks * @param[in] num_chunks Number of column chunks */ // blockDim {128,1,1} extern "C" __global__ void __launch_bounds__(128) gpuDecodePageHeaders(ColumnChunkDesc *chunks, int32_t num_chunks) { gpuParsePageHeader parse_page_header; __shared__ byte_stream_s bs_g[4]; int lane_id = threadIdx.x % 32; int chunk = (blockIdx.x * 4) + (threadIdx.x / 32); byte_stream_s *const bs = &bs_g[threadIdx.x / 32]; if (chunk < num_chunks and lane_id == 0) bs->ck = chunks[chunk]; __syncthreads(); if (chunk < num_chunks) { size_t num_values, values_found; uint32_t data_page_count = 0; uint32_t dictionary_page_count = 0; int32_t max_num_pages; int32_t 
num_dict_pages = bs->ck.num_dict_pages; PageInfo *page_info; if (!lane_id) { bs->base = bs->cur = bs->ck.compressed_data; bs->end = bs->base + bs->ck.compressed_size; bs->page.chunk_idx = chunk; bs->page.src_col_schema = bs->ck.src_col_schema; // this computation is only valid for flat schemas. for nested schemas, // they will be recomputed in the preprocess step by examining repetition and // definition levels bs->page.chunk_row = 0; bs->page.num_rows = 0; } num_values = bs->ck.num_values; page_info = bs->ck.page_info; num_dict_pages = bs->ck.num_dict_pages; max_num_pages = (page_info) ? bs->ck.max_num_pages : 0; values_found = 0; __syncwarp(); while (values_found < num_values && bs->cur < bs->end) { int index_out = -1; if (lane_id == 0) { // this computation is only valid for flat schemas. for nested schemas, // they will be recomputed in the preprocess step by examining repetition and // definition levels bs->page.chunk_row += bs->page.num_rows; bs->page.num_rows = 0; if (parse_page_header(bs) && bs->page.compressed_page_size >= 0) { switch (bs->page_type) { case PageType::DATA_PAGE: // this computation is only valid for flat schemas. for nested schemas, // they will be recomputed in the preprocess step by examining repetition and // definition levels bs->page.num_rows = bs->page.num_input_values; case PageType::DATA_PAGE_V2: index_out = num_dict_pages + data_page_count; data_page_count++; bs->page.flags = 0; values_found += bs->page.num_input_values; break; case PageType::DICTIONARY_PAGE: index_out = dictionary_page_count; dictionary_page_count++; bs->page.flags = PAGEINFO_FLAGS_DICTIONARY; break; default: index_out = -1; break; } bs->page.page_data = const_cast<uint8_t *>(bs->cur); bs->cur += bs->page.compressed_page_size; } else { bs->cur = bs->end; } } index_out = shuffle(index_out); if (index_out >= 0 && index_out < max_num_pages && lane_id == 0) page_info[index_out] = bs->page; num_values = shuffle(num_values); __syncwarp(); } if (lane_id == 0) { chunks[chunk].num_data_pages = data_page_count; chunks[chunk].num_dict_pages = dictionary_page_count; } } } /** * @brief Kernel for building dictionary index for the specified column chunks * * This function builds an index to point to each dictionary entry * (string format is 4-byte little-endian string length followed by character * data). The index is a 32-bit integer which contains the offset of each string * relative to the beginning of the dictionary page data. 
* * @param[in] chunks List of column chunks * @param[in] num_chunks Number of column chunks */ // blockDim {128,1,1} extern "C" __global__ void __launch_bounds__(128) gpuBuildStringDictionaryIndex(ColumnChunkDesc *chunks, int32_t num_chunks) { __shared__ ColumnChunkDesc chunk_g[4]; int lane_id = threadIdx.x % 32; int chunk = (blockIdx.x * 4) + (threadIdx.x / 32); ColumnChunkDesc *const ck = &chunk_g[threadIdx.x / 32]; if (chunk < num_chunks and lane_id == 0) *ck = chunks[chunk]; __syncthreads(); if (chunk >= num_chunks) { return; } if (!lane_id && ck->num_dict_pages > 0 && ck->str_dict_index) { // Data type to describe a string nvstrdesc_s *dict_index = ck->str_dict_index; const uint8_t *dict = ck->page_info[0].page_data; int dict_size = ck->page_info[0].uncompressed_page_size; int num_entries = ck->page_info[0].num_input_values; int pos = 0, cur = 0; for (int i = 0; i < num_entries; i++) { int len = 0; if (cur + 4 <= dict_size) { len = dict[cur + 0] | (dict[cur + 1] << 8) | (dict[cur + 2] << 16) | (dict[cur + 3] << 24); if (len >= 0 && cur + 4 + len <= dict_size) { pos = cur; cur = cur + 4 + len; } else { cur = dict_size; } } // TODO: Could store 8 entries in shared mem, then do a single warp-wide store dict_index[i].ptr = reinterpret_cast<const char *>(dict + pos + 4); dict_index[i].count = len; } } } void __host__ DecodePageHeaders(ColumnChunkDesc *chunks, int32_t num_chunks, rmm::cuda_stream_view stream) { dim3 dim_block(128, 1); dim3 dim_grid((num_chunks + 3) >> 2, 1); // 1 chunk per warp, 4 warps per block gpuDecodePageHeaders<<<dim_grid, dim_block, 0, stream.value()>>>(chunks, num_chunks); } void __host__ BuildStringDictionaryIndex(ColumnChunkDesc *chunks, int32_t num_chunks, rmm::cuda_stream_view stream) { dim3 dim_block(128, 1); dim3 dim_grid((num_chunks + 3) >> 2, 1); // 1 chunk per warp, 4 warps per block gpuBuildStringDictionaryIndex<<<dim_grid, dim_block, 0, stream.value()>>>(chunks, num_chunks); } } // namespace gpu } // namespace parquet } // namespace io } // namespace cudf
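For reference, the VarInt and zigzag decoding performed by get_u32() and get_i32() above can be exercised on the host. The sketch below (the encode_varint helper exists only to produce test bytes and is not part of the reader) prints the mapping 0,1,2,3,4,5 -> 0,-1,1,-2,2,-3 and round-trips one multi-byte varint.

#include <cstdint>
#include <cstdio>
#include <vector>

// Produce the 7-bits-per-byte encoding, least significant group first.
static std::vector<uint8_t> encode_varint(uint32_t v) {
  std::vector<uint8_t> out;
  do { uint8_t b = v & 0x7f; v >>= 7; if (v) b |= 0x80; out.push_back(b); } while (v);
  return out;
}

// Same loop as get_u32(): accumulate 7 bits per byte until the MSB is clear.
static uint32_t decode_varint(const uint8_t *&cur) {
  uint32_t v = 0, shift = 0, c;
  do { c = *cur++; v |= (c & 0x7f) << shift; shift += 7; } while (c & 0x80);
  return v;
}

// Same expression as get_i32(): zigzag-decode an unsigned value to signed.
static int32_t zigzag_decode(uint32_t u) { return (int32_t)((u >> 1u) ^ -(int32_t)(u & 1)); }

int main() {
  for (uint32_t u = 0; u < 6; u++)                      // prints 0,-1,1,-2,2,-3
    printf("%u -> %d\n", u, zigzag_decode(u));
  auto bytes = encode_varint(300);                      // 300 encodes as 0xAC 0x02
  const uint8_t *p = bytes.data();
  printf("varint round trip: %u\n", decode_varint(p));  // prints 300
  return 0;
}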
3b9632ea84f00e27075c6f1d8d2604569d3cdb57.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "basis_cuda.h" #include <ATen/hip/HIPContext.h> #include "utils.cuh" #define THREADS 1024 #define BLOCKS(N) (N + THREADS - 1) / THREADS template <typename scalar_t, int64_t degree> struct Basis { static inline __device__ scalar_t forward(scalar_t v, int64_t k_mod) { if (degree == 1) { return 1. - v - k_mod + 2. * v * k_mod; } else if (degree == 2) { if (k_mod == 0) return 0.5 * v * v - v + 0.5; else if (k_mod == 1) return -v * v + v + 0.5; else return 0.5 * v * v; } else if (degree == 3) { if (k_mod == 0) return (1. - v) * (1. - v) * (1. - v) / 6.; else if (k_mod == 1) return (3. * v * v * v - 6. * v * v + 4.) / 6.; else if (k_mod == 2) return (-3. * v * v * v + 3. * v * v + 3. * v + 1.) / 6.; else return v * v * v / 6.; } else { return (scalar_t)-1.; } } static inline __device__ scalar_t backward(scalar_t v, int64_t k_mod) { if (degree == 1) { return 2 * k_mod - 1; } else if (degree == 2) { if (k_mod == 0) return v - 1.; else if (k_mod == 1) return -2. * v + 1.; else return v; } else if (degree == 3) { if (k_mod == 0) return (-v * v + 2. * v - 1.) / 2.; else if (k_mod == 1) return (3. * v * v - 4. * v) / 2.; else if (k_mod == 2) return (-3. * v * v + 2. * v + 1.) / 2.; else return v * v / 2.; } else { return (scalar_t)-1.; } } }; template <typename scalar_t, int64_t degree> __global__ void spline_basis_fw_kernel(const scalar_t *pseudo, const int64_t *kernel_size, const uint8_t *is_open_spline, scalar_t *basis, int64_t *weight_index, int64_t E, int64_t D, int64_t S, int64_t numel) { const int64_t thread_idx = blockIdx.x * blockDim.x + threadIdx.x; const int64_t e = thread_idx / S; const int64_t s = thread_idx % S; if (thread_idx < numel) { int64_t k = s, wi = 0, wi_offset = 1; scalar_t b = (scalar_t)1.; for (int64_t d = 0; d < D; d++) { const int64_t k_mod = k % (degree + 1); k /= degree + 1; scalar_t v = pseudo[e * D + d]; v *= kernel_size[d] - degree * is_open_spline[d]; wi += (((int64_t)v + k_mod) % kernel_size[d]) * wi_offset; wi_offset *= kernel_size[d]; v -= floor(v); v = Basis<scalar_t, degree>::forward(v, k_mod); b *= v; } basis[thread_idx] = b; weight_index[thread_idx] = wi; } } std::tuple<torch::Tensor, torch::Tensor> spline_basis_fw_cuda(torch::Tensor pseudo, torch::Tensor kernel_size, torch::Tensor is_open_spline, int64_t degree) { CHECK_CUDA(pseudo); CHECK_CUDA(kernel_size); CHECK_CUDA(is_open_spline); hipSetDevice(pseudo.get_device()); CHECK_INPUT(kernel_size.dim() == 1); CHECK_INPUT(pseudo.size(1) == kernel_size.numel()); CHECK_INPUT(is_open_spline.dim()); CHECK_INPUT(pseudo.size(1) == is_open_spline.numel()); auto E = pseudo.size(0); auto D = pseudo.size(1); auto S = (int64_t)(powf(degree + 1, D) + 0.5); auto basis = at::empty({E, S}, pseudo.options()); auto weight_index = at::empty({E, S}, kernel_size.options()); auto kernel_size_data = kernel_size.data_ptr<int64_t>(); auto is_open_spline_data = is_open_spline.data_ptr<uint8_t>(); auto weight_index_data = weight_index.data_ptr<int64_t>(); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES(pseudo.scalar_type(), "basis_fw", [&] { auto pseudo_data = pseudo.data_ptr<scalar_t>(); auto basis_data = basis.data_ptr<scalar_t>(); AT_DISPATCH_DEGREE_TYPES(degree, [&] { hipLaunchKernelGGL(( spline_basis_fw_kernel<scalar_t, DEGREE>) , dim3(BLOCKS(basis.numel())), dim3(THREADS), 0, stream, pseudo_data, kernel_size_data, is_open_spline_data, basis_data, weight_index_data, E, D, S, basis.numel()); }); 
}); return std::make_tuple(basis, weight_index); } template <typename scalar_t, int64_t degree> __global__ void spline_basis_bw_kernel(const scalar_t *grad_basis, const scalar_t *pseudo, const int64_t *kernel_size, const uint8_t *is_open_spline, scalar_t *grad_pseudo, int64_t E, int64_t D, int64_t S, int64_t numel) { const int64_t thread_idx = blockIdx.x * blockDim.x + threadIdx.x; const int64_t e = thread_idx / D; const int64_t d = thread_idx % D; if (thread_idx < numel) { scalar_t g = (scalar_t)0., tmp; for (ptrdiff_t s = 0; s < S; s++) { int64_t k_mod = (s / (int64_t)(powf(degree + 1, d) + 0.5)) % (degree + 1); scalar_t v = pseudo[e * D + d]; v *= kernel_size[d] - degree * is_open_spline[d]; v -= floor(v); v = Basis<scalar_t, degree>::backward(v, k_mod); tmp = v; for (int64_t d_it = 1; d_it < D; d_it++) { const int64_t d_new = d_it - (d >= d_it); k_mod = (s / (int64_t)(powf(degree + 1, d_new) + 0.5)) % (degree + 1); v = pseudo[e * D + d_new]; v *= kernel_size[d_new] - degree * is_open_spline[d_new]; v -= floor(v); v = Basis<scalar_t, degree>::forward(v, k_mod); tmp *= v; } g += tmp * grad_basis[e * S + s]; } g *= kernel_size[d] - degree * is_open_spline[d]; grad_pseudo[thread_idx] = g; } } torch::Tensor spline_basis_bw_cuda(torch::Tensor grad_basis, torch::Tensor pseudo, torch::Tensor kernel_size, torch::Tensor is_open_spline, int64_t degree) { CHECK_CUDA(grad_basis); CHECK_CUDA(pseudo); CHECK_CUDA(kernel_size); CHECK_CUDA(is_open_spline); hipSetDevice(grad_basis.get_device()); CHECK_INPUT(grad_basis.size(0) == pseudo.size(0)); CHECK_INPUT(kernel_size.dim() == 1); CHECK_INPUT(pseudo.size(1) == kernel_size.numel()); CHECK_INPUT(is_open_spline.dim()); CHECK_INPUT(pseudo.size(1) == is_open_spline.numel()); auto E = pseudo.size(0); auto D = pseudo.size(1); auto S = grad_basis.size(1); auto grad_pseudo = at::empty({E, D}, pseudo.options()); auto kernel_size_data = kernel_size.data_ptr<int64_t>(); auto is_open_spline_data = is_open_spline.data_ptr<uint8_t>(); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES(pseudo.scalar_type(), "basis_bw", [&] { auto grad_basis_data = grad_basis.data_ptr<scalar_t>(); auto pseudo_data = pseudo.data_ptr<scalar_t>(); auto grad_pseudo_data = grad_pseudo.data_ptr<scalar_t>(); AT_DISPATCH_DEGREE_TYPES(degree, [&] { hipLaunchKernelGGL(( spline_basis_bw_kernel<scalar_t, DEGREE>) , dim3(BLOCKS(grad_pseudo.numel())), dim3(THREADS), 0, stream, grad_basis_data, pseudo_data, kernel_size_data, is_open_spline_data, grad_pseudo_data, E, D, S, grad_pseudo.numel()); }); }); return grad_pseudo; }
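Since the HIP and CUDA versions of Basis<scalar_t, degree>::forward() share the same closed-form pieces, a quick host-side check shows that the (degree + 1) local weights always sum to one on [0, 1). The basis() helper below simply restates those formulas for this sketch and is not part of the extension.

#include <cstdio>

static double basis(int degree, double v, int k_mod) {
  if (degree == 1) return 1. - v - k_mod + 2. * v * k_mod;
  if (degree == 2) {
    if (k_mod == 0) return 0.5 * v * v - v + 0.5;
    if (k_mod == 1) return -v * v + v + 0.5;
    return 0.5 * v * v;
  }
  // degree == 3
  if (k_mod == 0) return (1. - v) * (1. - v) * (1. - v) / 6.;
  if (k_mod == 1) return (3. * v * v * v - 6. * v * v + 4.) / 6.;
  if (k_mod == 2) return (-3. * v * v * v + 3. * v * v + 3. * v + 1.) / 6.;
  return v * v * v / 6.;
}

int main() {
  for (int degree = 1; degree <= 3; degree++)
    for (double v = 0.0; v < 1.0; v += 0.25) {
      double sum = 0.0;
      for (int k = 0; k <= degree; k++) sum += basis(degree, v, k);
      printf("degree=%d v=%.2f sum=%f\n", degree, v, sum);  // sum is always 1
    }
  return 0;
}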
3b9632ea84f00e27075c6f1d8d2604569d3cdb57.cu
#include "basis_cuda.h" #include <ATen/cuda/CUDAContext.h> #include "utils.cuh" #define THREADS 1024 #define BLOCKS(N) (N + THREADS - 1) / THREADS template <typename scalar_t, int64_t degree> struct Basis { static inline __device__ scalar_t forward(scalar_t v, int64_t k_mod) { if (degree == 1) { return 1. - v - k_mod + 2. * v * k_mod; } else if (degree == 2) { if (k_mod == 0) return 0.5 * v * v - v + 0.5; else if (k_mod == 1) return -v * v + v + 0.5; else return 0.5 * v * v; } else if (degree == 3) { if (k_mod == 0) return (1. - v) * (1. - v) * (1. - v) / 6.; else if (k_mod == 1) return (3. * v * v * v - 6. * v * v + 4.) / 6.; else if (k_mod == 2) return (-3. * v * v * v + 3. * v * v + 3. * v + 1.) / 6.; else return v * v * v / 6.; } else { return (scalar_t)-1.; } } static inline __device__ scalar_t backward(scalar_t v, int64_t k_mod) { if (degree == 1) { return 2 * k_mod - 1; } else if (degree == 2) { if (k_mod == 0) return v - 1.; else if (k_mod == 1) return -2. * v + 1.; else return v; } else if (degree == 3) { if (k_mod == 0) return (-v * v + 2. * v - 1.) / 2.; else if (k_mod == 1) return (3. * v * v - 4. * v) / 2.; else if (k_mod == 2) return (-3. * v * v + 2. * v + 1.) / 2.; else return v * v / 2.; } else { return (scalar_t)-1.; } } }; template <typename scalar_t, int64_t degree> __global__ void spline_basis_fw_kernel(const scalar_t *pseudo, const int64_t *kernel_size, const uint8_t *is_open_spline, scalar_t *basis, int64_t *weight_index, int64_t E, int64_t D, int64_t S, int64_t numel) { const int64_t thread_idx = blockIdx.x * blockDim.x + threadIdx.x; const int64_t e = thread_idx / S; const int64_t s = thread_idx % S; if (thread_idx < numel) { int64_t k = s, wi = 0, wi_offset = 1; scalar_t b = (scalar_t)1.; for (int64_t d = 0; d < D; d++) { const int64_t k_mod = k % (degree + 1); k /= degree + 1; scalar_t v = pseudo[e * D + d]; v *= kernel_size[d] - degree * is_open_spline[d]; wi += (((int64_t)v + k_mod) % kernel_size[d]) * wi_offset; wi_offset *= kernel_size[d]; v -= floor(v); v = Basis<scalar_t, degree>::forward(v, k_mod); b *= v; } basis[thread_idx] = b; weight_index[thread_idx] = wi; } } std::tuple<torch::Tensor, torch::Tensor> spline_basis_fw_cuda(torch::Tensor pseudo, torch::Tensor kernel_size, torch::Tensor is_open_spline, int64_t degree) { CHECK_CUDA(pseudo); CHECK_CUDA(kernel_size); CHECK_CUDA(is_open_spline); cudaSetDevice(pseudo.get_device()); CHECK_INPUT(kernel_size.dim() == 1); CHECK_INPUT(pseudo.size(1) == kernel_size.numel()); CHECK_INPUT(is_open_spline.dim()); CHECK_INPUT(pseudo.size(1) == is_open_spline.numel()); auto E = pseudo.size(0); auto D = pseudo.size(1); auto S = (int64_t)(powf(degree + 1, D) + 0.5); auto basis = at::empty({E, S}, pseudo.options()); auto weight_index = at::empty({E, S}, kernel_size.options()); auto kernel_size_data = kernel_size.data_ptr<int64_t>(); auto is_open_spline_data = is_open_spline.data_ptr<uint8_t>(); auto weight_index_data = weight_index.data_ptr<int64_t>(); auto stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES(pseudo.scalar_type(), "basis_fw", [&] { auto pseudo_data = pseudo.data_ptr<scalar_t>(); auto basis_data = basis.data_ptr<scalar_t>(); AT_DISPATCH_DEGREE_TYPES(degree, [&] { spline_basis_fw_kernel<scalar_t, DEGREE> <<<BLOCKS(basis.numel()), THREADS, 0, stream>>>( pseudo_data, kernel_size_data, is_open_spline_data, basis_data, weight_index_data, E, D, S, basis.numel()); }); }); return std::make_tuple(basis, weight_index); } template <typename scalar_t, int64_t degree> __global__ void 
spline_basis_bw_kernel(const scalar_t *grad_basis, const scalar_t *pseudo, const int64_t *kernel_size, const uint8_t *is_open_spline, scalar_t *grad_pseudo, int64_t E, int64_t D, int64_t S, int64_t numel) { const int64_t thread_idx = blockIdx.x * blockDim.x + threadIdx.x; const int64_t e = thread_idx / D; const int64_t d = thread_idx % D; if (thread_idx < numel) { scalar_t g = (scalar_t)0., tmp; for (ptrdiff_t s = 0; s < S; s++) { int64_t k_mod = (s / (int64_t)(powf(degree + 1, d) + 0.5)) % (degree + 1); scalar_t v = pseudo[e * D + d]; v *= kernel_size[d] - degree * is_open_spline[d]; v -= floor(v); v = Basis<scalar_t, degree>::backward(v, k_mod); tmp = v; for (int64_t d_it = 1; d_it < D; d_it++) { const int64_t d_new = d_it - (d >= d_it); k_mod = (s / (int64_t)(powf(degree + 1, d_new) + 0.5)) % (degree + 1); v = pseudo[e * D + d_new]; v *= kernel_size[d_new] - degree * is_open_spline[d_new]; v -= floor(v); v = Basis<scalar_t, degree>::forward(v, k_mod); tmp *= v; } g += tmp * grad_basis[e * S + s]; } g *= kernel_size[d] - degree * is_open_spline[d]; grad_pseudo[thread_idx] = g; } } torch::Tensor spline_basis_bw_cuda(torch::Tensor grad_basis, torch::Tensor pseudo, torch::Tensor kernel_size, torch::Tensor is_open_spline, int64_t degree) { CHECK_CUDA(grad_basis); CHECK_CUDA(pseudo); CHECK_CUDA(kernel_size); CHECK_CUDA(is_open_spline); cudaSetDevice(grad_basis.get_device()); CHECK_INPUT(grad_basis.size(0) == pseudo.size(0)); CHECK_INPUT(kernel_size.dim() == 1); CHECK_INPUT(pseudo.size(1) == kernel_size.numel()); CHECK_INPUT(is_open_spline.dim()); CHECK_INPUT(pseudo.size(1) == is_open_spline.numel()); auto E = pseudo.size(0); auto D = pseudo.size(1); auto S = grad_basis.size(1); auto grad_pseudo = at::empty({E, D}, pseudo.options()); auto kernel_size_data = kernel_size.data_ptr<int64_t>(); auto is_open_spline_data = is_open_spline.data_ptr<uint8_t>(); auto stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES(pseudo.scalar_type(), "basis_bw", [&] { auto grad_basis_data = grad_basis.data_ptr<scalar_t>(); auto pseudo_data = pseudo.data_ptr<scalar_t>(); auto grad_pseudo_data = grad_pseudo.data_ptr<scalar_t>(); AT_DISPATCH_DEGREE_TYPES(degree, [&] { spline_basis_bw_kernel<scalar_t, DEGREE> <<<BLOCKS(grad_pseudo.numel()), THREADS, 0, stream>>>( grad_basis_data, pseudo_data, kernel_size_data, is_open_spline_data, grad_pseudo_data, E, D, S, grad_pseudo.numel()); }); }); return grad_pseudo; }
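The forward kernel's inner loop turns the flat basis index s (0 <= s < S, with S = (degree + 1)^D) into one k_mod digit per dimension by repeated "% (degree + 1)" and "/ (degree + 1)". The host-only snippet below is illustrative; the values chosen for degree and D are arbitrary, giving the nine base-3 digit pairs for degree 2 in two dimensions.

#include <cstdio>

int main() {
  const int degree = 2, D = 2;
  int S = 1;
  for (int d = 0; d < D; d++) S *= degree + 1;   // S = (degree + 1)^D = 9
  for (int s = 0; s < S; s++) {
    int k = s;
    printf("s=%d -> k_mod =", s);
    for (int d = 0; d < D; d++) {
      printf(" %d", k % (degree + 1));           // digit used for dimension d
      k /= degree + 1;
    }
    printf("\n");
  }
  return 0;
}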
97c42110809746767d6a69af82813834c1dae50a.hip
// !!! This is a file automatically generated by hipify!!! //#include "CudaSplitEncseg.h" //#include "CudaSplitEncsubface.h" //#include "CudaInsertPoint.h" //#include "CudaMesh.h" // ///* Host */ //// This function assumes the input encmarker has be set correctly //// in the initialization //void initSubfaceEncmarkers( // RealD& t_pointlist, // IntD& t_trifacelist, // TetHandleD& t_tri2tetlist, // IntD& t_subfaceencmarker, // IntD& t_tetlist, // int& numofsubface //) //{ // int numberofblocks = (ceil)((float)numofsubface / BLOCK_SIZE); // kernelMarkAllEncsubfaces << <numberofblocks, BLOCK_SIZE >> > ( // thrust::raw_pointer_cast(&t_pointlist[0]), // thrust::raw_pointer_cast(&t_trifacelist[0]), // thrust::raw_pointer_cast(&t_tri2tetlist[0]), // thrust::raw_pointer_cast(&t_subfaceencmarker[0]), // thrust::raw_pointer_cast(&t_tetlist[0]), // numofsubface // ); //} // //// This function splits the encroached subfaces iteratively //void splitEncsubfaces( // RealD& t_pointlist, // TriHandleD& t_point2trilist, // TetHandleD& t_point2tetlist, // PointTypeD& t_pointtypelist, // RealD& t_pointradius, // IntD& t_seglist, // TriHandleD& t_seg2trilist, // TetHandleD& t_seg2tetlist, // IntD& t_seg2parentidxlist, // IntD& t_segparentendpointidxlist, // TriStatusD& t_segstatus, // IntD& t_trifacelist, // TetHandleD& t_tri2tetlist, // TriHandleD& t_tri2trilist, // TriHandleD& t_tri2seglist, // IntD& t_tri2parentidxlist, // IntD& t_triid2parentoffsetlist, // IntD& t_triparentendpointidxlist, // TriStatusD& t_tristatus, // IntD& t_tetlist, // TetHandleD& t_neighborlist, // TriHandleD& t_tet2trilist, // TriHandleD& t_tet2seglist, // TetStatusD& t_tetstatus, // IntD& t_segencmarker, // IntD& t_subfaceencmarker, // int& numofpoints, // int& numofsubseg, // int& numofsubface, // int& numoftet, // MESHBH* behavior, // int iter_tet, // int debug_msg, // bool debug_error, // bool debug_timing //) //{ // int numberofencsubfaces; // number of encroached subfaces // IntD t_encsubfacelist; // IntD t_threadmarker; // // int code = 1; // int iteration = 0; // while (true) // { // // // Update the active encroached subface list. // // Exclude the empty ones (their markers have already been set to -1). 
// numberofencsubfaces = updateActiveListByMarker_Slot(t_subfaceencmarker, t_encsubfacelist, numofsubface); // if(debug_msg) printf(" Iteration #%d: number of encroached subfaces = %d\n", iteration, numberofencsubfaces); // if (numberofencsubfaces == 0) // break; // // t_threadmarker.resize(numberofencsubfaces); // thrust::fill(t_threadmarker.begin(), t_threadmarker.end(), 1); // // code = // insertPoint( // t_pointlist, // t_point2trilist, // t_point2tetlist, // t_pointtypelist, // t_pointradius, // t_seglist, // t_seg2trilist, // t_seg2tetlist, // t_seg2parentidxlist, // t_segparentendpointidxlist, // t_segstatus, // t_trifacelist, // t_tri2tetlist, // t_tri2trilist, // t_tri2seglist, // t_tri2parentidxlist, // t_triid2parentoffsetlist, // t_triparentendpointidxlist, // t_tristatus, // t_tetlist, // t_neighborlist, // t_tet2trilist, // t_tet2seglist, // t_tetstatus, // t_segencmarker, // t_subfaceencmarker, // t_encsubfacelist, // t_threadmarker, // numberofencsubfaces, // 0, // numberofencsubfaces, // 0, // split subface // numofpoints, // numofsubseg, // numofsubface, // numoftet, // behavior, // -1, // iteration, // iter_tet, // debug_msg, // debug_error, // debug_timing // ); // // if (!code) // break; // // splitEncsegs( // t_pointlist, // t_point2trilist, // t_point2tetlist, // t_pointtypelist, // t_pointradius, // t_seglist, // t_seg2trilist, // t_seg2tetlist, // t_seg2parentidxlist, // t_segparentendpointidxlist, // t_segstatus, // t_trifacelist, // t_tri2tetlist, // t_tri2trilist, // t_tri2seglist, // t_tri2parentidxlist, // t_triid2parentoffsetlist, // t_triparentendpointidxlist, // t_tristatus, // t_tetlist, // t_neighborlist, // t_tet2trilist, // t_tet2seglist, // t_tetstatus, // t_segencmarker, // t_subfaceencmarker, // numofpoints, // numofsubseg, // numofsubface, // numoftet, // behavior, // iteration, // iter_tet, // 0, // debug_error, // false // ); // // hipDeviceSynchronize(); // // iteration++; // } // // if (!code && debug_msg) // printf(" Ended with %d bad subface\n", numberofencsubfaces); //}
97c42110809746767d6a69af82813834c1dae50a.cu
//#include "CudaSplitEncseg.h" //#include "CudaSplitEncsubface.h" //#include "CudaInsertPoint.h" //#include "CudaMesh.h" // ///* Host */ //// This function assumes the input encmarker has be set correctly //// in the initialization //void initSubfaceEncmarkers( // RealD& t_pointlist, // IntD& t_trifacelist, // TetHandleD& t_tri2tetlist, // IntD& t_subfaceencmarker, // IntD& t_tetlist, // int& numofsubface //) //{ // int numberofblocks = (ceil)((float)numofsubface / BLOCK_SIZE); // kernelMarkAllEncsubfaces << <numberofblocks, BLOCK_SIZE >> > ( // thrust::raw_pointer_cast(&t_pointlist[0]), // thrust::raw_pointer_cast(&t_trifacelist[0]), // thrust::raw_pointer_cast(&t_tri2tetlist[0]), // thrust::raw_pointer_cast(&t_subfaceencmarker[0]), // thrust::raw_pointer_cast(&t_tetlist[0]), // numofsubface // ); //} // //// This function splits the encroached subfaces iteratively //void splitEncsubfaces( // RealD& t_pointlist, // TriHandleD& t_point2trilist, // TetHandleD& t_point2tetlist, // PointTypeD& t_pointtypelist, // RealD& t_pointradius, // IntD& t_seglist, // TriHandleD& t_seg2trilist, // TetHandleD& t_seg2tetlist, // IntD& t_seg2parentidxlist, // IntD& t_segparentendpointidxlist, // TriStatusD& t_segstatus, // IntD& t_trifacelist, // TetHandleD& t_tri2tetlist, // TriHandleD& t_tri2trilist, // TriHandleD& t_tri2seglist, // IntD& t_tri2parentidxlist, // IntD& t_triid2parentoffsetlist, // IntD& t_triparentendpointidxlist, // TriStatusD& t_tristatus, // IntD& t_tetlist, // TetHandleD& t_neighborlist, // TriHandleD& t_tet2trilist, // TriHandleD& t_tet2seglist, // TetStatusD& t_tetstatus, // IntD& t_segencmarker, // IntD& t_subfaceencmarker, // int& numofpoints, // int& numofsubseg, // int& numofsubface, // int& numoftet, // MESHBH* behavior, // int iter_tet, // int debug_msg, // bool debug_error, // bool debug_timing //) //{ // int numberofencsubfaces; // number of encroached subfaces // IntD t_encsubfacelist; // IntD t_threadmarker; // // int code = 1; // int iteration = 0; // while (true) // { // // // Update the active encroached subface list. // // Exclude the empty ones (their markers have already been set to -1). 
// numberofencsubfaces = updateActiveListByMarker_Slot(t_subfaceencmarker, t_encsubfacelist, numofsubface); // if(debug_msg) printf(" Iteration #%d: number of encroached subfaces = %d\n", iteration, numberofencsubfaces); // if (numberofencsubfaces == 0) // break; // // t_threadmarker.resize(numberofencsubfaces); // thrust::fill(t_threadmarker.begin(), t_threadmarker.end(), 1); // // code = // insertPoint( // t_pointlist, // t_point2trilist, // t_point2tetlist, // t_pointtypelist, // t_pointradius, // t_seglist, // t_seg2trilist, // t_seg2tetlist, // t_seg2parentidxlist, // t_segparentendpointidxlist, // t_segstatus, // t_trifacelist, // t_tri2tetlist, // t_tri2trilist, // t_tri2seglist, // t_tri2parentidxlist, // t_triid2parentoffsetlist, // t_triparentendpointidxlist, // t_tristatus, // t_tetlist, // t_neighborlist, // t_tet2trilist, // t_tet2seglist, // t_tetstatus, // t_segencmarker, // t_subfaceencmarker, // t_encsubfacelist, // t_threadmarker, // numberofencsubfaces, // 0, // numberofencsubfaces, // 0, // split subface // numofpoints, // numofsubseg, // numofsubface, // numoftet, // behavior, // -1, // iteration, // iter_tet, // debug_msg, // debug_error, // debug_timing // ); // // if (!code) // break; // // splitEncsegs( // t_pointlist, // t_point2trilist, // t_point2tetlist, // t_pointtypelist, // t_pointradius, // t_seglist, // t_seg2trilist, // t_seg2tetlist, // t_seg2parentidxlist, // t_segparentendpointidxlist, // t_segstatus, // t_trifacelist, // t_tri2tetlist, // t_tri2trilist, // t_tri2seglist, // t_tri2parentidxlist, // t_triid2parentoffsetlist, // t_triparentendpointidxlist, // t_tristatus, // t_tetlist, // t_neighborlist, // t_tet2trilist, // t_tet2seglist, // t_tetstatus, // t_segencmarker, // t_subfaceencmarker, // numofpoints, // numofsubseg, // numofsubface, // numoftet, // behavior, // iteration, // iter_tet, // 0, // debug_error, // false // ); // // cudaDeviceSynchronize(); // // iteration++; // } // // if (!code && debug_msg) // printf(" Ended with %d bad subface\n", numberofencsubfaces); //}
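The disabled driver above repeatedly rebuilds its work list from the encroachment markers via updateActiveListByMarker_Slot, whose implementation is not shown in this file. A generic sketch of that compaction pattern using Thrust follows; the predicate and variable names are only for this example and do not come from the project.

#include <cstdio>
#include <vector>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>

// Treat any non-negative marker as "still encroached".
struct is_active {
  __host__ __device__ bool operator()(int marker) const { return marker >= 0; }
};

int main() {
  std::vector<int> h_marker = {3, -1, 0, -1, 7};   // -1 means not encroached
  thrust::device_vector<int> marker(h_marker.begin(), h_marker.end());
  thrust::device_vector<int> active(marker.size());
  // Compact the indices whose marker passes the predicate into the work list.
  auto end = thrust::copy_if(thrust::counting_iterator<int>(0),
                             thrust::counting_iterator<int>((int)marker.size()),
                             marker.begin(), active.begin(), is_active());
  int numberofencsubfaces = (int)(end - active.begin());
  printf("number of encroached subfaces = %d\n", numberofencsubfaces);  // 3 (indices 0, 2, 4)
  return 0;
}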
246c0f0c7fb454cf3d2792568ae8f88544c38ca5.hip
// !!! This is a file automatically generated by hipify!!! /* This is based on an example developed by Mark Harris for his NVIDIA blog: http://devblogs.nvidia.com/parallelforall/gpu-pro-tip-cuda-7-streams-simplify-concurrency/ -- I have changed it into a multithreaded implementation with timing */ #include <stdlib.h> #include <stdio.h> #include <math.h> #include <omp.h> #include <hip/hip_runtime.h> const int N = 1 << 20; __global__ void kernel(float *x, int n) { int tid = threadIdx.x + blockIdx.x * blockDim.x; for (int i = tid; i < n; i += blockDim.x * gridDim.x) { x[i] = sqrt(pow(3.14159,i)); } } int main() { // initialise CUDA timing, and start timer float milli; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); float *h_data, *d_data; h_data = (float *) malloc(sizeof(float)); hipMalloc(&d_data, sizeof(float)); h_data[0] = 1.0f; // set up 8 OpenMP threads const int num_threads = 8; omp_set_num_threads(num_threads); float *data[num_threads]; // loop over num_threads for (int i = 0; i < num_threads; i++) hipMalloc(&data[i], N * sizeof(float)); #pragma omp parallel for for (int i = 0; i < num_threads; i++) { printf(" thread ID = %d \n",omp_get_thread_num()); // launch one worker kernel per thread hipLaunchKernelGGL(( kernel), dim3(1), dim3(64), 0, 0, data[i], N); } // wait for completion of all kernels hipDeviceSynchronize(); // stop timer and report execution time hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milli, start, stop); printf("execution time (ms): %f \n",milli); hipDeviceReset(); return 0; }
246c0f0c7fb454cf3d2792568ae8f88544c38ca5.cu
/* This is based on an example developed by Mark Harris for his NVIDIA blog: http://devblogs.nvidia.com/parallelforall/gpu-pro-tip-cuda-7-streams-simplify-concurrency/ -- I have changed it into a multithreaded implementation with timing */ #include <stdlib.h> #include <stdio.h> #include <math.h> #include <omp.h> #include <cuda.h> const int N = 1 << 20; __global__ void kernel(float *x, int n) { int tid = threadIdx.x + blockIdx.x * blockDim.x; for (int i = tid; i < n; i += blockDim.x * gridDim.x) { x[i] = sqrt(pow(3.14159,i)); } } int main() { // initialise CUDA timing, and start timer float milli; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); float *h_data, *d_data; h_data = (float *) malloc(sizeof(float)); cudaMalloc(&d_data, sizeof(float)); h_data[0] = 1.0f; // set up 8 OpenMP threads const int num_threads = 8; omp_set_num_threads(num_threads); float *data[num_threads]; // loop over num_threads for (int i = 0; i < num_threads; i++) cudaMalloc(&data[i], N * sizeof(float)); #pragma omp parallel for for (int i = 0; i < num_threads; i++) { printf(" thread ID = %d \n",omp_get_thread_num()); // launch one worker kernel per thread kernel<<<1, 64>>>(data[i], N); } // wait for completion of all kernels cudaDeviceSynchronize(); // stop timer and report execution time cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milli, start, stop); printf("execution time (ms): %f \n",milli); cudaDeviceReset(); return 0; }
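Both copies of this example launch the eight worker kernels on the legacy default stream, so they serialize unless the build uses --default-stream per-thread. A variant that makes the intended concurrency explicit by giving each OpenMP thread its own stream is sketched below; this is an assumption about the intent behind the cited blog post, not code taken from the file above.

#include <cuda_runtime.h>
#include <omp.h>
#include <stdio.h>

__global__ void kernel(float *x, int n) {
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  for (int i = tid; i < n; i += blockDim.x * gridDim.x)
    x[i] = sqrtf(powf(3.14159f, (float)i));
}

int main() {
  const int num_threads = 8, n = 1 << 20;
  float *data[num_threads];
  cudaStream_t streams[num_threads];
  for (int i = 0; i < num_threads; i++) {
    cudaMalloc(&data[i], n * sizeof(float));
    cudaStreamCreate(&streams[i]);          // one stream per worker thread
  }
  omp_set_num_threads(num_threads);
  #pragma omp parallel for
  for (int i = 0; i < num_threads; i++) {
    kernel<<<1, 64, 0, streams[i]>>>(data[i], n);   // launches can now overlap
  }
  cudaDeviceSynchronize();
  for (int i = 0; i < num_threads; i++) {
    cudaStreamDestroy(streams[i]);
    cudaFree(data[i]);
  }
  printf("done\n");
  return 0;
}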
cb9d32f38c5072bc7ef3c81b615a32c181ea8723.hip
// !!! This is a file automatically generated by hipify!!! /* entrySearch * * An algorithm to find the minimum or maximum value of an array using a GPU. * This program searches an array of reals of arbitrary length and returns the * minimum or maximum value of the array. * * Adam J. Sierakowski, JHU/APL 2011 */ #include "entrySearch.h" #include <hip/hip_runtime.h> #include <helper_cuda.h> #define MAXTHREADS 128 #define MAXBLOCKS 64 /* A bitwise function to determine the maximum exponent x that satisfies the * inequality 2^x < n. */ int floorLog2(unsigned int n) { int pos = 0; if (n >= 1<<16) { n >>= 16; pos += 16; } if (n >= 1<< 8) { n >>= 8; pos += 8; } if (n >= 1<< 4) { n >>= 4; pos += 4; } if (n >= 1<< 2) { n >>= 2; pos += 2; } if (n >= 1<< 1) { pos += 1; } return ((n == 0) ? (-1) : pos); } /* A bitwise function to determine the minimum number n that satisfies the * inequality n > x, where n = 2^a for arbitrary a. */ unsigned int nextPow2(unsigned int x) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } /* A function to determine the proper number of blocks and threads into which * the array should be split when parallelized on the GPU. */ void getNumBlocksAndThreads(int n, int &blocks, int &threads) { threads = (n < MAXTHREADS * 2) ? nextPow2((n + 1) / 2): MAXTHREADS; blocks = (n + threads * 2 - 1) / (threads * 2); } /* A function to create random input data on CPU for program testing. During * generation, the minimum value is recorded and returned for use in verifying * the GPU test result. */ void randArrGen(int size, real *arr, real* minmax) { srand(time(NULL)); for(int i=0; i<size; i++) { arr[i] = (rand() % size) - size / 2; if (arr[i] < minmax[0]) { minmax[0] = arr[i]; } if (arr[i] > minmax[1]) { minmax[1] = arr[i]; } } } /* The base function of the minimum search algorithm. */ real find_min(int size, real *d_iarr) { int blocks = 0; int threads = 0; getNumBlocksAndThreads(size, blocks, threads); // create minarr on device int h_bytes = blocks * sizeof(real); real *d_minarr = NULL; checkCudaErrors(hipMalloc((void**)&d_minarr, h_bytes)); gpumem += h_bytes; hipDeviceSynchronize(); dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = threads * sizeof(real); // run kernel hipLaunchKernelGGL(( entrySearch_min_kernel), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_iarr, d_minarr, size); getLastCudaError("Kernel execution failed."); hipDeviceSynchronize(); // if there was more than one block, re-run the kernel on the minimum values // from each of the blocks, which now reside in the first block_number indices // in d_minarr while(blocks > 1) { // use only the first block_number indices in min_arr size = blocks; getNumBlocksAndThreads(size, blocks, threads); hipLaunchKernelGGL(( entrySearch_min_kernel), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_minarr, d_minarr, size); getLastCudaError("Kernel execution failed."); hipDeviceSynchronize(); } // grab final answer real min; checkCudaErrors(hipMemcpy(&min, d_minarr, sizeof(real), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_minarr)); return min; } /* The base function of the maximum search algorithm. 
*/ real find_max(int size, real *d_iarr) { int blocks = 0; int threads = 0; getNumBlocksAndThreads(size, blocks, threads); // create minarr on device int h_bytes = blocks * sizeof(real); real *d_maxarr = NULL; checkCudaErrors(hipMalloc((void**)&d_maxarr, h_bytes)); gpumem += h_bytes; hipDeviceSynchronize(); dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = threads * sizeof(real); // run kernel hipLaunchKernelGGL(( entrySearch_max_kernel), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_iarr, d_maxarr, size); getLastCudaError("Kernel execution failed."); hipDeviceSynchronize(); // if there was more than one block, re-run the kernel on the maximum values // from each of the blocks, which now reside in the first block_number indices // in d_minarr while(blocks > 1) { // use only the first block_number indices in min_arr size = blocks; getNumBlocksAndThreads(size, blocks, threads); hipLaunchKernelGGL(( entrySearch_max_kernel), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_maxarr, d_maxarr, size); getLastCudaError("Kernel execution failed."); hipDeviceSynchronize(); } // grab final answer real max; checkCudaErrors(hipMemcpy(&max, d_maxarr, sizeof(real), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_maxarr)); return max; } /* The base function of the maximum search algorithm. */ real find_max_mag(int size, real *d_iarr) { int blocks = 0; int threads = 0; getNumBlocksAndThreads(size, blocks, threads); // create minarr on device int h_bytes = blocks * sizeof(real); real *d_maxarr = NULL; checkCudaErrors(hipMalloc((void**)&d_maxarr, 10*sizeof(real))); checkCudaErrors(hipMalloc((void**)&d_maxarr, h_bytes)); gpumem += h_bytes; hipDeviceSynchronize(); dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = threads * sizeof(real); // run kernel hipLaunchKernelGGL(( entrySearch_max_mag_kernel), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_iarr, d_maxarr, size); getLastCudaError("Kernel execution failed."); hipDeviceSynchronize(); // if there was more than one block, re-run the kernel on the maximum values // from each of the blocks, which now reside in the first block_number indices // in d_minarr while(blocks > 1) { // use only the first block_number indices in min_arr size = blocks; getNumBlocksAndThreads(size, blocks, threads); hipLaunchKernelGGL(( entrySearch_max_mag_kernel), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_maxarr, d_maxarr, size); getLastCudaError("Kernel execution failed."); hipDeviceSynchronize(); } // grab final answer real max; checkCudaErrors(hipMemcpy(&max, d_maxarr, sizeof(real), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_maxarr)); return max; } /* The base function of the average algorithm. 
*/ real avg_entries(int size, real *d_iarr) { int blocks = 0; int threads = 0; int size_in = size; getNumBlocksAndThreads(size, blocks, threads); // create minarr on device int h_bytes = blocks * sizeof(real); real *d_maxarr = NULL; checkCudaErrors(hipMalloc((void**)&d_maxarr, h_bytes)); gpumem += h_bytes; hipDeviceSynchronize(); dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = threads * sizeof(real); // run kernel hipLaunchKernelGGL(( entrySearch_avg_entries_kernel), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_iarr, d_maxarr, size); getLastCudaError("Kernel execution failed."); hipDeviceSynchronize(); // if there was more than one block, re-run the kernel on the maximum values // from each of the blocks, which now reside in the first block_number indices // in d_minarr while(blocks > 1) { // use only the first block_number indices in min_arr size = blocks; getNumBlocksAndThreads(size, blocks, threads); hipLaunchKernelGGL(( entrySearch_avg_entries_kernel), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_maxarr, d_maxarr, size); getLastCudaError("Kernel execution failed."); hipDeviceSynchronize(); } // grab final answer real max; checkCudaErrors(hipMemcpy(&max, d_maxarr, sizeof(real), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_maxarr)); return max / size_in; } /* The base function of the sum algorithm. */ real sum_entries(int size, real *d_iarr) { int blocks = 0; int threads = 0; getNumBlocksAndThreads(size, blocks, threads); // create minarr on device int h_bytes = blocks * sizeof(real); real *d_maxarr = NULL; checkCudaErrors(hipMalloc((void**)&d_maxarr, h_bytes)); gpumem += h_bytes; hipDeviceSynchronize(); dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = threads * sizeof(real); // run kernel hipLaunchKernelGGL(( entrySearch_avg_entries_kernel), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_iarr, d_maxarr, size); getLastCudaError("Kernel execution failed."); hipDeviceSynchronize(); // if there was more than one block, re-run the kernel on the maximum values // from each of the blocks, which now reside in the first block_number indices // in d_minarr while(blocks > 1) { // use only the first block_number indices in min_arr size = blocks; getNumBlocksAndThreads(size, blocks, threads); hipLaunchKernelGGL(( entrySearch_avg_entries_kernel), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_maxarr, d_maxarr, size); getLastCudaError("Kernel execution failed."); hipDeviceSynchronize(); } // grab final answer real max; checkCudaErrors(hipMemcpy(&max, d_maxarr, sizeof(real), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_maxarr)); return max; } /* The main test function that creates a test array of random values and calls * find_min(...). It displays both the known result as maintained through the * CPU-generated array and the GPU test result. 
*/ /*int main(int argc, char** argv) { hipDeviceProp_t deviceProp; deviceProp.major = 1; deviceProp.minor = 0; // force use of device number zero int dev = 0; checkCudaErrors(hipSetDevice(dev)); checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev)); printf("\nUsing device %d: \"%s\"\n", dev, deviceProp.name); checkCudaErrors(hipSetDevice(dev)); // number of elements to reduce int size = pow(2, 23); printf("\nSearching %d randomly-generated elements", size); printf(" for the minimum value...\n"); // create random input data on CPU real* h_arr = (real*) malloc(size * sizeof(real)); cpumem += size * sizeof(real); real* minmax = (real*) malloc(2 * sizeof(real)); cpumem += 2 * sizeof(real); randArrGen(size, h_arr, minmax); // load host data to device int numBlocks = 0; int numThreads = 0; getNumBlocksAndThreads(size, numBlocks, numThreads); unsigned int inbytes = size * sizeof(real); real* d_iarr = NULL; checkCudaErrors(hipMalloc((void**)&d_iarr, inbytes)); gpumem += in_bytes; checkCudaErrors(hipMemcpy(d_iarr, h_arr, inbytes, hipMemcpyHostToDevice)); // run test real gpu_result = 0; int numcount = 100; // the number of iterations to test // timing stuff hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); real elapsedTime; // run GPU test printf("\nComputing GPU result %d times...\n", numcount); hipEventRecord(start, 0); for(int count = 0; count < numcount; count++) { gpu_result = find_min(size, d_iarr); } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); printf("...completed in %0.0f ms.\n", elapsedTime); // run CPU test printf("\nComputing CPU result %d times...\n", numcount); hipEventRecord(start, 0); real cpu_result = size * 2; for(int count = 0; count < numcount; count++) { for(int z = 0; z < size; z++) { if(h_arr[z] < cpu_result) { cpu_result = h_arr[z]; } } } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); printf("...completed in %0.0f ms.\n", elapsedTime); // final minimum values printf("\nKnown result = %0.0f\n", minmax[0]); printf("CPU result = %0.0f\n", cpu_result); printf("GPU result = %0.0f\n", gpu_result); printf("\nSearching %d randomly-generated elements", size); printf(" for the maximum value...\n"); // run GPU test printf("\nComputing GPU result %d times...\n", numcount); hipEventRecord(start, 0); for(int count = 0; count < numcount; count++) { gpu_result = find_max(size, d_iarr); } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); printf("...completed in %0.0f ms.\n", elapsedTime); // run CPU test printf("\nComputing CPU result %d times...\n", numcount); hipEventRecord(start, 0); cpu_result = -size * 2; for(int count = 0; count < numcount; count++) { for(int z = 0; z < size; z++) { if(h_arr[z] > cpu_result) { cpu_result = h_arr[z]; } } } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); printf("...completed in %0.0f ms.\n", elapsedTime); // final maximum values printf("\nKnown result = %0.0f\n", minmax[1]); printf("CPU result = %0.0f\n", cpu_result); printf("GPU result = %0.0f\n", gpu_result); // clean up checkCudaErrors(hipFree(d_iarr)); free(h_arr); free(minmax); hipDeviceReset(); cutilExit(argc, argv); } */
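The launch-configuration logic above is pure host code, so its behaviour is easy to tabulate. The snippet below copies nextPow2() and getNumBlocksAndThreads() from this file and prints the (blocks, threads) pair chosen for a few arbitrary sample sizes; the division by threads * 2 presumably reflects that each thread of the reduction kernels declared in entrySearch.h (not shown here) handles two input elements.

#include <cstdio>

#define MAXTHREADS 128

static unsigned int nextPow2(unsigned int x) {
  --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x;
}

static void getNumBlocksAndThreads(int n, int &blocks, int &threads) {
  threads = (n < MAXTHREADS * 2) ? nextPow2((n + 1) / 2) : MAXTHREADS;
  blocks = (n + threads * 2 - 1) / (threads * 2);
}

int main() {
  const int sizes[] = {5, 100, 256, 1 << 20};
  for (int n : sizes) {
    int blocks = 0, threads = 0;
    getNumBlocksAndThreads(n, blocks, threads);
    printf("n=%7d -> blocks=%5d threads=%3d\n", n, blocks, threads);
  }
  return 0;
}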
cb9d32f38c5072bc7ef3c81b615a32c181ea8723.cu
/* entrySearch * * An algorithm to find the minimum or maximum value of an array using a GPU. * This program searches an array of reals of arbitrary length and returns the * minimum or maximum value of the array. * * Adam J. Sierakowski, JHU/APL 2011 */ #include "entrySearch.h" #include <cuda.h> #include <helper_cuda.h> #define MAXTHREADS 128 #define MAXBLOCKS 64 /* A bitwise function to determine the maximum exponent x that satisfies the * inequality 2^x < n. */ int floorLog2(unsigned int n) { int pos = 0; if (n >= 1<<16) { n >>= 16; pos += 16; } if (n >= 1<< 8) { n >>= 8; pos += 8; } if (n >= 1<< 4) { n >>= 4; pos += 4; } if (n >= 1<< 2) { n >>= 2; pos += 2; } if (n >= 1<< 1) { pos += 1; } return ((n == 0) ? (-1) : pos); } /* A bitwise function to determine the minimum number n that satisfies the * inequality n > x, where n = 2^a for arbitrary a. */ unsigned int nextPow2(unsigned int x) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } /* A function to determine the proper number of blocks and threads into which * the array should be split when parallelized on the GPU. */ void getNumBlocksAndThreads(int n, int &blocks, int &threads) { threads = (n < MAXTHREADS * 2) ? nextPow2((n + 1) / 2): MAXTHREADS; blocks = (n + threads * 2 - 1) / (threads * 2); } /* A function to create random input data on CPU for program testing. During * generation, the minimum value is recorded and returned for use in verifying * the GPU test result. */ void randArrGen(int size, real *arr, real* minmax) { srand(time(NULL)); for(int i=0; i<size; i++) { arr[i] = (rand() % size) - size / 2; if (arr[i] < minmax[0]) { minmax[0] = arr[i]; } if (arr[i] > minmax[1]) { minmax[1] = arr[i]; } } } /* The base function of the minimum search algorithm. */ real find_min(int size, real *d_iarr) { int blocks = 0; int threads = 0; getNumBlocksAndThreads(size, blocks, threads); // create minarr on device int h_bytes = blocks * sizeof(real); real *d_minarr = NULL; checkCudaErrors(cudaMalloc((void**)&d_minarr, h_bytes)); gpumem += h_bytes; cudaThreadSynchronize(); dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = threads * sizeof(real); // run kernel entrySearch_min_kernel<<<dimGrid, dimBlock, smemSize>>>(d_iarr, d_minarr, size); getLastCudaError("Kernel execution failed."); cudaThreadSynchronize(); // if there was more than one block, re-run the kernel on the minimum values // from each of the blocks, which now reside in the first block_number indices // in d_minarr while(blocks > 1) { // use only the first block_number indices in min_arr size = blocks; getNumBlocksAndThreads(size, blocks, threads); entrySearch_min_kernel<<<dimGrid, dimBlock, smemSize>>>(d_minarr, d_minarr, size); getLastCudaError("Kernel execution failed."); cudaThreadSynchronize(); } // grab final answer real min; checkCudaErrors(cudaMemcpy(&min, d_minarr, sizeof(real), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_minarr)); return min; } /* The base function of the maximum search algorithm. 
*/ real find_max(int size, real *d_iarr) { int blocks = 0; int threads = 0; getNumBlocksAndThreads(size, blocks, threads); // create minarr on device int h_bytes = blocks * sizeof(real); real *d_maxarr = NULL; checkCudaErrors(cudaMalloc((void**)&d_maxarr, h_bytes)); gpumem += h_bytes; cudaThreadSynchronize(); dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = threads * sizeof(real); // run kernel entrySearch_max_kernel<<<dimGrid, dimBlock, smemSize>>>(d_iarr, d_maxarr, size); getLastCudaError("Kernel execution failed."); cudaThreadSynchronize(); // if there was more than one block, re-run the kernel on the maximum values // from each of the blocks, which now reside in the first block_number indices // in d_minarr while(blocks > 1) { // use only the first block_number indices in min_arr size = blocks; getNumBlocksAndThreads(size, blocks, threads); entrySearch_max_kernel<<<dimGrid, dimBlock, smemSize>>>(d_maxarr, d_maxarr, size); getLastCudaError("Kernel execution failed."); cudaThreadSynchronize(); } // grab final answer real max; checkCudaErrors(cudaMemcpy(&max, d_maxarr, sizeof(real), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_maxarr)); return max; } /* The base function of the maximum search algorithm. */ real find_max_mag(int size, real *d_iarr) { int blocks = 0; int threads = 0; getNumBlocksAndThreads(size, blocks, threads); // create minarr on device int h_bytes = blocks * sizeof(real); real *d_maxarr = NULL; checkCudaErrors(cudaMalloc((void**)&d_maxarr, 10*sizeof(real))); checkCudaErrors(cudaMalloc((void**)&d_maxarr, h_bytes)); gpumem += h_bytes; cudaThreadSynchronize(); dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = threads * sizeof(real); // run kernel entrySearch_max_mag_kernel<<<dimGrid, dimBlock, smemSize>>>(d_iarr, d_maxarr, size); getLastCudaError("Kernel execution failed."); cudaThreadSynchronize(); // if there was more than one block, re-run the kernel on the maximum values // from each of the blocks, which now reside in the first block_number indices // in d_minarr while(blocks > 1) { // use only the first block_number indices in min_arr size = blocks; getNumBlocksAndThreads(size, blocks, threads); entrySearch_max_mag_kernel<<<dimGrid, dimBlock, smemSize>>>(d_maxarr, d_maxarr, size); getLastCudaError("Kernel execution failed."); cudaThreadSynchronize(); } // grab final answer real max; checkCudaErrors(cudaMemcpy(&max, d_maxarr, sizeof(real), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_maxarr)); return max; } /* The base function of the average algorithm. 
*/ real avg_entries(int size, real *d_iarr) { int blocks = 0; int threads = 0; int size_in = size; getNumBlocksAndThreads(size, blocks, threads); // create minarr on device int h_bytes = blocks * sizeof(real); real *d_maxarr = NULL; checkCudaErrors(cudaMalloc((void**)&d_maxarr, h_bytes)); gpumem += h_bytes; cudaThreadSynchronize(); dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = threads * sizeof(real); // run kernel entrySearch_avg_entries_kernel<<<dimGrid, dimBlock, smemSize>>>(d_iarr, d_maxarr, size); getLastCudaError("Kernel execution failed."); cudaThreadSynchronize(); // if there was more than one block, re-run the kernel on the maximum values // from each of the blocks, which now reside in the first block_number indices // in d_minarr while(blocks > 1) { // use only the first block_number indices in min_arr size = blocks; getNumBlocksAndThreads(size, blocks, threads); entrySearch_avg_entries_kernel<<<dimGrid, dimBlock, smemSize>>>(d_maxarr, d_maxarr, size); getLastCudaError("Kernel execution failed."); cudaThreadSynchronize(); } // grab final answer real max; checkCudaErrors(cudaMemcpy(&max, d_maxarr, sizeof(real), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_maxarr)); return max / size_in; } /* The base function of the sum algorithm. */ real sum_entries(int size, real *d_iarr) { int blocks = 0; int threads = 0; getNumBlocksAndThreads(size, blocks, threads); // create minarr on device int h_bytes = blocks * sizeof(real); real *d_maxarr = NULL; checkCudaErrors(cudaMalloc((void**)&d_maxarr, h_bytes)); gpumem += h_bytes; cudaThreadSynchronize(); dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = threads * sizeof(real); // run kernel entrySearch_avg_entries_kernel<<<dimGrid, dimBlock, smemSize>>>(d_iarr, d_maxarr, size); getLastCudaError("Kernel execution failed."); cudaThreadSynchronize(); // if there was more than one block, re-run the kernel on the maximum values // from each of the blocks, which now reside in the first block_number indices // in d_minarr while(blocks > 1) { // use only the first block_number indices in min_arr size = blocks; getNumBlocksAndThreads(size, blocks, threads); entrySearch_avg_entries_kernel<<<dimGrid, dimBlock, smemSize>>>(d_maxarr, d_maxarr, size); getLastCudaError("Kernel execution failed."); cudaThreadSynchronize(); } // grab final answer real max; checkCudaErrors(cudaMemcpy(&max, d_maxarr, sizeof(real), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_maxarr)); return max; } /* The main test function that creates a test array of random values and calls * find_min(...). It displays both the known result as maintained through the * CPU-generated array and the GPU test result. 
*/ /*int main(int argc, char** argv) { cudaDeviceProp deviceProp; deviceProp.major = 1; deviceProp.minor = 0; // force use of device number zero int dev = 0; checkCudaErrors(cudaSetDevice(dev)); checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev)); printf("\nUsing device %d: \"%s\"\n", dev, deviceProp.name); checkCudaErrors(cudaSetDevice(dev)); // number of elements to reduce int size = pow(2, 23); printf("\nSearching %d randomly-generated elements", size); printf(" for the minimum value...\n"); // create random input data on CPU real* h_arr = (real*) malloc(size * sizeof(real)); cpumem += size * sizeof(real); real* minmax = (real*) malloc(2 * sizeof(real)); cpumem += 2 * sizeof(real); randArrGen(size, h_arr, minmax); // load host data to device int numBlocks = 0; int numThreads = 0; getNumBlocksAndThreads(size, numBlocks, numThreads); unsigned int inbytes = size * sizeof(real); real* d_iarr = NULL; checkCudaErrors(cudaMalloc((void**)&d_iarr, inbytes)); gpumem += in_bytes; checkCudaErrors(cudaMemcpy(d_iarr, h_arr, inbytes, cudaMemcpyHostToDevice)); // run test real gpu_result = 0; int numcount = 100; // the number of iterations to test // timing stuff cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); real elapsedTime; // run GPU test printf("\nComputing GPU result %d times...\n", numcount); cudaEventRecord(start, 0); for(int count = 0; count < numcount; count++) { gpu_result = find_min(size, d_iarr); } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); printf("...completed in %0.0f ms.\n", elapsedTime); // run CPU test printf("\nComputing CPU result %d times...\n", numcount); cudaEventRecord(start, 0); real cpu_result = size * 2; for(int count = 0; count < numcount; count++) { for(int z = 0; z < size; z++) { if(h_arr[z] < cpu_result) { cpu_result = h_arr[z]; } } } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); printf("...completed in %0.0f ms.\n", elapsedTime); // final minimum values printf("\nKnown result = %0.0f\n", minmax[0]); printf("CPU result = %0.0f\n", cpu_result); printf("GPU result = %0.0f\n", gpu_result); printf("\nSearching %d randomly-generated elements", size); printf(" for the maximum value...\n"); // run GPU test printf("\nComputing GPU result %d times...\n", numcount); cudaEventRecord(start, 0); for(int count = 0; count < numcount; count++) { gpu_result = find_max(size, d_iarr); } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); printf("...completed in %0.0f ms.\n", elapsedTime); // run CPU test printf("\nComputing CPU result %d times...\n", numcount); cudaEventRecord(start, 0); cpu_result = -size * 2; for(int count = 0; count < numcount; count++) { for(int z = 0; z < size; z++) { if(h_arr[z] > cpu_result) { cpu_result = h_arr[z]; } } } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); printf("...completed in %0.0f ms.\n", elapsedTime); // final maximum values printf("\nKnown result = %0.0f\n", minmax[1]); printf("CPU result = %0.0f\n", cpu_result); printf("GPU result = %0.0f\n", gpu_result); // clean up checkCudaErrors(cudaFree(d_iarr)); free(h_arr); free(minmax); cudaThreadExit(); cutilExit(argc, argv); } */
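/* A minimal sketch of the per-block shared-memory reduction that the host wrappers above
   drive. It is not the project's entrySearch_max_kernel (that kernel is defined elsewhere);
   it only illustrates the contract the wrappers assume: each block writes one partial
   maximum to d_odata[blockIdx.x], dynamic shared memory is blockDim.x * sizeof(real), and
   blockDim.x is a power of two. "real_sketch" stands in for the file's real type, assumed
   to be float; the kernel name is hypothetical. */
#include <cfloat>
typedef float real_sketch;

__global__ void block_max_sketch(const real_sketch *d_idata, real_sketch *d_odata, int n)
{
    extern __shared__ real_sketch smax[];
    int tid = threadIdx.x;
    int i   = blockIdx.x * blockDim.x + tid;

    smax[tid] = (i < n) ? d_idata[i] : -FLT_MAX;    // out-of-range threads contribute a sentinel
    __syncthreads();

    for (int s = blockDim.x / 2; s > 0; s >>= 1) {  // tree reduction in shared memory
        if (tid < s && smax[tid + s] > smax[tid])
            smax[tid] = smax[tid + s];
        __syncthreads();
    }
    if (tid == 0)
        d_odata[blockIdx.x] = smax[0];              // one partial maximum per block
}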
e0e1719fbcc2ae3dec5317bbbcff2d23a6007b2a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdlib.h> #include<stdio.h> #include<time.h> using namespace std; __global__ void mul(int *d_in1,int *d_in2,int *d_out){ int idx = threadIdx.x; d_out[idx] = d_in1[idx]*d_in2[idx]; } __global__ void reduce_section(int *d_in,int &d_out,const int start,const int end){ int idx = threadIdx.x; extern __shared__ int s_out[]; s_out[idx] = d_in[start+idx]; __syncthreads(); int out; for(int step=1;step<end-start;step*=2){ if(idx-step>=0){ out = s_out[idx]+s_out[idx-1]; } __syncthreads(); if(idx-step>=0) s_out[idx] = out; __syncthreads(); } if(idx == end-start-1) d_out = s_out[idx]; } int main(){ const int size = 6; int value[size] = {1,2,3,4,5,6}; int cols[size] = {0,2,1,0,1,0}; int rows[5] = {0,2,3,5,size};// the last element records the number of nonzero elements int mul_val[3] = {1,2,3}; int mul_valn[size];// right-hand operand matched to each nonzero element printf("left matrix:\n"); int flag = 0; for(int i=0;i<4;i++){ for(int i=0;i<3;i++){ if(i == cols[flag]) printf("%d ",value[flag++]); else printf("0 "); } printf("\n"); } printf("\nright matrix:\n"); for(int i=0;i<3;i++){ printf("%d\n",mul_val[i]); } printf("\n"); for(int i=0;i<size;i++){ mul_valn[i] = mul_val[cols[i]]; } int *h_in1 = value; int *h_in2 = mul_valn; int *h_out; int *d_in1; int *d_in2; int *d_out_mid; int *d_out; h_out = (int *)malloc(4*sizeof(int)); hipMalloc((int **)&d_in1,size*sizeof(int)); hipMalloc((int **)&d_in2,size*sizeof(int)); hipMalloc((int **)&d_out,4*sizeof(int)); hipMalloc((int **)&d_out_mid,size*sizeof(int)); hipMemcpy(d_in1,h_in1,size*sizeof(int),hipMemcpyHostToDevice); hipMemcpy(d_in2,h_in2,size*sizeof(int),hipMemcpyHostToDevice); dim3 thread(size); hipLaunchKernelGGL(( mul), dim3(1),dim3(thread), 0, 0, d_in1,d_in2,d_out_mid); for(int i=1;i<5;i++){ int sizenew = rows[i]-rows[i-1]; dim3 threadnew(sizenew); hipLaunchKernelGGL(( reduce_section), dim3(1),dim3(threadnew),sizenew, 0, d_out_mid,d_out[i-1],rows[i-1],rows[i]); } hipMemcpy(h_out,d_out,4*sizeof(int),hipMemcpyDeviceToHost); printf("result:\n"); for(int i=0;i<4;i++){ printf("%d\n",h_out[i]); } printf("\n"); free(h_out); hipFree(d_in1); hipFree(d_in2); hipFree(d_out_mid); hipFree(d_out); return 0; }
e0e1719fbcc2ae3dec5317bbbcff2d23a6007b2a.cu
#include<stdlib.h> #include<stdio.h> #include<time.h> using namespace std; __global__ void mul(int *d_in1,int *d_in2,int *d_out){ int idx = threadIdx.x; d_out[idx] = d_in1[idx]*d_in2[idx]; } __global__ void reduce_section(int *d_in,int &d_out,const int start,const int end){ int idx = threadIdx.x; extern __shared__ int s_out[]; s_out[idx] = d_in[start+idx]; __syncthreads(); int out; for(int step=1;step<end-start;step*=2){ if(idx-step>=0){ out = s_out[idx]+s_out[idx-1]; } __syncthreads(); if(idx-step>=0) s_out[idx] = out; __syncthreads(); } if(idx == end-start-1) d_out = s_out[idx]; } int main(){ const int size = 6; int value[size] = {1,2,3,4,5,6}; int cols[size] = {0,2,1,0,1,0}; int rows[5] = {0,2,3,5,size};//最后一个元素记录非零元素个数 int mul_val[3] = {1,2,3}; int mul_valn[size];//非零元素相乘的对应元素 printf("左矩阵:\n"); int flag = 0; for(int i=0;i<4;i++){ for(int i=0;i<3;i++){ if(i == cols[flag]) printf("%d ",value[flag++]); else printf("0 "); } printf("\n"); } printf("\n右矩阵:\n"); for(int i=0;i<3;i++){ printf("%d\n",mul_val[i]); } printf("\n"); for(int i=0;i<size;i++){ mul_valn[i] = mul_val[cols[i]]; } int *h_in1 = value; int *h_in2 = mul_valn; int *h_out; int *d_in1; int *d_in2; int *d_out_mid; int *d_out; h_out = (int *)malloc(4*sizeof(int)); cudaMalloc((int **)&d_in1,size*sizeof(int)); cudaMalloc((int **)&d_in2,size*sizeof(int)); cudaMalloc((int **)&d_out,4*sizeof(int)); cudaMalloc((int **)&d_out_mid,size*sizeof(int)); cudaMemcpy(d_in1,h_in1,size*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(d_in2,h_in2,size*sizeof(int),cudaMemcpyHostToDevice); dim3 thread(size); mul<<<1,thread>>>(d_in1,d_in2,d_out_mid); for(int i=1;i<5;i++){ int sizenew = rows[i]-rows[i-1]; dim3 threadnew(sizenew); reduce_section<<<1,threadnew,sizenew>>>(d_out_mid,d_out[i-1],rows[i-1],rows[i]); } cudaMemcpy(h_out,d_out,4*sizeof(int),cudaMemcpyDeviceToHost); printf("结果:\n"); for(int i=0;i<4;i++){ printf("%d\n",h_out[i]); } printf("\n"); free(h_out); cudaFree(d_in1); cudaFree(d_in2); cudaFree(d_out_mid); cudaFree(d_out); return 0; }
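/* A hedged sketch (not part of either file above) of how the per-row reduce_section
   launch could be written so it runs as intended: the kernel takes an output pointer
   rather than "int &d_out" (d_out[i-1] dereferences device memory on the host), the
   dynamic shared-memory size is passed in bytes, and the Hillis-Steele update reads the
   element "step" positions back instead of always idx-1. Names mirror the originals but
   the _sketch kernel itself is illustrative. */
__global__ void reduce_section_sketch(const int *d_in, int *d_out,
                                      const int start, const int end)
{
    int idx = threadIdx.x;
    extern __shared__ int s_out[];
    s_out[idx] = d_in[start + idx];
    __syncthreads();
    for (int step = 1; step < end - start; step *= 2) {
        int v = 0;
        if (idx - step >= 0) v = s_out[idx] + s_out[idx - step];  // inclusive scan step
        __syncthreads();
        if (idx - step >= 0) s_out[idx] = v;
        __syncthreads();
    }
    if (idx == end - start - 1) *d_out = s_out[idx];              // segment (row) sum
}
/* Corresponding host-side launch for row i (sketch):
   int sizenew = rows[i] - rows[i - 1];
   reduce_section_sketch<<<1, sizenew, sizenew * sizeof(int)>>>(
       d_out_mid, d_out + (i - 1), rows[i - 1], rows[i]); */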
6b4d54843c1af8737054ad293fa4f7de1efd7663.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<math.h> #include<time.h> #include<stdexcept> #include<iostream> #include<cstdlib> //for abs(x) #include<stdio.h> using namespace std; __global__ void kernel_multiplication( int* A, int* B, int* C,int N,int M); int main() { int NUMBER_OF_ELEMENTS; int VECTOR_SIZE; cout<<"Enter the vector size:"; cin>>VECTOR_SIZE; NUMBER_OF_ELEMENTS=VECTOR_SIZE; int SIZE = NUMBER_OF_ELEMENTS*sizeof(int); hipEvent_t start,end,start1,end1; int* hostA = (int*)malloc(VECTOR_SIZE*sizeof(int)); int* hostB = (int*)malloc(SIZE*VECTOR_SIZE*sizeof(int)); int* hostC = (int*)malloc(VECTOR_SIZE*sizeof(int)); int* deviceA,*deviceB,*deviceC; srand(time(0)); int i,j; cout<<"\nVector:\n"; for(i=0;i<VECTOR_SIZE;i++) { hostA[i] = rand()%VECTOR_SIZE; cout<<hostA[i]<<"\t"; } //initialize matrix by random elements for(i=0;i<NUMBER_OF_ELEMENTS;i++) { for(j=0;j<VECTOR_SIZE;j++) { hostB[i*VECTOR_SIZE+j] = rand()%VECTOR_SIZE; } } cout<<"\nMatrix=\n"; for(i=0;i<NUMBER_OF_ELEMENTS;i++) { for(j=0;j<VECTOR_SIZE;j++) { cout<<hostB[i*VECTOR_SIZE+j]<<"\t"; } cout<<"\n"; } hipMalloc(&deviceA,VECTOR_SIZE*sizeof(int)); hipMalloc(&deviceB,NUMBER_OF_ELEMENTS*VECTOR_SIZE*sizeof(int)); hipMalloc(&deviceC,VECTOR_SIZE*sizeof(int)); hipEventCreate(&start); hipEventCreate(&end); hipEventCreate(&start1); hipEventCreate(&end1); hipEventRecord(start); hipMemcpy(deviceA,hostA,VECTOR_SIZE*sizeof(int),hipMemcpyHostToDevice); hipMemcpy(deviceB,hostB,SIZE*VECTOR_SIZE,hipMemcpyHostToDevice); hipLaunchKernelGGL(( kernel_multiplication), dim3(NUMBER_OF_ELEMENTS),dim3(1), 0, 0, deviceA,deviceB,deviceC,NUMBER_OF_ELEMENTS,VECTOR_SIZE); hipDeviceSynchronize(); hipMemcpy(hostC,deviceC,VECTOR_SIZE*sizeof(int),hipMemcpyDeviceToHost); hipEventRecord(end); hipEventSynchronize(end); float t=0; hipEventElapsedTime(&t,start,end); hipFree(deviceA); hipFree(deviceB); hipFree(deviceC); double error = 0; int* answer = (int*) malloc(VECTOR_SIZE*sizeof(int)); hipEventRecord(start1); for(int i=0;i<NUMBER_OF_ELEMENTS;i++) { int sum = 0; for(int j=0;j<VECTOR_SIZE;j++) { sum += hostA[j]*hostB[j*VECTOR_SIZE+i]; } answer[i] = sum; } for(int k=0;k<VECTOR_SIZE;k++) { cout<<k<<")"<< "Expected value = "<<answer[k]<<" Actual value = "<<hostC[k]<<"\n"; error += double(abs(answer[k]-hostC[k])); } error=sqrt(error); cout<<"error = "<<error<<"\n"; hipEventRecord(end1); hipEventSynchronize(end1); float t1=0; hipEventElapsedTime(&t1,start1,end1); cout<<"\nSequential time="<<t1; cout<<"\nParallel time="<<t<<endl; delete[] hostA; delete[] hostB; delete[] hostC; return hipDeviceSynchronize(); } __global__ void kernel_multiplication( int* A, int* B, int* C, int N,int M) { int index = threadIdx.x + blockIdx.x * blockDim.x; int sum = 0; if(index<N) { for(int i=0;i<M;i++) sum+=A[i]*B[(i*M)+index]; C[index] = sum; } }
6b4d54843c1af8737054ad293fa4f7de1efd7663.cu
#include<math.h> #include<time.h> #include<stdexcept> #include<iostream> #include<cstdlib> //for abs(x) #include<stdio.h> using namespace std; __global__ void kernel_multiplication( int* A, int* B, int* C,int N,int M); int main() { int NUMBER_OF_ELEMENTS; int VECTOR_SIZE; cout<<"Enter the vector size:"; cin>>VECTOR_SIZE; NUMBER_OF_ELEMENTS=VECTOR_SIZE; int SIZE = NUMBER_OF_ELEMENTS*sizeof(int); cudaEvent_t start,end,start1,end1; int* hostA = (int*)malloc(VECTOR_SIZE*sizeof(int)); int* hostB = (int*)malloc(SIZE*VECTOR_SIZE*sizeof(int)); int* hostC = (int*)malloc(VECTOR_SIZE*sizeof(int)); int* deviceA,*deviceB,*deviceC; srand(time(0)); int i,j; cout<<"\nVector:\n"; for(i=0;i<VECTOR_SIZE;i++) { hostA[i] = rand()%VECTOR_SIZE; cout<<hostA[i]<<"\t"; } //initialize matrix by random elements for(i=0;i<NUMBER_OF_ELEMENTS;i++) { for(j=0;j<VECTOR_SIZE;j++) { hostB[i*VECTOR_SIZE+j] = rand()%VECTOR_SIZE; } } cout<<"\nMatrix=\n"; for(i=0;i<NUMBER_OF_ELEMENTS;i++) { for(j=0;j<VECTOR_SIZE;j++) { cout<<hostB[i*VECTOR_SIZE+j]<<"\t"; } cout<<"\n"; } cudaMalloc(&deviceA,VECTOR_SIZE*sizeof(int)); cudaMalloc(&deviceB,NUMBER_OF_ELEMENTS*VECTOR_SIZE*sizeof(int)); cudaMalloc(&deviceC,VECTOR_SIZE*sizeof(int)); cudaEventCreate(&start); cudaEventCreate(&end); cudaEventCreate(&start1); cudaEventCreate(&end1); cudaEventRecord(start); cudaMemcpy(deviceA,hostA,VECTOR_SIZE*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(deviceB,hostB,SIZE*VECTOR_SIZE,cudaMemcpyHostToDevice); kernel_multiplication<<<NUMBER_OF_ELEMENTS,1>>>(deviceA,deviceB,deviceC,NUMBER_OF_ELEMENTS,VECTOR_SIZE); cudaDeviceSynchronize(); cudaMemcpy(hostC,deviceC,VECTOR_SIZE*sizeof(int),cudaMemcpyDeviceToHost); cudaEventRecord(end); cudaEventSynchronize(end); float t=0; cudaEventElapsedTime(&t,start,end); cudaFree(deviceA); cudaFree(deviceB); cudaFree(deviceC); double error = 0; int* answer = (int*) malloc(VECTOR_SIZE*sizeof(int)); cudaEventRecord(start1); for(int i=0;i<NUMBER_OF_ELEMENTS;i++) { int sum = 0; for(int j=0;j<VECTOR_SIZE;j++) { sum += hostA[j]*hostB[j*VECTOR_SIZE+i]; } answer[i] = sum; } for(int k=0;k<VECTOR_SIZE;k++) { cout<<k<<")"<< "Expected value = "<<answer[k]<<" Actual value = "<<hostC[k]<<"\n"; error += double(abs(answer[k]-hostC[k])); } error=sqrt(error); cout<<"error = "<<error<<"\n"; cudaEventRecord(end1); cudaEventSynchronize(end1); float t1=0; cudaEventElapsedTime(&t1,start1,end1); cout<<"\nSequential time="<<t1; cout<<"\nParallel time="<<t<<endl; delete[] hostA; delete[] hostB; delete[] hostC; return cudaDeviceSynchronize(); } __global__ void kernel_multiplication( int* A, int* B, int* C, int N,int M) { int index = threadIdx.x + blockIdx.x * blockDim.x; int sum = 0; if(index<N) { for(int i=0;i<M;i++) sum+=A[i]*B[(i*M)+index]; C[index] = sum; } }
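/* Note on the launch above: kernel_multiplication<<<NUMBER_OF_ELEMENTS,1>>> starts one
   thread per block, so blockDim.x is 1 and every block computes a full dot product alone.
   A hedged sketch of the more conventional shape for the same kernel, matching its
   index = threadIdx.x + blockIdx.x * blockDim.x computation (BLOCK is an assumed size):
       const int BLOCK = 256;
       int grid = (NUMBER_OF_ELEMENTS + BLOCK - 1) / BLOCK;   // ceil(N / BLOCK)
       kernel_multiplication<<<grid, BLOCK>>>(deviceA, deviceB, deviceC,
                                              NUMBER_OF_ELEMENTS, VECTOR_SIZE);
   (hostA/hostB/hostC come from malloc, so free() is the matching deallocation rather
   than delete[].) */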
44823fb798ebb4e399616f1c10241baf41aeb453.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @precisions normal z -> c d s */ #include "common_magma.h" __global__ void magma_zlobpcg_shift_kernel( magma_int_t num_rows, magma_int_t num_vecs, magma_int_t shift, magmaDoubleComplex *x ){ int idx = threadIdx.x ; // thread in row int row = blockIdx.y * gridDim.x + blockIdx.x; // global block index if( row<num_rows){ magmaDoubleComplex tmp = x[idx]; __syncthreads(); if( idx > shift-1 ){ idx-=shift; x[idx] = tmp; __syncthreads(); } } } /** Purpose ------- For a Block-LOBPCG, the set of residuals (entries consecutive in memory) shrinks and the vectors are shifted in case shift residuals drop below threshold. The memory layout of x is: / x1[0] x2[0] x3[0] \ | x1[1] x2[1] x3[1] | x = | x1[2] x2[2] x3[2] | = x1[0] x2[0] x3[0] x1[1] x2[1] x3[1] x1[2] . | x1[3] x2[3] x3[3] | \ x1[4] x2[4] x3[4] / Arguments --------- @param num_rows magma_int_t number of rows @param num_vecs magma_int_t number of vectors @param shift magma_int_t shift number @param x magmaDoubleComplex* input/output vector x @ingroup magmasparse_zaux ********************************************************************/ extern "C" magma_int_t magma_zlobpcg_shift( magma_int_t num_rows, magma_int_t num_vecs, magma_int_t shift, magmaDoubleComplex *x ){ magma_int_t num_threads = num_vecs; // every thread handles one row containing the if ( num_threads > 1024 ) printf("error: too many threads requested.\n"); int Ms = num_threads * sizeof( magmaDoubleComplex ); if ( Ms > 1024*8 ) printf("error: too much shared memory requested.\n"); dim3 block( num_threads, 1, 1 ); int dimgrid1 = sqrt(num_rows); int dimgrid2 = (num_rows + dimgrid1 -1 ) / dimgrid1; dim3 grid( dimgrid1, dimgrid2, 1); hipLaunchKernelGGL(( magma_zlobpcg_shift_kernel), dim3(grid), dim3(block), Ms, magma_stream , num_rows, num_vecs, shift, x ); return MAGMA_SUCCESS; }
44823fb798ebb4e399616f1c10241baf41aeb453.cu
/* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @precisions normal z -> c d s */ #include "common_magma.h" __global__ void magma_zlobpcg_shift_kernel( magma_int_t num_rows, magma_int_t num_vecs, magma_int_t shift, magmaDoubleComplex *x ){ int idx = threadIdx.x ; // thread in row int row = blockIdx.y * gridDim.x + blockIdx.x; // global block index if( row<num_rows){ magmaDoubleComplex tmp = x[idx]; __syncthreads(); if( idx > shift-1 ){ idx-=shift; x[idx] = tmp; __syncthreads(); } } } /** Purpose ------- For a Block-LOBPCG, the set of residuals (entries consecutive in memory) shrinks and the vectors are shifted in case shift residuals drop below threshold. The memory layout of x is: / x1[0] x2[0] x3[0] \ | x1[1] x2[1] x3[1] | x = | x1[2] x2[2] x3[2] | = x1[0] x2[0] x3[0] x1[1] x2[1] x3[1] x1[2] . | x1[3] x2[3] x3[3] | \ x1[4] x2[4] x3[4] / Arguments --------- @param num_rows magma_int_t number of rows @param num_vecs magma_int_t number of vectors @param shift magma_int_t shift number @param x magmaDoubleComplex* input/output vector x @ingroup magmasparse_zaux ********************************************************************/ extern "C" magma_int_t magma_zlobpcg_shift( magma_int_t num_rows, magma_int_t num_vecs, magma_int_t shift, magmaDoubleComplex *x ){ magma_int_t num_threads = num_vecs; // every thread handles one row containing the if ( num_threads > 1024 ) printf("error: too many threads requested.\n"); int Ms = num_threads * sizeof( magmaDoubleComplex ); if ( Ms > 1024*8 ) printf("error: too much shared memory requested.\n"); dim3 block( num_threads, 1, 1 ); int dimgrid1 = sqrt(num_rows); int dimgrid2 = (num_rows + dimgrid1 -1 ) / dimgrid1; dim3 grid( dimgrid1, dimgrid2, 1); magma_zlobpcg_shift_kernel<<< grid, block, Ms, magma_stream >>> ( num_rows, num_vecs, shift, x ); return MAGMA_SUCCESS; }
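/* A plain host-side sketch (illustration only, not MAGMA code) of the per-row shift the
   kernel above performs on the row-major layout described in its comment: the first
   `shift` residual columns are dropped and the remaining columns slide to the front.
   T stands in for magmaDoubleComplex; the function name is hypothetical. */
template <typename T>
void shift_row_sketch(T *row, int num_vecs, int shift)
{
    for (int j = shift; j < num_vecs; ++j)
        row[j - shift] = row[j];   // column j becomes column j - shift
}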
ef89b072dfddf351bd729fe2e99521c94603c9e0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <unittest/unittest.h> #include <thrust/inner_product.h> #include <thrust/execution_policy.h> #ifdef THRUST_TEST_DEVICE_SIDE template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename T, typename Iterator3> __global__ void inner_product_kernel(ExecutionPolicy exec, Iterator1 first1, Iterator1 last1, Iterator2 first2, T init, Iterator3 result) { *result = thrust::inner_product(exec, first1, last1, first2, init); } template<typename ExecutionPolicy> void TestInnerProductDevice(ExecutionPolicy exec) { size_t n = 1000; thrust::host_vector<int> h_v1 = unittest::random_integers<int>(n); thrust::host_vector<int> h_v2 = unittest::random_integers<int>(n); thrust::device_vector<int> d_v1 = h_v1; thrust::device_vector<int> d_v2 = h_v2; thrust::device_vector<int> result(1); int init = 13; int expected = thrust::inner_product(h_v1.begin(), h_v1.end(), h_v2.begin(), init); hipLaunchKernelGGL(( inner_product_kernel), dim3(1),dim3(1), 0, 0, exec, d_v1.begin(), d_v1.end(), d_v2.begin(), init, result.begin()); { hipError_t const err = hipDeviceSynchronize(); ASSERT_EQUAL(hipSuccess, err); } ASSERT_EQUAL(expected, result[0]); } void TestInnerProductDeviceSeq() { TestInnerProductDevice(thrust::seq); }; DECLARE_UNITTEST(TestInnerProductDeviceSeq); void TestInnerProductDeviceDevice() { TestInnerProductDevice(thrust::device); }; DECLARE_UNITTEST(TestInnerProductDeviceDevice); #endif void TestInnerProductCudaStreams() { thrust::device_vector<int> v1(3); thrust::device_vector<int> v2(3); v1[0] = 1; v1[1] = -2; v1[2] = 3; v2[0] = -4; v2[1] = 5; v2[2] = 6; hipStream_t s; hipStreamCreate(&s); int init = 3; int result = thrust::inner_product(thrust::hip::par.on(s), v1.begin(), v1.end(), v2.begin(), init); ASSERT_EQUAL(result, 7); hipStreamDestroy(s); } DECLARE_UNITTEST(TestInnerProductCudaStreams);
ef89b072dfddf351bd729fe2e99521c94603c9e0.cu
#include <unittest/unittest.h> #include <thrust/inner_product.h> #include <thrust/execution_policy.h> #ifdef THRUST_TEST_DEVICE_SIDE template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename T, typename Iterator3> __global__ void inner_product_kernel(ExecutionPolicy exec, Iterator1 first1, Iterator1 last1, Iterator2 first2, T init, Iterator3 result) { *result = thrust::inner_product(exec, first1, last1, first2, init); } template<typename ExecutionPolicy> void TestInnerProductDevice(ExecutionPolicy exec) { size_t n = 1000; thrust::host_vector<int> h_v1 = unittest::random_integers<int>(n); thrust::host_vector<int> h_v2 = unittest::random_integers<int>(n); thrust::device_vector<int> d_v1 = h_v1; thrust::device_vector<int> d_v2 = h_v2; thrust::device_vector<int> result(1); int init = 13; int expected = thrust::inner_product(h_v1.begin(), h_v1.end(), h_v2.begin(), init); inner_product_kernel<<<1,1>>>(exec, d_v1.begin(), d_v1.end(), d_v2.begin(), init, result.begin()); { cudaError_t const err = cudaDeviceSynchronize(); ASSERT_EQUAL(cudaSuccess, err); } ASSERT_EQUAL(expected, result[0]); } void TestInnerProductDeviceSeq() { TestInnerProductDevice(thrust::seq); }; DECLARE_UNITTEST(TestInnerProductDeviceSeq); void TestInnerProductDeviceDevice() { TestInnerProductDevice(thrust::device); }; DECLARE_UNITTEST(TestInnerProductDeviceDevice); #endif void TestInnerProductCudaStreams() { thrust::device_vector<int> v1(3); thrust::device_vector<int> v2(3); v1[0] = 1; v1[1] = -2; v1[2] = 3; v2[0] = -4; v2[1] = 5; v2[2] = 6; cudaStream_t s; cudaStreamCreate(&s); int init = 3; int result = thrust::inner_product(thrust::cuda::par.on(s), v1.begin(), v1.end(), v2.begin(), init); ASSERT_EQUAL(result, 7); cudaStreamDestroy(s); } DECLARE_UNITTEST(TestInnerProductCudaStreams);
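/* The stream test above asserts a concrete value; spelled out, with init = 3:
   inner_product(v1, v2, init) = init + sum_i v1[i]*v2[i]
                               = 3 + (1*(-4) + (-2)*5 + 3*6) = 3 + 4 = 7.
   A minimal host-only sketch of the same call, without a stream or execution policy:
       thrust::host_vector<int> a(3), b(3);
       a[0] = 1; a[1] = -2; a[2] = 3;
       b[0] = -4; b[1] = 5; b[2] = 6;
       int r = thrust::inner_product(a.begin(), a.end(), b.begin(), 3);  // r == 7
*/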
36ffc719f8d2b0970ad3b000f9dc0534a13904d2.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <rocblas.h> #include <cusolverDn.h> #include "../cublasHelper.h" #include <exceptions/cuda_exception.h> #include <helpers/logger.h> #include <execution/AffinityManager.h> #include "config.h" #ifdef HAVE_CUDNN #include <cudnn.h> #endif namespace sd { std::mutex CublasHelper::_mutex; static void* handle_() { auto _handle = new hipblasHandle_t(); auto status = hipblasCreate(_handle); // initialize CUBLAS context if (status != HIPBLAS_STATUS_SUCCESS) throw cuda_exception::build("cuBLAS handle creation failed !", status); return reinterpret_cast<void *>(_handle); } static void* solver_() { auto cusolverH = new hipsolverDnHandle_t(); auto status = hipsolverDnCreate(cusolverH); if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("cuSolver handle creation failed !", status); return cusolverH; } static void* cudnn_() { #ifdef HAVE_CUDNN auto cudnnH = new cudnnHandle_t(); auto status = cudnnCreate(cudnnH); if (status != CUDNN_STATUS_SUCCESS) throw cuda_exception::build("cuDNN handle creation failed !", status); return cudnnH; #endif return nullptr; } static void destroyHandle_(void* handle) { auto ch = reinterpret_cast<hipblasHandle_t *>(handle); auto status = hipblasDestroy(*ch); if (status != HIPBLAS_STATUS_SUCCESS) throw cuda_exception::build("cuBLAS handle destruction failed !", status); delete ch; } CublasHelper::CublasHelper() { //nd4j_printf("Initializing cuBLAS\n",""); auto numDevices = AffinityManager::numberOfDevices(); auto currentDevice = AffinityManager::currentDeviceId(); _cache.resize(numDevices); _solvers.resize(numDevices); _cudnn.resize(numDevices); for (int e = 0; e < numDevices; e++) { AffinityManager::setCurrentNativeDevice(e); _cache[e] = handle_(); _solvers[e] = solver_(); _cudnn[e] = cudnn_(); } // don't forget to restore back original device AffinityManager::setCurrentNativeDevice(currentDevice); } CublasHelper::~CublasHelper() { auto numDevices = AffinityManager::numberOfDevices(); for (int e = 0; e < numDevices; e++) destroyHandle_(_cache[e]); } CublasHelper& CublasHelper::getInstance() { static CublasHelper instance; return instance; } void* CublasHelper::cudnn() { auto deviceId = AffinityManager::currentDeviceId(); if (deviceId < 0 || deviceId > _cudnn.size()) throw cuda_exception::build("requested deviceId doesn't look valid", deviceId); return _cudnn[deviceId]; } void* CublasHelper::handle() { auto deviceId = AffinityManager::currentDeviceId(); return handle(deviceId); } void* CublasHelper::solver() { auto deviceId = AffinityManager::currentDeviceId(); if (deviceId < 0 || deviceId > _solvers.size()) throw cuda_exception::build("requested deviceId doesn't look valid", deviceId); return _solvers[deviceId]; } void* 
CublasHelper::handle(int deviceId) { if (deviceId < 0 || deviceId > _cache.size()) throw cuda_exception::build("requested deviceId doesn't look valid", deviceId); return _cache[deviceId]; } }
36ffc719f8d2b0970ad3b000f9dc0534a13904d2.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <cublas_v2.h> #include <cusolverDn.h> #include "../cublasHelper.h" #include <exceptions/cuda_exception.h> #include <helpers/logger.h> #include <execution/AffinityManager.h> #include "config.h" #ifdef HAVE_CUDNN #include <cudnn.h> #endif namespace sd { std::mutex CublasHelper::_mutex; static void* handle_() { auto _handle = new cublasHandle_t(); auto status = cublasCreate_v2(_handle); // initialize CUBLAS context if (status != CUBLAS_STATUS_SUCCESS) throw cuda_exception::build("cuBLAS handle creation failed !", status); return reinterpret_cast<void *>(_handle); } static void* solver_() { auto cusolverH = new cusolverDnHandle_t(); auto status = cusolverDnCreate(cusolverH); if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("cuSolver handle creation failed !", status); return cusolverH; } static void* cudnn_() { #ifdef HAVE_CUDNN auto cudnnH = new cudnnHandle_t(); auto status = cudnnCreate(cudnnH); if (status != CUDNN_STATUS_SUCCESS) throw cuda_exception::build("cuDNN handle creation failed !", status); return cudnnH; #endif return nullptr; } static void destroyHandle_(void* handle) { auto ch = reinterpret_cast<cublasHandle_t *>(handle); auto status = cublasDestroy_v2(*ch); if (status != CUBLAS_STATUS_SUCCESS) throw cuda_exception::build("cuBLAS handle destruction failed !", status); delete ch; } CublasHelper::CublasHelper() { //nd4j_printf("Initializing cuBLAS\n",""); auto numDevices = AffinityManager::numberOfDevices(); auto currentDevice = AffinityManager::currentDeviceId(); _cache.resize(numDevices); _solvers.resize(numDevices); _cudnn.resize(numDevices); for (int e = 0; e < numDevices; e++) { AffinityManager::setCurrentNativeDevice(e); _cache[e] = handle_(); _solvers[e] = solver_(); _cudnn[e] = cudnn_(); } // don't forget to restore back original device AffinityManager::setCurrentNativeDevice(currentDevice); } CublasHelper::~CublasHelper() { auto numDevices = AffinityManager::numberOfDevices(); for (int e = 0; e < numDevices; e++) destroyHandle_(_cache[e]); } CublasHelper& CublasHelper::getInstance() { static CublasHelper instance; return instance; } void* CublasHelper::cudnn() { auto deviceId = AffinityManager::currentDeviceId(); if (deviceId < 0 || deviceId > _cudnn.size()) throw cuda_exception::build("requested deviceId doesn't look valid", deviceId); return _cudnn[deviceId]; } void* CublasHelper::handle() { auto deviceId = AffinityManager::currentDeviceId(); return handle(deviceId); } void* CublasHelper::solver() { auto deviceId = AffinityManager::currentDeviceId(); if (deviceId < 0 || deviceId > _solvers.size()) throw cuda_exception::build("requested deviceId doesn't look valid", deviceId); return _solvers[deviceId]; } void* CublasHelper::handle(int deviceId) { if (deviceId < 0 || deviceId 
> _cache.size()) throw cuda_exception::build("requested deviceId doesn't look valid", deviceId); return _cache[deviceId]; } }
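/* A hedged usage sketch of the helper above (the cast and the stream call are assumptions,
   not part of the file): handle() returns the void* stored by handle_(), which points at a
   cublasHandle_t created for the device that is currently bound, so a caller could do:
       auto *h = reinterpret_cast<cublasHandle_t *>(sd::CublasHelper::getInstance().handle());
       cublasSetStream_v2(*h, stream);   // "stream" is an existing cudaStream_t
*/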
d1a3c6208b8d264646dcc4813fd578f7968b8e7f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <time.h> #include <hip/hip_runtime.h> #define ROW 773 #define COL 26 #define TRAIN_ROW 541 #define TEST_ROW 232 // define nodes #define INPUT_NODES 26 #define HIDDEN_NODES 10 #define OUTPUT_NODES 1 #define ALPHA 0.1 // Activation Functions __device__ float sigmoid_device(float x){ return 1/(1 + exp(-x)); } float sigmoid(float x){ return 1/(1 + exp(-x)); } __device__ float diff_Sigmoid(float x){ return x * (1 - x); } __global__ void cuda_forward_1(float* input, float* weight1, float* layer1, float* bias_layer1){ int ind_x = blockIdx.x * blockDim.x + threadIdx.x; if(ind_x < HIDDEN_NODES){ float act = 0.0; for(int j = 0; j < INPUT_NODES; j++){ act += input[j]*weight1[ind_x*INPUT_NODES + j] ; } layer1[ind_x] = sigmoid_device(act+ bias_layer1[ind_x]); } } __global__ void cuda_forward_2(float* weight2, float* layer1, float* layer2, float* bias_layer2){ int ind_x = blockIdx.x * blockDim.x + threadIdx.x; if(ind_x < OUTPUT_NODES){ float act = 0.0; for(int j = 0; j < HIDDEN_NODES; j++){ act += layer1[j]*weight2[ind_x* HIDDEN_NODES + j] ; } layer2[ind_x] = sigmoid_device(act+ bias_layer2[ind_x]); } } __global__ void cuda_backprop_out(float* d3, float *layer2, float *label){ int ind_x = blockIdx.x * blockDim.x + threadIdx.x; if(ind_x < OUTPUT_NODES){ float err = layer2[ind_x] - label[ind_x]; d3[ind_x] = err; } return; } __global__ void cuda_backprop_hidden(float* d2, float* layer1, float* weight2, float* d3){ int ind_x = blockIdx.x * blockDim.x + threadIdx.x; if(ind_x < HIDDEN_NODES){ float error_hidden = 0.0; for(int j = 0; j < OUTPUT_NODES; j++){ error_hidden += d3[j]*weight2[j*HIDDEN_NODES + ind_x]; } d2[ind_x] = error_hidden * (layer1[ind_x] * (1 - layer1[ind_x])); } } __global__ void update_weight2(float* weight2, float* layer1, float* d3){ int ind_x = blockIdx.x * blockDim.x + threadIdx.x; if(ind_x < OUTPUT_NODES){ for(int j = 0; j < HIDDEN_NODES; j++){ weight2[ind_x*HIDDEN_NODES + j] -= layer1[j]*d3[ind_x]*ALPHA; } } } __global__ void update_weight1(float* weight1, float* input, float* d2){ int ind_x = blockIdx.x * blockDim.x + threadIdx.x; if(ind_x < HIDDEN_NODES){ for(int j = 0; j < INPUT_NODES; j++){ weight1[ind_x*INPUT_NODES + j] -= input[j]*d2[ind_x]*ALPHA; } } } void predict(float *input_matrix, float *pred_arr, float *weight1, float *weight2, float layer1[HIDDEN_NODES], float layer2[OUTPUT_NODES]) { //this will be each extracted input row float input[COL]; //float output=0; // iterate through input matrix row by row, extracting each row for training for(int row = 0; row < TEST_ROW; row++){ for(int col = 0; col < COL; col++){ input[col] = input_matrix[row*COL + col]; } // FORWARD PROPAGATION: for(int i = 0; i < HIDDEN_NODES; i++){ float act = 0.0; for(int j = 0; j < INPUT_NODES; j++){ act += input[j]*weight1[i * INPUT_NODES + j]; } layer1[i] = sigmoid(act); } for(int i = 0; i < OUTPUT_NODES; i++){ float act = 0.0; for(int j = 0; j < HIDDEN_NODES; j++){ act += layer1[j]*weight2[i * HIDDEN_NODES + j]; } layer2[i] = sigmoid(act); } //store predictions in an array for(int i = 0; i < OUTPUT_NODES; i++){ if(layer2[i]>0.5){ pred_arr[row] = 1; } else{ pred_arr[row] = 0; } } } return; } float train_nn(float *input_matrix, float label[TRAIN_ROW], float *weight1, float *weight2, float layer1[HIDDEN_NODES], float layer2[OUTPUT_NODES], int p_epoch) { //this will be each extracted input row float input[COL]; for(int epoch=0; epoch < p_epoch; epoch++){ // 
iterate through input matrix row by row, extracting each row for training for(int row = 0; row < TRAIN_ROW; row++){ for(int col = 0; col < COL; col++){ input[col] = input_matrix[row*COL + col]; } //this is for one row instance of forward and backprop // FORWARD PROPAGATION: for(int i = 0; i < HIDDEN_NODES; i++){ float act = 0.0; for(int j = 0; j < INPUT_NODES; j++){ act += input[j]*weight1[i*INPUT_NODES + j]; } layer1[i] = sigmoid(act); } for(int i = 0; i < OUTPUT_NODES; i++){ float act = 0.0; for(int j = 0; j < HIDDEN_NODES; j++){ act += layer1[j]*weight2[i* HIDDEN_NODES + j]; } layer2[i] = sigmoid(act); } // BACKPROPAGATION: // calculate errors float d3[OUTPUT_NODES]; for(int i = 0; i < OUTPUT_NODES; i++){ float error_output = layer2[i] - label[row]; d3[i] = error_output; } float d2[HIDDEN_NODES]; for(int i = 0; i < HIDDEN_NODES; i++){ float error_hidden = 0.0; for(int j = 0; j < OUTPUT_NODES; j++){ error_hidden += d3[j]*weight2[j*HIDDEN_NODES + i]; } d2[i] = error_hidden * (layer1[i] * (1 - layer1[i])); } // update weights for(int i = 0; i < OUTPUT_NODES; i++){ for(int j = 0; j < HIDDEN_NODES; j++){ weight2[i*HIDDEN_NODES + j] -= layer1[j]*d3[i]*ALPHA; } } for(int i = 0; i < HIDDEN_NODES; i++){ for(int j = 0; j < INPUT_NODES; j++){ weight1[i*INPUT_NODES + j] -= input[j]*d2[i]*ALPHA; } } } } return 0; } void import_data(float *train_arr, float *train_y_arr, float *test_arr , float *test_y_arr){ FILE* str = fopen("train_data.csv", "r"); char line[1024]; int count = 0; while (fgets(line, 1024, str)) { char* tmp = strdup(line); char* c = strtok(tmp,","); //train_arr[count] = new float[1]; while(c != NULL){ train_arr[count] = (float)atof(c); count ++; c = strtok(NULL, ","); } free(tmp); } //IMPORT TRAINING LABELS FILE* str_y = fopen("train_y.csv", "r"); char line_y[1024]; int count_y = 0; while (fgets(line_y, 1024, str_y)) { char* tmp = strdup(line_y); char* c = strtok(tmp,","); while(c != NULL){ train_y_arr[count_y] = (float)atof(c); count_y ++; c = strtok(NULL, ","); } free(tmp); } //IMPORT TESTING DATA FILE* str_t = fopen("test_data.csv", "r"); char line_t[1024]; int count_t = 0; while (fgets(line_t, 1024, str_t)) { char* tmp = strdup(line_t); char* c = strtok(tmp,","); while(c != NULL){ test_arr[count_t] = (float)atof(c); count_t ++; c = strtok(NULL, ","); } free(tmp); } //IMPORT TEST LABELS FILE* str_ty = fopen("test_y.csv", "r"); char line_ty[1024]; int count_ty = 0; while (fgets(line_ty, 1024, str_ty)) { char* tmp = strdup(line_ty); char* c = strtok(tmp,","); while(c != NULL){ test_y_arr[count_ty] = (float)atof(c); count_ty ++; c = strtok(NULL, ","); } free(tmp); } } int main(int argc, char *argv[]){ float train_arr[TRAIN_ROW*COL]; float train_y_arr[TRAIN_ROW*1]; float test_arr[TEST_ROW*COL]; float test_y_arr[TEST_ROW*1]; float weight_layer1[HIDDEN_NODES*INPUT_NODES]; float weight_layer2[OUTPUT_NODES*HIDDEN_NODES]; float bias_layer1[HIDDEN_NODES]; float bias_layer2[OUTPUT_NODES]; float layer1[HIDDEN_NODES]; float layer2[OUTPUT_NODES]; float d3[OUTPUT_NODES]; float d2[HIDDEN_NODES]; float** train_arr_device = new float*[TRAIN_ROW]; float** train_arr_y_device = new float*[TRAIN_ROW]; float* weight1_device; float* weight2_device; float* layer1_device; float* layer2_device; float* d3_device; float* d2_device; float* bias_layer1_device; float* bias_layer2_device; hipDeviceReset(); float** train_final = new float* [TRAIN_ROW]; float** train_y_final = new float* [TRAIN_ROW]; float *output = (float *)malloc(sizeof(TRAIN_ROW*sizeof(float))); float *output_test = (float 
*)malloc(sizeof(TEST_ROW*sizeof(float))); //IMPORT TRAINING DATA import_data(train_arr, train_y_arr, test_arr, test_y_arr); for (size_t i = 0; i < TRAIN_ROW; i++) { train_final[i] = new float[COL]; train_y_final[i] = new float[COL]; for (size_t j = 0; j < COL; j++) { train_final[i][j] = train_arr[i*COL + j]; } for (size_t k = 0; k < 1; k++) { train_y_final[i][k] = train_y_arr[i]; } } // generate random weights and biases for(int i = 0; i < HIDDEN_NODES; i++){ for(int j = 0; j < INPUT_NODES; j++){ weight_layer1[i*INPUT_NODES + j] = ((double)rand())/((double)RAND_MAX); } } for(int i = 0; i < OUTPUT_NODES; i++){ for(int j = 0; j < HIDDEN_NODES; j++){ weight_layer2[i*HIDDEN_NODES + j] = ((double)rand())/((double)RAND_MAX); } } for(int i = 0; i < HIDDEN_NODES; i++){ bias_layer1[i] = ((double)rand())/((double)RAND_MAX); } for(int i = 0; i < OUTPUT_NODES; i++){ bias_layer2[i] = ((double)rand())/((double)RAND_MAX); } for (size_t i = 0; i < TRAIN_ROW; i++) { hipMalloc(&train_arr_device[i], sizeof(float)*COL); hipMemcpy(train_arr_device[i], train_final[i], sizeof(float)*COL, hipMemcpyHostToDevice); hipMalloc(&train_arr_y_device[i], sizeof(float)*1); hipMemcpy(train_arr_y_device[i], train_y_final[i], sizeof(float)*1, hipMemcpyHostToDevice); } //hipMalloc(&train_arr_y_device, sizeof(float)*TRAIN_ROW*1); //hipMemcpy(train_arr_y_device, train_y_arr, sizeof(float)*TRAIN_ROW*1, hipMemcpyHostToDevice); hipMalloc(&weight1_device, sizeof(float)*HIDDEN_NODES*INPUT_NODES); hipMemcpy(weight1_device, weight_layer1, sizeof(float)*HIDDEN_NODES*INPUT_NODES, hipMemcpyHostToDevice); hipMalloc(&weight2_device, sizeof(float)*OUTPUT_NODES*HIDDEN_NODES); hipMemcpy(weight2_device, weight_layer2, sizeof(float)*OUTPUT_NODES*HIDDEN_NODES, hipMemcpyHostToDevice); hipMalloc(&layer1_device, sizeof(float)*HIDDEN_NODES); hipMemcpy(layer1_device, layer1, sizeof(float)*HIDDEN_NODES, hipMemcpyHostToDevice); hipMalloc(&layer2_device, sizeof(float)*OUTPUT_NODES); hipMemcpy(layer2_device, layer2, sizeof(float)*OUTPUT_NODES, hipMemcpyHostToDevice); hipMalloc(&d3_device, sizeof(float)*OUTPUT_NODES); hipMemcpy(d3_device, d3, sizeof(float)*OUTPUT_NODES, hipMemcpyHostToDevice); hipMalloc(&d2_device, sizeof(float)*HIDDEN_NODES); hipMemcpy(d2_device, d2, sizeof(float)*HIDDEN_NODES, hipMemcpyHostToDevice); hipMalloc(&bias_layer1_device, sizeof(float)*HIDDEN_NODES); hipMemcpy(bias_layer1_device, bias_layer1, sizeof(float)*HIDDEN_NODES, hipMemcpyHostToDevice); hipMalloc(&bias_layer2_device, sizeof(float)*HIDDEN_NODES); hipMemcpy(bias_layer2_device, bias_layer2, sizeof(float)*HIDDEN_NODES, hipMemcpyHostToDevice); // NEURAL NETWORK //ceil(541/14) = 39 //ceil(26/14) = 2 dim3 dimGrid(39,2,1); dim3 dimBlock(14,14,1); /*printf("%s\n","Weight Layer 1:" ); for (size_t i = 0; i < HIDDEN_NODES; i++) { for (size_t j = 0; j < INPUT_NODES; j++) { printf("%f ",weight_layer1[i*INPUT_NODES + j] ); } printf("\n"); } printf("%s\n","Weight Layer 2:" ); for (size_t i = 0; i < OUTPUT_NODES; i++) { for (size_t j = 0; j < HIDDEN_NODES; j++) { printf("%f ",weight_layer2[i*HIDDEN_NODES + j] ); } printf("\n"); }*/ int epoch = 400; printf(" TRAINING WITH %d EPOCHS:\n__________________________________________________________________________\n__________________________________________________________________________\n\n", epoch); hipEvent_t beginLaunch, endLaunch; hipEventCreate(&beginLaunch); hipEventCreate(&endLaunch); hipEventRecord(beginLaunch,0); float mse_total; float mse_old = 100000; float mse_difference = 100000; float mse_abs = 10000; int max_epoch = 0; 
while(mse_abs > 0.0001 && max_epoch < epoch){ // //for (size_t i = 0; i < epoch; i++) { mse_total = 0.0; for (size_t j = 0; j < TRAIN_ROW; j++) { //TRAIN_ROW hipLaunchKernelGGL(( cuda_forward_1), dim3(dimBlock) , dim3(dimGrid), 0, 0, train_arr_device[j], weight1_device, layer1_device, bias_layer1_device); //hipMemcpy(layer1, layer1_device, sizeof(float)*HIDDEN_NODES, hipMemcpyDeviceToHost); //hipMemcpy(layer1_device, layer1, sizeof(float)*HIDDEN_NODES, hipMemcpyHostToDevice); //printf("Device Variable Copying:\t%s\n", hipGetErrorString(hipGetLastError())); hipLaunchKernelGGL(( cuda_forward_2), dim3(dimBlock) , dim3(dimGrid), 0, 0, weight2_device, layer1_device, layer2_device, bias_layer2_device); //hipMemcpy(layer2, layer2_device, sizeof(float)*OUTPUT_NODES, hipMemcpyDeviceToHost); //hipMemcpy(layer2_device, layer2, sizeof(float)*OUTPUT_NODES, hipMemcpyHostToDevice); //printf("Device Variable Copying:\t%s\n", hipGetErrorString(hipGetLastError())); hipLaunchKernelGGL(( cuda_backprop_out), dim3(dimBlock) , dim3(dimGrid), 0, 0, d3_device, layer2_device, train_arr_y_device[j]); hipMemcpy(d3, d3_device, sizeof(float)*OUTPUT_NODES, hipMemcpyDeviceToHost); hipMemcpy(d3_device, d3, sizeof(float)*OUTPUT_NODES, hipMemcpyHostToDevice); //printf("Device Variable Copying:\t%s\n", hipGetErrorString(hipGetLastError())); mse_total += abs(0.5*d3[0]*d3[0]); //printf("%f\n", d3[0]); hipLaunchKernelGGL(( cuda_backprop_hidden), dim3(dimBlock) , dim3(dimGrid), 0, 0, d2_device, layer1_device, weight2_device, d3_device); //hipMemcpy(d2, d2, sizeof(float)*HIDDEN_NODES, hipMemcpyDeviceToHost); //hipMemcpy(d2, d2, sizeof(float)*HIDDEN_NODES, hipMemcpyHostToDevice); //printf("Device Variable Copying:\t%s\n", hipGetErrorString(hipGetLastError())); hipLaunchKernelGGL(( update_weight2), dim3(dimBlock) , dim3(dimGrid), 0, 0, weight2_device, layer1_device, d3_device); //hipMemcpy(weight_layer2, weight2_device, sizeof(float)*OUTPUT_NODES*HIDDEN_NODES, hipMemcpyDeviceToHost); //hipMemcpy(weight2_device, weight_layer2, sizeof(float)*OUTPUT_NODES*HIDDEN_NODES, hipMemcpyHostToDevice); //printf("Device Variable Copying:\t%s\n", hipGetErrorString(hipGetLastError())); hipLaunchKernelGGL(( update_weight1), dim3(dimBlock) , dim3(dimGrid), 0, 0, weight1_device, train_arr_device[j], d2_device); //hipMemcpy(weight_layer1, weight1_device, sizeof(float)*HIDDEN_NODES*INPUT_NODES, hipMemcpyDeviceToHost); //hipMemcpy(weight1_device, weight_layer1, sizeof(float)*HIDDEN_NODES*INPUT_NODES, hipMemcpyHostToDevice); //printf("Device Variable Copying:\t%s\n", hipGetErrorString(hipGetLastError())); } printf("%f\n", mse_total); mse_difference = mse_old - mse_total; mse_abs = abs(mse_difference); mse_old = mse_total; max_epoch += 1; printf("MSE ABS DIFFERENCE FOR EPOCH: %f\n", mse_abs); } float mse_final = mse_total; hipEventRecord(endLaunch,0); hipEventSynchronize(endLaunch); hipMemcpy(weight_layer1, weight1_device, sizeof(float)*HIDDEN_NODES*INPUT_NODES, hipMemcpyDeviceToHost); hipMemcpy(weight_layer2, weight2_device, sizeof(float)*OUTPUT_NODES*HIDDEN_NODES, hipMemcpyDeviceToHost); float time_share = 0; hipEventElapsedTime(&time_share, beginLaunch, endLaunch); printf("The time taken to train with %d epochs is: %fms\n", max_epoch, time_share); printf("MSE FINAL: %f\n", mse_final); //printf("Device Variable Copying:\t%s\n", hipGetErrorString(hipGetLastError())); /*printf("%s\n","Weight Layer 1:" ); for (size_t i = 0; i < HIDDEN_NODES; i++) { for (size_t j = 0; j < INPUT_NODES; j++) { printf("%f ",weight_layer1[i*INPUT_NODES + j] ); } printf("\n"); } 
printf("%s\n","Weight Layer 2:" ); for (size_t i = 0; i < OUTPUT_NODES; i++) { for (size_t j = 0; j < HIDDEN_NODES; j++) { printf("%f ",weight_layer2[i*HIDDEN_NODES + j] ); } printf("\n"); }*/ predict(test_arr, output, weight_layer1, weight_layer2, layer1, layer2); int count_final=0; for(int i = 0; i < TEST_ROW; i++){ //printf("predicted %f\n", output[i]); //printf("actual %f\n", train_y_arr[i]); if(output[i] == test_y_arr[i]){ count_final +=1; } } float prediction = (float)count_final/TEST_ROW; printf("The final prediction accuracy is: %f \n", prediction); free(output); hipFree(train_arr_device); hipFree(train_arr_y_device); hipFree(weight1_device); hipFree(weight2_device); hipFree(bias_layer1_device); hipFree(bias_layer2_device); hipFree(layer1_device); hipFree(layer2_device); hipFree(d3_device); hipFree(d2_device); return 0; }
d1a3c6208b8d264646dcc4813fd578f7968b8e7f.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <time.h> #include <cuda_runtime.h> #define ROW 773 #define COL 26 #define TRAIN_ROW 541 #define TEST_ROW 232 // define nodes #define INPUT_NODES 26 #define HIDDEN_NODES 10 #define OUTPUT_NODES 1 #define ALPHA 0.1 // Activation Functions __device__ float sigmoid_device(float x){ return 1/(1 + exp(-x)); } float sigmoid(float x){ return 1/(1 + exp(-x)); } __device__ float diff_Sigmoid(float x){ return x * (1 - x); } __global__ void cuda_forward_1(float* input, float* weight1, float* layer1, float* bias_layer1){ int ind_x = blockIdx.x * blockDim.x + threadIdx.x; if(ind_x < HIDDEN_NODES){ float act = 0.0; for(int j = 0; j < INPUT_NODES; j++){ act += input[j]*weight1[ind_x*INPUT_NODES + j] ; } layer1[ind_x] = sigmoid_device(act+ bias_layer1[ind_x]); } } __global__ void cuda_forward_2(float* weight2, float* layer1, float* layer2, float* bias_layer2){ int ind_x = blockIdx.x * blockDim.x + threadIdx.x; if(ind_x < OUTPUT_NODES){ float act = 0.0; for(int j = 0; j < HIDDEN_NODES; j++){ act += layer1[j]*weight2[ind_x* HIDDEN_NODES + j] ; } layer2[ind_x] = sigmoid_device(act+ bias_layer2[ind_x]); } } __global__ void cuda_backprop_out(float* d3, float *layer2, float *label){ int ind_x = blockIdx.x * blockDim.x + threadIdx.x; if(ind_x < OUTPUT_NODES){ float err = layer2[ind_x] - label[ind_x]; d3[ind_x] = err; } return; } __global__ void cuda_backprop_hidden(float* d2, float* layer1, float* weight2, float* d3){ int ind_x = blockIdx.x * blockDim.x + threadIdx.x; if(ind_x < HIDDEN_NODES){ float error_hidden = 0.0; for(int j = 0; j < OUTPUT_NODES; j++){ error_hidden += d3[j]*weight2[j*HIDDEN_NODES + ind_x]; } d2[ind_x] = error_hidden * (layer1[ind_x] * (1 - layer1[ind_x])); } } __global__ void update_weight2(float* weight2, float* layer1, float* d3){ int ind_x = blockIdx.x * blockDim.x + threadIdx.x; if(ind_x < OUTPUT_NODES){ for(int j = 0; j < HIDDEN_NODES; j++){ weight2[ind_x*HIDDEN_NODES + j] -= layer1[j]*d3[ind_x]*ALPHA; } } } __global__ void update_weight1(float* weight1, float* input, float* d2){ int ind_x = blockIdx.x * blockDim.x + threadIdx.x; if(ind_x < HIDDEN_NODES){ for(int j = 0; j < INPUT_NODES; j++){ weight1[ind_x*INPUT_NODES + j] -= input[j]*d2[ind_x]*ALPHA; } } } void predict(float *input_matrix, float *pred_arr, float *weight1, float *weight2, float layer1[HIDDEN_NODES], float layer2[OUTPUT_NODES]) { //this will be each extracted input row float input[COL]; //float output=0; // iterate through input matrix row by row, extracting each row for training for(int row = 0; row < TEST_ROW; row++){ for(int col = 0; col < COL; col++){ input[col] = input_matrix[row*COL + col]; } // FORWARD PROPAGATION: for(int i = 0; i < HIDDEN_NODES; i++){ float act = 0.0; for(int j = 0; j < INPUT_NODES; j++){ act += input[j]*weight1[i * INPUT_NODES + j]; } layer1[i] = sigmoid(act); } for(int i = 0; i < OUTPUT_NODES; i++){ float act = 0.0; for(int j = 0; j < HIDDEN_NODES; j++){ act += layer1[j]*weight2[i * HIDDEN_NODES + j]; } layer2[i] = sigmoid(act); } //store predictions in an array for(int i = 0; i < OUTPUT_NODES; i++){ if(layer2[i]>0.5){ pred_arr[row] = 1; } else{ pred_arr[row] = 0; } } } return; } float train_nn(float *input_matrix, float label[TRAIN_ROW], float *weight1, float *weight2, float layer1[HIDDEN_NODES], float layer2[OUTPUT_NODES], int p_epoch) { //this will be each extracted input row float input[COL]; for(int epoch=0; epoch < p_epoch; epoch++){ // iterate through input matrix row by row, extracting each row for 
training for(int row = 0; row < TRAIN_ROW; row++){ for(int col = 0; col < COL; col++){ input[col] = input_matrix[row*COL + col]; } //this is for one row instance of forward and backprop // FORWARD PROPAGATION: for(int i = 0; i < HIDDEN_NODES; i++){ float act = 0.0; for(int j = 0; j < INPUT_NODES; j++){ act += input[j]*weight1[i*INPUT_NODES + j]; } layer1[i] = sigmoid(act); } for(int i = 0; i < OUTPUT_NODES; i++){ float act = 0.0; for(int j = 0; j < HIDDEN_NODES; j++){ act += layer1[j]*weight2[i* HIDDEN_NODES + j]; } layer2[i] = sigmoid(act); } // BACKPROPAGATION: // calculate errors float d3[OUTPUT_NODES]; for(int i = 0; i < OUTPUT_NODES; i++){ float error_output = layer2[i] - label[row]; d3[i] = error_output; } float d2[HIDDEN_NODES]; for(int i = 0; i < HIDDEN_NODES; i++){ float error_hidden = 0.0; for(int j = 0; j < OUTPUT_NODES; j++){ error_hidden += d3[j]*weight2[j*HIDDEN_NODES + i]; } d2[i] = error_hidden * (layer1[i] * (1 - layer1[i])); } // update weights for(int i = 0; i < OUTPUT_NODES; i++){ for(int j = 0; j < HIDDEN_NODES; j++){ weight2[i*HIDDEN_NODES + j] -= layer1[j]*d3[i]*ALPHA; } } for(int i = 0; i < HIDDEN_NODES; i++){ for(int j = 0; j < INPUT_NODES; j++){ weight1[i*INPUT_NODES + j] -= input[j]*d2[i]*ALPHA; } } } } return 0; } void import_data(float *train_arr, float *train_y_arr, float *test_arr , float *test_y_arr){ FILE* str = fopen("train_data.csv", "r"); char line[1024]; int count = 0; while (fgets(line, 1024, str)) { char* tmp = strdup(line); char* c = strtok(tmp,","); //train_arr[count] = new float[1]; while(c != NULL){ train_arr[count] = (float)atof(c); count ++; c = strtok(NULL, ","); } free(tmp); } //IMPORT TRAINING LABELS FILE* str_y = fopen("train_y.csv", "r"); char line_y[1024]; int count_y = 0; while (fgets(line_y, 1024, str_y)) { char* tmp = strdup(line_y); char* c = strtok(tmp,","); while(c != NULL){ train_y_arr[count_y] = (float)atof(c); count_y ++; c = strtok(NULL, ","); } free(tmp); } //IMPORT TESTING DATA FILE* str_t = fopen("test_data.csv", "r"); char line_t[1024]; int count_t = 0; while (fgets(line_t, 1024, str_t)) { char* tmp = strdup(line_t); char* c = strtok(tmp,","); while(c != NULL){ test_arr[count_t] = (float)atof(c); count_t ++; c = strtok(NULL, ","); } free(tmp); } //IMPORT TEST LABELS FILE* str_ty = fopen("test_y.csv", "r"); char line_ty[1024]; int count_ty = 0; while (fgets(line_ty, 1024, str_ty)) { char* tmp = strdup(line_ty); char* c = strtok(tmp,","); while(c != NULL){ test_y_arr[count_ty] = (float)atof(c); count_ty ++; c = strtok(NULL, ","); } free(tmp); } } int main(int argc, char *argv[]){ float train_arr[TRAIN_ROW*COL]; float train_y_arr[TRAIN_ROW*1]; float test_arr[TEST_ROW*COL]; float test_y_arr[TEST_ROW*1]; float weight_layer1[HIDDEN_NODES*INPUT_NODES]; float weight_layer2[OUTPUT_NODES*HIDDEN_NODES]; float bias_layer1[HIDDEN_NODES]; float bias_layer2[OUTPUT_NODES]; float layer1[HIDDEN_NODES]; float layer2[OUTPUT_NODES]; float d3[OUTPUT_NODES]; float d2[HIDDEN_NODES]; float** train_arr_device = new float*[TRAIN_ROW]; float** train_arr_y_device = new float*[TRAIN_ROW]; float* weight1_device; float* weight2_device; float* layer1_device; float* layer2_device; float* d3_device; float* d2_device; float* bias_layer1_device; float* bias_layer2_device; cudaDeviceReset(); float** train_final = new float* [TRAIN_ROW]; float** train_y_final = new float* [TRAIN_ROW]; float *output = (float *)malloc(sizeof(TRAIN_ROW*sizeof(float))); float *output_test = (float *)malloc(sizeof(TEST_ROW*sizeof(float))); //IMPORT TRAINING DATA import_data(train_arr, 
train_y_arr, test_arr, test_y_arr); for (size_t i = 0; i < TRAIN_ROW; i++) { train_final[i] = new float[COL]; train_y_final[i] = new float[COL]; for (size_t j = 0; j < COL; j++) { train_final[i][j] = train_arr[i*COL + j]; } for (size_t k = 0; k < 1; k++) { train_y_final[i][k] = train_y_arr[i]; } } // generate random weights and biases for(int i = 0; i < HIDDEN_NODES; i++){ for(int j = 0; j < INPUT_NODES; j++){ weight_layer1[i*INPUT_NODES + j] = ((double)rand())/((double)RAND_MAX); } } for(int i = 0; i < OUTPUT_NODES; i++){ for(int j = 0; j < HIDDEN_NODES; j++){ weight_layer2[i*HIDDEN_NODES + j] = ((double)rand())/((double)RAND_MAX); } } for(int i = 0; i < HIDDEN_NODES; i++){ bias_layer1[i] = ((double)rand())/((double)RAND_MAX); } for(int i = 0; i < OUTPUT_NODES; i++){ bias_layer2[i] = ((double)rand())/((double)RAND_MAX); } for (size_t i = 0; i < TRAIN_ROW; i++) { cudaMalloc(&train_arr_device[i], sizeof(float)*COL); cudaMemcpy(train_arr_device[i], train_final[i], sizeof(float)*COL, cudaMemcpyHostToDevice); cudaMalloc(&train_arr_y_device[i], sizeof(float)*1); cudaMemcpy(train_arr_y_device[i], train_y_final[i], sizeof(float)*1, cudaMemcpyHostToDevice); } //cudaMalloc(&train_arr_y_device, sizeof(float)*TRAIN_ROW*1); //cudaMemcpy(train_arr_y_device, train_y_arr, sizeof(float)*TRAIN_ROW*1, cudaMemcpyHostToDevice); cudaMalloc(&weight1_device, sizeof(float)*HIDDEN_NODES*INPUT_NODES); cudaMemcpy(weight1_device, weight_layer1, sizeof(float)*HIDDEN_NODES*INPUT_NODES, cudaMemcpyHostToDevice); cudaMalloc(&weight2_device, sizeof(float)*OUTPUT_NODES*HIDDEN_NODES); cudaMemcpy(weight2_device, weight_layer2, sizeof(float)*OUTPUT_NODES*HIDDEN_NODES, cudaMemcpyHostToDevice); cudaMalloc(&layer1_device, sizeof(float)*HIDDEN_NODES); cudaMemcpy(layer1_device, layer1, sizeof(float)*HIDDEN_NODES, cudaMemcpyHostToDevice); cudaMalloc(&layer2_device, sizeof(float)*OUTPUT_NODES); cudaMemcpy(layer2_device, layer2, sizeof(float)*OUTPUT_NODES, cudaMemcpyHostToDevice); cudaMalloc(&d3_device, sizeof(float)*OUTPUT_NODES); cudaMemcpy(d3_device, d3, sizeof(float)*OUTPUT_NODES, cudaMemcpyHostToDevice); cudaMalloc(&d2_device, sizeof(float)*HIDDEN_NODES); cudaMemcpy(d2_device, d2, sizeof(float)*HIDDEN_NODES, cudaMemcpyHostToDevice); cudaMalloc(&bias_layer1_device, sizeof(float)*HIDDEN_NODES); cudaMemcpy(bias_layer1_device, bias_layer1, sizeof(float)*HIDDEN_NODES, cudaMemcpyHostToDevice); cudaMalloc(&bias_layer2_device, sizeof(float)*HIDDEN_NODES); cudaMemcpy(bias_layer2_device, bias_layer2, sizeof(float)*HIDDEN_NODES, cudaMemcpyHostToDevice); // NEURAL NETWORK //ceil(541/14) = 39 //ceil(26/14) = 2 dim3 dimGrid(39,2,1); dim3 dimBlock(14,14,1); /*printf("%s\n","Weight Layer 1:" ); for (size_t i = 0; i < HIDDEN_NODES; i++) { for (size_t j = 0; j < INPUT_NODES; j++) { printf("%f ",weight_layer1[i*INPUT_NODES + j] ); } printf("\n"); } printf("%s\n","Weight Layer 2:" ); for (size_t i = 0; i < OUTPUT_NODES; i++) { for (size_t j = 0; j < HIDDEN_NODES; j++) { printf("%f ",weight_layer2[i*HIDDEN_NODES + j] ); } printf("\n"); }*/ int epoch = 400; printf(" TRAINING WITH %d EPOCHS:\n__________________________________________________________________________\n__________________________________________________________________________\n\n", epoch); cudaEvent_t beginLaunch, endLaunch; cudaEventCreate(&beginLaunch); cudaEventCreate(&endLaunch); cudaEventRecord(beginLaunch,0); float mse_total; float mse_old = 100000; float mse_difference = 100000; float mse_abs = 10000; int max_epoch = 0; while(mse_abs > 0.0001 && max_epoch < epoch){ // //for 
(size_t i = 0; i < epoch; i++) { mse_total = 0.0; for (size_t j = 0; j < TRAIN_ROW; j++) { //TRAIN_ROW cuda_forward_1<<<dimBlock , dimGrid>>>(train_arr_device[j], weight1_device, layer1_device, bias_layer1_device); //cudaMemcpy(layer1, layer1_device, sizeof(float)*HIDDEN_NODES, cudaMemcpyDeviceToHost); //cudaMemcpy(layer1_device, layer1, sizeof(float)*HIDDEN_NODES, cudaMemcpyHostToDevice); //printf("Device Variable Copying:\t%s\n", cudaGetErrorString(cudaGetLastError())); cuda_forward_2<<<dimBlock , dimGrid>>>(weight2_device, layer1_device, layer2_device, bias_layer2_device); //cudaMemcpy(layer2, layer2_device, sizeof(float)*OUTPUT_NODES, cudaMemcpyDeviceToHost); //cudaMemcpy(layer2_device, layer2, sizeof(float)*OUTPUT_NODES, cudaMemcpyHostToDevice); //printf("Device Variable Copying:\t%s\n", cudaGetErrorString(cudaGetLastError())); cuda_backprop_out<<<dimBlock , dimGrid>>>(d3_device, layer2_device, train_arr_y_device[j]); cudaMemcpy(d3, d3_device, sizeof(float)*OUTPUT_NODES, cudaMemcpyDeviceToHost); cudaMemcpy(d3_device, d3, sizeof(float)*OUTPUT_NODES, cudaMemcpyHostToDevice); //printf("Device Variable Copying:\t%s\n", cudaGetErrorString(cudaGetLastError())); mse_total += abs(0.5*d3[0]*d3[0]); //printf("%f\n", d3[0]); cuda_backprop_hidden<<<dimBlock , dimGrid>>>(d2_device, layer1_device, weight2_device, d3_device); //cudaMemcpy(d2, d2, sizeof(float)*HIDDEN_NODES, cudaMemcpyDeviceToHost); //cudaMemcpy(d2, d2, sizeof(float)*HIDDEN_NODES, cudaMemcpyHostToDevice); //printf("Device Variable Copying:\t%s\n", cudaGetErrorString(cudaGetLastError())); update_weight2<<<dimBlock , dimGrid>>>(weight2_device, layer1_device, d3_device); //cudaMemcpy(weight_layer2, weight2_device, sizeof(float)*OUTPUT_NODES*HIDDEN_NODES, cudaMemcpyDeviceToHost); //cudaMemcpy(weight2_device, weight_layer2, sizeof(float)*OUTPUT_NODES*HIDDEN_NODES, cudaMemcpyHostToDevice); //printf("Device Variable Copying:\t%s\n", cudaGetErrorString(cudaGetLastError())); update_weight1<<<dimBlock , dimGrid>>>(weight1_device, train_arr_device[j], d2_device); //cudaMemcpy(weight_layer1, weight1_device, sizeof(float)*HIDDEN_NODES*INPUT_NODES, cudaMemcpyDeviceToHost); //cudaMemcpy(weight1_device, weight_layer1, sizeof(float)*HIDDEN_NODES*INPUT_NODES, cudaMemcpyHostToDevice); //printf("Device Variable Copying:\t%s\n", cudaGetErrorString(cudaGetLastError())); } printf("%f\n", mse_total); mse_difference = mse_old - mse_total; mse_abs = abs(mse_difference); mse_old = mse_total; max_epoch += 1; printf("MSE ABS DIFFERENCE FOR EPOCH: %f\n", mse_abs); } float mse_final = mse_total; cudaEventRecord(endLaunch,0); cudaEventSynchronize(endLaunch); cudaMemcpy(weight_layer1, weight1_device, sizeof(float)*HIDDEN_NODES*INPUT_NODES, cudaMemcpyDeviceToHost); cudaMemcpy(weight_layer2, weight2_device, sizeof(float)*OUTPUT_NODES*HIDDEN_NODES, cudaMemcpyDeviceToHost); float time_share = 0; cudaEventElapsedTime(&time_share, beginLaunch, endLaunch); printf("The time taken to train with %d epochs is: %fms\n", max_epoch, time_share); printf("MSE FINAL: %f\n", mse_final); //printf("Device Variable Copying:\t%s\n", cudaGetErrorString(cudaGetLastError())); /*printf("%s\n","Weight Layer 1:" ); for (size_t i = 0; i < HIDDEN_NODES; i++) { for (size_t j = 0; j < INPUT_NODES; j++) { printf("%f ",weight_layer1[i*INPUT_NODES + j] ); } printf("\n"); } printf("%s\n","Weight Layer 2:" ); for (size_t i = 0; i < OUTPUT_NODES; i++) { for (size_t j = 0; j < HIDDEN_NODES; j++) { printf("%f ",weight_layer2[i*HIDDEN_NODES + j] ); } printf("\n"); }*/ predict(test_arr, output, 
weight_layer1, weight_layer2, layer1, layer2); int count_final=0; for(int i = 0; i < TEST_ROW; i++){ //printf("predicted %f\n", output[i]); //printf("actual %f\n", train_y_arr[i]); if(output[i] == test_y_arr[i]){ count_final +=1; } } float prediction = (float)count_final/TEST_ROW; printf("The final prediction accuracy is: %f \n", prediction); free(output); cudaFree(train_arr_device); cudaFree(train_arr_y_device); cudaFree(weight1_device); cudaFree(weight2_device); cudaFree(bias_layer1_device); cudaFree(bias_layer2_device); cudaFree(layer1_device); cudaFree(layer2_device); cudaFree(d3_device); cudaFree(d2_device); return 0; }
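// --- Added note (sketch, not part of the original training program) ---------
// CUDA's execution configuration is <<<gridDim, blockDim>>>, while the training
// loop above launches every kernel as <<<dimBlock, dimGrid>>>, i.e. with the two
// dim3 values in the opposite order from what the variable names and the
// "ceil(541/14) = 39" / "ceil(26/14) = 2" comments suggest. The helper below is
// a minimal sketch of a launch matching those comments, with the error check
// that is commented out inside the loop re-enabled. CHECK_LAUNCH and blocks_for
// are hypothetical names introduced here; everything else refers to symbols
// from the program above.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CHECK_LAUNCH(msg)                                               \
    do {                                                                \
        cudaError_t err_ = cudaGetLastError();                          \
        if (err_ != cudaSuccess) {                                      \
            printf("%s failed: %s\n", (msg), cudaGetErrorString(err_)); \
            exit(EXIT_FAILURE);                                         \
        }                                                               \
    } while (0)

// blocks_for(26, 541) with 14x14 blocks gives dim3(39, 2, 1), the same grid the
// comments above compute by hand.
static inline dim3 blocks_for(int rows, int cols, int bs = 14)
{
    return dim3((cols + bs - 1) / bs, (rows + bs - 1) / bs, 1);
}

// Usage sketch inside the epoch loop:
//   dim3 dimBlock(14, 14, 1);
//   dim3 dimGrid = blocks_for(26, 541);
//   cuda_forward_1<<<dimGrid, dimBlock>>>(train_arr_device[j], weight1_device,
//                                         layer1_device, bias_layer1_device);
//   CHECK_LAUNCH("cuda_forward_1");
// -----------------------------------------------------------------------------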
ba8329f91053bbce0dd51b7b378aec1023107642.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "compression/delta_afl/delta_afl_encoding.hpp" #include "delta_afl_gpu.cuh" #include "core/cuda_array.hpp" #include "util/statistics/cuda_array_statistics.hpp" #include "util/transform/cuda_array_transform.hpp" #include "util/copy/cuda_array_copy.hpp" #include "util/stencil/stencil.hpp" #include "core/float_cast.hpp" #include "core/cuda_launcher.cuh" #include "core/cuda_macros.cuh" namespace ddj { __global__ void _delta_afl_splitFloatKernel(float* data, size_t size, int* mantissa, int* exponent, int* sign) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= size) return; floatCastUnion fu { data[idx] }; mantissa[idx] = fu.parts.mantisa; exponent[idx] = fu.parts.exponent; sign[idx] = fu.parts.sign; } __global__ void _delta_afl_composeFloatKernel(int* mantissa, int* exponent, int* sign, size_t size, float* result) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= size) return; floatCastUnion fu; fu.parts.sign = sign[idx]; fu.parts.exponent = exponent[idx]; fu.parts.mantisa = mantissa[idx]; result[idx] = fu.value; } template<typename T> SharedCudaPtrVector<char> DeltaAflEncoding::Encode(SharedCudaPtr<T> data) { CUDA_ASSERT_RETURN(hipGetLastError()); LOG4CPLUS_INFO_FMT(_logger, "DELTA AFL encoding START: data size = %lu", data->size()); LOG4CPLUS_TRACE_FMT(_logger, "DELTA AFL data to encode: %s", CudaArray().ToString(data->copy()).c_str()); if (data->size() <= 0) return SharedCudaPtrVector<char> { CudaPtr<char>::make_shared(), CudaPtr<char>::make_shared() }; // Get minimal bit count needed to encode data char minBit = CudaArrayStatistics().MinBitCnt<T>(data) + 1; // (char)13; //SharedCudaPtr<int> initial_data = CudaArrayGenerator().GenerateDescendingDeviceArray(max_size); const int WARP_SIZE = 32; int cword = sizeof(T) * 8; unsigned int bit_length = CudaArrayStatistics().MinBitCnt<T>(data); int max_size = data->size(); unsigned long data_size = max_size * sizeof(int); unsigned long data_chunk = cword * WARP_SIZE; unsigned long compressed_data_size = (max_size < data_chunk ? 
data_chunk : max_size); compressed_data_size = ((compressed_data_size * bit_length + (data_chunk)-1) / (data_chunk)) * WARP_SIZE * sizeof(T) + (cword) * sizeof(T); int compression_blocks_count = (compressed_data_size + (sizeof(T) * WARP_SIZE) - 1) / (sizeof(T) * WARP_SIZE); auto result = CudaPtr<char>::make_shared(compressed_data_size); auto metadata = CudaPtr<char>::make_shared(4 * sizeof(char)); auto dataBlockStart= CudaPtr<char>::make_shared(compression_blocks_count * sizeof(T)); char* host_metadata; CUDA_CALL(hipHostMalloc(&host_metadata, 4)); host_metadata[0] = minBit; run_delta_afl_compress_gpu<T, 32>(minBit, data->get(), (T*) result->get(), (T*) dataBlockStart->get(), compressed_data_size / sizeof(T)); metadata->fillFromHost(host_metadata, 4 * sizeof(char)); CUDA_CALL(hipHostFree(host_metadata)); hipDeviceSynchronize(); CUDA_ASSERT_RETURN(hipGetLastError()); LOG4CPLUS_INFO(_logger, "DELTA AFL encoding END"); return SharedCudaPtrVector<char> { metadata, result, dataBlockStart }; } template<typename T> SharedCudaPtr<T> DecodeDeltaAfl(T* data, T* dataBlockStart, size_t size, unsigned int minBit) { // Calculate length long long comprBits = size * 8; unsigned long length = comprBits / minBit; auto result = CudaPtr<T>::make_shared(length); run_delta_afl_decompress_gpu<T, 32>(minBit, data, dataBlockStart, (T*) result->get(), length); hipDeviceSynchronize(); CUDA_ASSERT_RETURN(hipGetLastError()); return result; } template<typename T> SharedCudaPtr<T> DeltaAflEncoding::Decode(SharedCudaPtrVector<char> input) { LOG4CPLUS_INFO_FMT(_logger, "DELTA AFL decoding START: input[0] size = %lu, input[1] size = %lu", input[0]->size(), input[1]->size()); if (input[1]->size() <= 0) return CudaPtr<T>::make_shared(); auto metadata = input[0]->copyToHost(); auto data = input[1]; auto dataBlockStart = input[2]; // Get min bit and rest int minBit = (*metadata)[0]; // Perform decoding auto result = DecodeDeltaAfl<T>((T*) data->get(), (T*) dataBlockStart->get(), data->size(), minBit); LOG4CPLUS_INFO(_logger, "DELTA AFL decoding END"); return result; } template<> SharedCudaPtrVector<char> DeltaAflEncoding::Encode(SharedCudaPtr<float> data) { CUDA_ASSERT_RETURN(hipGetLastError()); LOG4CPLUS_INFO_FMT(_logger, "DELTA AFL (FLOAT) encoding START: data size = %lu", data->size()); if (data->size() <= 0) return SharedCudaPtrVector<char> { CudaPtr<char>::make_shared(), CudaPtr<char>::make_shared() }; auto minMax = CudaArrayStatistics().MinMax(data); char allPositive = std::get < 0 > (minMax) >= 0 ? 1 : 0; char allNegative = std::get < 1 > (minMax) < 0 ? 
2 : 0; char sign = allPositive + allNegative; auto signResult = CudaPtr<int>::make_shared(data->size()); auto exponentResult = CudaPtr<int>::make_shared(data->size()); auto mantissaResult = CudaPtr<int>::make_shared(data->size()); // Now we split every float number to three integers - sign, exponent and mantissa this->_policy.setSize(data->size()); hipLaunch(this->_policy, _delta_afl_splitFloatKernel, data->get(), data->size(), mantissaResult->get(), exponentResult->get(), signResult->get()); hipDeviceSynchronize(); // We do AFL encoding on mantissa and exponent int arrays auto resultVector = Encode(mantissaResult); auto resultVector2 = Encode(exponentResult); resultVector.insert(resultVector.end(), resultVector2.begin(), resultVector2.end()); // Save the size of mantissa after compression // When all numbers are positive or negative we save sign only in metadata as one char // Else we save a stencil containing which numbers are negative SharedCudaPtr<char> metadata; metadata = CudaPtr<char>::make_shared(4 * sizeof(size_t) + 1); size_t size = resultVector[1]->size(); size_t size2 = resultVector[2]->size(); size_t size3 = resultVector2[1]->size(); size_t size4 = resultVector2[2]->size(); // std:: cout << "SIZES1: " << size << "\n"; // std:: cout << "SIZES2: " << size2 << "\n"; // std:: cout << "SIZES1: " << size3 << "\n"; // std:: cout << "SIZES2: " << size4 << "\n"; CUDA_CALL(hipMemcpy(metadata->get(), &size, sizeof(size_t), CPY_HTD)); CUDA_CALL(hipMemcpy(metadata->get() + sizeof(size_t), &size2, sizeof(size_t), CPY_HTD)); CUDA_CALL(hipMemcpy(metadata->get() + 2*sizeof(size_t), &size3, sizeof(size_t), CPY_HTD)); CUDA_CALL(hipMemcpy(metadata->get() + 3*sizeof(size_t), &size4, sizeof(size_t), CPY_HTD)); CUDA_CALL(hipMemcpy(metadata->get() + 4*sizeof(size_t), &sign, 1, CPY_HTD)); if (sign == 0) { auto stencil = Stencil(signResult).pack(); metadata = CudaArrayCopy().Concatenate(SharedCudaPtrVector<char> { metadata, stencil }); } CUDA_ASSERT_RETURN(hipGetLastError()); LOG4CPLUS_INFO(_logger, "DELTA AFL (FLOAT) enoding END"); return SharedCudaPtrVector<char> { metadata, CudaArrayCopy().Concatenate( resultVector) }; } template<> SharedCudaPtr<float> DeltaAflEncoding::Decode(SharedCudaPtrVector<char> input) { LOG4CPLUS_INFO_FMT(_logger, "DELTA AFL (FLOAT) decoding START: input[0] size = %lu, input[1] size = %lu", input[0]->size(), input[1]->size()); if (input[1]->size() <= 0) return CudaPtr<float>::make_shared(); int offset = 0, step = sizeof(char); auto metadata = input[0]; auto data = input[1]; // read metadata information char sign; long int compressedMantissaSize, compressedMantissaDataStartSize, compressedExponentSize, compressedExponentDataStartSize; CUDA_CALL(hipMemcpy(&compressedMantissaSize, metadata->get(), sizeof(size_t), CPY_DTH)); CUDA_CALL(hipMemcpy(&compressedMantissaDataStartSize, metadata->get() + sizeof(size_t), sizeof(size_t), CPY_DTH)); CUDA_CALL(hipMemcpy(&compressedExponentSize, metadata->get()+ 2*sizeof(size_t), sizeof(size_t), CPY_DTH)); CUDA_CALL(hipMemcpy(&compressedExponentDataStartSize, metadata->get() + 3*sizeof(size_t), sizeof(size_t), CPY_DTH)); CUDA_CALL(hipMemcpy(&sign, metadata->get()+ 4*sizeof(size_t), 1, CPY_DTH)); std::cout << "Size of mantissa : " << compressedMantissaSize << "\n"; std::cout << "Size of mantissa start : " << compressedMantissaDataStartSize << "\n"; std::cout << "Size of exponent : " << compressedExponentSize << "\n"; std::cout << "Size of exponent start: " << compressedExponentDataStartSize << "\n"; // read mantissa metadata information char 
minBit, rest; CUDA_CALL(hipMemcpy(&minBit, data->get()+offset, step, CPY_DTH)); offset += step; CUDA_CALL(hipMemcpy(&rest, data->get()+offset, step, CPY_DTH)); offset += 3 * step; // decode mantissa auto mantissaDecoded = DecodeDeltaAfl<int>((int*) (data->get() + offset), (int*) (data->get() + offset + compressedMantissaSize), compressedMantissaSize, minBit); //long int compressedExponentSize = data->size() - compressedMantissaSize - 8; offset += compressedMantissaSize + compressedMantissaDataStartSize; // read exponent metadata information CUDA_CALL(hipMemcpy(&minBit, data->get()+offset, step, CPY_DTH)); offset += step; CUDA_CALL(hipMemcpy(&rest, data->get()+offset, step, CPY_DTH)); offset += 3 * step; // decode exponent auto exponentDecoded = DecodeDeltaAfl<int>((int*) (data->get() + offset), (int*) (data->get() + offset + compressedExponentSize ), compressedExponentSize, minBit); // recover signs Stencil stencil; size_t size = mantissaDecoded->size(); if (sign) stencil = Stencil( CudaArrayTransform().Transform<int, int>( CudaPtr<int>::make_shared(size), FillOperator<int, int> { (int) sign - 1 })); else stencil = Stencil(metadata, sizeof(size_t) + 1); // compose exponent, mantissa and sign to floats auto result = CudaPtr<float>::make_shared(size); this->_policy.setSize(size); hipLaunch(this->_policy, _delta_afl_composeFloatKernel, mantissaDecoded->get(), exponentDecoded->get(), stencil->get(), size, result->get()); hipDeviceSynchronize(); CUDA_ASSERT_RETURN(hipGetLastError()); LOG4CPLUS_INFO(_logger, "DELTA AFL decoding END"); return result; } SharedCudaPtrVector<char> DeltaAflEncoding::EncodeInt(SharedCudaPtr<int> data) { return this->Encode<int>(data); } SharedCudaPtr<int> DeltaAflEncoding::DecodeInt(SharedCudaPtrVector<char> data) { return this->Decode<int>(data); } SharedCudaPtrVector<char> DeltaAflEncoding::EncodeTime(SharedCudaPtr<time_t> data) { return this->Encode<time_t>(data); } SharedCudaPtr<time_t> DeltaAflEncoding::DecodeTime(SharedCudaPtrVector<char> data) { return this->Decode<time_t>(data); } SharedCudaPtrVector<char> DeltaAflEncoding::EncodeFloat(SharedCudaPtr<float> data) { return this->Encode<float>(data); } SharedCudaPtr<float> DeltaAflEncoding::DecodeFloat(SharedCudaPtrVector<char> data) { return this->Decode<float>(data); } SharedCudaPtrVector<char> DeltaAflEncoding::EncodeDouble( SharedCudaPtr<double> data) { return SharedCudaPtrVector<char>(); } SharedCudaPtr<double> DeltaAflEncoding::DecodeDouble( SharedCudaPtrVector<char> data) { return SharedCudaPtr<double>(); } SharedCudaPtrVector<char> DeltaAflEncoding::EncodeShort(SharedCudaPtr<short> data) { return this->Encode<short>(data); } SharedCudaPtr<short> DeltaAflEncoding::DecodeShort(SharedCudaPtrVector<char> data) { return this->Decode<short>(data); } SharedCudaPtrVector<char> DeltaAflEncoding::EncodeChar(SharedCudaPtr<char> data) { return this->Encode<char>(data); } SharedCudaPtr<char> DeltaAflEncoding::DecodeChar(SharedCudaPtrVector<char> data) { return this->Decode<char>(data); } size_t DeltaAflEncoding::GetMetadataSize(SharedCudaPtr<char> data, DataType type) { if (data->size() <= 0) return 0; switch (type) { case DataType::d_int: return 4 * sizeof(char); case DataType::d_float: return sizeof(size_t) + 1; default: throw NotImplementedException( "No DictEncoding::GetCompressedSize implementation for that type"); } } size_t DeltaAflEncoding::GetCompressedSize(SharedCudaPtr<char> data, DataType type) { if (data->size() <= 0) return 0; switch (type) { case DataType::d_int: return 
GetCompressedSizeIntegral(CastSharedCudaPtr<char, int>(data)); case DataType::d_float: return GetCompressedSizeFloatingPoint( CastSharedCudaPtr<char, float>(data)); default: throw NotImplementedException( "No DictEncoding::GetCompressedSize implementation for that type"); } } template<typename T> size_t DeltaAflEncoding::GetCompressedSizeIntegral(SharedCudaPtr<T> data) { char minBit = CudaArrayStatistics().MinBitCnt<T>(data) + 1; // (char)13; //SharedCudaPtr<int> initial_data = CudaArrayGenerator().GenerateDescendingDeviceArray(max_size); const int WARP_SIZE = 32; int cword = sizeof(T) * 8; unsigned int bit_length = CudaArrayStatistics().MinBitCnt<T>(data); int max_size = data->size(); unsigned long data_size = max_size * sizeof(int); unsigned long data_chunk = cword * WARP_SIZE; unsigned long compressed_data_size = (max_size < data_chunk ? data_chunk : max_size); compressed_data_size = ((compressed_data_size * bit_length + (data_chunk)-1) / (data_chunk)) * WARP_SIZE * sizeof(T) + (cword) * sizeof(T); int compression_blocks_count = (compressed_data_size + (sizeof(T) * WARP_SIZE) - 1) / (sizeof(T) * WARP_SIZE); return compressed_data_size + compression_blocks_count; } template<typename T> size_t DeltaAflEncoding::GetCompressedSizeFloatingPoint(SharedCudaPtr<T> data) { auto minMax = CudaArrayStatistics().MinMax(data); auto signResult = CudaPtr<int>::make_shared(data->size()); auto exponentResult = CudaPtr<int>::make_shared(data->size()); auto mantissaResult = CudaPtr<int>::make_shared(data->size()); // Now we split every float number to three integers - sign, exponent and mantissa this->_policy.setSize(data->size()); hipLaunch(this->_policy, _delta_afl_splitFloatKernel, data->get(), data->size(), mantissaResult->get(), exponentResult->get(), signResult->get()); hipDeviceSynchronize(); size_t size = GetCompressedSizeIntegral(exponentResult) + GetCompressedSizeIntegral(mantissaResult); size += GetMetadataSize(CastSharedCudaPtr<int, char>(exponentResult), DataType::d_int); size += GetMetadataSize(CastSharedCudaPtr<int, char>(mantissaResult), DataType::d_int); return size; } #define DELTA_AFL_ENCODING_SPEC(X) \ template SharedCudaPtrVector<char> DeltaAflEncoding::Encode<X>(SharedCudaPtr<X>); \ template SharedCudaPtr<X> DeltaAflEncoding::Decode<X>(SharedCudaPtrVector<char>); FOR_EACH(DELTA_AFL_ENCODING_SPEC, char, short, int, long, unsigned int) } /* namespace ddj */
ba8329f91053bbce0dd51b7b378aec1023107642.cu
#include "compression/delta_afl/delta_afl_encoding.hpp" #include "delta_afl_gpu.cuh" #include "core/cuda_array.hpp" #include "util/statistics/cuda_array_statistics.hpp" #include "util/transform/cuda_array_transform.hpp" #include "util/copy/cuda_array_copy.hpp" #include "util/stencil/stencil.hpp" #include "core/float_cast.hpp" #include "core/cuda_launcher.cuh" #include "core/cuda_macros.cuh" namespace ddj { __global__ void _delta_afl_splitFloatKernel(float* data, size_t size, int* mantissa, int* exponent, int* sign) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= size) return; floatCastUnion fu { data[idx] }; mantissa[idx] = fu.parts.mantisa; exponent[idx] = fu.parts.exponent; sign[idx] = fu.parts.sign; } __global__ void _delta_afl_composeFloatKernel(int* mantissa, int* exponent, int* sign, size_t size, float* result) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= size) return; floatCastUnion fu; fu.parts.sign = sign[idx]; fu.parts.exponent = exponent[idx]; fu.parts.mantisa = mantissa[idx]; result[idx] = fu.value; } template<typename T> SharedCudaPtrVector<char> DeltaAflEncoding::Encode(SharedCudaPtr<T> data) { CUDA_ASSERT_RETURN(cudaGetLastError()); LOG4CPLUS_INFO_FMT(_logger, "DELTA AFL encoding START: data size = %lu", data->size()); LOG4CPLUS_TRACE_FMT(_logger, "DELTA AFL data to encode: %s", CudaArray().ToString(data->copy()).c_str()); if (data->size() <= 0) return SharedCudaPtrVector<char> { CudaPtr<char>::make_shared(), CudaPtr<char>::make_shared() }; // Get minimal bit count needed to encode data char minBit = CudaArrayStatistics().MinBitCnt<T>(data) + 1; // (char)13; //SharedCudaPtr<int> initial_data = CudaArrayGenerator().GenerateDescendingDeviceArray(max_size); const int WARP_SIZE = 32; int cword = sizeof(T) * 8; unsigned int bit_length = CudaArrayStatistics().MinBitCnt<T>(data); int max_size = data->size(); unsigned long data_size = max_size * sizeof(int); unsigned long data_chunk = cword * WARP_SIZE; unsigned long compressed_data_size = (max_size < data_chunk ? 
data_chunk : max_size); compressed_data_size = ((compressed_data_size * bit_length + (data_chunk)-1) / (data_chunk)) * WARP_SIZE * sizeof(T) + (cword) * sizeof(T); int compression_blocks_count = (compressed_data_size + (sizeof(T) * WARP_SIZE) - 1) / (sizeof(T) * WARP_SIZE); auto result = CudaPtr<char>::make_shared(compressed_data_size); auto metadata = CudaPtr<char>::make_shared(4 * sizeof(char)); auto dataBlockStart= CudaPtr<char>::make_shared(compression_blocks_count * sizeof(T)); char* host_metadata; CUDA_CALL(cudaMallocHost(&host_metadata, 4)); host_metadata[0] = minBit; run_delta_afl_compress_gpu<T, 32>(minBit, data->get(), (T*) result->get(), (T*) dataBlockStart->get(), compressed_data_size / sizeof(T)); metadata->fillFromHost(host_metadata, 4 * sizeof(char)); CUDA_CALL(cudaFreeHost(host_metadata)); cudaDeviceSynchronize(); CUDA_ASSERT_RETURN(cudaGetLastError()); LOG4CPLUS_INFO(_logger, "DELTA AFL encoding END"); return SharedCudaPtrVector<char> { metadata, result, dataBlockStart }; } template<typename T> SharedCudaPtr<T> DecodeDeltaAfl(T* data, T* dataBlockStart, size_t size, unsigned int minBit) { // Calculate length long long comprBits = size * 8; unsigned long length = comprBits / minBit; auto result = CudaPtr<T>::make_shared(length); run_delta_afl_decompress_gpu<T, 32>(minBit, data, dataBlockStart, (T*) result->get(), length); cudaDeviceSynchronize(); CUDA_ASSERT_RETURN(cudaGetLastError()); return result; } template<typename T> SharedCudaPtr<T> DeltaAflEncoding::Decode(SharedCudaPtrVector<char> input) { LOG4CPLUS_INFO_FMT(_logger, "DELTA AFL decoding START: input[0] size = %lu, input[1] size = %lu", input[0]->size(), input[1]->size()); if (input[1]->size() <= 0) return CudaPtr<T>::make_shared(); auto metadata = input[0]->copyToHost(); auto data = input[1]; auto dataBlockStart = input[2]; // Get min bit and rest int minBit = (*metadata)[0]; // Perform decoding auto result = DecodeDeltaAfl<T>((T*) data->get(), (T*) dataBlockStart->get(), data->size(), minBit); LOG4CPLUS_INFO(_logger, "DELTA AFL decoding END"); return result; } template<> SharedCudaPtrVector<char> DeltaAflEncoding::Encode(SharedCudaPtr<float> data) { CUDA_ASSERT_RETURN(cudaGetLastError()); LOG4CPLUS_INFO_FMT(_logger, "DELTA AFL (FLOAT) encoding START: data size = %lu", data->size()); if (data->size() <= 0) return SharedCudaPtrVector<char> { CudaPtr<char>::make_shared(), CudaPtr<char>::make_shared() }; auto minMax = CudaArrayStatistics().MinMax(data); char allPositive = std::get < 0 > (minMax) >= 0 ? 1 : 0; char allNegative = std::get < 1 > (minMax) < 0 ? 
2 : 0; char sign = allPositive + allNegative; auto signResult = CudaPtr<int>::make_shared(data->size()); auto exponentResult = CudaPtr<int>::make_shared(data->size()); auto mantissaResult = CudaPtr<int>::make_shared(data->size()); // Now we split every float number to three integers - sign, exponent and mantissa this->_policy.setSize(data->size()); cudaLaunch(this->_policy, _delta_afl_splitFloatKernel, data->get(), data->size(), mantissaResult->get(), exponentResult->get(), signResult->get()); cudaDeviceSynchronize(); // We do AFL encoding on mantissa and exponent int arrays auto resultVector = Encode(mantissaResult); auto resultVector2 = Encode(exponentResult); resultVector.insert(resultVector.end(), resultVector2.begin(), resultVector2.end()); // Save the size of mantissa after compression // When all numbers are positive or negative we save sign only in metadata as one char // Else we save a stencil containing which numbers are negative SharedCudaPtr<char> metadata; metadata = CudaPtr<char>::make_shared(4 * sizeof(size_t) + 1); size_t size = resultVector[1]->size(); size_t size2 = resultVector[2]->size(); size_t size3 = resultVector2[1]->size(); size_t size4 = resultVector2[2]->size(); // std:: cout << "SIZES1: " << size << "\n"; // std:: cout << "SIZES2: " << size2 << "\n"; // std:: cout << "SIZES1: " << size3 << "\n"; // std:: cout << "SIZES2: " << size4 << "\n"; CUDA_CALL(cudaMemcpy(metadata->get(), &size, sizeof(size_t), CPY_HTD)); CUDA_CALL(cudaMemcpy(metadata->get() + sizeof(size_t), &size2, sizeof(size_t), CPY_HTD)); CUDA_CALL(cudaMemcpy(metadata->get() + 2*sizeof(size_t), &size3, sizeof(size_t), CPY_HTD)); CUDA_CALL(cudaMemcpy(metadata->get() + 3*sizeof(size_t), &size4, sizeof(size_t), CPY_HTD)); CUDA_CALL(cudaMemcpy(metadata->get() + 4*sizeof(size_t), &sign, 1, CPY_HTD)); if (sign == 0) { auto stencil = Stencil(signResult).pack(); metadata = CudaArrayCopy().Concatenate(SharedCudaPtrVector<char> { metadata, stencil }); } CUDA_ASSERT_RETURN(cudaGetLastError()); LOG4CPLUS_INFO(_logger, "DELTA AFL (FLOAT) enoding END"); return SharedCudaPtrVector<char> { metadata, CudaArrayCopy().Concatenate( resultVector) }; } template<> SharedCudaPtr<float> DeltaAflEncoding::Decode(SharedCudaPtrVector<char> input) { LOG4CPLUS_INFO_FMT(_logger, "DELTA AFL (FLOAT) decoding START: input[0] size = %lu, input[1] size = %lu", input[0]->size(), input[1]->size()); if (input[1]->size() <= 0) return CudaPtr<float>::make_shared(); int offset = 0, step = sizeof(char); auto metadata = input[0]; auto data = input[1]; // read metadata information char sign; long int compressedMantissaSize, compressedMantissaDataStartSize, compressedExponentSize, compressedExponentDataStartSize; CUDA_CALL(cudaMemcpy(&compressedMantissaSize, metadata->get(), sizeof(size_t), CPY_DTH)); CUDA_CALL(cudaMemcpy(&compressedMantissaDataStartSize, metadata->get() + sizeof(size_t), sizeof(size_t), CPY_DTH)); CUDA_CALL(cudaMemcpy(&compressedExponentSize, metadata->get()+ 2*sizeof(size_t), sizeof(size_t), CPY_DTH)); CUDA_CALL(cudaMemcpy(&compressedExponentDataStartSize, metadata->get() + 3*sizeof(size_t), sizeof(size_t), CPY_DTH)); CUDA_CALL(cudaMemcpy(&sign, metadata->get()+ 4*sizeof(size_t), 1, CPY_DTH)); std::cout << "Size of mantissa : " << compressedMantissaSize << "\n"; std::cout << "Size of mantissa start : " << compressedMantissaDataStartSize << "\n"; std::cout << "Size of exponent : " << compressedExponentSize << "\n"; std::cout << "Size of exponent start: " << compressedExponentDataStartSize << "\n"; // read mantissa metadata 
information char minBit, rest; CUDA_CALL(cudaMemcpy(&minBit, data->get()+offset, step, CPY_DTH)); offset += step; CUDA_CALL(cudaMemcpy(&rest, data->get()+offset, step, CPY_DTH)); offset += 3 * step; // decode mantissa auto mantissaDecoded = DecodeDeltaAfl<int>((int*) (data->get() + offset), (int*) (data->get() + offset + compressedMantissaSize), compressedMantissaSize, minBit); //long int compressedExponentSize = data->size() - compressedMantissaSize - 8; offset += compressedMantissaSize + compressedMantissaDataStartSize; // read exponent metadata information CUDA_CALL(cudaMemcpy(&minBit, data->get()+offset, step, CPY_DTH)); offset += step; CUDA_CALL(cudaMemcpy(&rest, data->get()+offset, step, CPY_DTH)); offset += 3 * step; // decode exponent auto exponentDecoded = DecodeDeltaAfl<int>((int*) (data->get() + offset), (int*) (data->get() + offset + compressedExponentSize ), compressedExponentSize, minBit); // recover signs Stencil stencil; size_t size = mantissaDecoded->size(); if (sign) stencil = Stencil( CudaArrayTransform().Transform<int, int>( CudaPtr<int>::make_shared(size), FillOperator<int, int> { (int) sign - 1 })); else stencil = Stencil(metadata, sizeof(size_t) + 1); // compose exponent, mantissa and sign to floats auto result = CudaPtr<float>::make_shared(size); this->_policy.setSize(size); cudaLaunch(this->_policy, _delta_afl_composeFloatKernel, mantissaDecoded->get(), exponentDecoded->get(), stencil->get(), size, result->get()); cudaDeviceSynchronize(); CUDA_ASSERT_RETURN(cudaGetLastError()); LOG4CPLUS_INFO(_logger, "DELTA AFL decoding END"); return result; } SharedCudaPtrVector<char> DeltaAflEncoding::EncodeInt(SharedCudaPtr<int> data) { return this->Encode<int>(data); } SharedCudaPtr<int> DeltaAflEncoding::DecodeInt(SharedCudaPtrVector<char> data) { return this->Decode<int>(data); } SharedCudaPtrVector<char> DeltaAflEncoding::EncodeTime(SharedCudaPtr<time_t> data) { return this->Encode<time_t>(data); } SharedCudaPtr<time_t> DeltaAflEncoding::DecodeTime(SharedCudaPtrVector<char> data) { return this->Decode<time_t>(data); } SharedCudaPtrVector<char> DeltaAflEncoding::EncodeFloat(SharedCudaPtr<float> data) { return this->Encode<float>(data); } SharedCudaPtr<float> DeltaAflEncoding::DecodeFloat(SharedCudaPtrVector<char> data) { return this->Decode<float>(data); } SharedCudaPtrVector<char> DeltaAflEncoding::EncodeDouble( SharedCudaPtr<double> data) { return SharedCudaPtrVector<char>(); } SharedCudaPtr<double> DeltaAflEncoding::DecodeDouble( SharedCudaPtrVector<char> data) { return SharedCudaPtr<double>(); } SharedCudaPtrVector<char> DeltaAflEncoding::EncodeShort(SharedCudaPtr<short> data) { return this->Encode<short>(data); } SharedCudaPtr<short> DeltaAflEncoding::DecodeShort(SharedCudaPtrVector<char> data) { return this->Decode<short>(data); } SharedCudaPtrVector<char> DeltaAflEncoding::EncodeChar(SharedCudaPtr<char> data) { return this->Encode<char>(data); } SharedCudaPtr<char> DeltaAflEncoding::DecodeChar(SharedCudaPtrVector<char> data) { return this->Decode<char>(data); } size_t DeltaAflEncoding::GetMetadataSize(SharedCudaPtr<char> data, DataType type) { if (data->size() <= 0) return 0; switch (type) { case DataType::d_int: return 4 * sizeof(char); case DataType::d_float: return sizeof(size_t) + 1; default: throw NotImplementedException( "No DictEncoding::GetCompressedSize implementation for that type"); } } size_t DeltaAflEncoding::GetCompressedSize(SharedCudaPtr<char> data, DataType type) { if (data->size() <= 0) return 0; switch (type) { case DataType::d_int: return 
GetCompressedSizeIntegral(CastSharedCudaPtr<char, int>(data)); case DataType::d_float: return GetCompressedSizeFloatingPoint( CastSharedCudaPtr<char, float>(data)); default: throw NotImplementedException( "No DictEncoding::GetCompressedSize implementation for that type"); } } template<typename T> size_t DeltaAflEncoding::GetCompressedSizeIntegral(SharedCudaPtr<T> data) { char minBit = CudaArrayStatistics().MinBitCnt<T>(data) + 1; // (char)13; //SharedCudaPtr<int> initial_data = CudaArrayGenerator().GenerateDescendingDeviceArray(max_size); const int WARP_SIZE = 32; int cword = sizeof(T) * 8; unsigned int bit_length = CudaArrayStatistics().MinBitCnt<T>(data); int max_size = data->size(); unsigned long data_size = max_size * sizeof(int); unsigned long data_chunk = cword * WARP_SIZE; unsigned long compressed_data_size = (max_size < data_chunk ? data_chunk : max_size); compressed_data_size = ((compressed_data_size * bit_length + (data_chunk)-1) / (data_chunk)) * WARP_SIZE * sizeof(T) + (cword) * sizeof(T); int compression_blocks_count = (compressed_data_size + (sizeof(T) * WARP_SIZE) - 1) / (sizeof(T) * WARP_SIZE); return compressed_data_size + compression_blocks_count; } template<typename T> size_t DeltaAflEncoding::GetCompressedSizeFloatingPoint(SharedCudaPtr<T> data) { auto minMax = CudaArrayStatistics().MinMax(data); auto signResult = CudaPtr<int>::make_shared(data->size()); auto exponentResult = CudaPtr<int>::make_shared(data->size()); auto mantissaResult = CudaPtr<int>::make_shared(data->size()); // Now we split every float number to three integers - sign, exponent and mantissa this->_policy.setSize(data->size()); cudaLaunch(this->_policy, _delta_afl_splitFloatKernel, data->get(), data->size(), mantissaResult->get(), exponentResult->get(), signResult->get()); cudaDeviceSynchronize(); size_t size = GetCompressedSizeIntegral(exponentResult) + GetCompressedSizeIntegral(mantissaResult); size += GetMetadataSize(CastSharedCudaPtr<int, char>(exponentResult), DataType::d_int); size += GetMetadataSize(CastSharedCudaPtr<int, char>(mantissaResult), DataType::d_int); return size; } #define DELTA_AFL_ENCODING_SPEC(X) \ template SharedCudaPtrVector<char> DeltaAflEncoding::Encode<X>(SharedCudaPtr<X>); \ template SharedCudaPtr<X> DeltaAflEncoding::Decode<X>(SharedCudaPtrVector<char>); FOR_EACH(DELTA_AFL_ENCODING_SPEC, char, short, int, long, unsigned int) } /* namespace ddj */
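// --- Added note (sketch, not part of the original encoder) -------------------
// Host-side restatement of the size arithmetic used by DeltaAflEncoding::Encode
// and GetCompressedSizeIntegral above, assuming T = int purely for illustration:
// the element count is clamped upward, the packed payload is rounded up to whole
// warp-sized chunks of bit_length-bit values plus one trailing codeword-sized
// chunk, and one block-start entry is kept per warp-sized compressed block. The
// function name and the numbers in main() are examples, not values taken from
// the library.
#include <cstdio>

static size_t delta_afl_compressed_bytes(size_t n, unsigned bit_length)
{
    const size_t WARP_SIZE  = 32;
    const size_t cword      = sizeof(int) * 8;      // bits per codeword
    const size_t data_chunk = cword * WARP_SIZE;    // bits packed by one warp chunk
    size_t sz = (n < data_chunk ? data_chunk : n);  // clamp the element count upward
    sz = ((sz * bit_length + data_chunk - 1) / data_chunk)  // number of warp chunks
             * WARP_SIZE * sizeof(int)                       // bytes per chunk
         + cword * sizeof(int);                              // one trailing chunk
    return sz;
}

int main()
{
    size_t bytes  = delta_afl_compressed_bytes(1000000, 13);   // e.g. 1M ints, 13 bits each
    size_t blocks = (bytes + sizeof(int) * 32 - 1) / (sizeof(int) * 32); // block-start entries
    printf("compressed payload: %zu bytes, %zu block-start entries\n", bytes, blocks);
    return 0;
}
// -----------------------------------------------------------------------------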
36a79ad60b78f902a461a4bebc13ec60f73e4e9c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @generated from zsymmetrize_tiles.cu normal z -> d, Fri Sep 11 18:29:21 2015 @author Mark Gates */ #include "common_magma.h" #define NB 64 /* Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix. Grid is ntile x ceil(m/NB). Each tile is m x m, and is divided into block rows, each NB x m. Each block has NB threads. Each thread copies one row, iterating across all columns below diagonal. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void dsymmetrize_tiles_lower( int m, double *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.x*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.y*NB + threadIdx.x; double *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; double *dAend = dA + i*ldda; while( dA < dAend ) { *dAT = (*dA); // upper := lower dA += ldda; dAT += 1; } } } // only difference with _lower version is direction dA=dAT instead of dAT=dA. __global__ void dsymmetrize_tiles_upper( int m, double *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.x*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.y*NB + threadIdx.x; double *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; double *dAend = dA + i*ldda; while( dA < dAend ) { *dA = (*dAT); // lower := upper dA += ldda; dAT += 1; } } } /** Purpose ------- DSYMMETRIZE_TILES copies lower triangle to upper triangle, or vice-versa, to make some blocks of dA into general representations of a symmetric block. This processes NTILE blocks, typically the diagonal blocks. Each block is offset by mstride rows and nstride columns from the previous block. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA that is valid on input. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] m INTEGER The number of rows & columns of each square block of dA. M >= 0. @param[in,out] dA DOUBLE_PRECISION array, dimension (LDDA,N) The matrix dA. N = m + nstride*(ntile-1). @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1, m + mstride*(ntile-1)). @param[in] ntile INTEGER Number of blocks to symmetrize. ntile >= 0. @param[in] mstride INTEGER Row offset from start of one block to start of next block. mstride >= 0. Either (mstride >= m) or (nstride >= m), to prevent m-by-m tiles from overlapping. @param[in] nstride INTEGER Column offset from start of one block to start of next block. nstride >= 0. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dsymmetrize_tiles_q( magma_uplo_t uplo, magma_int_t m, magmaDouble_ptr dA, magma_int_t ldda, magma_int_t ntile, magma_int_t mstride, magma_int_t nstride, magma_queue_t queue ) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) info = -1; else if ( m < 0 ) info = -2; else if ( ldda < max(1,m + mstride*(ntile-1)) ) info = -5; else if ( ntile < 0 ) info = -6; else if ( mstride < 0 ) info = -7; else if ( nstride < 0 ) info = -8; else if ( mstride < m && nstride < m ) // only one must be >= m. 
info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || ntile == 0 ) return; dim3 threads( NB ); dim3 grid( ntile, magma_ceildiv( m, NB ) ); //printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x ); if ( uplo == MagmaUpper ) { hipLaunchKernelGGL(( dsymmetrize_tiles_upper), dim3(grid), dim3(threads), 0, queue , m, dA, ldda, mstride, nstride ); } else { hipLaunchKernelGGL(( dsymmetrize_tiles_lower), dim3(grid), dim3(threads), 0, queue , m, dA, ldda, mstride, nstride ); } } /** @see magmablas_dsymmetrize_tiles_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dsymmetrize_tiles( magma_uplo_t uplo, magma_int_t m, magmaDouble_ptr dA, magma_int_t ldda, magma_int_t ntile, magma_int_t mstride, magma_int_t nstride ) { magmablas_dsymmetrize_tiles_q( uplo, m, dA, ldda, ntile, mstride, nstride, magma_stream ); }
36a79ad60b78f902a461a4bebc13ec60f73e4e9c.cu
/* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @generated from zsymmetrize_tiles.cu normal z -> d, Fri Sep 11 18:29:21 2015 @author Mark Gates */ #include "common_magma.h" #define NB 64 /* Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix. Grid is ntile x ceil(m/NB). Each tile is m x m, and is divided into block rows, each NB x m. Each block has NB threads. Each thread copies one row, iterating across all columns below diagonal. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void dsymmetrize_tiles_lower( int m, double *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.x*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.y*NB + threadIdx.x; double *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; double *dAend = dA + i*ldda; while( dA < dAend ) { *dAT = (*dA); // upper := lower dA += ldda; dAT += 1; } } } // only difference with _lower version is direction dA=dAT instead of dAT=dA. __global__ void dsymmetrize_tiles_upper( int m, double *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.x*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.y*NB + threadIdx.x; double *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; double *dAend = dA + i*ldda; while( dA < dAend ) { *dA = (*dAT); // lower := upper dA += ldda; dAT += 1; } } } /** Purpose ------- DSYMMETRIZE_TILES copies lower triangle to upper triangle, or vice-versa, to make some blocks of dA into general representations of a symmetric block. This processes NTILE blocks, typically the diagonal blocks. Each block is offset by mstride rows and nstride columns from the previous block. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA that is valid on input. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] m INTEGER The number of rows & columns of each square block of dA. M >= 0. @param[in,out] dA DOUBLE_PRECISION array, dimension (LDDA,N) The matrix dA. N = m + nstride*(ntile-1). @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1, m + mstride*(ntile-1)). @param[in] ntile INTEGER Number of blocks to symmetrize. ntile >= 0. @param[in] mstride INTEGER Row offset from start of one block to start of next block. mstride >= 0. Either (mstride >= m) or (nstride >= m), to prevent m-by-m tiles from overlapping. @param[in] nstride INTEGER Column offset from start of one block to start of next block. nstride >= 0. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dsymmetrize_tiles_q( magma_uplo_t uplo, magma_int_t m, magmaDouble_ptr dA, magma_int_t ldda, magma_int_t ntile, magma_int_t mstride, magma_int_t nstride, magma_queue_t queue ) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) info = -1; else if ( m < 0 ) info = -2; else if ( ldda < max(1,m + mstride*(ntile-1)) ) info = -5; else if ( ntile < 0 ) info = -6; else if ( mstride < 0 ) info = -7; else if ( nstride < 0 ) info = -8; else if ( mstride < m && nstride < m ) // only one must be >= m. 
info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || ntile == 0 ) return; dim3 threads( NB ); dim3 grid( ntile, magma_ceildiv( m, NB ) ); //printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x ); if ( uplo == MagmaUpper ) { dsymmetrize_tiles_upper<<< grid, threads, 0, queue >>>( m, dA, ldda, mstride, nstride ); } else { dsymmetrize_tiles_lower<<< grid, threads, 0, queue >>>( m, dA, ldda, mstride, nstride ); } } /** @see magmablas_dsymmetrize_tiles_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dsymmetrize_tiles( magma_uplo_t uplo, magma_int_t m, magmaDouble_ptr dA, magma_int_t ldda, magma_int_t ntile, magma_int_t mstride, magma_int_t nstride ) { magmablas_dsymmetrize_tiles_q( uplo, m, dA, ldda, ntile, mstride, nstride, magma_stream ); }
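// --- Added note (usage sketch, not from the MAGMA sources above) -------------
// Symmetrizing the ntile diagonal m-by-m blocks of a device matrix dA with
// leading dimension ldda, taking the lower triangles as the valid input. dA is
// assumed to be already allocated and filled, ldda >= m + m*(ntile-1), and MAGMA
// initialized by the caller; symmetrize_diagonal_blocks is a hypothetical
// wrapper name.
#include "common_magma.h"

static void symmetrize_diagonal_blocks(magmaDouble_ptr dA, magma_int_t ldda,
                                       magma_int_t m, magma_int_t ntile)
{
    // Diagonal blocks: each block starts m rows and m columns after the
    // previous one, so mstride = nstride = m and the tiles cannot overlap.
    magmablas_dsymmetrize_tiles(MagmaLower, m, dA, ldda,
                                ntile, /*mstride=*/m, /*nstride=*/m);
}
// -----------------------------------------------------------------------------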
1462a6908fe34512ee152b8d9cd33194651bfbc4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <sys/time.h> #include <cstdio> #include "jacobi.h" #include "error_checks.h" // Change this to 0 if CPU reference result is not needed #define COMPUTE_CPU_REFERENCE 1 #define MAX_ITERATIONS 3000 // CPU kernel void sweepCPU(double* phi, const double *phiPrev, const double *source, double h2, int N) { int i, j; int index, i1, i2, i3, i4; for (j = 1; j < N-1; j++) { for (i = 1; i < N-1; i++) { index = i + j*N; i1 = (i-1) + j * N; i2 = (i+1) + j * N; i3 = i + (j-1) * N; i4 = i + (j+1) * N; phi[index] = 0.25 * (phiPrev[i1] + phiPrev[i2] + phiPrev[i3] + phiPrev[i4] - h2 * source[index]); } } } // GPU kernel __global__ void sweepGPU(double *phi, const double *phiPrev, const double *source, double h2, int N) { #error Add here the GPU version of the update routine (see sweepCPU above) } double compareArrays(const double *a, const double *b, int N) { double error = 0.0; int i; for (i = 0; i < N*N; i++) { error += fabs(a[i] - b[i]); } return error/(N*N); } double diffCPU(const double *phi, const double *phiPrev, int N) { int i; double sum = 0; double diffsum = 0; for (i = 0; i < N*N; i++) { diffsum += (phi[i] - phiPrev[i]) * (phi[i] - phiPrev[i]); sum += phi[i] * phi[i]; } return sqrt(diffsum/sum); } int main() { timeval t1, t2; // Structs for timing const int N = 512; double h = 1.0 / (N - 1); int iterations; const double tolerance = 5e-4; // Stopping condition int i, j, index; const int blocksize = 16; double *phi = new double[N*N]; double *phiPrev = new double[N*N]; double *source = new double[N*N]; double *phi_cuda = new double[N*N]; double *phi_d, *phiPrev_d, *source_d; // Size of the arrays in bytes const int size = N*N*sizeof(double); double diff; // Source initialization for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { double x, y; x = (i - N / 2) * h; y = (j - N / 2) * h; index = j + i * N; if (((x - 0.25) * (x - 0.25) + y * y) < 0.1 * 0.1) source[index] = 1e10*h*h; else if (((x + 0.25) * (x + 0.25) + y * y) < 0.1 * 0.1) source[index] = -1e10*h*h; else source[index] = 0.0; } } CUDA_CHECK( hipMalloc( (void**)&source_d, size) ); CUDA_CHECK( hipMemcpy(source_d, source, size, hipMemcpyHostToDevice) ); // Reset values to zero for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { index = j + i * N; phi[index] = 0.0; phiPrev[index] = 0.0; } } CUDA_CHECK( hipMalloc( (void**)&phi_d, size) ); CUDA_CHECK( hipMalloc( (void**)&phiPrev_d, size) ); CUDA_CHECK( hipMemcpy(phi_d, phi, size, hipMemcpyHostToDevice) ); CUDA_CHECK( hipMemcpy(phiPrev_d, phiPrev, size, hipMemcpyHostToDevice) ); // CPU version if(COMPUTE_CPU_REFERENCE) { gettimeofday(&t1, NULL); // Do sweeps untill difference is under the tolerance diff = tolerance * 2; iterations = 0; while (diff > tolerance && iterations < MAX_ITERATIONS) { sweepCPU(phiPrev, phi, source, h * h, N); sweepCPU(phi, phiPrev, source, h * h, N); iterations += 2; if (iterations % 100 == 0) { diff = diffCPU(phi, phiPrev, N); printf("%d %g\n", iterations, diff); } } gettimeofday(&t2, NULL); printf("CPU Jacobi: %g seconds, %d iterations\n", t2.tv_sec - t1.tv_sec + (t2.tv_usec - t1.tv_usec) / 1.0e6, iterations); } // GPU version dim3 dimBlock(blocksize, blocksize); dim3 dimGrid((N + blocksize - 1) / blocksize, (N + blocksize - 1) / blocksize); //do sweeps until diff under tolerance diff = tolerance * 2; iterations = 0; gettimeofday(&t1, NULL); while (diff > tolerance && iterations < MAX_ITERATIONS) { // See above how the CPU update kernel is called // and implement similar 
calling sequence for the GPU code //// Add routines here #error Add GPU kernel calls here (see CPU version above) iterations += 2; if (iterations % 100 == 0) { // diffGPU is defined in the header file, it uses // Thrust library for reduction computation diff = diffGPU<double>(phiPrev_d, phi_d, N); CHECK_ERROR_MSG("Difference computation"); printf("%d %g\n", iterations, diff); } } //// Add here the routine to copy back the results #error Copy back the results gettimeofday(&t2, NULL); printf("GPU Jacobi: %g seconds, %d iterations\n", t2.tv_sec - t1.tv_sec + (t2.tv_usec - t1.tv_usec) / 1.0e6, iterations); //// Add here the clean up code for all allocated CUDA resources #error Add here the clean up code if (COMPUTE_CPU_REFERENCE) { printf("Average difference is %g\n", compareArrays(phi, phi_cuda, N)); } delete[] phi; delete[] phi_cuda; delete[] phiPrev; delete[] source; return EXIT_SUCCESS; }
1462a6908fe34512ee152b8d9cd33194651bfbc4.cu
#include <sys/time.h> #include <cstdio> #include "jacobi.h" #include "error_checks.h" // Change this to 0 if CPU reference result is not needed #define COMPUTE_CPU_REFERENCE 1 #define MAX_ITERATIONS 3000 // CPU kernel void sweepCPU(double* phi, const double *phiPrev, const double *source, double h2, int N) { int i, j; int index, i1, i2, i3, i4; for (j = 1; j < N-1; j++) { for (i = 1; i < N-1; i++) { index = i + j*N; i1 = (i-1) + j * N; i2 = (i+1) + j * N; i3 = i + (j-1) * N; i4 = i + (j+1) * N; phi[index] = 0.25 * (phiPrev[i1] + phiPrev[i2] + phiPrev[i3] + phiPrev[i4] - h2 * source[index]); } } } // GPU kernel __global__ void sweepGPU(double *phi, const double *phiPrev, const double *source, double h2, int N) { #error Add here the GPU version of the update routine (see sweepCPU above) } double compareArrays(const double *a, const double *b, int N) { double error = 0.0; int i; for (i = 0; i < N*N; i++) { error += fabs(a[i] - b[i]); } return error/(N*N); } double diffCPU(const double *phi, const double *phiPrev, int N) { int i; double sum = 0; double diffsum = 0; for (i = 0; i < N*N; i++) { diffsum += (phi[i] - phiPrev[i]) * (phi[i] - phiPrev[i]); sum += phi[i] * phi[i]; } return sqrt(diffsum/sum); } int main() { timeval t1, t2; // Structs for timing const int N = 512; double h = 1.0 / (N - 1); int iterations; const double tolerance = 5e-4; // Stopping condition int i, j, index; const int blocksize = 16; double *phi = new double[N*N]; double *phiPrev = new double[N*N]; double *source = new double[N*N]; double *phi_cuda = new double[N*N]; double *phi_d, *phiPrev_d, *source_d; // Size of the arrays in bytes const int size = N*N*sizeof(double); double diff; // Source initialization for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { double x, y; x = (i - N / 2) * h; y = (j - N / 2) * h; index = j + i * N; if (((x - 0.25) * (x - 0.25) + y * y) < 0.1 * 0.1) source[index] = 1e10*h*h; else if (((x + 0.25) * (x + 0.25) + y * y) < 0.1 * 0.1) source[index] = -1e10*h*h; else source[index] = 0.0; } } CUDA_CHECK( cudaMalloc( (void**)&source_d, size) ); CUDA_CHECK( cudaMemcpy(source_d, source, size, cudaMemcpyHostToDevice) ); // Reset values to zero for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { index = j + i * N; phi[index] = 0.0; phiPrev[index] = 0.0; } } CUDA_CHECK( cudaMalloc( (void**)&phi_d, size) ); CUDA_CHECK( cudaMalloc( (void**)&phiPrev_d, size) ); CUDA_CHECK( cudaMemcpy(phi_d, phi, size, cudaMemcpyHostToDevice) ); CUDA_CHECK( cudaMemcpy(phiPrev_d, phiPrev, size, cudaMemcpyHostToDevice) ); // CPU version if(COMPUTE_CPU_REFERENCE) { gettimeofday(&t1, NULL); // Do sweeps untill difference is under the tolerance diff = tolerance * 2; iterations = 0; while (diff > tolerance && iterations < MAX_ITERATIONS) { sweepCPU(phiPrev, phi, source, h * h, N); sweepCPU(phi, phiPrev, source, h * h, N); iterations += 2; if (iterations % 100 == 0) { diff = diffCPU(phi, phiPrev, N); printf("%d %g\n", iterations, diff); } } gettimeofday(&t2, NULL); printf("CPU Jacobi: %g seconds, %d iterations\n", t2.tv_sec - t1.tv_sec + (t2.tv_usec - t1.tv_usec) / 1.0e6, iterations); } // GPU version dim3 dimBlock(blocksize, blocksize); dim3 dimGrid((N + blocksize - 1) / blocksize, (N + blocksize - 1) / blocksize); //do sweeps until diff under tolerance diff = tolerance * 2; iterations = 0; gettimeofday(&t1, NULL); while (diff > tolerance && iterations < MAX_ITERATIONS) { // See above how the CPU update kernel is called // and implement similar calling sequence for the GPU code //// Add routines here #error Add GPU kernel calls 
here (see CPU version above) iterations += 2; if (iterations % 100 == 0) { // diffGPU is defined in the header file, it uses // Thrust library for reduction computation diff = diffGPU<double>(phiPrev_d, phi_d, N); CHECK_ERROR_MSG("Difference computation"); printf("%d %g\n", iterations, diff); } } //// Add here the routine to copy back the results #error Copy back the results gettimeofday(&t2, NULL); printf("GPU Jacobi: %g seconds, %d iterations\n", t2.tv_sec - t1.tv_sec + (t2.tv_usec - t1.tv_usec) / 1.0e6, iterations); //// Add here the clean up code for all allocated CUDA resources #error Add here the clean up code if (COMPUTE_CPU_REFERENCE) { printf("Average difference is %g\n", compareArrays(phi, phi_cuda, N)); } delete[] phi; delete[] phi_cuda; delete[] phiPrev; delete[] source; return EXIT_SUCCESS; }
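// --- Added note (sketch, not the official exercise solution) -----------------
// One possible way to fill in the #error placeholders above. The kernel mirrors
// sweepCPU with one thread per interior grid point; the commented host fragment
// shows a matching launch sequence, the copy-back into phi_cuda, and the device
// clean-up. sweepGPU_sketch is a hypothetical name; dimGrid, dimBlock, phi_d,
// phiPrev_d, source_d, phi_cuda, size, CUDA_CHECK and CHECK_ERROR_MSG all refer
// to symbols already declared in the program above.
__global__ void sweepGPU_sketch(double *phi, const double *phiPrev,
                                const double *source, double h2, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i > 0 && i < N - 1 && j > 0 && j < N - 1) {
        int index = i + j * N;
        phi[index] = 0.25 * (phiPrev[(i - 1) + j * N] + phiPrev[(i + 1) + j * N] +
                             phiPrev[i + (j - 1) * N] + phiPrev[i + (j + 1) * N] -
                             h2 * source[index]);
    }
}

/* Host-side fragment corresponding to the remaining placeholders:

   // inside the GPU iteration loop (same ping-pong order as the CPU version)
   sweepGPU_sketch<<<dimGrid, dimBlock>>>(phiPrev_d, phi_d, source_d, h * h, N);
   sweepGPU_sketch<<<dimGrid, dimBlock>>>(phi_d, phiPrev_d, source_d, h * h, N);
   CHECK_ERROR_MSG("sweepGPU_sketch");

   // after the loop: copy the result back for the CPU/GPU comparison
   CUDA_CHECK( cudaMemcpy(phi_cuda, phi_d, size, cudaMemcpyDeviceToHost) );

   // release all device allocations
   CUDA_CHECK( cudaFree(phi_d) );
   CUDA_CHECK( cudaFree(phiPrev_d) );
   CUDA_CHECK( cudaFree(source_d) );
*/
// -----------------------------------------------------------------------------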
9287117048f613a9345696b9f89ffe685077a345.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (c) 2011, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include "device.hpp" namespace pcl { namespace device { __device__ __forceinline__ float getMinTime (const float3& volume_max, const float3& origin, const float3& dir) { float txmin = ( (dir.x > 0 ? 0.f : volume_max.x) - origin.x) / dir.x; float tymin = ( (dir.y > 0 ? 0.f : volume_max.y) - origin.y) / dir.y; float tzmin = ( (dir.z > 0 ? 0.f : volume_max.z) - origin.z) / dir.z; return fmax ( fmax (txmin, tymin), tzmin); } __device__ __forceinline__ float getMaxTime (const float3& volume_max, const float3& origin, const float3& dir) { float txmax = ( (dir.x > 0 ? volume_max.x : 0.f) - origin.x) / dir.x; float tymax = ( (dir.y > 0 ? volume_max.y : 0.f) - origin.y) / dir.y; float tzmax = ( (dir.z > 0 ? 
volume_max.z : 0.f) - origin.z) / dir.z; return fmin (fmin (txmax, tymax), tzmax); } struct RayCaster { enum { CTA_SIZE_X = 32, CTA_SIZE_Y = 8 }; Mat33 Rcurr; float3 tcurr; float time_step; float3 volume_size; float3 cell_size; int cols, rows; PtrStep<short2> volume; Intr intr; mutable PtrStep<float> nmap; mutable PtrStep<float> vmap; __device__ __forceinline__ float3 get_ray_next (int x, int y) const { float3 ray_next; ray_next.x = (x - intr.cx) / intr.fx; ray_next.y = (y - intr.cy) / intr.fy; ray_next.z = 1; return ray_next; } __device__ __forceinline__ bool checkInds (const int3& g) const { return (g.x >= 0 && g.y >= 0 && g.z >= 0 && g.x < VOLUME_X && g.y < VOLUME_Y && g.z < VOLUME_Z); } __device__ __forceinline__ float readTsdf (int x, int y, int z, pcl::gpu::tsdf_buffer buffer) const { const short2* tmp_pos = &(volume.ptr (buffer.voxels_size.y * z + y)[x]); short2* pos = const_cast<short2*> (tmp_pos); shift_tsdf_pointer(&pos, buffer); return unpack_tsdf (*pos); } __device__ __forceinline__ int3 getVoxel (float3 point) const { int vx = __float2int_rd (point.x / cell_size.x); // round to negative infinity int vy = __float2int_rd (point.y / cell_size.y); int vz = __float2int_rd (point.z / cell_size.z); return make_int3 (vx, vy, vz); } __device__ __forceinline__ float interpolateTrilineary (const float3& origin, const float3& dir, float time, pcl::gpu::tsdf_buffer buffer) const { return interpolateTrilineary (origin + dir * time, buffer); } __device__ __forceinline__ float interpolateTrilineary (const float3& point, pcl::gpu::tsdf_buffer buffer) const { int3 g = getVoxel (point); if (g.x <= 0 || g.x >= buffer.voxels_size.x - 1) return numeric_limits<float>::quiet_NaN (); if (g.y <= 0 || g.y >= buffer.voxels_size.y - 1) return numeric_limits<float>::quiet_NaN (); if (g.z <= 0 || g.z >= buffer.voxels_size.z - 1) return numeric_limits<float>::quiet_NaN (); float vx = (g.x + 0.5f) * cell_size.x; float vy = (g.y + 0.5f) * cell_size.y; float vz = (g.z + 0.5f) * cell_size.z; g.x = (point.x < vx) ? (g.x - 1) : g.x; g.y = (point.y < vy) ? (g.y - 1) : g.y; g.z = (point.z < vz) ? (g.z - 1) : g.z; float a = (point.x - (g.x + 0.5f) * cell_size.x) / cell_size.x; float b = (point.y - (g.y + 0.5f) * cell_size.y) / cell_size.y; float c = (point.z - (g.z + 0.5f) * cell_size.z) / cell_size.z; float res = readTsdf (g.x + 0, g.y + 0, g.z + 0, buffer) * (1 - a) * (1 - b) * (1 - c) + readTsdf (g.x + 0, g.y + 0, g.z + 1, buffer) * (1 - a) * (1 - b) * c + readTsdf (g.x + 0, g.y + 1, g.z + 0, buffer) * (1 - a) * b * (1 - c) + readTsdf (g.x + 0, g.y + 1, g.z + 1, buffer) * (1 - a) * b * c + readTsdf (g.x + 1, g.y + 0, g.z + 0, buffer) * a * (1 - b) * (1 - c) + readTsdf (g.x + 1, g.y + 0, g.z + 1, buffer) * a * (1 - b) * c + readTsdf (g.x + 1, g.y + 1, g.z + 0, buffer) * a * b * (1 - c) + readTsdf (g.x + 1, g.y + 1, g.z + 1, buffer) * a * b * c; return res; } __device__ __forceinline__ void operator () (pcl::gpu::tsdf_buffer buffer) const { int x = threadIdx.x + blockIdx.x * CTA_SIZE_X; int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y; if (x >= cols || y >= rows) return; vmap.ptr (y)[x] = numeric_limits<float>::quiet_NaN (); nmap.ptr (y)[x] = numeric_limits<float>::quiet_NaN (); float3 ray_start = tcurr; float3 ray_next = Rcurr * get_ray_next (x, y) + tcurr; float3 ray_dir = normalized (ray_next - ray_start); //ensure that it isn't a degenerate case ray_dir.x = (ray_dir.x == 0.f) ? 1e-15 : ray_dir.x; ray_dir.y = (ray_dir.y == 0.f) ? 1e-15 : ray_dir.y; ray_dir.z = (ray_dir.z == 0.f) ? 
1e-15 : ray_dir.z; // computer time when entry and exit volume float time_start_volume = getMinTime (volume_size, ray_start, ray_dir); float time_exit_volume = getMaxTime (volume_size, ray_start, ray_dir); const float min_dist = 0.f; //in meters time_start_volume = fmax (time_start_volume, min_dist); if (time_start_volume >= time_exit_volume) return; float time_curr = time_start_volume; int3 g = getVoxel (ray_start + ray_dir * time_curr); g.x = max (0, min (g.x, buffer.voxels_size.x - 1)); g.y = max (0, min (g.y, buffer.voxels_size.y - 1)); g.z = max (0, min (g.z, buffer.voxels_size.z - 1)); float tsdf = readTsdf (g.x, g.y, g.z, buffer); //infinite loop guard const float max_time = 3 * (volume_size.x + volume_size.y + volume_size.z); for (; time_curr < max_time; time_curr += time_step) { float tsdf_prev = tsdf; int3 g = getVoxel ( ray_start + ray_dir * (time_curr + time_step) ); if (!checkInds (g)) break; tsdf = readTsdf (g.x, g.y, g.z, buffer); if (tsdf_prev < 0.f && tsdf > 0.f) break; if (tsdf_prev > 0.f && tsdf < 0.f) //zero crossing { float Ftdt = interpolateTrilineary (ray_start, ray_dir, time_curr + time_step, buffer); if (isnan (Ftdt)) break; float Ft = interpolateTrilineary (ray_start, ray_dir, time_curr, buffer); if (isnan (Ft)) break; //float Ts = time_curr - time_step * Ft/(Ftdt - Ft); float Ts = time_curr - time_step * Ft / (Ftdt - Ft); float3 vetex_found = ray_start + ray_dir * Ts; vmap.ptr (y )[x] = vetex_found.x; vmap.ptr (y + rows)[x] = vetex_found.y; vmap.ptr (y + 2 * rows)[x] = vetex_found.z; int3 g = getVoxel ( ray_start + ray_dir * time_curr ); if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < buffer.voxels_size.x - 2 && g.y < buffer.voxels_size.y - 2 && g.z < buffer.voxels_size.z - 2) { float3 t; float3 n; t = vetex_found; t.x += cell_size.x; float Fx1 = interpolateTrilineary (t, buffer); t = vetex_found; t.x -= cell_size.x; float Fx2 = interpolateTrilineary (t, buffer); n.x = (Fx1 - Fx2); t = vetex_found; t.y += cell_size.y; float Fy1 = interpolateTrilineary (t, buffer); t = vetex_found; t.y -= cell_size.y; float Fy2 = interpolateTrilineary (t, buffer); n.y = (Fy1 - Fy2); t = vetex_found; t.z += cell_size.z; float Fz1 = interpolateTrilineary (t, buffer); t = vetex_found; t.z -= cell_size.z; float Fz2 = interpolateTrilineary (t, buffer); n.z = (Fz1 - Fz2); n = normalized (n); nmap.ptr (y )[x] = n.x; nmap.ptr (y + rows)[x] = n.y; nmap.ptr (y + 2 * rows)[x] = n.z; } break; } } /* for(;;) */ } }; __global__ void rayCastKernel (const RayCaster rc, pcl::gpu::tsdf_buffer buffer) { rc (buffer); } } } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void pcl::device::raycast (const Intr& intr, const Mat33& Rcurr, const float3& tcurr, float tranc_dist, const float3& volume_size, const PtrStep<short2>& volume, const pcl::gpu::tsdf_buffer* buffer, MapArr& vmap, MapArr& nmap) { RayCaster rc; rc.Rcurr = Rcurr; rc.tcurr = tcurr; rc.time_step = tranc_dist * 0.8f; rc.volume_size = volume_size; rc.cell_size.x = volume_size.x / buffer->voxels_size.x; rc.cell_size.y = volume_size.y / buffer->voxels_size.y; rc.cell_size.z = volume_size.z / buffer->voxels_size.z; rc.cols = vmap.cols (); rc.rows = vmap.rows () / 3; rc.intr = intr; rc.volume = volume; rc.vmap = vmap; rc.nmap = nmap; dim3 block (RayCaster::CTA_SIZE_X, RayCaster::CTA_SIZE_Y); dim3 grid (divUp (rc.cols, block.x), divUp (rc.rows, block.y)); hipLaunchKernelGGL(( rayCastKernel), dim3(grid), dim3(block), 0, 0, rc, *buffer); cudaSafeCall (hipGetLastError ()); 
//cudaSafeCall(hipDeviceSynchronize()); }
9287117048f613a9345696b9f89ffe685077a345.cu
/* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (c) 2011, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include "device.hpp" namespace pcl { namespace device { __device__ __forceinline__ float getMinTime (const float3& volume_max, const float3& origin, const float3& dir) { float txmin = ( (dir.x > 0 ? 0.f : volume_max.x) - origin.x) / dir.x; float tymin = ( (dir.y > 0 ? 0.f : volume_max.y) - origin.y) / dir.y; float tzmin = ( (dir.z > 0 ? 0.f : volume_max.z) - origin.z) / dir.z; return fmax ( fmax (txmin, tymin), tzmin); } __device__ __forceinline__ float getMaxTime (const float3& volume_max, const float3& origin, const float3& dir) { float txmax = ( (dir.x > 0 ? volume_max.x : 0.f) - origin.x) / dir.x; float tymax = ( (dir.y > 0 ? volume_max.y : 0.f) - origin.y) / dir.y; float tzmax = ( (dir.z > 0 ? 
volume_max.z : 0.f) - origin.z) / dir.z; return fmin (fmin (txmax, tymax), tzmax); } struct RayCaster { enum { CTA_SIZE_X = 32, CTA_SIZE_Y = 8 }; Mat33 Rcurr; float3 tcurr; float time_step; float3 volume_size; float3 cell_size; int cols, rows; PtrStep<short2> volume; Intr intr; mutable PtrStep<float> nmap; mutable PtrStep<float> vmap; __device__ __forceinline__ float3 get_ray_next (int x, int y) const { float3 ray_next; ray_next.x = (x - intr.cx) / intr.fx; ray_next.y = (y - intr.cy) / intr.fy; ray_next.z = 1; return ray_next; } __device__ __forceinline__ bool checkInds (const int3& g) const { return (g.x >= 0 && g.y >= 0 && g.z >= 0 && g.x < VOLUME_X && g.y < VOLUME_Y && g.z < VOLUME_Z); } __device__ __forceinline__ float readTsdf (int x, int y, int z, pcl::gpu::tsdf_buffer buffer) const { const short2* tmp_pos = &(volume.ptr (buffer.voxels_size.y * z + y)[x]); short2* pos = const_cast<short2*> (tmp_pos); shift_tsdf_pointer(&pos, buffer); return unpack_tsdf (*pos); } __device__ __forceinline__ int3 getVoxel (float3 point) const { int vx = __float2int_rd (point.x / cell_size.x); // round to negative infinity int vy = __float2int_rd (point.y / cell_size.y); int vz = __float2int_rd (point.z / cell_size.z); return make_int3 (vx, vy, vz); } __device__ __forceinline__ float interpolateTrilineary (const float3& origin, const float3& dir, float time, pcl::gpu::tsdf_buffer buffer) const { return interpolateTrilineary (origin + dir * time, buffer); } __device__ __forceinline__ float interpolateTrilineary (const float3& point, pcl::gpu::tsdf_buffer buffer) const { int3 g = getVoxel (point); if (g.x <= 0 || g.x >= buffer.voxels_size.x - 1) return numeric_limits<float>::quiet_NaN (); if (g.y <= 0 || g.y >= buffer.voxels_size.y - 1) return numeric_limits<float>::quiet_NaN (); if (g.z <= 0 || g.z >= buffer.voxels_size.z - 1) return numeric_limits<float>::quiet_NaN (); float vx = (g.x + 0.5f) * cell_size.x; float vy = (g.y + 0.5f) * cell_size.y; float vz = (g.z + 0.5f) * cell_size.z; g.x = (point.x < vx) ? (g.x - 1) : g.x; g.y = (point.y < vy) ? (g.y - 1) : g.y; g.z = (point.z < vz) ? (g.z - 1) : g.z; float a = (point.x - (g.x + 0.5f) * cell_size.x) / cell_size.x; float b = (point.y - (g.y + 0.5f) * cell_size.y) / cell_size.y; float c = (point.z - (g.z + 0.5f) * cell_size.z) / cell_size.z; float res = readTsdf (g.x + 0, g.y + 0, g.z + 0, buffer) * (1 - a) * (1 - b) * (1 - c) + readTsdf (g.x + 0, g.y + 0, g.z + 1, buffer) * (1 - a) * (1 - b) * c + readTsdf (g.x + 0, g.y + 1, g.z + 0, buffer) * (1 - a) * b * (1 - c) + readTsdf (g.x + 0, g.y + 1, g.z + 1, buffer) * (1 - a) * b * c + readTsdf (g.x + 1, g.y + 0, g.z + 0, buffer) * a * (1 - b) * (1 - c) + readTsdf (g.x + 1, g.y + 0, g.z + 1, buffer) * a * (1 - b) * c + readTsdf (g.x + 1, g.y + 1, g.z + 0, buffer) * a * b * (1 - c) + readTsdf (g.x + 1, g.y + 1, g.z + 1, buffer) * a * b * c; return res; } __device__ __forceinline__ void operator () (pcl::gpu::tsdf_buffer buffer) const { int x = threadIdx.x + blockIdx.x * CTA_SIZE_X; int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y; if (x >= cols || y >= rows) return; vmap.ptr (y)[x] = numeric_limits<float>::quiet_NaN (); nmap.ptr (y)[x] = numeric_limits<float>::quiet_NaN (); float3 ray_start = tcurr; float3 ray_next = Rcurr * get_ray_next (x, y) + tcurr; float3 ray_dir = normalized (ray_next - ray_start); //ensure that it isn't a degenerate case ray_dir.x = (ray_dir.x == 0.f) ? 1e-15 : ray_dir.x; ray_dir.y = (ray_dir.y == 0.f) ? 1e-15 : ray_dir.y; ray_dir.z = (ray_dir.z == 0.f) ? 
1e-15 : ray_dir.z; // computer time when entry and exit volume float time_start_volume = getMinTime (volume_size, ray_start, ray_dir); float time_exit_volume = getMaxTime (volume_size, ray_start, ray_dir); const float min_dist = 0.f; //in meters time_start_volume = fmax (time_start_volume, min_dist); if (time_start_volume >= time_exit_volume) return; float time_curr = time_start_volume; int3 g = getVoxel (ray_start + ray_dir * time_curr); g.x = max (0, min (g.x, buffer.voxels_size.x - 1)); g.y = max (0, min (g.y, buffer.voxels_size.y - 1)); g.z = max (0, min (g.z, buffer.voxels_size.z - 1)); float tsdf = readTsdf (g.x, g.y, g.z, buffer); //infinite loop guard const float max_time = 3 * (volume_size.x + volume_size.y + volume_size.z); for (; time_curr < max_time; time_curr += time_step) { float tsdf_prev = tsdf; int3 g = getVoxel ( ray_start + ray_dir * (time_curr + time_step) ); if (!checkInds (g)) break; tsdf = readTsdf (g.x, g.y, g.z, buffer); if (tsdf_prev < 0.f && tsdf > 0.f) break; if (tsdf_prev > 0.f && tsdf < 0.f) //zero crossing { float Ftdt = interpolateTrilineary (ray_start, ray_dir, time_curr + time_step, buffer); if (isnan (Ftdt)) break; float Ft = interpolateTrilineary (ray_start, ray_dir, time_curr, buffer); if (isnan (Ft)) break; //float Ts = time_curr - time_step * Ft/(Ftdt - Ft); float Ts = time_curr - time_step * Ft / (Ftdt - Ft); float3 vetex_found = ray_start + ray_dir * Ts; vmap.ptr (y )[x] = vetex_found.x; vmap.ptr (y + rows)[x] = vetex_found.y; vmap.ptr (y + 2 * rows)[x] = vetex_found.z; int3 g = getVoxel ( ray_start + ray_dir * time_curr ); if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < buffer.voxels_size.x - 2 && g.y < buffer.voxels_size.y - 2 && g.z < buffer.voxels_size.z - 2) { float3 t; float3 n; t = vetex_found; t.x += cell_size.x; float Fx1 = interpolateTrilineary (t, buffer); t = vetex_found; t.x -= cell_size.x; float Fx2 = interpolateTrilineary (t, buffer); n.x = (Fx1 - Fx2); t = vetex_found; t.y += cell_size.y; float Fy1 = interpolateTrilineary (t, buffer); t = vetex_found; t.y -= cell_size.y; float Fy2 = interpolateTrilineary (t, buffer); n.y = (Fy1 - Fy2); t = vetex_found; t.z += cell_size.z; float Fz1 = interpolateTrilineary (t, buffer); t = vetex_found; t.z -= cell_size.z; float Fz2 = interpolateTrilineary (t, buffer); n.z = (Fz1 - Fz2); n = normalized (n); nmap.ptr (y )[x] = n.x; nmap.ptr (y + rows)[x] = n.y; nmap.ptr (y + 2 * rows)[x] = n.z; } break; } } /* for(;;) */ } }; __global__ void rayCastKernel (const RayCaster rc, pcl::gpu::tsdf_buffer buffer) { rc (buffer); } } } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void pcl::device::raycast (const Intr& intr, const Mat33& Rcurr, const float3& tcurr, float tranc_dist, const float3& volume_size, const PtrStep<short2>& volume, const pcl::gpu::tsdf_buffer* buffer, MapArr& vmap, MapArr& nmap) { RayCaster rc; rc.Rcurr = Rcurr; rc.tcurr = tcurr; rc.time_step = tranc_dist * 0.8f; rc.volume_size = volume_size; rc.cell_size.x = volume_size.x / buffer->voxels_size.x; rc.cell_size.y = volume_size.y / buffer->voxels_size.y; rc.cell_size.z = volume_size.z / buffer->voxels_size.z; rc.cols = vmap.cols (); rc.rows = vmap.rows () / 3; rc.intr = intr; rc.volume = volume; rc.vmap = vmap; rc.nmap = nmap; dim3 block (RayCaster::CTA_SIZE_X, RayCaster::CTA_SIZE_Y); dim3 grid (divUp (rc.cols, block.x), divUp (rc.rows, block.y)); rayCastKernel<<<grid, block>>>(rc, *buffer); cudaSafeCall (cudaGetLastError ()); //cudaSafeCall(cudaDeviceSynchronize()); }
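// --- Illustrative sketch (not part of the original file) ---
// getMinTime()/getMaxTime() above are the classic slab test: per axis the ray
// enters at the near face and leaves at the far face of the [0, volume_max]
// box, and the traversal interval is [max of the entry times, min of the exit
// times]. A sequential host-side mirror (hypothetical names; fmaxf/fminf need <cmath>):
struct HostVec3 { float x, y, z; };

static inline bool rayBoxInterval(const HostVec3& vol_max, const HostVec3& origin,
                                  const HostVec3& dir, float& t_in, float& t_out)
{
    // Direction components are assumed non-zero, as in the kernel, which
    // clamps them to 1e-15 before calling getMinTime()/getMaxTime().
    const float tx0 = ((dir.x > 0 ? 0.f : vol_max.x) - origin.x) / dir.x;
    const float ty0 = ((dir.y > 0 ? 0.f : vol_max.y) - origin.y) / dir.y;
    const float tz0 = ((dir.z > 0 ? 0.f : vol_max.z) - origin.z) / dir.z;
    const float tx1 = ((dir.x > 0 ? vol_max.x : 0.f) - origin.x) / dir.x;
    const float ty1 = ((dir.y > 0 ? vol_max.y : 0.f) - origin.y) / dir.y;
    const float tz1 = ((dir.z > 0 ? vol_max.z : 0.f) - origin.z) / dir.z;
    t_in  = fmaxf(fmaxf(tx0, ty0), tz0);
    t_out = fminf(fminf(tx1, ty1), tz1);
    return t_in < t_out;   // false: the ray misses the volume entirely
}
// The kernel additionally clamps t_in to a minimum distance and returns early
// when t_in >= t_out, which corresponds to rayBoxInterval() returning false.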
eccf03986ff127632230784cfed0190d1757f6c1.hip
// !!! This is a file automatically generated by hipify!!! /** This code is adapted from research by Jayadharini Jaiganesh and Martin Burtscher See below for their license info: ECL-CC code: ECL-CC is a connected components graph algorithm. The CUDA implementation thereof is quite fast. It operates on graphs stored in binary CSR format. Copyright (c) 2017-2020, Texas State University. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Texas State University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL TEXAS STATE UNIVERSITY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Authors: Jayadharini Jaiganesh and Martin Burtscher URL: The latest version of this code is available at https://userweb.cs.txstate.edu/~burtscher/research/ECL-CC/. Publication: This work is described in detail in the following paper. Jayadharini Jaiganesh and Martin Burtscher. A High-Performance Connected Components Implementation for GPUs. Proceedings of the 2018 ACM International Symposium on High-Performance Parallel and Distributed Computing, pp. 92-104. June 2018. 
*/ #include <stdlib.h> #include <stdio.h> #include <hip/hip_runtime.h> static const int Device = 0; static const int ThreadsPerBlock = 256; static const int warpsize = 32; static __device__ int topL, posL, topH, posH; /* initialize with first smaller neighbor ID */ static __global__ __launch_bounds__(ThreadsPerBlock, 2048 / ThreadsPerBlock) void init(const int nodes, const int* const __restrict__ nidx, const int* const __restrict__ nlist, int* const __restrict__ nstat) { const int from = threadIdx.x + blockIdx.x * ThreadsPerBlock; const int incr = gridDim.x * ThreadsPerBlock; for (int v = from; v < nodes; v += incr) { const int beg = nidx[v]; const int end = nidx[v + 1]; int m = v; int i = beg; while ((m == v) && (i < end)) { m = min(m, nlist[i]); i++; } nstat[v] = m; } if (from == 0) {topL = 0; posL = 0; topH = nodes - 1; posH = nodes - 1;} } /* intermediate pointer jumping */ static inline __device__ int representative(const int idx, int* const __restrict__ nstat) { int curr = nstat[idx]; if (curr != idx) { int next, prev = idx; while (curr > (next = nstat[curr])) { nstat[prev] = next; prev = curr; curr = next; } } return curr; } /* process low-degree vertices at thread granularity and fill worklists */ static __global__ __launch_bounds__(ThreadsPerBlock, 2048 / ThreadsPerBlock) void compute1(const int nodes, const int* const __restrict__ nidx, const int* const __restrict__ nlist, int* const __restrict__ nstat, int* const __restrict__ wl) { const int from = threadIdx.x + blockIdx.x * ThreadsPerBlock; const int incr = gridDim.x * ThreadsPerBlock; for (int v = from; v < nodes; v += incr) { const int vstat = nstat[v]; if (v != vstat) { const int beg = nidx[v]; const int end = nidx[v + 1]; int deg = end - beg; if (deg > 16) { int idx; if (deg <= 352) { idx = atomicAdd(&topL, 1); } else { idx = atomicAdd(&topH, -1); } wl[idx] = v; } else { int vstat = representative(v, nstat); for (int i = beg; i < end; i++) { const int nli = nlist[i]; if (v > nli) { int ostat = representative(nli, nstat); bool repeat; do { repeat = false; if (vstat != ostat) { int ret; if (vstat < ostat) { if ((ret = atomicCAS(&nstat[ostat], ostat, vstat)) != ostat) { ostat = ret; repeat = true; } } else { if ((ret = atomicCAS(&nstat[vstat], vstat, ostat)) != vstat) { vstat = ret; repeat = true; } } } } while (repeat); } } } } } } /* process medium-degree vertices at warp granularity */ static __global__ __launch_bounds__(ThreadsPerBlock, 2048 / ThreadsPerBlock) void compute2(const int nodes, const int* const __restrict__ nidx, const int* const __restrict__ nlist, int* const __restrict__ nstat, const int* const __restrict__ wl) { const int lane = threadIdx.x % warpsize; int idx; if (lane == 0) idx = atomicAdd(&posL, 1); idx = __shfl_sync(0xffffffff, idx, 0); while (idx < topL) { const int v = wl[idx]; int vstat = representative(v, nstat); for (int i = nidx[v] + lane; i < nidx[v + 1]; i += warpsize) { const int nli = nlist[i]; if (v > nli) { int ostat = representative(nli, nstat); bool repeat; do { repeat = false; if (vstat != ostat) { int ret; if (vstat < ostat) { if ((ret = atomicCAS(&nstat[ostat], ostat, vstat)) != ostat) { ostat = ret; repeat = true; } } else { if ((ret = atomicCAS(&nstat[vstat], vstat, ostat)) != vstat) { vstat = ret; repeat = true; } } } } while (repeat); } } if (lane == 0) idx = atomicAdd(&posL, 1); idx = __shfl_sync(0xffffffff, idx, 0); } } /* process high-degree vertices at block granularity */ static __global__ __launch_bounds__(ThreadsPerBlock, 2048 / ThreadsPerBlock) void compute3(const int nodes, 
const int* const __restrict__ nidx, const int* const __restrict__ nlist, int* const __restrict__ nstat, const int* const __restrict__ wl) { __shared__ int vB; if (threadIdx.x == 0) { const int idx = atomicAdd(&posH, -1); vB = (idx > topH) ? wl[idx] : -1; } __syncthreads(); while (vB >= 0) { const int v = vB; __syncthreads(); int vstat = representative(v, nstat); for (int i = nidx[v] + threadIdx.x; i < nidx[v + 1]; i += ThreadsPerBlock) { const int nli = nlist[i]; if (v > nli) { int ostat = representative(nli, nstat); bool repeat; do { repeat = false; if (vstat != ostat) { int ret; if (vstat < ostat) { if ((ret = atomicCAS(&nstat[ostat], ostat, vstat)) != ostat) { ostat = ret; repeat = true; } } else { if ((ret = atomicCAS(&nstat[vstat], vstat, ostat)) != vstat) { vstat = ret; repeat = true; } } } } while (repeat); } } if (threadIdx.x == 0) { const int idx = atomicAdd(&posH, -1); vB = (idx > topH) ? wl[idx] : -1; } __syncthreads(); } } /* link all vertices to sink */ static __global__ __launch_bounds__(ThreadsPerBlock, 2048 / ThreadsPerBlock) void flatten(const int nodes, const int* const __restrict__ nidx, const int* const __restrict__ nlist, int* const __restrict__ nstat) { const int from = threadIdx.x + blockIdx.x * ThreadsPerBlock; const int incr = gridDim.x * ThreadsPerBlock; for (int v = from; v < nodes; v += incr) { int next, vstat = nstat[v]; const int old = vstat; while (vstat > (next = nstat[vstat])) { vstat = next; } if (old != vstat) nstat[v] = vstat; } } static void computeCC(const int nodes, const int edges, const int* const __restrict__ nidx, const int* const __restrict__ nlist, int* const __restrict__ nstat) { hipSetDevice(Device); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, Device); if ((deviceProp.major == 9999) && (deviceProp.minor == 9999)) {fprintf(stderr, "ERROR: there is no CUDA capable device\n\n"); exit(-1);} const int SMs = deviceProp.multiProcessorCount; const int mTSM = deviceProp.maxThreadsPerMultiProcessor; int* nidx_d; int* nlist_d; int* nstat_d; int* wl_d; if (hipSuccess != hipMalloc((void **)&nidx_d, (nodes + 1) * sizeof(int))) {fprintf(stderr, "ERROR: could not allocate nidx_d\n\n"); exit(-1);} if (hipSuccess != hipMalloc((void **)&nlist_d, edges * sizeof(int))) {fprintf(stderr, "ERROR: could not allocate nlist_d\n\n"); exit(-1);} if (hipSuccess != hipMalloc((void **)&nstat_d, nodes * sizeof(int))) {fprintf(stderr, "ERROR: could not allocate nstat_d,\n\n"); exit(-1);} if (hipSuccess != hipMalloc((void **)&wl_d, nodes * sizeof(int))) {fprintf(stderr, "ERROR: could not allocate wl_d,\n\n"); exit(-1);} if (hipSuccess != hipMemcpy(nidx_d, nidx, (nodes + 1) * sizeof(int), hipMemcpyHostToDevice)) {fprintf(stderr, "ERROR: copying to device failed\n\n"); exit(-1);} if (hipSuccess != hipMemcpy(nlist_d, nlist, edges * sizeof(int), hipMemcpyHostToDevice)) {fprintf(stderr, "ERROR: copying to device failed\n\n"); exit(-1);} hipFuncSetCacheConfig(init, hipFuncCachePreferL1); hipFuncSetCacheConfig(compute1, hipFuncCachePreferL1); hipFuncSetCacheConfig(compute2, hipFuncCachePreferL1); hipFuncSetCacheConfig(compute3, hipFuncCachePreferL1); hipFuncSetCacheConfig(flatten, hipFuncCachePreferL1); const int blocks = SMs * mTSM / ThreadsPerBlock; hipLaunchKernelGGL(( init), dim3(blocks), dim3(ThreadsPerBlock), 0, 0, nodes, nidx_d, nlist_d, nstat_d); hipLaunchKernelGGL(( compute1), dim3(blocks), dim3(ThreadsPerBlock), 0, 0, nodes, nidx_d, nlist_d, nstat_d, wl_d); hipLaunchKernelGGL(( compute2), dim3(blocks), dim3(ThreadsPerBlock), 0, 0, nodes, nidx_d, nlist_d, 
nstat_d, wl_d); hipLaunchKernelGGL(( compute3), dim3(blocks), dim3(ThreadsPerBlock), 0, 0, nodes, nidx_d, nlist_d, nstat_d, wl_d); hipLaunchKernelGGL(( flatten), dim3(blocks), dim3(ThreadsPerBlock), 0, 0, nodes, nidx_d, nlist_d, nstat_d); if (hipSuccess != hipMemcpy(nstat, nstat_d, nodes * sizeof(int), hipMemcpyDeviceToHost)) {fprintf(stderr, "ERROR: copying from device failed\n\n"); exit(-1);} hipFree(wl_d); hipFree(nstat_d); hipFree(nlist_d); hipFree(nidx_d); }
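// --- Illustrative sketch (not part of the original file) ---
// computeCC() expects the graph in CSR form: nidx[v]..nidx[v+1] delimits the
// slice of nlist holding v's neighbours, and nstat receives one component
// label per vertex. A hypothetical host-side driver for a 4-vertex graph with
// the undirected edges {0-1, 2-3}, each stored in both directions:
static void exampleComputeCCDriver()
{
    const int nodes = 4;
    const int edges = 4;                    // entries in nlist (directed slots)
    int nidx[5]  = { 0, 1, 2, 3, 4 };       // each vertex has one neighbour
    int nlist[4] = { 1, 0, 3, 2 };          // 0<->1 and 2<->3
    int nstat[4];
    computeCC(nodes, edges, nidx, nlist, nstat);
    // Expected labels: nstat = {0, 0, 2, 2} -- vertices 0/1 end up in the
    // component rooted at 0, vertices 2/3 in the component rooted at 2.
}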
eccf03986ff127632230784cfed0190d1757f6c1.cu
/** This code is adapted from research by Jayadharini Jaiganesh and Martin Burtscher See below for their license info: ECL-CC code: ECL-CC is a connected components graph algorithm. The CUDA implementation thereof is quite fast. It operates on graphs stored in binary CSR format. Copyright (c) 2017-2020, Texas State University. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Texas State University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL TEXAS STATE UNIVERSITY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Authors: Jayadharini Jaiganesh and Martin Burtscher URL: The latest version of this code is available at https://userweb.cs.txstate.edu/~burtscher/research/ECL-CC/. Publication: This work is described in detail in the following paper. Jayadharini Jaiganesh and Martin Burtscher. A High-Performance Connected Components Implementation for GPUs. Proceedings of the 2018 ACM International Symposium on High-Performance Parallel and Distributed Computing, pp. 92-104. June 2018. 
*/ #include <stdlib.h> #include <stdio.h> #include <cuda.h> static const int Device = 0; static const int ThreadsPerBlock = 256; static const int warpsize = 32; static __device__ int topL, posL, topH, posH; /* initialize with first smaller neighbor ID */ static __global__ __launch_bounds__(ThreadsPerBlock, 2048 / ThreadsPerBlock) void init(const int nodes, const int* const __restrict__ nidx, const int* const __restrict__ nlist, int* const __restrict__ nstat) { const int from = threadIdx.x + blockIdx.x * ThreadsPerBlock; const int incr = gridDim.x * ThreadsPerBlock; for (int v = from; v < nodes; v += incr) { const int beg = nidx[v]; const int end = nidx[v + 1]; int m = v; int i = beg; while ((m == v) && (i < end)) { m = min(m, nlist[i]); i++; } nstat[v] = m; } if (from == 0) {topL = 0; posL = 0; topH = nodes - 1; posH = nodes - 1;} } /* intermediate pointer jumping */ static inline __device__ int representative(const int idx, int* const __restrict__ nstat) { int curr = nstat[idx]; if (curr != idx) { int next, prev = idx; while (curr > (next = nstat[curr])) { nstat[prev] = next; prev = curr; curr = next; } } return curr; } /* process low-degree vertices at thread granularity and fill worklists */ static __global__ __launch_bounds__(ThreadsPerBlock, 2048 / ThreadsPerBlock) void compute1(const int nodes, const int* const __restrict__ nidx, const int* const __restrict__ nlist, int* const __restrict__ nstat, int* const __restrict__ wl) { const int from = threadIdx.x + blockIdx.x * ThreadsPerBlock; const int incr = gridDim.x * ThreadsPerBlock; for (int v = from; v < nodes; v += incr) { const int vstat = nstat[v]; if (v != vstat) { const int beg = nidx[v]; const int end = nidx[v + 1]; int deg = end - beg; if (deg > 16) { int idx; if (deg <= 352) { idx = atomicAdd(&topL, 1); } else { idx = atomicAdd(&topH, -1); } wl[idx] = v; } else { int vstat = representative(v, nstat); for (int i = beg; i < end; i++) { const int nli = nlist[i]; if (v > nli) { int ostat = representative(nli, nstat); bool repeat; do { repeat = false; if (vstat != ostat) { int ret; if (vstat < ostat) { if ((ret = atomicCAS(&nstat[ostat], ostat, vstat)) != ostat) { ostat = ret; repeat = true; } } else { if ((ret = atomicCAS(&nstat[vstat], vstat, ostat)) != vstat) { vstat = ret; repeat = true; } } } } while (repeat); } } } } } } /* process medium-degree vertices at warp granularity */ static __global__ __launch_bounds__(ThreadsPerBlock, 2048 / ThreadsPerBlock) void compute2(const int nodes, const int* const __restrict__ nidx, const int* const __restrict__ nlist, int* const __restrict__ nstat, const int* const __restrict__ wl) { const int lane = threadIdx.x % warpsize; int idx; if (lane == 0) idx = atomicAdd(&posL, 1); idx = __shfl_sync(0xffffffff, idx, 0); while (idx < topL) { const int v = wl[idx]; int vstat = representative(v, nstat); for (int i = nidx[v] + lane; i < nidx[v + 1]; i += warpsize) { const int nli = nlist[i]; if (v > nli) { int ostat = representative(nli, nstat); bool repeat; do { repeat = false; if (vstat != ostat) { int ret; if (vstat < ostat) { if ((ret = atomicCAS(&nstat[ostat], ostat, vstat)) != ostat) { ostat = ret; repeat = true; } } else { if ((ret = atomicCAS(&nstat[vstat], vstat, ostat)) != vstat) { vstat = ret; repeat = true; } } } } while (repeat); } } if (lane == 0) idx = atomicAdd(&posL, 1); idx = __shfl_sync(0xffffffff, idx, 0); } } /* process high-degree vertices at block granularity */ static __global__ __launch_bounds__(ThreadsPerBlock, 2048 / ThreadsPerBlock) void compute3(const int nodes, const int* 
const __restrict__ nidx, const int* const __restrict__ nlist, int* const __restrict__ nstat, const int* const __restrict__ wl) { __shared__ int vB; if (threadIdx.x == 0) { const int idx = atomicAdd(&posH, -1); vB = (idx > topH) ? wl[idx] : -1; } __syncthreads(); while (vB >= 0) { const int v = vB; __syncthreads(); int vstat = representative(v, nstat); for (int i = nidx[v] + threadIdx.x; i < nidx[v + 1]; i += ThreadsPerBlock) { const int nli = nlist[i]; if (v > nli) { int ostat = representative(nli, nstat); bool repeat; do { repeat = false; if (vstat != ostat) { int ret; if (vstat < ostat) { if ((ret = atomicCAS(&nstat[ostat], ostat, vstat)) != ostat) { ostat = ret; repeat = true; } } else { if ((ret = atomicCAS(&nstat[vstat], vstat, ostat)) != vstat) { vstat = ret; repeat = true; } } } } while (repeat); } } if (threadIdx.x == 0) { const int idx = atomicAdd(&posH, -1); vB = (idx > topH) ? wl[idx] : -1; } __syncthreads(); } } /* link all vertices to sink */ static __global__ __launch_bounds__(ThreadsPerBlock, 2048 / ThreadsPerBlock) void flatten(const int nodes, const int* const __restrict__ nidx, const int* const __restrict__ nlist, int* const __restrict__ nstat) { const int from = threadIdx.x + blockIdx.x * ThreadsPerBlock; const int incr = gridDim.x * ThreadsPerBlock; for (int v = from; v < nodes; v += incr) { int next, vstat = nstat[v]; const int old = vstat; while (vstat > (next = nstat[vstat])) { vstat = next; } if (old != vstat) nstat[v] = vstat; } } static void computeCC(const int nodes, const int edges, const int* const __restrict__ nidx, const int* const __restrict__ nlist, int* const __restrict__ nstat) { cudaSetDevice(Device); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, Device); if ((deviceProp.major == 9999) && (deviceProp.minor == 9999)) {fprintf(stderr, "ERROR: there is no CUDA capable device\n\n"); exit(-1);} const int SMs = deviceProp.multiProcessorCount; const int mTSM = deviceProp.maxThreadsPerMultiProcessor; int* nidx_d; int* nlist_d; int* nstat_d; int* wl_d; if (cudaSuccess != cudaMalloc((void **)&nidx_d, (nodes + 1) * sizeof(int))) {fprintf(stderr, "ERROR: could not allocate nidx_d\n\n"); exit(-1);} if (cudaSuccess != cudaMalloc((void **)&nlist_d, edges * sizeof(int))) {fprintf(stderr, "ERROR: could not allocate nlist_d\n\n"); exit(-1);} if (cudaSuccess != cudaMalloc((void **)&nstat_d, nodes * sizeof(int))) {fprintf(stderr, "ERROR: could not allocate nstat_d,\n\n"); exit(-1);} if (cudaSuccess != cudaMalloc((void **)&wl_d, nodes * sizeof(int))) {fprintf(stderr, "ERROR: could not allocate wl_d,\n\n"); exit(-1);} if (cudaSuccess != cudaMemcpy(nidx_d, nidx, (nodes + 1) * sizeof(int), cudaMemcpyHostToDevice)) {fprintf(stderr, "ERROR: copying to device failed\n\n"); exit(-1);} if (cudaSuccess != cudaMemcpy(nlist_d, nlist, edges * sizeof(int), cudaMemcpyHostToDevice)) {fprintf(stderr, "ERROR: copying to device failed\n\n"); exit(-1);} cudaFuncSetCacheConfig(init, cudaFuncCachePreferL1); cudaFuncSetCacheConfig(compute1, cudaFuncCachePreferL1); cudaFuncSetCacheConfig(compute2, cudaFuncCachePreferL1); cudaFuncSetCacheConfig(compute3, cudaFuncCachePreferL1); cudaFuncSetCacheConfig(flatten, cudaFuncCachePreferL1); const int blocks = SMs * mTSM / ThreadsPerBlock; init<<<blocks, ThreadsPerBlock>>>(nodes, nidx_d, nlist_d, nstat_d); compute1<<<blocks, ThreadsPerBlock>>>(nodes, nidx_d, nlist_d, nstat_d, wl_d); compute2<<<blocks, ThreadsPerBlock>>>(nodes, nidx_d, nlist_d, nstat_d, wl_d); compute3<<<blocks, ThreadsPerBlock>>>(nodes, nidx_d, nlist_d, nstat_d, wl_d); 
flatten<<<blocks, ThreadsPerBlock>>>(nodes, nidx_d, nlist_d, nstat_d); if (cudaSuccess != cudaMemcpy(nstat, nstat_d, nodes * sizeof(int), cudaMemcpyDeviceToHost)) {fprintf(stderr, "ERROR: copying from device failed\n\n"); exit(-1);} cudaFree(wl_d); cudaFree(nstat_d); cudaFree(nlist_d); cudaFree(nidx_d); }
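// --- Illustrative sketch (not part of the original file) ---
// representative() above is a union-find "find" with intermediate pointer
// jumping: while climbing towards the root it re-points every visited vertex
// two levels up, so later queries traverse shorter chains. A sequential CPU
// mirror (hypothetical name) with identical behaviour on a plain int array:
static int representativeHost(const int idx, int* const nstat)
{
    int curr = nstat[idx];
    if (curr != idx) {
        int next, prev = idx;
        while (curr > (next = nstat[curr])) {
            nstat[prev] = next;   // shortcut: prev now skips over curr
            prev = curr;
            curr = next;
        }
    }
    return curr;
}
// Example: with nstat = {0, 0, 1, 2}, representativeHost(3, nstat) returns 0
// and leaves nstat = {0, 0, 0, 1}, so the next lookup starting from 3 needs
// one hop fewer.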
13ce391352f3297a361cd945f65fac28d96a086c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************* * CLATCH.cu * KORAL * * Author: Kareem Omar * [email protected] * https://github.com/komrad36 * * Last updated Dec 27, 2016 *******************************************************************/ // // ## Summary ## // KORAL is a novel, extremely fast, highly accurate, scale- and // rotation-invariant, CPU-GPU cooperative detector-descriptor. // // Detection is based on the author's custom multi-scale KFAST corner // detector, with rapid bilinear interpolation performed by the GPU // asynchronously while the CPU works on KFAST. // // ## Usage ## // Basic use of KORAL is extremely easy, although, of course, for a // larger high-performance pipeline, users will benefit from // calling KORAL functions directly and modifying it to suit their needs. // // To detect and describe, simply #include "KORAL.h" and // then do: // // KORAL koral(scale_factor, scale_levels); // koral.go(image, width, height, KFAST_threshold); // // where scale_factor is the factor by which each scale leve // is reduced from the previous, scale_levels is the total // number of such scale levels used, image is a pointer to // uint8_t (grayscale) image data, and KFAST_threshold // is the threshold supplied to the KFAST feature detector. // // After this call, keypoints are avaiable in a vector at // koral.kps, while descriptors are available at // koral.desc. // // Portions of KORAL require SSE, AVX, AVX2, and CUDA. // The author is working on reduced-performance versions // with lesser requirements, but as the intent of this work // is primarily novel performance capability, modern // hardware and this full version are highly recommended. // // Description is performed by the GPU using the novel CLATCH // (CUDA LATCH) binary descriptor kernel. // // Rotation invariance is provided by a novel vectorized // SSE angle weight detector. // // All components have been written and carefully tuned by the author // for maximum performance and have no external dependencies. Some have // been modified for integration into KORAL, // but the original standalone projects are all availble on // the author's GitHub (https://github.com/komrad36). // // These individual components are: // -KFAST (https://github.com/komrad36/KFAST) // -CUDALERP (https://github.com/komrad36/CUDALERP) // -FeatureAngle (https://github.com/komrad36/FeatureAngle) // -CLATCH (https://github.com/komrad36/CLATCH) // // In addition, the natural next step of matching descriptors // is available in the author's currently separate // project, CUDAK2NN (https://github.com/komrad36/CUDAK2NN). // // A key insight responsible for much of the performance of // this insanely fast system is due to Christopher Parker // (https://github.com/csp256), to whom I am extremely grateful. // // The file 'main.cpp' is a simple test driver // illustrating example usage.It requires OpenCV // for image read and keypoint display.KORAL itself, // however, does not require OpenCV or any other // external dependencies. // // Note that KORAL is a work in progress. // Suggestions and improvements are welcomed. 
// // ## License ## // The FAST detector was created by Edward Rosten and Tom Drummond // as described in the 2006 paper by Rosten and Drummond: // "Machine learning for high-speed corner detection" // Edward Rosten and Tom Drummond // https://www.edwardrosten.com/work/rosten_2006_machine.pdf // // The FAST detector is BSD licensed: // // Copyright(c) 2006, 2008, 2009, 2010 Edward Rosten // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met : // // // *Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // *Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and / or other materials provided with the distribution. // // *Neither the name of the University of Cambridge nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT(INCLUDING // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // // // // KORAL is licensed under the MIT License : https://opensource.org/licenses/mit-license.php // // Copyright(c) 2016 Kareem Omar, Christopher Parker // // Permission is hereby granted, free of charge, // to any person obtaining a copy of this software and associated documentation // files(the "Software"), to deal in the Software without restriction, including // without limitation the rights to use, copy, modify, merge, publish, distribute, // sublicense, and / or sell copies of the Software, and to permit persons to whom // the Software is furnished to do so, subject to the following conditions : // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, // INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A // PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT // HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE // SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. // // // Note again that KORAL is a work in progress. // Suggestions and improvements are welcomed. 
// #include "koralROS/CLATCH.h" __global__ void #ifndef __INTELLISENSE__ __launch_bounds__(512, 4) #endif CLATCH_kernel(const hipTextureObject_t d_all_tex[8], const hipTextureObject_t d_triplets, const Keypoint* const __restrict d_kps, uint32_t* const __restrict__ d_desc) { volatile __shared__ uint8_t s_ROI[4608]; const Keypoint pt = d_kps[blockIdx.x]; const hipTextureObject_t d_img_tex = d_all_tex[pt.scale]; const float s = sin(pt.angle), c = cos(pt.angle); for (int32_t i = 0; i <= 48; i += 16) { for (int32_t k = 0; k <= 32; k += 32) { const float x_offset = static_cast<float>(static_cast<int>(threadIdx.x) + k - 32); const float y_offset = static_cast<float>(static_cast<int>(threadIdx.y) + i - 32); s_ROI[(threadIdx.y + i) * 72 + threadIdx.x + k] = tex2D<uint8_t>(d_img_tex, static_cast<int>((pt.x + (x_offset*c - y_offset*s)) + 0.5f), static_cast<int>((pt.y + (x_offset*s + y_offset*c)) + 0.5f)); } } uint32_t ROI_base = 144 * (threadIdx.x & 3) + (threadIdx.x >> 2), triplet_base = threadIdx.y << 5, desc = 0; __syncthreads(); for (int32_t i = 0; i < 4; ++i, triplet_base += 8) { int32_t accum[8]; for (uint32_t j = 0; j < 8; ++j) { const ushort4 t = tex1D<ushort4>(d_triplets, triplet_base + j); const int32_t b1 = s_ROI[ROI_base + t.y], b2 = s_ROI[ROI_base + t.y + 72] ; const int32_t a1 = s_ROI[ROI_base + t.x] - b1, a2 = s_ROI[ROI_base + t.x + 72] - b2; const int32_t c1 = s_ROI[ROI_base + t.z] - b1, c2 = s_ROI[ROI_base + t.z + 72] - b2; accum[j] = a1 * a1 - c1 * c1 + a2 * a2 - c2 * c2; } for (int32_t k = 1; k <= 4; k <<= 1) { for (int32_t s = 0; s < 8; s += k) accum[s] += __shfl_xor(accum[s], k); if (threadIdx.x & k) for (int32_t s = 0; s < 8; s += k << 1) accum[s] = accum[s + k]; } accum[0] += __shfl_xor(accum[0], 8); desc |= (accum[0] + __shfl_xor(accum[0], 16) < 0) << ((i << 3) + (threadIdx.x & 7)); } for (int32_t s = 1; s <= 4; s <<= 1) desc |= __shfl_xor(desc, s); if (threadIdx.x == 0) d_desc[(blockIdx.x << 4) + threadIdx.y] = desc; } void CLATCH(hipTextureObject_t d_all_tex[8], const hipTextureObject_t d_triplets, const Keypoint* const __restrict d_kps, const int num_kps, uint64_t* const __restrict d_desc) { hipLaunchKernelGGL(( CLATCH_kernel), dim3(num_kps), dim3(32, 16), 0, 0, d_all_tex, d_triplets, d_kps, reinterpret_cast<uint32_t*>(d_desc)); }
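// --- Illustrative sketch (not part of the original file) ---
// Each keypoint receives 16 x 32 bits = 512 descriptor bits (the kernel writes
// word (blockIdx.x << 4) + threadIdx.y). Matching such binary descriptors is a
// Hamming-distance problem; the author's CUDAK2NN project does it on the GPU,
// but a minimal CPU reference (hypothetical name, GCC/Clang
// __builtin_popcountll assumed) looks like this:
static inline int hammingDistance512(const uint64_t* a, const uint64_t* b)
{
    int dist = 0;
    for (int i = 0; i < 8; ++i)          // 8 x 64 bits = 512 bits
        dist += __builtin_popcountll(a[i] ^ b[i]);
    return dist;
}
// Usage: after copying d_desc back to the host, treat it as 8 uint64_t per
// keypoint and compare descriptor i of one image against descriptor j of
// another; a smaller distance means more similar patches.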
13ce391352f3297a361cd945f65fac28d96a086c.cu
/******************************************************************* * CLATCH.cu * KORAL * * Author: Kareem Omar * [email protected] * https://github.com/komrad36 * * Last updated Dec 27, 2016 *******************************************************************/ // // ## Summary ## // KORAL is a novel, extremely fast, highly accurate, scale- and // rotation-invariant, CPU-GPU cooperative detector-descriptor. // // Detection is based on the author's custom multi-scale KFAST corner // detector, with rapid bilinear interpolation performed by the GPU // asynchronously while the CPU works on KFAST. // // ## Usage ## // Basic use of KORAL is extremely easy, although, of course, for a // larger high-performance pipeline, users will benefit from // calling KORAL functions directly and modifying it to suit their needs. // // To detect and describe, simply #include "KORAL.h" and // then do: // // KORAL koral(scale_factor, scale_levels); // koral.go(image, width, height, KFAST_threshold); // // where scale_factor is the factor by which each scale leve // is reduced from the previous, scale_levels is the total // number of such scale levels used, image is a pointer to // uint8_t (grayscale) image data, and KFAST_threshold // is the threshold supplied to the KFAST feature detector. // // After this call, keypoints are avaiable in a vector at // koral.kps, while descriptors are available at // koral.desc. // // Portions of KORAL require SSE, AVX, AVX2, and CUDA. // The author is working on reduced-performance versions // with lesser requirements, but as the intent of this work // is primarily novel performance capability, modern // hardware and this full version are highly recommended. // // Description is performed by the GPU using the novel CLATCH // (CUDA LATCH) binary descriptor kernel. // // Rotation invariance is provided by a novel vectorized // SSE angle weight detector. // // All components have been written and carefully tuned by the author // for maximum performance and have no external dependencies. Some have // been modified for integration into KORAL, // but the original standalone projects are all availble on // the author's GitHub (https://github.com/komrad36). // // These individual components are: // -KFAST (https://github.com/komrad36/KFAST) // -CUDALERP (https://github.com/komrad36/CUDALERP) // -FeatureAngle (https://github.com/komrad36/FeatureAngle) // -CLATCH (https://github.com/komrad36/CLATCH) // // In addition, the natural next step of matching descriptors // is available in the author's currently separate // project, CUDAK2NN (https://github.com/komrad36/CUDAK2NN). // // A key insight responsible for much of the performance of // this insanely fast system is due to Christopher Parker // (https://github.com/csp256), to whom I am extremely grateful. // // The file 'main.cpp' is a simple test driver // illustrating example usage.It requires OpenCV // for image read and keypoint display.KORAL itself, // however, does not require OpenCV or any other // external dependencies. // // Note that KORAL is a work in progress. // Suggestions and improvements are welcomed. // // ## License ## // The FAST detector was created by Edward Rosten and Tom Drummond // as described in the 2006 paper by Rosten and Drummond: // "Machine learning for high-speed corner detection" // Edward Rosten and Tom Drummond // https://www.edwardrosten.com/work/rosten_2006_machine.pdf // // The FAST detector is BSD licensed: // // Copyright(c) 2006, 2008, 2009, 2010 Edward Rosten // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met : // // // *Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // *Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and / or other materials provided with the distribution. // // *Neither the name of the University of Cambridge nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT(INCLUDING // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // // // // KORAL is licensed under the MIT License : https://opensource.org/licenses/mit-license.php // // Copyright(c) 2016 Kareem Omar, Christopher Parker // // Permission is hereby granted, free of charge, // to any person obtaining a copy of this software and associated documentation // files(the "Software"), to deal in the Software without restriction, including // without limitation the rights to use, copy, modify, merge, publish, distribute, // sublicense, and / or sell copies of the Software, and to permit persons to whom // the Software is furnished to do so, subject to the following conditions : // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, // INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A // PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT // HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE // SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. // // // Note again that KORAL is a work in progress. // Suggestions and improvements are welcomed. 
// #include "koralROS/CLATCH.h" __global__ void #ifndef __INTELLISENSE__ __launch_bounds__(512, 4) #endif CLATCH_kernel(const cudaTextureObject_t d_all_tex[8], const cudaTextureObject_t d_triplets, const Keypoint* const __restrict d_kps, uint32_t* const __restrict__ d_desc) { volatile __shared__ uint8_t s_ROI[4608]; const Keypoint pt = d_kps[blockIdx.x]; const cudaTextureObject_t d_img_tex = d_all_tex[pt.scale]; const float s = sin(pt.angle), c = cos(pt.angle); for (int32_t i = 0; i <= 48; i += 16) { for (int32_t k = 0; k <= 32; k += 32) { const float x_offset = static_cast<float>(static_cast<int>(threadIdx.x) + k - 32); const float y_offset = static_cast<float>(static_cast<int>(threadIdx.y) + i - 32); s_ROI[(threadIdx.y + i) * 72 + threadIdx.x + k] = tex2D<uint8_t>(d_img_tex, static_cast<int>((pt.x + (x_offset*c - y_offset*s)) + 0.5f), static_cast<int>((pt.y + (x_offset*s + y_offset*c)) + 0.5f)); } } uint32_t ROI_base = 144 * (threadIdx.x & 3) + (threadIdx.x >> 2), triplet_base = threadIdx.y << 5, desc = 0; __syncthreads(); for (int32_t i = 0; i < 4; ++i, triplet_base += 8) { int32_t accum[8]; for (uint32_t j = 0; j < 8; ++j) { const ushort4 t = tex1D<ushort4>(d_triplets, triplet_base + j); const int32_t b1 = s_ROI[ROI_base + t.y], b2 = s_ROI[ROI_base + t.y + 72] ; const int32_t a1 = s_ROI[ROI_base + t.x] - b1, a2 = s_ROI[ROI_base + t.x + 72] - b2; const int32_t c1 = s_ROI[ROI_base + t.z] - b1, c2 = s_ROI[ROI_base + t.z + 72] - b2; accum[j] = a1 * a1 - c1 * c1 + a2 * a2 - c2 * c2; } for (int32_t k = 1; k <= 4; k <<= 1) { for (int32_t s = 0; s < 8; s += k) accum[s] += __shfl_xor(accum[s], k); if (threadIdx.x & k) for (int32_t s = 0; s < 8; s += k << 1) accum[s] = accum[s + k]; } accum[0] += __shfl_xor(accum[0], 8); desc |= (accum[0] + __shfl_xor(accum[0], 16) < 0) << ((i << 3) + (threadIdx.x & 7)); } for (int32_t s = 1; s <= 4; s <<= 1) desc |= __shfl_xor(desc, s); if (threadIdx.x == 0) d_desc[(blockIdx.x << 4) + threadIdx.y] = desc; } void CLATCH(cudaTextureObject_t d_all_tex[8], const cudaTextureObject_t d_triplets, const Keypoint* const __restrict d_kps, const int num_kps, uint64_t* const __restrict d_desc) { CLATCH_kernel<<<num_kps, { 32, 16 } >>>(d_all_tex, d_triplets, d_kps, reinterpret_cast<uint32_t*>(d_desc)); }
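// --- Illustrative sketch (not part of the original file) ---
// The s_ROI fill above samples a 64x64 patch rotated by the keypoint angle:
// every (x_offset, y_offset) is rotated with the 2x2 matrix [c -s; s c] before
// being added to the keypoint position, which is what makes the descriptor
// rotation-invariant. A host-side helper (hypothetical name, <cmath> assumed)
// performing the same rotation for a single offset:
static inline void rotatePatchOffset(const float angle, const float x_off, const float y_off,
                                     float& dx, float& dy)
{
    const float s = sinf(angle), c = cosf(angle);
    dx = x_off * c - y_off * s;   // matches pt.x + (x_offset*c - y_offset*s)
    dy = x_off * s + y_off * c;   // matches pt.y + (x_offset*s + y_offset*c)
}
// Example: angle = pi/2 maps the offset (1, 0) to approximately (0, 1), so the
// sampled patch turns together with the keypoint orientation.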
a26ceebd4630b3cdcb18d71adc37fef16757e738.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel1_t2; int xdim0_update_halo_kernel1_t2_h = -1; __constant__ int ydim0_update_halo_kernel1_t2; int ydim0_update_halo_kernel1_t2_h = -1; __constant__ int xdim1_update_halo_kernel1_t2; int xdim1_update_halo_kernel1_t2_h = -1; __constant__ int ydim1_update_halo_kernel1_t2; int ydim1_update_halo_kernel1_t2_h = -1; __constant__ int xdim2_update_halo_kernel1_t2; int xdim2_update_halo_kernel1_t2_h = -1; __constant__ int ydim2_update_halo_kernel1_t2; int ydim2_update_halo_kernel1_t2_h = -1; __constant__ int xdim3_update_halo_kernel1_t2; int xdim3_update_halo_kernel1_t2_h = -1; __constant__ int ydim3_update_halo_kernel1_t2; int ydim3_update_halo_kernel1_t2_h = -1; __constant__ int xdim4_update_halo_kernel1_t2; int xdim4_update_halo_kernel1_t2_h = -1; __constant__ int ydim4_update_halo_kernel1_t2; int ydim4_update_halo_kernel1_t2_h = -1; __constant__ int xdim5_update_halo_kernel1_t2; int xdim5_update_halo_kernel1_t2_h = -1; __constant__ int ydim5_update_halo_kernel1_t2; int ydim5_update_halo_kernel1_t2_h = -1; __constant__ int xdim6_update_halo_kernel1_t2; int xdim6_update_halo_kernel1_t2_h = -1; __constant__ int ydim6_update_halo_kernel1_t2; int ydim6_update_halo_kernel1_t2_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel1_t2*(y)+xdim0_update_halo_kernel1_t2*ydim0_update_halo_kernel1_t2*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel1_t2*(y)+xdim1_update_halo_kernel1_t2*ydim1_update_halo_kernel1_t2*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_update_halo_kernel1_t2*(y)+xdim2_update_halo_kernel1_t2*ydim2_update_halo_kernel1_t2*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_update_halo_kernel1_t2*(y)+xdim3_update_halo_kernel1_t2*ydim3_update_halo_kernel1_t2*(z)) #define OPS_ACC4(x,y,z) (x+xdim4_update_halo_kernel1_t2*(y)+xdim4_update_halo_kernel1_t2*ydim4_update_halo_kernel1_t2*(z)) #define OPS_ACC5(x,y,z) (x+xdim5_update_halo_kernel1_t2*(y)+xdim5_update_halo_kernel1_t2*ydim5_update_halo_kernel1_t2*(z)) #define OPS_ACC6(x,y,z) (x+xdim6_update_halo_kernel1_t2*(y)+xdim6_update_halo_kernel1_t2*ydim6_update_halo_kernel1_t2*(z)) //user function __device__ inline void update_halo_kernel1_t2_gpu(double *density0, double *density1, double *energy0, double *energy1, double *pressure, double *viscosity, double *soundspeed , const int* fields) { if(fields[FIELD_DENSITY0] == 1) density0[OPS_ACC0(0,0,0)] = density0[OPS_ACC0(0,-3,0)]; if(fields[FIELD_DENSITY1] == 1) density1[OPS_ACC1(0,0,0)] = density1[OPS_ACC1(0,-3,0)]; if(fields[FIELD_ENERGY0] == 1) energy0[OPS_ACC2(0,0,0)] = energy0[OPS_ACC2(0,-3,0)]; if(fields[FIELD_ENERGY1] == 1) energy1[OPS_ACC3(0,0,0)] = energy1[OPS_ACC3(0,-3,0)]; if(fields[FIELD_PRESSURE] == 1) pressure[OPS_ACC4(0,0,0)] = pressure[OPS_ACC4(0,-3,0)]; if(fields[FIELD_VISCOSITY] == 1) viscosity[OPS_ACC5(0,0,0)] = viscosity[OPS_ACC5(0,-3,0)]; if(fields[FIELD_SOUNDSPEED] == 1) soundspeed[OPS_ACC6(0,0,0)] = soundspeed[OPS_ACC6(0,-3,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 __global__ void ops_update_halo_kernel1_t2( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, double* __restrict arg4, double* __restrict arg5, double* __restrict arg6, const int* __restrict arg7, int size0, int size1, int size2 
){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel1_t2 + idx_z * 1*1 * xdim0_update_halo_kernel1_t2 * ydim0_update_halo_kernel1_t2; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel1_t2 + idx_z * 1*1 * xdim1_update_halo_kernel1_t2 * ydim1_update_halo_kernel1_t2; arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_update_halo_kernel1_t2 + idx_z * 1*1 * xdim2_update_halo_kernel1_t2 * ydim2_update_halo_kernel1_t2; arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_update_halo_kernel1_t2 + idx_z * 1*1 * xdim3_update_halo_kernel1_t2 * ydim3_update_halo_kernel1_t2; arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_update_halo_kernel1_t2 + idx_z * 1*1 * xdim4_update_halo_kernel1_t2 * ydim4_update_halo_kernel1_t2; arg5 += idx_x * 1*1 + idx_y * 1*1 * xdim5_update_halo_kernel1_t2 + idx_z * 1*1 * xdim5_update_halo_kernel1_t2 * ydim5_update_halo_kernel1_t2; arg6 += idx_x * 1*1 + idx_y * 1*1 * xdim6_update_halo_kernel1_t2 + idx_z * 1*1 * xdim6_update_halo_kernel1_t2 * ydim6_update_halo_kernel1_t2; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel1_t2_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel1_t2(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { #else void ops_par_loop_update_halo_kernel1_t2_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; ops_arg arg5 = desc->args[5]; ops_arg arg6 = desc->args[6]; ops_arg arg7 = desc->args[7]; #endif //Timing double t1,t2,c1,c2; ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,8,range,14)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(14,"update_halo_kernel1_t2"); OPS_kernels[14].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]; int ydim5 = args[5].dat->size[1]; 
int xdim6 = args[6].dat->size[0]; int ydim6 = args[6].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel1_t2_h || ydim0 != ydim0_update_halo_kernel1_t2_h || xdim1 != xdim1_update_halo_kernel1_t2_h || ydim1 != ydim1_update_halo_kernel1_t2_h || xdim2 != xdim2_update_halo_kernel1_t2_h || ydim2 != ydim2_update_halo_kernel1_t2_h || xdim3 != xdim3_update_halo_kernel1_t2_h || ydim3 != ydim3_update_halo_kernel1_t2_h || xdim4 != xdim4_update_halo_kernel1_t2_h || ydim4 != ydim4_update_halo_kernel1_t2_h || xdim5 != xdim5_update_halo_kernel1_t2_h || ydim5 != ydim5_update_halo_kernel1_t2_h || xdim6 != xdim6_update_halo_kernel1_t2_h || ydim6 != ydim6_update_halo_kernel1_t2_h) { hipMemcpyToSymbol( xdim0_update_halo_kernel1_t2, &xdim0, sizeof(int) ); xdim0_update_halo_kernel1_t2_h = xdim0; hipMemcpyToSymbol( ydim0_update_halo_kernel1_t2, &ydim0, sizeof(int) ); ydim0_update_halo_kernel1_t2_h = ydim0; hipMemcpyToSymbol( xdim1_update_halo_kernel1_t2, &xdim1, sizeof(int) ); xdim1_update_halo_kernel1_t2_h = xdim1; hipMemcpyToSymbol( ydim1_update_halo_kernel1_t2, &ydim1, sizeof(int) ); ydim1_update_halo_kernel1_t2_h = ydim1; hipMemcpyToSymbol( xdim2_update_halo_kernel1_t2, &xdim2, sizeof(int) ); xdim2_update_halo_kernel1_t2_h = xdim2; hipMemcpyToSymbol( ydim2_update_halo_kernel1_t2, &ydim2, sizeof(int) ); ydim2_update_halo_kernel1_t2_h = ydim2; hipMemcpyToSymbol( xdim3_update_halo_kernel1_t2, &xdim3, sizeof(int) ); xdim3_update_halo_kernel1_t2_h = xdim3; hipMemcpyToSymbol( ydim3_update_halo_kernel1_t2, &ydim3, sizeof(int) ); ydim3_update_halo_kernel1_t2_h = ydim3; hipMemcpyToSymbol( xdim4_update_halo_kernel1_t2, &xdim4, sizeof(int) ); xdim4_update_halo_kernel1_t2_h = xdim4; hipMemcpyToSymbol( ydim4_update_halo_kernel1_t2, &ydim4, sizeof(int) ); ydim4_update_halo_kernel1_t2_h = ydim4; hipMemcpyToSymbol( xdim5_update_halo_kernel1_t2, &xdim5, sizeof(int) ); xdim5_update_halo_kernel1_t2_h = xdim5; hipMemcpyToSymbol( ydim5_update_halo_kernel1_t2, &ydim5, sizeof(int) ); ydim5_update_halo_kernel1_t2_h = ydim5; hipMemcpyToSymbol( xdim6_update_halo_kernel1_t2, &xdim6, sizeof(int) ); xdim6_update_halo_kernel1_t2_h = xdim6; hipMemcpyToSymbol( ydim6_update_halo_kernel1_t2, &ydim6, sizeof(int) ); ydim6_update_halo_kernel1_t2_h = ydim6; } int *arg7h = (int *)arg7.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg7.data = OPS_consts_h + consts_bytes; arg7.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size); int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size); int dat6 = (OPS_soa ? 
args[6].dat->type_size : args[6].dat->elem_size); char *p_a[8]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2]); p_a[4] = (char *)args[4].data_d + base4; int base5 = args[5].dat->base_offset + dat5 * 1 * (start[0] * args[5].stencil->stride[0]); base5 = base5+ dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]); base5 = base5+ dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2]); p_a[5] = (char *)args[5].data_d + base5; int base6 = args[6].dat->base_offset + dat6 * 1 * (start[0] * args[6].stencil->stride[0]); base6 = base6+ dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1]); base6 = base6+ dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2]); p_a[6] = (char *)args[6].data_d + base6; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 8); ops_halo_exchanges(args,8,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[14].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_update_halo_kernel1_t2), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (int *)arg7.data_d,x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[14].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 8); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); ops_set_halo_dirtybit3(&args[2],range); ops_set_halo_dirtybit3(&args[3],range); ops_set_halo_dirtybit3(&args[4],range); ops_set_halo_dirtybit3(&args[5],range); ops_set_halo_dirtybit3(&args[6],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[14].mpi_time += t2-t1; OPS_kernels[14].transfer += 
ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg5); OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg6); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel1_t2(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 14; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 14; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 8; desc->args = (ops_arg*)malloc(8*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->args[5] = arg5; desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index; desc->args[6] = arg6; desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index; desc->args[7] = arg7; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg7.data,NUM_FIELDS*sizeof(int)); desc->args[7].data = tmp; desc->function = ops_par_loop_update_halo_kernel1_t2_execute; if (OPS_diags > 1) { ops_timing_realloc(14,"update_halo_kernel1_t2"); } ops_enqueue_kernel(desc); } #endif
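// -----------------------------------------------------------------------------
// Illustrative sketch (not part of the generated file above). The host stub
// keeps a host-side shadow of every __constant__ dimension (the *_h variables)
// and calls hipMemcpyToSymbol only when a dat size actually changes, so the
// symbol upload is paid once per new problem size rather than once per launch.
// The names below (xdim0_example, set_xdim0_example) are hypothetical; the
// hipMemcpyToSymbol call form follows the generated stub above.
#include <hip/hip_runtime.h>

__constant__ int xdim0_example;    // device-side copy read by the kernel
int xdim0_example_h = -1;          // host-side shadow; -1 means "never uploaded"

static void set_xdim0_example(int xdim0) {
  if (xdim0 != xdim0_example_h) {  // upload only when the value changed
    hipMemcpyToSymbol(xdim0_example, &xdim0, sizeof(int));
    xdim0_example_h = xdim0;
  }
}
// -----------------------------------------------------------------------------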
a26ceebd4630b3cdcb18d71adc37fef16757e738.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel1_t2; int xdim0_update_halo_kernel1_t2_h = -1; __constant__ int ydim0_update_halo_kernel1_t2; int ydim0_update_halo_kernel1_t2_h = -1; __constant__ int xdim1_update_halo_kernel1_t2; int xdim1_update_halo_kernel1_t2_h = -1; __constant__ int ydim1_update_halo_kernel1_t2; int ydim1_update_halo_kernel1_t2_h = -1; __constant__ int xdim2_update_halo_kernel1_t2; int xdim2_update_halo_kernel1_t2_h = -1; __constant__ int ydim2_update_halo_kernel1_t2; int ydim2_update_halo_kernel1_t2_h = -1; __constant__ int xdim3_update_halo_kernel1_t2; int xdim3_update_halo_kernel1_t2_h = -1; __constant__ int ydim3_update_halo_kernel1_t2; int ydim3_update_halo_kernel1_t2_h = -1; __constant__ int xdim4_update_halo_kernel1_t2; int xdim4_update_halo_kernel1_t2_h = -1; __constant__ int ydim4_update_halo_kernel1_t2; int ydim4_update_halo_kernel1_t2_h = -1; __constant__ int xdim5_update_halo_kernel1_t2; int xdim5_update_halo_kernel1_t2_h = -1; __constant__ int ydim5_update_halo_kernel1_t2; int ydim5_update_halo_kernel1_t2_h = -1; __constant__ int xdim6_update_halo_kernel1_t2; int xdim6_update_halo_kernel1_t2_h = -1; __constant__ int ydim6_update_halo_kernel1_t2; int ydim6_update_halo_kernel1_t2_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel1_t2*(y)+xdim0_update_halo_kernel1_t2*ydim0_update_halo_kernel1_t2*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel1_t2*(y)+xdim1_update_halo_kernel1_t2*ydim1_update_halo_kernel1_t2*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_update_halo_kernel1_t2*(y)+xdim2_update_halo_kernel1_t2*ydim2_update_halo_kernel1_t2*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_update_halo_kernel1_t2*(y)+xdim3_update_halo_kernel1_t2*ydim3_update_halo_kernel1_t2*(z)) #define OPS_ACC4(x,y,z) (x+xdim4_update_halo_kernel1_t2*(y)+xdim4_update_halo_kernel1_t2*ydim4_update_halo_kernel1_t2*(z)) #define OPS_ACC5(x,y,z) (x+xdim5_update_halo_kernel1_t2*(y)+xdim5_update_halo_kernel1_t2*ydim5_update_halo_kernel1_t2*(z)) #define OPS_ACC6(x,y,z) (x+xdim6_update_halo_kernel1_t2*(y)+xdim6_update_halo_kernel1_t2*ydim6_update_halo_kernel1_t2*(z)) //user function __device__ inline void update_halo_kernel1_t2_gpu(double *density0, double *density1, double *energy0, double *energy1, double *pressure, double *viscosity, double *soundspeed , const int* fields) { if(fields[FIELD_DENSITY0] == 1) density0[OPS_ACC0(0,0,0)] = density0[OPS_ACC0(0,-3,0)]; if(fields[FIELD_DENSITY1] == 1) density1[OPS_ACC1(0,0,0)] = density1[OPS_ACC1(0,-3,0)]; if(fields[FIELD_ENERGY0] == 1) energy0[OPS_ACC2(0,0,0)] = energy0[OPS_ACC2(0,-3,0)]; if(fields[FIELD_ENERGY1] == 1) energy1[OPS_ACC3(0,0,0)] = energy1[OPS_ACC3(0,-3,0)]; if(fields[FIELD_PRESSURE] == 1) pressure[OPS_ACC4(0,0,0)] = pressure[OPS_ACC4(0,-3,0)]; if(fields[FIELD_VISCOSITY] == 1) viscosity[OPS_ACC5(0,0,0)] = viscosity[OPS_ACC5(0,-3,0)]; if(fields[FIELD_SOUNDSPEED] == 1) soundspeed[OPS_ACC6(0,0,0)] = soundspeed[OPS_ACC6(0,-3,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 __global__ void ops_update_halo_kernel1_t2( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, double* __restrict arg4, double* __restrict arg5, double* __restrict arg6, const int* __restrict arg7, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * 
blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel1_t2 + idx_z * 1*1 * xdim0_update_halo_kernel1_t2 * ydim0_update_halo_kernel1_t2; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel1_t2 + idx_z * 1*1 * xdim1_update_halo_kernel1_t2 * ydim1_update_halo_kernel1_t2; arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_update_halo_kernel1_t2 + idx_z * 1*1 * xdim2_update_halo_kernel1_t2 * ydim2_update_halo_kernel1_t2; arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_update_halo_kernel1_t2 + idx_z * 1*1 * xdim3_update_halo_kernel1_t2 * ydim3_update_halo_kernel1_t2; arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_update_halo_kernel1_t2 + idx_z * 1*1 * xdim4_update_halo_kernel1_t2 * ydim4_update_halo_kernel1_t2; arg5 += idx_x * 1*1 + idx_y * 1*1 * xdim5_update_halo_kernel1_t2 + idx_z * 1*1 * xdim5_update_halo_kernel1_t2 * ydim5_update_halo_kernel1_t2; arg6 += idx_x * 1*1 + idx_y * 1*1 * xdim6_update_halo_kernel1_t2 + idx_z * 1*1 * xdim6_update_halo_kernel1_t2 * ydim6_update_halo_kernel1_t2; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel1_t2_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel1_t2(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { #else void ops_par_loop_update_halo_kernel1_t2_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; ops_arg arg5 = desc->args[5]; ops_arg arg6 = desc->args[6]; ops_arg arg7 = desc->args[7]; #endif //Timing double t1,t2,c1,c2; ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,8,range,14)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(14,"update_halo_kernel1_t2"); OPS_kernels[14].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]; int ydim6 = args[6].dat->size[1]; if (xdim0 
!= xdim0_update_halo_kernel1_t2_h || ydim0 != ydim0_update_halo_kernel1_t2_h || xdim1 != xdim1_update_halo_kernel1_t2_h || ydim1 != ydim1_update_halo_kernel1_t2_h || xdim2 != xdim2_update_halo_kernel1_t2_h || ydim2 != ydim2_update_halo_kernel1_t2_h || xdim3 != xdim3_update_halo_kernel1_t2_h || ydim3 != ydim3_update_halo_kernel1_t2_h || xdim4 != xdim4_update_halo_kernel1_t2_h || ydim4 != ydim4_update_halo_kernel1_t2_h || xdim5 != xdim5_update_halo_kernel1_t2_h || ydim5 != ydim5_update_halo_kernel1_t2_h || xdim6 != xdim6_update_halo_kernel1_t2_h || ydim6 != ydim6_update_halo_kernel1_t2_h) { cudaMemcpyToSymbol( xdim0_update_halo_kernel1_t2, &xdim0, sizeof(int) ); xdim0_update_halo_kernel1_t2_h = xdim0; cudaMemcpyToSymbol( ydim0_update_halo_kernel1_t2, &ydim0, sizeof(int) ); ydim0_update_halo_kernel1_t2_h = ydim0; cudaMemcpyToSymbol( xdim1_update_halo_kernel1_t2, &xdim1, sizeof(int) ); xdim1_update_halo_kernel1_t2_h = xdim1; cudaMemcpyToSymbol( ydim1_update_halo_kernel1_t2, &ydim1, sizeof(int) ); ydim1_update_halo_kernel1_t2_h = ydim1; cudaMemcpyToSymbol( xdim2_update_halo_kernel1_t2, &xdim2, sizeof(int) ); xdim2_update_halo_kernel1_t2_h = xdim2; cudaMemcpyToSymbol( ydim2_update_halo_kernel1_t2, &ydim2, sizeof(int) ); ydim2_update_halo_kernel1_t2_h = ydim2; cudaMemcpyToSymbol( xdim3_update_halo_kernel1_t2, &xdim3, sizeof(int) ); xdim3_update_halo_kernel1_t2_h = xdim3; cudaMemcpyToSymbol( ydim3_update_halo_kernel1_t2, &ydim3, sizeof(int) ); ydim3_update_halo_kernel1_t2_h = ydim3; cudaMemcpyToSymbol( xdim4_update_halo_kernel1_t2, &xdim4, sizeof(int) ); xdim4_update_halo_kernel1_t2_h = xdim4; cudaMemcpyToSymbol( ydim4_update_halo_kernel1_t2, &ydim4, sizeof(int) ); ydim4_update_halo_kernel1_t2_h = ydim4; cudaMemcpyToSymbol( xdim5_update_halo_kernel1_t2, &xdim5, sizeof(int) ); xdim5_update_halo_kernel1_t2_h = xdim5; cudaMemcpyToSymbol( ydim5_update_halo_kernel1_t2, &ydim5, sizeof(int) ); ydim5_update_halo_kernel1_t2_h = ydim5; cudaMemcpyToSymbol( xdim6_update_halo_kernel1_t2, &xdim6, sizeof(int) ); xdim6_update_halo_kernel1_t2_h = xdim6; cudaMemcpyToSymbol( ydim6_update_halo_kernel1_t2, &ydim6, sizeof(int) ); ydim6_update_halo_kernel1_t2_h = ydim6; } int *arg7h = (int *)arg7.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg7.data = OPS_consts_h + consts_bytes; arg7.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size); int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size); int dat6 = (OPS_soa ? 
args[6].dat->type_size : args[6].dat->elem_size); char *p_a[8]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2]); p_a[4] = (char *)args[4].data_d + base4; int base5 = args[5].dat->base_offset + dat5 * 1 * (start[0] * args[5].stencil->stride[0]); base5 = base5+ dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]); base5 = base5+ dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2]); p_a[5] = (char *)args[5].data_d + base5; int base6 = args[6].dat->base_offset + dat6 * 1 * (start[0] * args[6].stencil->stride[0]); base6 = base6+ dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1]); base6 = base6+ dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2]); p_a[6] = (char *)args[6].data_d + base6; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 8); ops_halo_exchanges(args,8,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[14].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_update_halo_kernel1_t2<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (int *)arg7.data_d,x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[14].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 8); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); ops_set_halo_dirtybit3(&args[2],range); ops_set_halo_dirtybit3(&args[3],range); ops_set_halo_dirtybit3(&args[4],range); ops_set_halo_dirtybit3(&args[5],range); ops_set_halo_dirtybit3(&args[6],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[14].mpi_time += t2-t1; OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg0); 
OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg5); OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg6); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel1_t2(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 14; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 14; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 8; desc->args = (ops_arg*)malloc(8*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->args[5] = arg5; desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index; desc->args[6] = arg6; desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index; desc->args[7] = arg7; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg7.data,NUM_FIELDS*sizeof(int)); desc->args[7].data = tmp; desc->function = ops_par_loop_update_halo_kernel1_t2_execute; if (OPS_diags > 1) { ops_timing_realloc(14,"update_halo_kernel1_t2"); } ops_enqueue_kernel(desc); } #endif
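// -----------------------------------------------------------------------------
// Illustrative sketch (not part of the generated file above). Each p_a[i] is
// the dat's device pointer advanced by a byte offset built from the range
// start indices, the stencil strides, and the padded x/y sizes of the dat.
// The helper restates that arithmetic for one 3D dat; dat_base_offset,
// elem_bytes, size, stride and start are hypothetical stand-ins for the
// corresponding args[i].dat / args[i].stencil fields.
#include <cstddef>

static inline std::ptrdiff_t base_offset_3d(std::ptrdiff_t dat_base_offset,
                                            int elem_bytes,
                                            const int size[3],
                                            const int stride[3],
                                            const int start[3]) {
  std::ptrdiff_t base = dat_base_offset;
  base += (std::ptrdiff_t)elem_bytes * 1 * (start[0] * stride[0]);
  base += (std::ptrdiff_t)elem_bytes * size[0] * (start[1] * stride[1]);
  base += (std::ptrdiff_t)elem_bytes * size[0] * size[1] * (start[2] * stride[2]);
  return base;  // add to (char *)data_d to get the first element of the range
}
// -----------------------------------------------------------------------------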
777a8c9886d6597203a3ade67b69d617db8cef7a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe/device.hpp"
#include "caffe/util/math_functions.hpp"

#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif

namespace caffe {

#ifdef USE_ROCM
template <typename Dtype>
__global__ void AdaGradUpdate(int N, Dtype* g, Dtype* h, Dtype delta,
                              Dtype local_rate) {
  CUDA_KERNEL_LOOP(i, N) {
    float gi = g[i];
    float hi = h[i] = h[i] + gi*gi;
    g[i] = local_rate * gi / (sqrt(hi) + delta);
  }
}
#endif

template <typename Dtype>
void adagrad_update_gpu(device* dev, int_tp N, Dtype* g, Dtype* h, Dtype delta,
                        Dtype local_rate) {
  if (dev->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
    AdaGradUpdate<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
    CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS) (
        N, g, h, delta, local_rate);
    CUDA_POST_KERNEL_CHECK;
#endif  // USE_ROCM
  } else {
#ifdef USE_GREENTEA
    viennacl::ocl::context &ctx = viennacl::ocl::get_context(dev->id());
    viennacl::ocl::program &program = dev->template program<Dtype>();
    viennacl::ocl::kernel &oclk_ada_grad_update = program.get_kernel(
        CL_KERNEL_SELECT("ada_grad_update"));
    viennacl::ocl::enqueue(
        oclk_ada_grad_update(N, WrapHandle((cl_mem) g, &ctx),
                             WrapHandle((cl_mem) h, &ctx),
                             fixup_arg_type(delta),
                             fixup_arg_type(local_rate)),
        ctx.get_queue());
#endif  // USE_GREENTEA
  }
}

#ifdef HAS_HALF_SUPPORT
template void adagrad_update_gpu<half>(device*, int_tp, half*, half*, half,
                                       half);
#endif
template void adagrad_update_gpu<float>(device*, int_tp, float*, float*,
                                        float, float);
template void adagrad_update_gpu<double>(device*, int_tp, double*, double*,
                                         double, double);

}  // namespace caffe
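// -----------------------------------------------------------------------------
// Illustrative sketch (not part of the Caffe file above). CUDA_KERNEL_LOOP is
// Caffe's grid-stride loop macro; the update itself is h <- h + g*g and
// g <- lr * g / (sqrt(h) + delta). The dependency-free HIP version below shows
// the same kernel with an explicit launch; the 256-thread block size is an
// assumption, not Caffe's CAFFE_GET_BLOCKS / CAFFE_CUDA_NUM_THREADS values.
#include <hip/hip_runtime.h>

__global__ void adagrad_update_sketch(int n, float* g, float* h,
                                      float delta, float local_rate) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {                  // grid-stride loop
    float gi = g[i];
    float hi = h[i] = h[i] + gi * gi;                  // accumulate squared gradient
    g[i] = local_rate * gi / (sqrtf(hi) + delta);      // scaled step written back to g
  }
}

static void launch_adagrad_sketch(int n, float* d_g, float* d_h,
                                  float delta, float local_rate) {
  const int threads = 256;
  const int blocks = (n + threads - 1) / threads;
  hipLaunchKernelGGL(adagrad_update_sketch, dim3(blocks), dim3(threads), 0, 0,
                     n, d_g, d_h, delta, local_rate);
}
// -----------------------------------------------------------------------------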
777a8c9886d6597203a3ade67b69d617db8cef7a.cu
#include "caffe/device.hpp" #include "caffe/util/math_functions.hpp" #ifdef USE_GREENTEA #include "caffe/greentea/greentea.hpp" #include "caffe/greentea/greentea_math_functions.hpp" #endif namespace caffe { #ifdef USE_CUDA template <typename Dtype> __global__ void AdaGradUpdate(int N, Dtype* g, Dtype* h, Dtype delta, Dtype local_rate) { CUDA_KERNEL_LOOP(i, N) { float gi = g[i]; float hi = h[i] = h[i] + gi*gi; g[i] = local_rate * gi / (sqrt(hi) + delta); } } #endif template <typename Dtype> void adagrad_update_gpu(device* dev, int_tp N, Dtype* g, Dtype* h, Dtype delta, Dtype local_rate) { if (dev->backend() == BACKEND_CUDA) { #ifdef USE_CUDA AdaGradUpdate<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS) ( N, g, h, delta, local_rate); CUDA_POST_KERNEL_CHECK; #endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context(dev->id()); viennacl::ocl::program &program = dev->template program<Dtype>(); viennacl::ocl::kernel &oclk_ada_grad_update = program.get_kernel( CL_KERNEL_SELECT("ada_grad_update")); viennacl::ocl::enqueue( oclk_ada_grad_update(N, WrapHandle((cl_mem) g, &ctx), WrapHandle((cl_mem) h, &ctx), fixup_arg_type(delta), fixup_arg_type(local_rate)), ctx.get_queue()); #endif // USE_GREENTEA } } #ifdef HAS_HALF_SUPPORT template void adagrad_update_gpu<half>(device*, int_tp, half*, half*, half, half); #endif template void adagrad_update_gpu<float>(device*, int_tp, float*, float*, float, float); template void adagrad_update_gpu<double>(device*, int_tp, double*, double*, double, double); } // namespace caffe
a83e355a786b9c0e870f4d24c87d5da8a075d780.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "z_GMT.cuh" // CPU version of GMT (see .cuh file) // TODO: what if MCMP were all on the CPU except for the CP computation? // now the coding is much simpler, and I can use vectors more easily /*********************** CPU functions ***********************/ float z_GMT(float rn, int initIdx, int goalIdx, float dt, int numMCParticles, float *h, int *nnGoSizes, int *nnGoEdges, int *nnIdxs, int maxNNSize, bool *isFreeSamples, bool *isFreeEdges, int numDisc, int obstaclesCount, float *d_topts, float *topts, float *d_copts, float *copts, float *d_offsets, float offsetMult, std::vector<int> P, int *Pcounts, int *pathPrev, int *pathNode, float *pathCost, float *pathTime, int maxPathCount, std::vector<int> G, int *sizeG, float *d_pathTime, int *wavefrontPathPrev, int *wavefrontNodeNext, int *wavefrontEdge, int *d_wavefrontPathPrev, int *d_wavefrontNodeNext, int *d_wavefrontEdge, float lambda, int numBuckets, float *samples, float *obstacles, float *d_obstaclesInflated, int Tmax, float *splitPath, int *pathLength, float *costGoal) { // std::cout << "________________ Beginning Z_GMT ________________" << std::endl; double t_ccgmtStart = std::clock(); float dr = lambda*rn; int numPaths = 1; // setup initial path int Gidx = 0; bool goalCondition = false; bool emptyOpenSet = false; P[initIdx*NUM + 0] = 0; // path 0 is at initIdx Pcounts[initIdx]++; pathPrev[0] = -2; // denote end pathNode[0] = initIdx; pathCost[0] = 0; pathTime[0] = 0; sizeG[Gidx]++; G[0] = 0; float costThreshold = h[initIdx]; int maxItrs = 20; int itrs = 0; // *************************** exploration loop *************************** while (itrs < maxItrs && !goalCondition && !emptyOpenSet) { // cutoff at solution exists with cp = cpMinSoln or expansion is empty ++itrs; // std::cout << "************** starting iteration " << itrs << " with " << sizeG[Gidx] << " paths" << std::endl; int numNewPaths = 0; for (int g = 0; g < sizeG[Gidx]; ++g) { int pathIdxPrev = G[Gidx*maxPathCount + g]; // path to expand from G[Gidx*maxPathCount + g] = -1; // clear out this g int nodeIdxPrev = pathNode[pathIdxPrev]; for (int nn = 0; nn < nnGoSizes[nodeIdxPrev]; ++nn) { int nodeIdxNext = nnGoEdges[nodeIdxPrev*maxNNSize + nn]; // node to expand to int edgeIdx = nnIdxs[nodeIdxNext*NUM + nodeIdxPrev]; // edge index connecting prev to next // check if edge is collision free and the sample is free if (!isFreeEdges[edgeIdx] || !isFreeSamples[nodeIdxNext]) continue; wavefrontPathPrev[numNewPaths] = pathIdxPrev; wavefrontNodeNext[numNewPaths] = nodeIdxNext; wavefrontEdge[numNewPaths] = edgeIdx; numNewPaths++; if (numNewPaths > maxPathCount) { return -1; } } } if (numPaths + numNewPaths >= maxPathCount) { std::cout << "maxPathCount reached, increase max number of paths" << std::endl; return -1; } sizeG[Gidx] = 0; // reset G size // copy necessary info to GPU hipDeviceSynchronize(); CUDA_ERROR_CHECK(hipMemcpy(d_wavefrontPathPrev, wavefrontPathPrev, sizeof(int)*numNewPaths, hipMemcpyHostToDevice)); CUDA_ERROR_CHECK(hipMemcpy(d_wavefrontEdge, wavefrontEdge, sizeof(int)*numNewPaths, hipMemcpyHostToDevice)); hipDeviceSynchronize(); // calculate CP (do half plane checks per particle, then sum) // copy over path times for (int i = 0; i < numNewPaths; ++i) { pathTime[numPaths + i] = pathTime[wavefrontPathPrev[i]] + topts[wavefrontEdge[i]]; pathCost[numPaths + i] = pathCost[wavefrontPathPrev[i]] + copts[wavefrontEdge[i]]; // cost = time currently } // 
************************************** dominance check ************************************** // load all new nodes into P int PnewCount[NUM]; for (int i = 0; i < NUM; ++i) PnewCount[i] = 0; for (int i = 0; i < numNewPaths; ++i) { int nodeIdx = wavefrontNodeNext[i]; int pathIdx = numPaths + i; P[nodeIdx*NUM + Pcounts[nodeIdx] + PnewCount[nodeIdx]] = pathIdx; PnewCount[nodeIdx]++; } // check new paths against stored paths for (int i = 0; i < numNewPaths; ++i) { int nodeIdx = wavefrontNodeNext[i]; // already eliminated or at the goal idx if (wavefrontNodeNext[i] == -1) // || nodeIdx == goalIdx) continue; int pathIdx = numPaths + i; for (int j = 0; j < Pcounts[nodeIdx] + PnewCount[nodeIdx]; ++j) { int pathIdxCompare = P[nodeIdx*NUM + j]; // don't compare to self if (pathIdxCompare == pathIdx) continue; // comparison if (pathCost[pathIdxCompare] < pathCost[pathIdx]) { // check if paths are co-dominant, then keep the one with a lower path number if (pathCost[pathIdxCompare] >= pathCost[pathIdx] && pathIdx < pathIdxCompare) { continue; } wavefrontNodeNext[i] = -1; // mark for removal break; } } } // ************************************** store good paths ************************************** int numValidNewPaths = 0; for (int i = 0; i < numNewPaths; ++i) { int nodeIdx = wavefrontNodeNext[i]; int pathIdx = numPaths + i; // TODO: if i break here, decrement path index and path count if (wavefrontNodeNext[i] == -1 || wavefrontNodeNext[i] == 0) { // either node is at the init or marked as bad wavefrontNodeNext[i] = -1; // clear continue; } int pathIdxStore = numPaths + numValidNewPaths; pathTime[pathIdxStore] = pathTime[pathIdx]; pathCost[pathIdxStore] = pathCost[pathIdx]; float dCost = (pathCost[pathIdxStore] + h[nodeIdx] - costThreshold); dCost = ::max((float) 0, dCost); // if below zero, put into the next bucket int bucketIdx = (((int) (dCost / dr)) + Gidx + 1) % numBuckets; G[bucketIdx*maxPathCount + sizeG[bucketIdx]] = pathIdxStore; sizeG[bucketIdx]++; pathPrev[pathIdxStore] = wavefrontPathPrev[i]; pathNode[pathIdxStore] = wavefrontNodeNext[i]; P[nodeIdx*NUM + Pcounts[nodeIdx]] = pathIdxStore; Pcounts[nodeIdx]++; wavefrontNodeNext[i] = -1; // clear, TODO: uncessary, but nice for debugging purposes to have a fresh array numValidNewPaths++; } numPaths += numValidNewPaths; // update goal condition if (Pcounts[goalIdx] > 0) goalCondition = true; if (goalCondition) { break; } // update empty open set condition emptyOpenSet = true; for (int b = 0; b < numBuckets; ++b) emptyOpenSet = emptyOpenSet && (sizeG[b] == 0); if (emptyOpenSet) { std::cout << "emptyOpenSet met" << std::endl; break; } // update G index Gidx = (Gidx+1) % numBuckets; costThreshold += dr; // end and send out warning if maxPathCount is exceeded if (numPaths >= maxPathCount) { std::cout << "maxPathCount reached, increase max number of paths" << std::endl; return -1; } } // output all paths // find best path with cp < cpTarget, then bisection search // how tight is our solution? 
int bestPathIdx = -1; float bestPathCost = std::numeric_limits<float>::max(); for (int i = 0; i < Pcounts[goalIdx]; ++i) { int pathIdx = P[goalIdx*NUM + i]; if (goalCondition && bestPathCost > pathCost[pathIdx]) { bestPathCost = pathCost[pathIdx]; bestPathIdx = pathIdx; } // output path std::cout << "nodes = [" << pathNode[pathIdx]; while (pathPrev[pathIdx] != -2) { pathIdx = pathPrev[pathIdx]; std::cout << ", " << pathNode[pathIdx]; } std::cout << "]"; } // validate chosen path, or iterate to next path std::cout << " with cost = " << bestPathCost << std::endl; *costGoal = bestPathCost; if (bestPathCost > 10000) { std::cout << "FAILED TO FIND A PATH" << std::endl; return 0; // return to deflate the obstacles } // load samples into array std::vector<float> xs; // for (int d = 0; d < DIM; ++d) // xs[d] = samples[goalIdx*DIM+d]; xs.clear(); int pathIdx = bestPathIdx; int nodeIdx = pathNode[pathIdx]; int pathNumSamples = 0; while (pathIdx != -2) { ++pathNumSamples; for (int d = DIM-1; d >= 0; --d) xs.insert(xs.begin(), samples[nodeIdx*DIM+d]); pathIdx = pathPrev[pathIdx]; nodeIdx = pathNode[pathIdx]; } // printArray(&(xs[0]),pathNumSamples,DIM,std::cout); // std::cout << "path has " << int(xs.size()) << " elements" << std::endl; // solve 2pbvp float bestPathTopt = findOptimalPath(dt, splitPath, &(xs[0]), pathNumSamples, pathLength); // std::cout << "2pbvp soln is " << std::endl; float *d_path; CUDA_ERROR_CHECK(hipMalloc(&d_path, sizeof(float)*Tmax*DIM)); float *d_obstacles; CUDA_ERROR_CHECK(hipMalloc(&d_obstacles, sizeof(float)*2*obstaclesCount*DIM)); CUDA_ERROR_CHECK(hipMemcpy(d_obstacles, obstacles, sizeof(float)*2*obstaclesCount*DIM, hipMemcpyHostToDevice)); // ************************************** smoothing ************************************** // load up // std::cout << "smoothing" << std::endl; float smoothPath[DIM*Tmax]; // empty array for the smoothed path int smoothPathLength = 0; std::vector<float> xsSmooth(xs.size()); float lastSmoothBelowPath[DIM*Tmax]; // empty array for the smoothed path that was last seen below the CP constraint copyArray(lastSmoothBelowPath, splitPath, DIM*Tmax); int lastSmoothBelowPathLength = *pathLength; std::vector<float> xsLastSmoothBelow(xs.size()); std::copy(xs.begin(), xs.end(), xsLastSmoothBelow.begin()); float lastSmoothBelowCost = 1000000; int dtMult = 10; float optPath[DIM*Tmax*dtMult]; // empty array for the optimal path from the init to goal (i.e. 
no obstacles) int optPathLength = 0; std::vector<float> xsOpt; // determine the optimal path from start to goal for (int d = 0; d < DIM; ++d) xsOpt.push_back(xs[d]); for (int d = 0; d < DIM; ++d) xsOpt.push_back(xs[(pathNumSamples-1)*DIM+d]); float optPathTopt = findOptimalPath(dt/dtMult, optPath, &(xsOpt[0]), 2, &optPathLength); // std::cout << " nominal path is: " << std::endl; // printArray(splitPath,*pathLength,DIM,std::cout); // find the path points that map to the nominal path std::vector<float> splitPathTopts(pathNumSamples,0); std::vector<int> splitPathIdxs(pathNumSamples,0); for (int i = 0; i < pathNumSamples-1; ++i) splitPathTopts[i+1] = splitPathTopts[i]+toptBisection(&xs[i*DIM], &xs[(i+1)*DIM], 2); for (int i = 0; i < pathNumSamples; ++i) splitPathIdxs[i] = (optPathLength-1)*splitPathTopts[i]/splitPathTopts[splitPathTopts.size()-1]; // std::cout << "found optimal times as: "; // for ( int i = 0; i < splitPathTopts.size(); i++) { // std::cout << splitPathTopts[i] << " "; // } // std::cout << std::endl; // std::cout << "found indexes as: "; // for ( int i = 0; i < splitPathIdxs.size(); i++) { // std::cout << splitPathIdxs[i] << " "; // } // std::cout << " of " << optPathLength; xsOpt.clear(); // std::cout << " means we match: " << std::endl; for ( int i = 0; i < pathNumSamples; i++) { // printArray(&xs[i*DIM],1,DIM,std::cout); // std::cout << " with "; printArray(&optPath[splitPathIdxs[i]*DIM],1,DIM,std::cout); for (int d = 0; d < DIM; ++d) xsOpt.push_back(optPath[splitPathIdxs[i]*DIM+d]); } // std::cout << "verify creation of xsOpt: " << std::endl; // for ( int i = 0; i < xsOpt.size(); i++) { // std::cout << xsOpt[i] << " "; // if ((i + 1) % 6 == 0) // std::cout << std::endl; // } // generate new xsSmooth and enter loop int maxSmoothItrs = 15; int smoothItrs = 0; float alpha = 1.0; float alphaU = 1.0; float alphaL = 0.0; // TODO if exit with maxSmoothItrs, need to default to the last path under the CP constraint // save the max close wise float smoothPathCost = 0; // float solnPathCP = 0; while (smoothItrs < maxSmoothItrs) { for (int i = 0; i < xs.size(); ++i) xsSmooth[i] = (1-alpha)*xs[i] + alpha*xsOpt[i]; smoothPathLength = 0; smoothPathCost = 0; findOptimalPath(dt, smoothPath, &(xsSmooth[0]), pathNumSamples, &smoothPathLength); // ignoring return of topt for (int i = 0; i < pathNumSamples-1; ++i) { float tau = toptBisection(&xsSmooth[i*DIM], &xsSmooth[(i+1)*DIM], 2); smoothPathCost += cost(tau, &xsSmooth[i*DIM], &xsSmooth[(i+1)*DIM]); } CUDA_ERROR_CHECK(hipMemcpy(d_path, smoothPath, sizeof(float)*Tmax*DIM, hipMemcpyHostToDevice)); hipDeviceSynchronize(); bool collisionFree; bool *d_collisionFree; CUDA_ERROR_CHECK(hipMalloc(&d_collisionFree, sizeof(bool))); hipLaunchKernelGGL(( isFreePath), dim3(1),dim3(1), 0, 0, d_obstaclesInflated, obstaclesCount, d_path, smoothPathLength, d_collisionFree); hipDeviceSynchronize(); CUDA_ERROR_CHECK(hipMemcpy(&collisionFree, d_collisionFree, sizeof(bool), hipMemcpyDeviceToHost)); // std::cout << "-------------- iteration " << smoothItrs << ", alpha = " << alpha << // ", col free = " << collisionFree << ", cost = " << smoothPathCost << std::endl; if (!collisionFree) alphaU = alpha; if (collisionFree) alphaL = alpha; alpha = (alphaL + alphaU)/2; // go for path closest to the cp limit if (!collisionFree) { // std::cout << " NOT FREE smoothed path is: " << std::endl; // printArray(smoothPath,smoothPathLength,DIM,std::cout); } if (collisionFree) { std::cout << "NEW BEST PATH!" 
<< std::endl; // std::cout << " FREE smoothed path is: " << std::endl; // printArray(smoothPath,smoothPathLength,DIM,std::cout); copyArray(lastSmoothBelowPath, smoothPath, DIM*Tmax); lastSmoothBelowPathLength = smoothPathLength; std::vector<float> xsLastSmoothBelow(xs.size()); std::copy(xsSmooth.begin(), xsSmooth.end(), xsLastSmoothBelow.begin()); lastSmoothBelowCost = smoothPathCost; } ++smoothItrs; } // std::cout << " smoothed path is: " << std::endl; // printArray(lastSmoothBelowPath,lastSmoothBelowPathLength,DIM,std::cout); copyArray(splitPath, lastSmoothBelowPath, DIM*Tmax); std::cout << "cost = " << lastSmoothBelowCost << std::endl; *costGoal = lastSmoothBelowCost; CUDA_ERROR_CHECK(hipMemcpy(d_path, splitPath, sizeof(float)*Tmax*DIM, hipMemcpyHostToDevice)); double t_CPStart = std::clock(); float cp = collisionProbability(d_obstacles, obstaclesCount, d_offsets, offsetMult, d_path, lastSmoothBelowPathLength); double t_CP = (std::clock() - t_CPStart) / (double) CLOCKS_PER_SEC; // std::cout << "CP took: " << t_CP << std::endl; // std::cout << "Collision Probability = " << cp << std::endl; hipFree(d_path); hipFree(d_obstacles); return cp; } /*********************** GPU kernels ***********************/ // probably just called with one thread, I hate how I am implementing this for the one time use __global__ void isFreePath(float *obstacles, int obstaclesCount, float *path, int pathLength, bool *collisionFree) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= 1) return; float v[DIM], w[DIM]; float bbMin[DIM], bbMax[DIM]; bool motionValid = true; for (int i = 0; i < pathLength-1; ++i) { if (!motionValid) break; for (int d = 0; d < DIM; ++d) { v[d] = path[i*DIM + d]; w[d] = path[(i+1)*DIM + d]; if (v[d] > w[d]) { bbMin[d] = w[d]; bbMax[d] = v[d]; } else { bbMin[d] = v[d]; bbMax[d] = w[d]; } } motionValid = motionValid && isMotionValid(v, w, bbMin, bbMax, obstaclesCount, obstacles, NULL); } collisionFree[0] = motionValid; }
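// -----------------------------------------------------------------------------
// Illustrative sketch (not part of z_GMT above). The exploration loop keeps
// its open set in numBuckets cost buckets of width dr = lambda * rn: a newly
// stored path whose cost-plus-heuristic exceeds the current threshold by dCost
// lands ((int)(dCost / dr) + 1) buckets ahead of the bucket being expanded,
// modulo numBuckets. The helper restates that indexing; all parameter names
// are hypothetical stand-ins for the corresponding locals.
#include <algorithm>

static inline int bucket_for_path(float pathCost, float heuristic,
                                  float costThreshold, float dr,
                                  int currentBucket, int numBuckets) {
  float dCost = std::max(0.0f, pathCost + heuristic - costThreshold);
  return (((int)(dCost / dr)) + currentBucket + 1) % numBuckets;  // next bucket at minimum
}
// -----------------------------------------------------------------------------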
a83e355a786b9c0e870f4d24c87d5da8a075d780.cu
#include "z_GMT.cuh" // CPU version of GMT (see .cuh file) // TODO: what if MCMP were all on the CPU except for the CP computation? // now the coding is much simpler, and I can use vectors more easily /*********************** CPU functions ***********************/ float z_GMT(float rn, int initIdx, int goalIdx, float dt, int numMCParticles, float *h, int *nnGoSizes, int *nnGoEdges, int *nnIdxs, int maxNNSize, bool *isFreeSamples, bool *isFreeEdges, int numDisc, int obstaclesCount, float *d_topts, float *topts, float *d_copts, float *copts, float *d_offsets, float offsetMult, std::vector<int> P, int *Pcounts, int *pathPrev, int *pathNode, float *pathCost, float *pathTime, int maxPathCount, std::vector<int> G, int *sizeG, float *d_pathTime, int *wavefrontPathPrev, int *wavefrontNodeNext, int *wavefrontEdge, int *d_wavefrontPathPrev, int *d_wavefrontNodeNext, int *d_wavefrontEdge, float lambda, int numBuckets, float *samples, float *obstacles, float *d_obstaclesInflated, int Tmax, float *splitPath, int *pathLength, float *costGoal) { // std::cout << "________________ Beginning Z_GMT ________________" << std::endl; double t_ccgmtStart = std::clock(); float dr = lambda*rn; int numPaths = 1; // setup initial path int Gidx = 0; bool goalCondition = false; bool emptyOpenSet = false; P[initIdx*NUM + 0] = 0; // path 0 is at initIdx Pcounts[initIdx]++; pathPrev[0] = -2; // denote end pathNode[0] = initIdx; pathCost[0] = 0; pathTime[0] = 0; sizeG[Gidx]++; G[0] = 0; float costThreshold = h[initIdx]; int maxItrs = 20; int itrs = 0; // *************************** exploration loop *************************** while (itrs < maxItrs && !goalCondition && !emptyOpenSet) { // cutoff at solution exists with cp = cpMinSoln or expansion is empty ++itrs; // std::cout << "************** starting iteration " << itrs << " with " << sizeG[Gidx] << " paths" << std::endl; int numNewPaths = 0; for (int g = 0; g < sizeG[Gidx]; ++g) { int pathIdxPrev = G[Gidx*maxPathCount + g]; // path to expand from G[Gidx*maxPathCount + g] = -1; // clear out this g int nodeIdxPrev = pathNode[pathIdxPrev]; for (int nn = 0; nn < nnGoSizes[nodeIdxPrev]; ++nn) { int nodeIdxNext = nnGoEdges[nodeIdxPrev*maxNNSize + nn]; // node to expand to int edgeIdx = nnIdxs[nodeIdxNext*NUM + nodeIdxPrev]; // edge index connecting prev to next // check if edge is collision free and the sample is free if (!isFreeEdges[edgeIdx] || !isFreeSamples[nodeIdxNext]) continue; wavefrontPathPrev[numNewPaths] = pathIdxPrev; wavefrontNodeNext[numNewPaths] = nodeIdxNext; wavefrontEdge[numNewPaths] = edgeIdx; numNewPaths++; if (numNewPaths > maxPathCount) { return -1; } } } if (numPaths + numNewPaths >= maxPathCount) { std::cout << "maxPathCount reached, increase max number of paths" << std::endl; return -1; } sizeG[Gidx] = 0; // reset G size // copy necessary info to GPU cudaDeviceSynchronize(); CUDA_ERROR_CHECK(cudaMemcpy(d_wavefrontPathPrev, wavefrontPathPrev, sizeof(int)*numNewPaths, cudaMemcpyHostToDevice)); CUDA_ERROR_CHECK(cudaMemcpy(d_wavefrontEdge, wavefrontEdge, sizeof(int)*numNewPaths, cudaMemcpyHostToDevice)); cudaDeviceSynchronize(); // calculate CP (do half plane checks per particle, then sum) // copy over path times for (int i = 0; i < numNewPaths; ++i) { pathTime[numPaths + i] = pathTime[wavefrontPathPrev[i]] + topts[wavefrontEdge[i]]; pathCost[numPaths + i] = pathCost[wavefrontPathPrev[i]] + copts[wavefrontEdge[i]]; // cost = time currently } // ************************************** dominance check ************************************** // load all new 
nodes into P int PnewCount[NUM]; for (int i = 0; i < NUM; ++i) PnewCount[i] = 0; for (int i = 0; i < numNewPaths; ++i) { int nodeIdx = wavefrontNodeNext[i]; int pathIdx = numPaths + i; P[nodeIdx*NUM + Pcounts[nodeIdx] + PnewCount[nodeIdx]] = pathIdx; PnewCount[nodeIdx]++; } // check new paths against stored paths for (int i = 0; i < numNewPaths; ++i) { int nodeIdx = wavefrontNodeNext[i]; // already eliminated or at the goal idx if (wavefrontNodeNext[i] == -1) // || nodeIdx == goalIdx) continue; int pathIdx = numPaths + i; for (int j = 0; j < Pcounts[nodeIdx] + PnewCount[nodeIdx]; ++j) { int pathIdxCompare = P[nodeIdx*NUM + j]; // don't compare to self if (pathIdxCompare == pathIdx) continue; // comparison if (pathCost[pathIdxCompare] < pathCost[pathIdx]) { // check if paths are co-dominant, then keep the one with a lower path number if (pathCost[pathIdxCompare] >= pathCost[pathIdx] && pathIdx < pathIdxCompare) { continue; } wavefrontNodeNext[i] = -1; // mark for removal break; } } } // ************************************** store good paths ************************************** int numValidNewPaths = 0; for (int i = 0; i < numNewPaths; ++i) { int nodeIdx = wavefrontNodeNext[i]; int pathIdx = numPaths + i; // TODO: if i break here, decrement path index and path count if (wavefrontNodeNext[i] == -1 || wavefrontNodeNext[i] == 0) { // either node is at the init or marked as bad wavefrontNodeNext[i] = -1; // clear continue; } int pathIdxStore = numPaths + numValidNewPaths; pathTime[pathIdxStore] = pathTime[pathIdx]; pathCost[pathIdxStore] = pathCost[pathIdx]; float dCost = (pathCost[pathIdxStore] + h[nodeIdx] - costThreshold); dCost = std::max((float) 0, dCost); // if below zero, put into the next bucket int bucketIdx = (((int) (dCost / dr)) + Gidx + 1) % numBuckets; G[bucketIdx*maxPathCount + sizeG[bucketIdx]] = pathIdxStore; sizeG[bucketIdx]++; pathPrev[pathIdxStore] = wavefrontPathPrev[i]; pathNode[pathIdxStore] = wavefrontNodeNext[i]; P[nodeIdx*NUM + Pcounts[nodeIdx]] = pathIdxStore; Pcounts[nodeIdx]++; wavefrontNodeNext[i] = -1; // clear, TODO: uncessary, but nice for debugging purposes to have a fresh array numValidNewPaths++; } numPaths += numValidNewPaths; // update goal condition if (Pcounts[goalIdx] > 0) goalCondition = true; if (goalCondition) { break; } // update empty open set condition emptyOpenSet = true; for (int b = 0; b < numBuckets; ++b) emptyOpenSet = emptyOpenSet && (sizeG[b] == 0); if (emptyOpenSet) { std::cout << "emptyOpenSet met" << std::endl; break; } // update G index Gidx = (Gidx+1) % numBuckets; costThreshold += dr; // end and send out warning if maxPathCount is exceeded if (numPaths >= maxPathCount) { std::cout << "maxPathCount reached, increase max number of paths" << std::endl; return -1; } } // output all paths // find best path with cp < cpTarget, then bisection search // how tight is our solution? 
int bestPathIdx = -1; float bestPathCost = std::numeric_limits<float>::max(); for (int i = 0; i < Pcounts[goalIdx]; ++i) { int pathIdx = P[goalIdx*NUM + i]; if (goalCondition && bestPathCost > pathCost[pathIdx]) { bestPathCost = pathCost[pathIdx]; bestPathIdx = pathIdx; } // output path std::cout << "nodes = [" << pathNode[pathIdx]; while (pathPrev[pathIdx] != -2) { pathIdx = pathPrev[pathIdx]; std::cout << ", " << pathNode[pathIdx]; } std::cout << "]"; } // validate chosen path, or iterate to next path std::cout << " with cost = " << bestPathCost << std::endl; *costGoal = bestPathCost; if (bestPathCost > 10000) { std::cout << "FAILED TO FIND A PATH" << std::endl; return 0; // return to deflate the obstacles } // load samples into array std::vector<float> xs; // for (int d = 0; d < DIM; ++d) // xs[d] = samples[goalIdx*DIM+d]; xs.clear(); int pathIdx = bestPathIdx; int nodeIdx = pathNode[pathIdx]; int pathNumSamples = 0; while (pathIdx != -2) { ++pathNumSamples; for (int d = DIM-1; d >= 0; --d) xs.insert(xs.begin(), samples[nodeIdx*DIM+d]); pathIdx = pathPrev[pathIdx]; nodeIdx = pathNode[pathIdx]; } // printArray(&(xs[0]),pathNumSamples,DIM,std::cout); // std::cout << "path has " << int(xs.size()) << " elements" << std::endl; // solve 2pbvp float bestPathTopt = findOptimalPath(dt, splitPath, &(xs[0]), pathNumSamples, pathLength); // std::cout << "2pbvp soln is " << std::endl; float *d_path; CUDA_ERROR_CHECK(cudaMalloc(&d_path, sizeof(float)*Tmax*DIM)); float *d_obstacles; CUDA_ERROR_CHECK(cudaMalloc(&d_obstacles, sizeof(float)*2*obstaclesCount*DIM)); CUDA_ERROR_CHECK(cudaMemcpy(d_obstacles, obstacles, sizeof(float)*2*obstaclesCount*DIM, cudaMemcpyHostToDevice)); // ************************************** smoothing ************************************** // load up // std::cout << "smoothing" << std::endl; float smoothPath[DIM*Tmax]; // empty array for the smoothed path int smoothPathLength = 0; std::vector<float> xsSmooth(xs.size()); float lastSmoothBelowPath[DIM*Tmax]; // empty array for the smoothed path that was last seen below the CP constraint copyArray(lastSmoothBelowPath, splitPath, DIM*Tmax); int lastSmoothBelowPathLength = *pathLength; std::vector<float> xsLastSmoothBelow(xs.size()); std::copy(xs.begin(), xs.end(), xsLastSmoothBelow.begin()); float lastSmoothBelowCost = 1000000; int dtMult = 10; float optPath[DIM*Tmax*dtMult]; // empty array for the optimal path from the init to goal (i.e. 
no obstacles) int optPathLength = 0; std::vector<float> xsOpt; // determine the optimal path from start to goal for (int d = 0; d < DIM; ++d) xsOpt.push_back(xs[d]); for (int d = 0; d < DIM; ++d) xsOpt.push_back(xs[(pathNumSamples-1)*DIM+d]); float optPathTopt = findOptimalPath(dt/dtMult, optPath, &(xsOpt[0]), 2, &optPathLength); // std::cout << " nominal path is: " << std::endl; // printArray(splitPath,*pathLength,DIM,std::cout); // find the path points that map to the nominal path std::vector<float> splitPathTopts(pathNumSamples,0); std::vector<int> splitPathIdxs(pathNumSamples,0); for (int i = 0; i < pathNumSamples-1; ++i) splitPathTopts[i+1] = splitPathTopts[i]+toptBisection(&xs[i*DIM], &xs[(i+1)*DIM], 2); for (int i = 0; i < pathNumSamples; ++i) splitPathIdxs[i] = (optPathLength-1)*splitPathTopts[i]/splitPathTopts[splitPathTopts.size()-1]; // std::cout << "found optimal times as: "; // for ( int i = 0; i < splitPathTopts.size(); i++) { // std::cout << splitPathTopts[i] << " "; // } // std::cout << std::endl; // std::cout << "found indexes as: "; // for ( int i = 0; i < splitPathIdxs.size(); i++) { // std::cout << splitPathIdxs[i] << " "; // } // std::cout << " of " << optPathLength; xsOpt.clear(); // std::cout << " means we match: " << std::endl; for ( int i = 0; i < pathNumSamples; i++) { // printArray(&xs[i*DIM],1,DIM,std::cout); // std::cout << " with "; printArray(&optPath[splitPathIdxs[i]*DIM],1,DIM,std::cout); for (int d = 0; d < DIM; ++d) xsOpt.push_back(optPath[splitPathIdxs[i]*DIM+d]); } // std::cout << "verify creation of xsOpt: " << std::endl; // for ( int i = 0; i < xsOpt.size(); i++) { // std::cout << xsOpt[i] << " "; // if ((i + 1) % 6 == 0) // std::cout << std::endl; // } // generate new xsSmooth and enter loop int maxSmoothItrs = 15; int smoothItrs = 0; float alpha = 1.0; float alphaU = 1.0; float alphaL = 0.0; // TODO if exit with maxSmoothItrs, need to default to the last path under the CP constraint // save the max close wise float smoothPathCost = 0; // float solnPathCP = 0; while (smoothItrs < maxSmoothItrs) { for (int i = 0; i < xs.size(); ++i) xsSmooth[i] = (1-alpha)*xs[i] + alpha*xsOpt[i]; smoothPathLength = 0; smoothPathCost = 0; findOptimalPath(dt, smoothPath, &(xsSmooth[0]), pathNumSamples, &smoothPathLength); // ignoring return of topt for (int i = 0; i < pathNumSamples-1; ++i) { float tau = toptBisection(&xsSmooth[i*DIM], &xsSmooth[(i+1)*DIM], 2); smoothPathCost += cost(tau, &xsSmooth[i*DIM], &xsSmooth[(i+1)*DIM]); } CUDA_ERROR_CHECK(cudaMemcpy(d_path, smoothPath, sizeof(float)*Tmax*DIM, cudaMemcpyHostToDevice)); cudaDeviceSynchronize(); bool collisionFree; bool *d_collisionFree; CUDA_ERROR_CHECK(cudaMalloc(&d_collisionFree, sizeof(bool))); isFreePath<<<1,1>>>(d_obstaclesInflated, obstaclesCount, d_path, smoothPathLength, d_collisionFree); cudaDeviceSynchronize(); CUDA_ERROR_CHECK(cudaMemcpy(&collisionFree, d_collisionFree, sizeof(bool), cudaMemcpyDeviceToHost)); // std::cout << "-------------- iteration " << smoothItrs << ", alpha = " << alpha << // ", col free = " << collisionFree << ", cost = " << smoothPathCost << std::endl; if (!collisionFree) alphaU = alpha; if (collisionFree) alphaL = alpha; alpha = (alphaL + alphaU)/2; // go for path closest to the cp limit if (!collisionFree) { // std::cout << " NOT FREE smoothed path is: " << std::endl; // printArray(smoothPath,smoothPathLength,DIM,std::cout); } if (collisionFree) { std::cout << "NEW BEST PATH!" 
<< std::endl; // std::cout << " FREE smoothed path is: " << std::endl; // printArray(smoothPath,smoothPathLength,DIM,std::cout); copyArray(lastSmoothBelowPath, smoothPath, DIM*Tmax); lastSmoothBelowPathLength = smoothPathLength; std::vector<float> xsLastSmoothBelow(xs.size()); std::copy(xsSmooth.begin(), xsSmooth.end(), xsLastSmoothBelow.begin()); lastSmoothBelowCost = smoothPathCost; } ++smoothItrs; } // std::cout << " smoothed path is: " << std::endl; // printArray(lastSmoothBelowPath,lastSmoothBelowPathLength,DIM,std::cout); copyArray(splitPath, lastSmoothBelowPath, DIM*Tmax); std::cout << "cost = " << lastSmoothBelowCost << std::endl; *costGoal = lastSmoothBelowCost; CUDA_ERROR_CHECK(cudaMemcpy(d_path, splitPath, sizeof(float)*Tmax*DIM, cudaMemcpyHostToDevice)); double t_CPStart = std::clock(); float cp = collisionProbability(d_obstacles, obstaclesCount, d_offsets, offsetMult, d_path, lastSmoothBelowPathLength); double t_CP = (std::clock() - t_CPStart) / (double) CLOCKS_PER_SEC; // std::cout << "CP took: " << t_CP << std::endl; // std::cout << "Collision Probability = " << cp << std::endl; cudaFree(d_path); cudaFree(d_obstacles); return cp; } /*********************** GPU kernels ***********************/ // probably just called with one thread, I hate how I am implementing this for the one time use __global__ void isFreePath(float *obstacles, int obstaclesCount, float *path, int pathLength, bool *collisionFree) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= 1) return; float v[DIM], w[DIM]; float bbMin[DIM], bbMax[DIM]; bool motionValid = true; for (int i = 0; i < pathLength-1; ++i) { if (!motionValid) break; for (int d = 0; d < DIM; ++d) { v[d] = path[i*DIM + d]; w[d] = path[(i+1)*DIM + d]; if (v[d] > w[d]) { bbMin[d] = w[d]; bbMax[d] = v[d]; } else { bbMin[d] = v[d]; bbMax[d] = w[d]; } } motionValid = motionValid && isMotionValid(v, w, bbMin, bbMax, obstaclesCount, obstacles, NULL); } collisionFree[0] = motionValid; }
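// -----------------------------------------------------------------------------
// Illustrative sketch (not part of z_GMT above). The smoothing loop is a
// bisection on the blend weight alpha between the planned waypoints xs and the
// obstacle-free optimal waypoints xsOpt: a collision-free blend raises the
// lower bound, a colliding one lowers the upper bound, and the last
// collision-free path is kept. The skeleton below uses a hypothetical
// is_collision_free callback in place of the isFreePath kernel round-trip.
#include <vector>

static float bisect_blend(const std::vector<float>& xs,
                          const std::vector<float>& xsOpt,
                          bool (*is_collision_free)(const std::vector<float>&),
                          int maxItrs) {
  float alpha = 1.0f, alphaL = 0.0f, alphaU = 1.0f;
  float bestAlpha = 0.0f;                        // alpha = 0 is the known-feasible path
  std::vector<float> blend(xs.size());
  for (int it = 0; it < maxItrs; ++it) {
    for (size_t i = 0; i < xs.size(); ++i)
      blend[i] = (1.0f - alpha) * xs[i] + alpha * xsOpt[i];
    if (is_collision_free(blend)) { alphaL = alpha; bestAlpha = alpha; }
    else                          { alphaU = alpha; }
    alpha = 0.5f * (alphaL + alphaU);            // bisect toward the feasibility boundary
  }
  return bestAlpha;                              // largest blend found collision-free
}
// -----------------------------------------------------------------------------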
e21628f7037fb538e879da4a1448e5b6e38f19fa.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <chrono> #include <hip/hip_runtime.h> #include "bitmap_image.hpp" #define check(stmt) \ do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ printf("[ERROR] Failed to run stmt %d, error body: %s\n", __LINE__, hipGetErrorString(err)); \ return -1; } \ } while (0) \ #define BLOCK_SIZE_X 16 #define BLOCK_SIZE_Y 16 #define BLOCK_SIZE (BLOCK_SIZE_X * BLOCK_SIZE_Y) #define THRESHOLD 20 #define FOUND_MIN 5000 #define min(a, b) ((a) < (b) ? (a) : (b)) __global__ void compute_sad_array( int*__restrict__ sad_array, const unsigned char*__restrict__ image, const unsigned char*__restrict__ kernel, const int sad_array_size, const int image_width, const int image_height, const int kernel_width, const int kernel_height, const int kernel_size) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; int sad_result = 0; if (row < image_height && col < image_width) { const int overlap_width = min(image_width - col, kernel_width); const int overlap_height = min(image_height - row, kernel_height); #pragma unroll 4 for (int kr = 0; kr < overlap_height; kr++) { #pragma unroll 4 for (int kc = 0; kc < overlap_width; kc++) { const int image_addr = ((row + kr) * image_width + (col + kc)) * 3; const int kernel_addr = (kr * kernel_width + kc) * 3; const int m_r = (int)(image[image_addr + 0]); const int m_g = (int)(image[image_addr + 1]); const int m_b = (int)(image[image_addr + 2]); const int t_r = (int)(kernel[kernel_addr + 0]); const int t_g = (int)(kernel[kernel_addr + 1]); const int t_b = (int)(kernel[kernel_addr + 2]); const int error = abs(m_r - t_r) + abs(m_g - t_g) + abs(m_b - t_b); sad_result += error; } } int norm_sad = (int)(sad_result / (float)kernel_size); int my_index_in_sad_array = row * image_width + col; if (my_index_in_sad_array < sad_array_size) { sad_array[my_index_in_sad_array] = norm_sad; } } } __global__ void find_min_in_sad_array( const int sad_array_size, const int* __restrict__ sad_array, int* __restrict__ min_sad) { unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x; unsigned int stride = gridDim.x * blockDim.x; unsigned int offset = 0; __shared__ int cache[BLOCK_SIZE]; int temp = FOUND_MIN; while (gid + offset < sad_array_size) { temp = min(temp, sad_array[gid + offset]); offset += stride; } cache[threadIdx.x] = temp; __syncthreads(); unsigned int i = blockDim.x / 2; while (i != 0) { if (threadIdx.x < i) cache[threadIdx.x] = min(cache[threadIdx.x], cache[threadIdx.x + i]); __syncthreads(); i /= 2; } // Update global min for each block if (threadIdx.x == 0) atomicMin(min_sad, cache[0]); } __global__ void get_num_of_occurrences( const int sad_array_size, const int*__restrict__ sad_array, const int*__restrict__ min_sad, int*__restrict__ num_occurrences) { unsigned int gid = threadIdx.x + blockIdx.x * blockDim.x; __shared__ int s; if (gid < sad_array_size) { if (threadIdx.x == 0) s = 0; __syncthreads(); if (sad_array[gid] == *min_sad) atomicAdd(&s, 1); __syncthreads(); // Update global occurance for each block if (threadIdx.x == 0) atomicAdd(num_occurrences, s); } } int main(int argc, char* argv[]) { if (argc != 4) { std::cerr << "Usage: ./main <image> <template image> <repeat>\n"; return 1; } bitmap_image main_image(argv[1]); bitmap_image template_image(argv[2]); const int repeat = atoi(argv[3]); const int main_width = main_image.width(); const int main_height = main_image.height(); const int main_size = main_width * main_height; const int 
template_width = template_image.width(); const int template_height = template_image.height(); const int template_size = template_width * template_height; const int height_difference = main_height - template_height; const int width_difference = main_width - template_width; const int sad_array_size = (height_difference + 1) * (width_difference + 1); // Host allocation unsigned char* h_main_image = new unsigned char[3 * main_size]; for (int row = 0; row < main_height; row++) { for (int col = 0; col < main_width; col++) { rgb_t colors; main_image.get_pixel(col, row, colors); h_main_image[(row * main_width + col) * 3 + 0] = colors.red; h_main_image[(row * main_width + col) * 3 + 1] = colors.green; h_main_image[(row * main_width + col) * 3 + 2] = colors.blue; } } unsigned char* h_template_image = new unsigned char[3 * template_size]; for (int row = 0; row < template_height; row++) { for (int col = 0; col < template_width; col++) { rgb_t colors; template_image.get_pixel(col, row, colors); h_template_image[(row * template_width + col) * 3 + 0] = colors.red; h_template_image[(row * template_width + col) * 3 + 1] = colors.green; h_template_image[(row * template_width + col) * 3 + 2] = colors.blue; } } int* h_sad_array = new int[sad_array_size]; int h_num_occurances; int h_min_mse; // Device allocation unsigned char* d_main_image; unsigned char* d_template_image; int* d_sad_array; int* d_min_mse; int* d_num_occurances; check(hipMalloc((void **)&d_main_image, 3 * main_size * sizeof(unsigned char))); check(hipMalloc((void **)&d_template_image, 3 * template_size * sizeof(unsigned char))); check(hipMalloc((void **)&d_sad_array, sad_array_size * sizeof(int))); check(hipMalloc((void **)&d_min_mse, sizeof(int))); check(hipMalloc((void **)&d_num_occurances, sizeof(int))); dim3 grids((unsigned int)ceil((float)(main_width) / BLOCK_SIZE_X), (unsigned int)ceil((float)(main_height) / BLOCK_SIZE_Y), 1); dim3 blocks(BLOCK_SIZE_X, BLOCK_SIZE_Y, 1); dim3 grids_2((unsigned int)ceil((float)sad_array_size) / BLOCK_SIZE, 1, 1); dim3 blocks_2(BLOCK_SIZE, 1, 1); check(hipMemcpy(d_main_image, h_main_image, 3 * main_size * sizeof(unsigned char), hipMemcpyHostToDevice)); check(hipMemcpy(d_template_image, h_template_image, 3 * template_size * sizeof(unsigned char), hipMemcpyHostToDevice)); // Measure device execution time double kernel_time = 0.0; auto begin = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { h_min_mse = THRESHOLD; check(hipMemset(d_num_occurances, 0, sizeof(int))); check(hipMemcpy(d_min_mse, &h_min_mse, sizeof(int), hipMemcpyHostToDevice)); hipDeviceSynchronize(); auto kbegin = std::chrono::steady_clock::now(); hipLaunchKernelGGL(( compute_sad_array) , dim3(grids), dim3(blocks) , 0, 0, d_sad_array, d_main_image, d_template_image, sad_array_size, main_width, main_height, template_width, template_height, template_size); hipLaunchKernelGGL(( find_min_in_sad_array) , dim3(grids_2), dim3(blocks_2) , 0, 0, sad_array_size, d_sad_array, d_min_mse); hipLaunchKernelGGL(( get_num_of_occurrences) , dim3(grids_2), dim3(blocks_2) , 0, 0, sad_array_size, d_sad_array, d_min_mse, d_num_occurances); hipDeviceSynchronize(); auto kend = std::chrono::steady_clock::now(); kernel_time += std::chrono::duration_cast<std::chrono::milliseconds> (kend - kbegin).count(); check(hipMemcpy(&h_min_mse, d_min_mse, sizeof(int), hipMemcpyDeviceToHost)); check(hipMemcpy(&h_num_occurances, d_num_occurances, sizeof(int), hipMemcpyDeviceToHost)); } auto end = std::chrono::steady_clock::now(); float elapsed_time = 
std::chrono::duration_cast<std::chrono::milliseconds> (end - begin).count(); std::cout << "Parallel Computation Results: " << std::endl; std::cout << "Kernel time in msec: " << kernel_time << std::endl; std::cout << "Elapsed time in msec: " << elapsed_time << std::endl; std::cout << "Main Image Dimensions: " << main_width << "*" << main_height << std::endl; std::cout << "Template Image Dimensions: " << template_width << "*" << template_height << std::endl; std::cout << "Found Minimum: " << h_min_mse << std::endl; std::cout << "Number of Occurrences: " << h_num_occurances << std::endl; check(hipFree(d_main_image)); check(hipFree(d_template_image)); check(hipFree(d_sad_array)); check(hipFree(d_min_mse)); check(hipFree(d_num_occurances)); delete[] h_main_image; delete[] h_template_image; delete[] h_sad_array; return 0; }
e21628f7037fb538e879da4a1448e5b6e38f19fa.cu
#include <iostream> #include <chrono> #include <cuda.h> #include "bitmap_image.hpp" #define check(stmt) \ do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ printf("[ERROR] Failed to run stmt %d, error body: %s\n", __LINE__, cudaGetErrorString(err)); \ return -1; } \ } while (0) \ #define BLOCK_SIZE_X 16 #define BLOCK_SIZE_Y 16 #define BLOCK_SIZE (BLOCK_SIZE_X * BLOCK_SIZE_Y) #define THRESHOLD 20 #define FOUND_MIN 5000 #define min(a, b) ((a) < (b) ? (a) : (b)) __global__ void compute_sad_array( int*__restrict__ sad_array, const unsigned char*__restrict__ image, const unsigned char*__restrict__ kernel, const int sad_array_size, const int image_width, const int image_height, const int kernel_width, const int kernel_height, const int kernel_size) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; int sad_result = 0; if (row < image_height && col < image_width) { const int overlap_width = min(image_width - col, kernel_width); const int overlap_height = min(image_height - row, kernel_height); #pragma unroll 4 for (int kr = 0; kr < overlap_height; kr++) { #pragma unroll 4 for (int kc = 0; kc < overlap_width; kc++) { const int image_addr = ((row + kr) * image_width + (col + kc)) * 3; const int kernel_addr = (kr * kernel_width + kc) * 3; const int m_r = (int)(image[image_addr + 0]); const int m_g = (int)(image[image_addr + 1]); const int m_b = (int)(image[image_addr + 2]); const int t_r = (int)(kernel[kernel_addr + 0]); const int t_g = (int)(kernel[kernel_addr + 1]); const int t_b = (int)(kernel[kernel_addr + 2]); const int error = abs(m_r - t_r) + abs(m_g - t_g) + abs(m_b - t_b); sad_result += error; } } int norm_sad = (int)(sad_result / (float)kernel_size); int my_index_in_sad_array = row * image_width + col; if (my_index_in_sad_array < sad_array_size) { sad_array[my_index_in_sad_array] = norm_sad; } } } __global__ void find_min_in_sad_array( const int sad_array_size, const int* __restrict__ sad_array, int* __restrict__ min_sad) { unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x; unsigned int stride = gridDim.x * blockDim.x; unsigned int offset = 0; __shared__ int cache[BLOCK_SIZE]; int temp = FOUND_MIN; while (gid + offset < sad_array_size) { temp = min(temp, sad_array[gid + offset]); offset += stride; } cache[threadIdx.x] = temp; __syncthreads(); unsigned int i = blockDim.x / 2; while (i != 0) { if (threadIdx.x < i) cache[threadIdx.x] = min(cache[threadIdx.x], cache[threadIdx.x + i]); __syncthreads(); i /= 2; } // Update global min for each block if (threadIdx.x == 0) atomicMin(min_sad, cache[0]); } __global__ void get_num_of_occurrences( const int sad_array_size, const int*__restrict__ sad_array, const int*__restrict__ min_sad, int*__restrict__ num_occurrences) { unsigned int gid = threadIdx.x + blockIdx.x * blockDim.x; __shared__ int s; if (gid < sad_array_size) { if (threadIdx.x == 0) s = 0; __syncthreads(); if (sad_array[gid] == *min_sad) atomicAdd(&s, 1); __syncthreads(); // Update global occurance for each block if (threadIdx.x == 0) atomicAdd(num_occurrences, s); } } int main(int argc, char* argv[]) { if (argc != 4) { std::cerr << "Usage: ./main <image> <template image> <repeat>\n"; return 1; } bitmap_image main_image(argv[1]); bitmap_image template_image(argv[2]); const int repeat = atoi(argv[3]); const int main_width = main_image.width(); const int main_height = main_image.height(); const int main_size = main_width * main_height; const int template_width = template_image.width(); const int template_height = 
template_image.height(); const int template_size = template_width * template_height; const int height_difference = main_height - template_height; const int width_difference = main_width - template_width; const int sad_array_size = (height_difference + 1) * (width_difference + 1); // Host allocation unsigned char* h_main_image = new unsigned char[3 * main_size]; for (int row = 0; row < main_height; row++) { for (int col = 0; col < main_width; col++) { rgb_t colors; main_image.get_pixel(col, row, colors); h_main_image[(row * main_width + col) * 3 + 0] = colors.red; h_main_image[(row * main_width + col) * 3 + 1] = colors.green; h_main_image[(row * main_width + col) * 3 + 2] = colors.blue; } } unsigned char* h_template_image = new unsigned char[3 * template_size]; for (int row = 0; row < template_height; row++) { for (int col = 0; col < template_width; col++) { rgb_t colors; template_image.get_pixel(col, row, colors); h_template_image[(row * template_width + col) * 3 + 0] = colors.red; h_template_image[(row * template_width + col) * 3 + 1] = colors.green; h_template_image[(row * template_width + col) * 3 + 2] = colors.blue; } } int* h_sad_array = new int[sad_array_size]; int h_num_occurances; int h_min_mse; // Device allocation unsigned char* d_main_image; unsigned char* d_template_image; int* d_sad_array; int* d_min_mse; int* d_num_occurances; check(cudaMalloc((void **)&d_main_image, 3 * main_size * sizeof(unsigned char))); check(cudaMalloc((void **)&d_template_image, 3 * template_size * sizeof(unsigned char))); check(cudaMalloc((void **)&d_sad_array, sad_array_size * sizeof(int))); check(cudaMalloc((void **)&d_min_mse, sizeof(int))); check(cudaMalloc((void **)&d_num_occurances, sizeof(int))); dim3 grids((unsigned int)ceil((float)(main_width) / BLOCK_SIZE_X), (unsigned int)ceil((float)(main_height) / BLOCK_SIZE_Y), 1); dim3 blocks(BLOCK_SIZE_X, BLOCK_SIZE_Y, 1); dim3 grids_2((unsigned int)ceil((float)sad_array_size) / BLOCK_SIZE, 1, 1); dim3 blocks_2(BLOCK_SIZE, 1, 1); check(cudaMemcpy(d_main_image, h_main_image, 3 * main_size * sizeof(unsigned char), cudaMemcpyHostToDevice)); check(cudaMemcpy(d_template_image, h_template_image, 3 * template_size * sizeof(unsigned char), cudaMemcpyHostToDevice)); // Measure device execution time double kernel_time = 0.0; auto begin = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { h_min_mse = THRESHOLD; check(cudaMemset(d_num_occurances, 0, sizeof(int))); check(cudaMemcpy(d_min_mse, &h_min_mse, sizeof(int), cudaMemcpyHostToDevice)); cudaDeviceSynchronize(); auto kbegin = std::chrono::steady_clock::now(); compute_sad_array <<< grids, blocks >>> ( d_sad_array, d_main_image, d_template_image, sad_array_size, main_width, main_height, template_width, template_height, template_size); find_min_in_sad_array <<< grids_2, blocks_2 >>> ( sad_array_size, d_sad_array, d_min_mse); get_num_of_occurrences <<< grids_2, blocks_2 >>> ( sad_array_size, d_sad_array, d_min_mse, d_num_occurances); cudaDeviceSynchronize(); auto kend = std::chrono::steady_clock::now(); kernel_time += std::chrono::duration_cast<std::chrono::milliseconds> (kend - kbegin).count(); check(cudaMemcpy(&h_min_mse, d_min_mse, sizeof(int), cudaMemcpyDeviceToHost)); check(cudaMemcpy(&h_num_occurances, d_num_occurances, sizeof(int), cudaMemcpyDeviceToHost)); } auto end = std::chrono::steady_clock::now(); float elapsed_time = std::chrono::duration_cast<std::chrono::milliseconds> (end - begin).count(); std::cout << "Parallel Computation Results: " << std::endl; std::cout << "Kernel time in 
msec: " << kernel_time << std::endl; std::cout << "Elapsed time in msec: " << elapsed_time << std::endl; std::cout << "Main Image Dimensions: " << main_width << "*" << main_height << std::endl; std::cout << "Template Image Dimensions: " << template_width << "*" << template_height << std::endl; std::cout << "Found Minimum: " << h_min_mse << std::endl; std::cout << "Number of Occurances: " << h_num_occurances << std::endl; check(cudaFree(d_main_image)); check(cudaFree(d_template_image)); check(cudaFree(d_sad_array)); check(cudaFree(d_min_mse)); check(cudaFree(d_num_occurances)); delete[] h_main_image; delete[] h_template_image; delete[] h_sad_array; return 0; }
0626e7128eb64560a96926f315d31a035e1febd9.hip
// !!! This is a file automatically generated by hipify!!! /************************************************************************************************ * Implementing Singular Value Decomposition on GPU using CUDA using algorithm * * given in IPDPS '09 paper "Singular Value Decomposition on GPU using CUDA" * * * * Copyright (c) 2009 International Institute of Information Technology, Hyderabad. * * All rights reserved. * * * * Permission to use, copy, modify and distribute this software and its documentation for * * educational purpose is hereby granted without fee, provided that the above copyright * * notice and this permission notice appear in all copies of this software and that you do * * not sell the software. * * * * THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND, EXPRESS, IMPLIED OR * * OTHERWISE. * * * * Created by Sheetal Lahabar. * * Small update for test on CUDA 4.0 (GTX 580) * ************************************************************************************************/ #ifndef _EXAMPLE_CU_ #define _EXAMPLE_CU_ #include "example.h" //Include the below file in your main program #include "cusvd.cu" float *initialize(int ind) { int i = 0, j = 0, l = 0; float *temp = (float*)malloc(sizeof(float) * ind * ind); for(i=0 ; i < ind ; i++) { for(j=0 ; j < ind ; j++) { if(i==j) temp[l++] = 1; else temp[l++] = 0; } } return temp; } int main(int argc, char** argv) { bool result; double *Sigma; //M>=N and M and N are a multiple of 32 int M = 512, N = 512; float *A, *U, *VT, *d_A, *d_U, *d_VT; //Step 1 - Read A in column major order A = (float*)malloc(sizeof(float) * M * N); FILE *fp = fopen("data", "r"); for(i=0 ; i < M * N ; i++) { fscanf(fp,"%f", &A[i]); } fclose(fp); //Step 2 Sigma = (double*)malloc(sizeof(double)*N); //Step 3 CUT_DEVICE_INIT(argc, argv); status = hipblasInit(); if(status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "Error in initialization"); return EXIT_FAILURE; } //Step 4 hipblasAlloc(M*N*sizeof(float), sizeof(float), (void**)&d_A); hipblasAlloc(M*M*sizeof(float), sizeof(float), (void**)&d_U); hipblasAlloc(N*N*sizeof(float), sizeof(float), (void**)&d_VT); //Step 5 U = initialize(M); VT = initialize(N); hipblasSetMatrix(M, N, sizeof(float), A, M, d_A, M); hipblasSetMatrix(M, N, sizeof(float), U, M, d_U, M); hipblasSetMatrix(M, N, sizeof(float), VT, M, d_VT, M); //Step 6 timer = 0; CUT_SAFE_CALL(cutCreateTimer(&timer)); CUT_SAFE_CALL(cutStartTimer(timer)); result = cusvd(M, N, d_A, d_U, d_VT, Sigma); CUT_SAFE_CALL(cutStopTimer(timer)); printf("SVD processing time: %f (ms)\n", cutGetTimerValue(timer)); CUT_SAFE_CALL(cutDeleteTimer(timer)); /* printf("Copy and print VT matrix\n"); CUDA_SAFE_CALL(hipMemcpy(VT, d_VT, sizeof(float)*N*N, hipMemcpyDeviceToHost)); for(int i=0; i < N; i++) for(int j=0; j < N; j++) printf("%f\n", check2[i*N+j]); */ //Step 7 free(A); CUDA_SAFE_CALL(hipFree(d_A)); CUDA_SAFE_CALL(hipFree(d_U)); CUDA_SAFE_CALL(hipFree(d_VT)); CUT_EXIT(argc, argv); return 0; } #endif
0626e7128eb64560a96926f315d31a035e1febd9.cu
/************************************************************************************************ * Implementing Singular Value Decomposition on GPU using CUDA using algorithm * * given in IPDPS '09 paper "Singular Value Decomposition on GPU using CUDA" * * * * Copyright (c) 2009 International Institute of Information Technology, Hyderabad. * * All rights reserved. * * * * Permission to use, copy, modify and distribute this software and its documentation for * * educational purpose is hereby granted without fee, provided that the above copyright * * notice and this permission notice appear in all copies of this software and that you do * * not sell the software. * * * * THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND, EXPRESS, IMPLIED OR * * OTHERWISE. * * * * Created by Sheetal Lahabar. * * Small update for test on CUDA 4.0 (GTX 580) * ************************************************************************************************/ #ifndef _EXAMPLE_CU_ #define _EXAMPLE_CU_ #include "example.h" //Include the below file in your main program #include "cusvd.cu" float *initialize(int ind) { int i = 0, j = 0, l = 0; float *temp = (float*)malloc(sizeof(float) * ind * ind); for(i=0 ; i < ind ; i++) { for(j=0 ; j < ind ; j++) { if(i==j) temp[l++] = 1; else temp[l++] = 0; } } return temp; } int main(int argc, char** argv) { bool result; double *Sigma; //M>=N and M and N are a multiple of 32 int M = 512, N = 512; float *A, *U, *VT, *d_A, *d_U, *d_VT; //Step 1 - Read A in column major order A = (float*)malloc(sizeof(float) * M * N); FILE *fp = fopen("data", "r"); for(i=0 ; i < M * N ; i++) { fscanf(fp,"%f", &A[i]); } fclose(fp); //Step 2 Sigma = (double*)malloc(sizeof(double)*N); //Step 3 CUT_DEVICE_INIT(argc, argv); status = cublasInit(); if(status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "Error in initialization"); return EXIT_FAILURE; } //Step 4 cublasAlloc(M*N*sizeof(float), sizeof(float), (void**)&d_A); cublasAlloc(M*M*sizeof(float), sizeof(float), (void**)&d_U); cublasAlloc(N*N*sizeof(float), sizeof(float), (void**)&d_VT); //Step 5 U = initialize(M); VT = initialize(N); cublasSetMatrix(M, N, sizeof(float), A, M, d_A, M); cublasSetMatrix(M, N, sizeof(float), U, M, d_U, M); cublasSetMatrix(M, N, sizeof(float), VT, M, d_VT, M); //Step 6 timer = 0; CUT_SAFE_CALL(cutCreateTimer(&timer)); CUT_SAFE_CALL(cutStartTimer(timer)); result = cusvd(M, N, d_A, d_U, d_VT, Sigma); CUT_SAFE_CALL(cutStopTimer(timer)); printf("SVD processing time: %f (ms)\n", cutGetTimerValue(timer)); CUT_SAFE_CALL(cutDeleteTimer(timer)); /* printf("Copy and print VT matrix\n"); CUDA_SAFE_CALL(cudaMemcpy(VT, d_VT, sizeof(float)*N*N, cudaMemcpyDeviceToHost)); for(int i=0; i < N; i++) for(int j=0; j < N; j++) printf("%f\n", check2[i*N+j]); */ //Step 7 free(A); CUDA_SAFE_CALL(cudaFree(d_A)); CUDA_SAFE_CALL(cudaFree(d_U)); CUDA_SAFE_CALL(cudaFree(d_VT)); CUT_EXIT(argc, argv); return 0; } #endif
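The SVD example above times Step 6 with the deprecated CUTIL helpers (cutCreateTimer / cutStartTimer / cutGetTimerValue). On CUDA 4.0 and later the same measurement can be taken with CUDA events; the fragment below is a sketch of that substitution, not part of the original example, and reuses the variables (result, M, N, d_A, d_U, d_VT, Sigma) already set up in main.

// Hedged alternative to the CUTIL timer around Step 6.
cudaEvent_t start, stop;
float elapsed_ms = 0.0f;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
result = cusvd(M, N, d_A, d_U, d_VT, Sigma);   // same call as Step 6 above
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_ms, start, stop);
printf("SVD processing time: %f (ms)\n", elapsed_ms);
cudaEventDestroy(start);
cudaEventDestroy(stop);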
630a29f6c469e9b6974c7c14bd0ecb93ef703f2c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Indice1D.h" #include "cudaTools.h" #include "reductionADD.h" #include "Calibreur_GPU.h" #include <hiprand/hiprand_kernel.h> /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ __device__ float f(float x); /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void montecarloDevice(float* ptrResultGM, int n, hiprandState_t* tabDevGeneratorGM); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void montecarloDevice(float* ptrResultGM, int n, hiprandState_t* tabDevGeneratorGM) { //Shared Memory __shared__ extern float tabSM[]; //Montecarlo int n0 = 0; const int a = -1; const int b = 1; const int m = 2; const int TID = Indice1D::tid(); const int TID_LOCAL = Indice1D::tidLocal(); // Global Memory -> Register (optimization) hiprandState_t localGenerator = tabDevGeneratorGM [TID]; float xAlea; float yAlea; for (long i = 1; i <= n; i++) { xAlea = a + (b-a) * hiprand_uniform(&localGenerator); yAlea = m * hiprand_uniform(&localGenerator); if(yAlea < f(xAlea)) { n0++; } } tabDevGeneratorGM[TID] = localGenerator; tabSM[TID_LOCAL] = n0; __syncthreads(); reductionADD(tabSM, ptrResultGM); } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __device__ float f(float x) { return sqrt(1-(x*x)); } /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
630a29f6c469e9b6974c7c14bd0ecb93ef703f2c.cu
#include "Indice1D.h" #include "cudaTools.h" #include "reductionADD.h" #include "Calibreur_GPU.h" #include <curand_kernel.h> /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ __device__ float f(float x); /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void montecarloDevice(float* ptrResultGM, int n, curandState* tabDevGeneratorGM); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void montecarloDevice(float* ptrResultGM, int n, curandState* tabDevGeneratorGM) { //Shared Memory __shared__ extern float tabSM[]; //Montecarlo int n0 = 0; const int a = -1; const int b = 1; const int m = 2; const int TID = Indice1D::tid(); const int TID_LOCAL = Indice1D::tidLocal(); // Global Memory -> Register (optimization) curandState localGenerator = tabDevGeneratorGM [TID]; float xAlea; float yAlea; for (long i = 1; i <= n; i++) { xAlea = a + (b-a) * curand_uniform(&localGenerator); yAlea = m * curand_uniform(&localGenerator); if(yAlea < f(xAlea)) { n0++; } } tabDevGeneratorGM[TID] = localGenerator; tabSM[TID_LOCAL] = n0; __syncthreads(); reductionADD(tabSM, ptrResultGM); } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __device__ float f(float x) { return sqrt(1-(x*x)); } /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
7520ad1aca8f9255f1a074aad277f665b9386507.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" float h_A[]= { 0.7213110389323074, 0.7036072892760992, 0.7972208480899733, 0.5153771118746243, 0.9216180039321551, 0.9615308379788774, 0.6739112073326918, 0.7155256159701121, 0.79258098175816, 0.9020993892924711, 0.8466942281531666, 0.5537511206889679, 0.5438491782768006, 0.7116611841848426, 0.5345733881172183, 0.6305435920228089, 0.9277412059481013, 0.9601175756536102, 0.5446681834227135, 0.8851113168080971, 0.9213980885405118, 0.9915170026771223, 0.9587707645596766, 0.6219713016116879, 0.59276932445235, 0.8176418731352921, 0.6798959501796369, 0.7878206193846803, 0.9719590782979264, 0.9033969542114628, 0.5257703038302184, 0.5060736164359405, 0.7617378755679971, 0.6813167728709468, 0.9829966035233257, 0.9436321253947957, 0.7141845778293617, 0.8573416967736969, 0.5422510511098686, 0.8854271419940865, 0.7079571984354764, 0.5809701005539115, 0.7670813919644976, 0.5026467498089977, 0.9606221012390379, 0.881001105638563, 0.5452291376880389, 0.6132540058623579, 0.8398635939579402, 0.8950965387747699, 0.577723664133599, 0.65654378252201, 0.551610115160845, 0.7471229692397765, 0.6064047653161813, 0.5348684993169526, 0.5714834053077453, 0.5699335991556576, 0.9581613026614635, 0.5800884251550227, 0.5621315217726607, 0.7260545065405787, 0.8263375959182511, 0.7004871816808094, 0.6628612849215572, 0.7630559501530698, 0.6090032592882435, 0.5297518102839368, 0.9716773789062979, 0.9792908103361173, 0.813729030576051, 0.9358222319928466, 0.9552555022955985, 0.9659368142992655, 0.5503401170334505, 0.7983438229770574, 0.8612723782138829, 0.8954838168746426, 0.6707221843700835, 0.87132828504265, 0.6813242207001085, 0.8731189965641872, 0.8963920044873096, 0.6589936053193723, 0.5142121673426723, 0.831984826281815, 0.7639888826373817, 0.6420662927592042, 0.8954349479086924, 0.6547561830427948, 0.7015274218684611, 0.9130054177465503, 0.8337834495754337, 0.9077154271987949, 0.8910566519116231, 0.9933333958880843, 0.7208793338115973, 0.8732275221059247, 0.6499131589293037, 0.6225016536582735, 0.506024346065687, 0.6334473626646867, 0.8642540267703913, 0.6723920546622816, 0.6454028787559742, 0.7549523427254714, 0.805806790664657, 0.6303127937986285, 0.8937464695521591, 0.9274795925381079, 0.7702126603748829, 0.8055277344962463, 0.8157804696830382, 0.6219542206282047, 0.5431210508452308, 0.5110240954422681, 0.5513970040916557, 0.9426855261472222, 0.8748081215557051, 0.741094255177086, 0.8497124066557136, 0.6273164134428054, 0.7748003989421978, 0.6611962772062256, 0.7401352720701797, 0.7687019107934498, 0.7767744755636825, 0.5913898182077495, 0.5385837135035092, 0.6082693609582921, 0.8626094369370461, 0.9699138062719916, 0.6910816521277436, 0.8865828246797212, 0.7391787580562894, 0.8063511498264284, 0.7425674516704125, 0.6912159215757671, 0.6498378086468368, 0.5128719865057241, 0.7829125973602015, 0.7452102281974154, 0.9838934615177628, 0.7448248499577833, 0.7821284001877189, 0.8336625562667601, 0.7947946889587203, 0.6821792645687259, 0.6749657511955991, 0.82056385260541, 0.594456091081921, 0.5025934920429402, 0.5358054634484388, 0.7299644543146196, 0.800198263470014, 0.5200795780935461, 0.9127085069646359, 0.520189261488305, 0.5721914853245895, 0.8353448213929731, 0.6954853620284434, 0.5287393914234155, 0.7749133682929376, 0.9626332237047208, 0.504531933674538, 0.7416881835220519, 0.8029161141930703, 0.9512371692847112, 0.678590959823639, 0.5942002011283005, 0.7231946939255616, 0.8891894086624462, 0.8467433825580544, 
0.8250284374644645, 0.5006569190556542, 0.7168051210436979, 0.7906952071921669, 0.8270348886306684, 0.6098752346678269, 0.5222347961416989, 0.7929018318998295, 0.5101055813477642, 0.7169833755909871, 0.5702461928129504, 0.9023390198515662, 0.8818647192968105, 0.963439971720073, 0.5654149968549657, 0.9693649864156556, 0.8935600740999206, 0.7186220005851536, 0.7447207297667482, 0.9237355949222104, 0.9169525724065697, 0.5802019547819947, 0.9405852601832476, 0.8876001178289146, 0.7693074221942977, 0.5587374478203172, 0.94048087746272, 0.7081619194854067, 0.5577780946399669, 0.639746138541101, 0.5677279896423741, 0.7013171406572657, 0.8531962429999775, 0.9534461688042684, 0.9544454327368554, 0.7419798377038764, 0.7781598798873539, 0.9880995476366918, 0.9085277704260113, 0.8808235015558032, 0.645561561502733, 0.6756942754185749, 0.5067823378774516, 0.506378470755337, 0.810777692389024, 0.5123445978187132, 0.8708238156107807, 0.8503474958572756, 0.954442138732992, 0.9080537030698932, 0.9018808628797211, 0.9449707700689803, 0.8191690079487154, 0.6827385944940092, 0.8929245413314859, 0.5292200559271782, 0.8624408829473031, 0.6845293685637726, 0.6978517435484533, 0.7839079461033043, 0.8375322422631433, 0.9537265678949418, 0.8208246713448575, 0.9297464038826257, 0.8533886395580749, 0.6595065038741466, 0.5367401755113856, 0.5711884859357816, 0.9956268735292508, 0.5355328946765383, 0.5455838142054785, 0.9854809795755022, 0.7816238163921971, 0.5441177260680754, 0.7809034256329053, 0.8833044171043674, 0.7976267671978272, 0.6949480150237191, 0.97620470200642, 0.8229129809550366, 0.869410101611787, 0.7857192541226444, 0.9049082880373116, 0.8217825261671052, 0.5978171261180575, 0.7516740248586333, 0.835374844356755, 0.6647492038017275, 0.654372380384227, 0.533928025397404, 0.8754977563944351, 0.5272353988484589, 0.9324332907020879, 0.8092159617417161, 0.8081259134157913, 0.7270657386338355, 0.9038540598848672, 0.9711575053233915, 0.6771643891208445, 0.9579552939218283, 0.853761852888534, 0.8960824555273585, 0.5269678068509709, 0.6943165773545628, 0.5796276656005226, 0.706014162848424, 0.8372419249150096, 0.6996409865937712, 0.5716824987141625, 0.6438334075025813, 0.5260148903229024, 0.9331886427333145, 0.9323333298241967, 0.7260844671738941, 0.9270432682797352, 0.9337665703653302, 0.6315745755524134, 0.7385872282881337, 0.7804016374726153, 0.8425343267477277, 0.8992882216574574, 0.8154295497769282, 0.9454044330995546, 0.9504649658919259, 0.7552753505697389, 0.703333042647019, 0.851159545729975, 0.6219148319282171, 0.916502088917321, 0.8613541833378122, 0.9124095174944935, 0.9286629029328552, 0.612550255934398, 0.8679719411162063, 0.9693607203380965, 0.8453412431169058, 0.8725376854813824, 0.7912174040299396, 0.5888947879582849, 0.9848924865749478, 0.8174611882014551, 0.7400586120333806, 0.9892661252731871, 0.9813085520067423, 0.9573712978603379, 0.649588198345867, 0.6250235203353991, 0.5955909287888107, 0.620373053569861, 0.526029835569561, 0.7144405404754234, 0.780285293622486, 0.8666553691054684, 0.8964231072882586, 0.8591118144087425, 0.9572999862830796, 0.9572257787849424, 0.7738885343608862, 0.6271141855599663, 0.5539161885973134, 0.674124267886359, 0.8456009864511342, 0.789085976005303, 0.9179901181123216, 0.6757078684677362, 0.554766193187559, 0.8991436828308943, 0.72861832616026, 0.8437634536967329, 0.9579330569265415, 0.9850939686630349, 0.8228998187444237, 0.8388421319341619, 0.5297061437767218, 0.6094811259115935, 0.6935463743720558, 0.8634731923807555, 0.8062166169630336, 0.7894429442727542, 
0.7651759682262698, 0.9731711420190147, 0.9941330255107579, 0.7341363475012623, 0.9521718941174941, 0.9765870463957484, 0.7892140968169058, 0.7051042952693836, 0.7433897579012426, 0.7796078583896936, 0.9420146913245346, 0.5585284274921852, 0.6614237390233539, 0.8225768496189233, 0.9228783746366208, 0.5543525000804628, 0.712200484202808, 0.7458232809428115, 0.6370976984346218, 0.7614142413586191, 0.807507380810081, 0.5224827912351996, 0.6724736119732511, 0.593179868665466, 0.9306430166660243, 0.5324363741372664, 0.8889130123351461, 0.9107423542503432, 0.6550202302554903, 0.944894451539642, 0.9052118156355418, 0.5590466975750978, 0.805467627477866, 0.980986543801699, 0.5502338170531667, 0.7555823165396496, 0.5443648978107398, 0.7345272275399661, 0.5414860274201246, 0.5203901051451236, 0.5423934957350249, 0.6539933857773308, 0.9208207415813712, 0.5502128020162411, 0.6876157144549884, 0.5702853582447343, 0.5904228543623173, 0.8889623987647162, 0.8985261691881565, 0.5048154598888724, 0.6076562850468037, 0.6961908971137987, 0.6500172650564393, 0.8511948985756508, 0.66319992047087, 0.9484600768282544, 0.5089392129473143, 0.7668151252216436, 0.8848797511184859, 0.8442684141503054, 0.8293842491761947, 0.9228608712069555, 0.716848629788235, 0.9187140383200945, 0.7381151908607154, 0.9316436232594469, 0.9007970659292505, 0.8571124627981633, 0.7563461277617688, 0.513696518145864, 0.5748871483045002, 0.9520785587809371, 0.8896683038703164, 0.6998782192395402, 0.8834708853708064, 0.6918430479828297, 0.9714088838129342, 0.7358356385444389, 0.706251044986796, 0.6445022136693828, 0.6502160963722614, 0.67845490395182, 0.6989987170451886, 0.5019984250344179, 0.595813407554385, 0.7070955606691687, 0.8216025133858569, 0.6394441217980584, 0.9861550745536893, 0.5630267871638102, 0.7197774220892457, 0.5707820282987905, 0.5554174483683547, 0.5608175917649101, 0.7346401576638559, 0.6966702058717653, 0.6445646315267469, 0.8744094501599702, 0.8013922028376101, 0.8827004493426109, 0.6030222621908303, 0.8946255779552771, 0.5787758041400044, 0.7187007220827769, 0.6284769420023835, 0.7346188868724551, 0.695829407120654, 0.6386811653525657, 0.7150252903858563, 0.6293714022285715, 0.5249633990391243, 0.8647394391293555, 0.8230188310468833, 0.8976785086232437, 0.5039818379281453, 0.8211569054426457, 0.7194889824918933, 0.9289131829642242, 0.7541040312860572, 0.742051339762712, 0.5515601578515075, 0.8893119930560853, 0.7849628852472481, 0.8274409410965613, 0.9430988897880357, 0.5593617432017448, 0.9863674781085019, 0.8818683165019685, 0.5334322723076458, 0.5431232249183211, 0.956274780832098, 0.6750059580031558, 0.8033812934976199, 0.5633421647434877, 0.7588173120251935, 0.6148214442198539, 0.5306560401308527, 0.9159055555823894, 0.7829682549085655, 0.6077550001723855, 0.6768516473519923, 0.9083875292208695, 0.8238798732507031, 0.8842950293224175, 0.7509042583446859, 0.5835260581189603, 0.771432641391331, 0.8735359080421995, 0.6654501818823724, 0.8330481168914817, 0.6586063723028632, 0.5324851641008854, 0.6516233791749912, 0.8668811241368004, 0.6901352977817978, 0.8276546369593913, 0.883845084525178, 0.5816936278385099, 0.6363005501359384, 0.9231059836000148, 0.7356217224283114, 0.8089825425159602, 0.5589388507840811, 0.6306424940443877, 0.5350914779207365, 0.5810872349283129, 0.8127313666556297, 0.9468961803825973, 0.7860397530364289, 0.6428268710185303, 0.8479121114897317, 0.9319903051408973, 0.7831470045544986, 0.7486175937861625, 0.9891526397281045, 0.8517228564544181, 0.8630442177017649, 0.7009876895581848, 
0.919993632123095, 0.9742906773769642, 0.5001017712541294, 0.7288059511752198, 0.5356269422575284, 0.5009525538060497, 0.506362747718705, 0.6855898077307808, 0.8186058722239021, 0.6437350504120393, 0.718239019281738, 0.5968857591904404, 0.609524616029788, 0.7346307101193603, 0.5303081011328907, 0.8945951420123923, 0.714368980223252, 0.9270788314109553, 0.7772798233338185, 0.5043792436685017, 0.5489902958837589, 0.8161466086153648, 0.9002587902927479, 0.8594978043993857, 0.7720918657186675, 0.708471889407926, 0.8553134366489652, 0.8307261279699127, 0.9196334979451786, 0.5460130796628213, 0.5322359638794936, 0.7015642375586723, 0.9454702956809771, 0.5614611510499937, 0.9886486033452997, 0.8505700200768735, 0.7705538255754345, 0.5242606557494717, 0.5487130885383176, 0.9797450718881408, 0.7620902625223458, 0.5234501381612409, 0.6015105519508768, 0.6908773949001565, 0.8052201557728094, 0.5619060781574086, 0.8365028605718405, 0.7381675988564596, 0.5518180430531955, 0.7932454856705924, 0.8872510212340574, 0.7280059763374385, 0.978804367491308, 0.9211564036702524, 0.8495645790636963, 0.9434113274257316, 0.9900948520859476, 0.5320469764868729, 0.824062921633453, 0.6392225464923971, 0.7767066000638363, 0.5870722591297224, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0}; int h_B[]= { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 97, 99, 101, 103, 106, 108, 111, 113, 117, 119, 121, 123, 125, 127, 129, 131, 133, 135, 137, 139, 142, 144, 146, 148, 150, 152, 158, 160, 163, 165, 168, 170, 173, 175, 177, 179, 181, 183, 185, 187, 189, 191, 199, 201, 204, 206, 209, 211, 198, 116, 203, 198, 116, 198, 116, 203, 193, 215, 116, 156, 156, 193, 196, 196, 198, 215, 587, 589, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 
156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 768, 770, 772, 774, 776, 778, 780, 782, 784, 786, 788, 790, 792, 794, 796, 798, 800, 802, 804, 806, 808, 810, 812, 814, 816, 818, 820, 822, 824, 826, 828, 830, 832, 834, 836, 838, 840, 842, 844, 846, 848, 850, 852, 854, 856, 858, 860, 862, 864, 865, 866, 867, 868, 869, 870, 871, 872, 873, 874, 875, 876, 877, 878, 879, 880, 881, 882, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 162, 157, 162, 157, 167, 141, 105, 141, 110, 213, 208, 162, 157, 162, 157, 162, 157, 167, 141, 110, 105, 1065, 1072, 203, 213, 208, 162, 157, 141, 110, 105, 213, 208, 213, 208, 1075, 162, 157, 141, 1059, 162, 157, 172, 1065, 208, 162, 157, 141, 1059, 162, 157, 172, 1065, 213, 162, 157, 162, 157, 162, 157, 167, 141, 110, 105, 162, 157, 172, 1065, 213, 208, 162, 157, 162, 157, 167, 141, 105, 141, 110, 213, 208, 162, 157, 162, 157, 167, 141, 1036, 162, 157, 172, 1065, 203, 213, 208, 162, 157, 162, 157, 162, 157, 167, 141, 110, 105, 162, 157, 172, 110, 105, 213, 208, 213, 208, 162, 157, 141, 110, 105, 213, 208, 213, 208, 162, 157, 162, 157, 162, 157, 167, 141, 110, 105, 162, 157, 172, 1065, 213, 208, 162, 157, 162, 157, 162, 157, 167, 141, 1059, 162, 157, 162, 157, 167, 172, 1065, 203, 213, 208, 203, 213, 208, 1086, 1074, 1086, 1074, 1086, 1087, 1086, 1087, 1086, 1087, 1086, 1087, 1086, 1087, 1086, 1079, 1087, 1087, 1087, 1086, 1087, 1086, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 1280, 1281, 1282, 1283, 1284, 1285, 1286, 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1294, 1295, 1296, 1297, 1298, 1299, 1300, 1301, 1303, 1304, 1305, 1306, 1307, 1308, 1309, 1310, 1311, 1312, 1313, 1314, 1316, 1317, 1318, 1319, 1320, 1321, 1322, 1323, 1324, 1325, 1326, 1327, 1328, 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1344, 1345, 1346, 1347, 1348, 1349, 1350, 1351, 1352, 1353, 1354, 1355, 1356, 1357, 1358, 1359, 1360, 1361, 1362, 1363, 
1364, 1365, 1366, 1367, 1368, 1369, 1370, 1371, 1372, 1373, 1374, 1375, 1376, 1377, 1378, 1379, 1380, 1381, 1382, 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397, 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1405, 1406, 1407, 1408, 1409, 1410, 1411, 1412, 1413, 1414, 1415, 1416, 1417, 1418, 1419, 1420, 1421, 1422, 1423, 1424, 1425, 1426, 1427, 1428, 1429, 1430, 1431, 1432, 1433, 1434, 1435, 1436, 1437, 1438, 1439, 1440, 1441, 1442, 1443, 1444, 1445, 1087, 1086, 1087, 1086, 1446, 1447, 1448, 1449, 1450, 1451, 1452, 1453, 1454, 1455, 1456, 1457, 1458, 1459, 1460, 1461, 1462, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 1536, 1538, 1545, 1547, 1549, 1551, 1555, 1559, 1561, 1564, 1566, 1568, 1570, 1574, 1579, 1583, 1588, 1590, 1592, 1596, 1598, 1602, 1604, 1606, 1613, 1615, 1617, 1622, 1627, 1629, 1631, 1633, 1637, 1639, 1642, 1644, 1646, 1648, 1651, 1653, 1655, 1657, 1659, 1661, 1665, 1667, 1671, 1673, 1675, 1677, 1682, 1684, 1690, 1693, 1544, 1542, 1557, 1558, 1692, 1696, 1698, 1681, 1689, 1700, 1701, 1692, 1702, 1703, 1573, 1577, 1692, 1089, 1582, 1586, 1692, 1089, 1601, 1708, 1710, 1612, 1610, 1621, 1625, 1626, 1714, 1670, 1681, 1688, 1689, 1692, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 1540, 1084, 1083, 1846, 1847, 1085, 1553, 1084, 1083, 1798, 1848, 1849, 1085, 1850, 1679, 1084, 1083, 1801, 1080, 1081, 1853, 1854, 1857, 1679, 1084, 1083, 1860, 1686, 1084, 1083, 1861, 1862, 1863, 1679, 1084, 1083, 1864, 1686, 1084, 1083, 1865, 1866, 1867, 1594, 1084, 1083, 1811, 1686, 1084, 1083, 1868, 1085, 1608, 1084, 1083, 1871, 1872, 1085, 1619, 1084, 1083, 1873, 1686, 1084, 1083, 1874, 1875, 1085, 1635, 1084, 1083, 1824, 1686, 1084, 1083, 1826, 1085, 1089, 1679, 1084, 1083, 1830, 1080, 1081, 1663, 1084, 1083, 1836, 1686, 1084, 1083, 1877, 1085, 1679, 1084, 1083, 1878, 1686, 1084, 1083, 1879, 1880, 1085, 1881, 1089, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 
223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 2048, 2049, 2050, 2051, 2053, 2054, 2055, 2056, 2057, 2060, 2062, 2063, 2064, 2065, 2066, 2067, 2069, 2070, 2071, 2072, 2073, 2075, 2076, 2077, 2079, 2081, 2082, 2083, 2085, 2086, 2087, 2089, 2091, 2092, 2093, 2094, 2095, 2096, 2097, 2099, 2100, 2101, 2102, 2103, 2105, 2106, 2107, 2108, 2110, 2111, 2112, 2115, 2116, 2117, 2118, 2119, 2120, 2121, 2122, 2123, 2124, 2125, 2126, 2127, 2128, 2129, 2130, 2131, 2132, 2133, 2134, 2135, 2136, 2137, 2138, 2140, 2141, 2142, 2143, 2145, 2146, 2147, 2149, 2150, 2151, 2152, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 2304, 2309, 2314, 2320, 2321, 2322, 2325, 2328, 2329, 2332, 2335, 2336, 2340, 2344, 2349, 2352, 2356, 2360, 2366, 2372, 2376, 2380, 2383, 2386, 2388, 2308, 2389, 2313, 2319, 2318, 2389, 2343, 2348, 2355, 2365, 2364, 2371, 2370, 2389, 2379, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 2560, 2561, 2562, 2565, 2566, 2568, 2569, 2571, 2572, 2573, 2574, 2575, 2576, 2577, 2578, 2579, 2580, 2581, 2582, 2585, 2586, 2587, 2588, 2589, 2371, 2370, 2080, 2090, 2590, 2591, 2592, 2593, 2594, 2595, 2596, 2597, 2371, 2370, 2598, 2599, 2389, 2387, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 
158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 2148, 2307, 2058, 2312, 2836, 2148, 2317, 2838, 2148, 2068, 2840, 2841, 2078, 2074, 2842, 2088, 2084, 2843, 2098, 2339, 2844, 2148, 2347, 2113, 2109, 2363, 2359, 2848, 2850, 2148, 2369, 2852, 2853, 2139, 2375, 2854, 2148, 2144, 2856, 2857, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 3072, 3073, 3074, 3075, 3077, 3078, 3080, 3081, 3082, 3084, 3085, 3087, 3088, 3090, 3091, 3093, 3094, 3095, 3096, 3097, 3098, 3101, 3102, 3103, 3105, 3106, 3108, 3109, 3110, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 3328, 3330, 3332, 3334, 3337, 3339, 3341, 3343, 3345, 3347, 3349, 3352, 3354, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 
128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 3100, 3356, 2847, 3089, 3092, 2835, 3076, 3336, 2846, 3079, 3086, 3099, 3107, 3351, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 3840, 3841, 3842, 3843, 3844, 3845, 3846, 3847, 3848, 3849, 3850, 3851, 3852, 3853, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 4096, 4098, 4100, 4102, 4104, 4106, 4108, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 
117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 4352, 4354, 4356, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 4608, 4610, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 4864, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 
122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 1090, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 5376, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255}; int h_C[]= { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 63, 65, 67, 69, 71, 73, 75, 77, 79, 81, 83, 85, 87, 89, 91, 93, 95, 98, 100, 102, 104, 107, 109, 112, 114, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 143, 145, 147, 149, 151, 153, 159, 161, 164, 166, 169, 171, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 200, 202, 205, 207, 210, 212, 115, 115, 194, 115, 115, 115, 197, 195, 96, 96, 115, 154, 155, 214, 194, 195, 197, 214, 588, 590, 
116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 769, 771, 773, 775, 777, 779, 781, 783, 785, 787, 789, 791, 793, 795, 797, 799, 801, 803, 805, 807, 809, 811, 813, 815, 817, 819, 821, 823, 825, 827, 829, 831, 833, 835, 837, 839, 841, 843, 845, 847, 849, 851, 853, 855, 857, 859, 861, 863, 238, 239, 250, 253, 254, 289, 334, 335, 343, 346, 363, 375, 376, 385, 386, 387, 388, 392, 883, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 1053, 1034, 1061, 1055, 1056, 1030, 1024, 1032, 1033, 1068, 1067, 1053, 1052, 1061, 1054, 1061, 1055, 1056, 1048, 1043, 1025, 1051, 1073, 1066, 1068, 1067, 1053, 1041, 1048, 1043, 1042, 1045, 1044, 1047, 1046, 1076, 1053, 1026, 1057, 1058, 1061, 1060, 1063, 1064, 1070, 1053, 1027, 1057, 1058, 1061, 1060, 1063, 1064, 1071, 1053, 1052, 1061, 1054, 1061, 1055, 1056, 1028, 1050, 1029, 1061, 1060, 1063, 1051, 1068, 1067, 1053, 1034, 1061, 1055, 1056, 1030, 1031, 1032, 1033, 1068, 1067, 1053, 1034, 1061, 1055, 1056, 1035, 1058, 1061, 1060, 1063, 1064, 1066, 1068, 1037, 1053, 1052, 1061, 1054, 1061, 1055, 1056, 1048, 1050, 1038, 1061, 1060, 1063, 1040, 1039, 1068, 1067, 1071, 1070, 1053, 1041, 1048, 1043, 1042, 1045, 1044, 1047, 1046, 1053, 1052, 1061, 1054, 1061, 1055, 1056, 1048, 1050, 1049, 1061, 1060, 1063, 1051, 1068, 1067, 1053, 1052, 1061, 1054, 1061, 1055, 1056, 1057, 1058, 1061, 1060, 1061, 1061, 1062, 1063, 1064, 1066, 1068, 1067, 1069, 1071, 1070, 1078, 1078, 1078, 1078, 1078, 1078, 1078, 1078, 1078, 1077, 1077, 1077, 1077, 1078, 1082, 1078, 1078, 1082, 1088, 1088, 1088, 1088, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 240, 241, 242, 243, 244, 245, 246, 
247, 248, 249, 251, 252, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 336, 337, 338, 339, 340, 341, 342, 344, 345, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 377, 378, 379, 380, 381, 382, 383, 384, 389, 390, 391, 398, 412, 413, 415, 416, 1315, 1315, 1315, 1315, 434, 435, 446, 447, 457, 458, 460, 461, 467, 488, 490, 491, 507, 517, 518, 521, 522, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 1537, 1539, 1546, 1548, 1550, 1552, 1556, 1560, 1562, 1565, 1567, 1569, 1571, 1575, 1580, 1584, 1589, 1591, 1593, 1597, 1599, 1603, 1605, 1607, 1614, 1616, 1618, 1623, 1628, 1630, 1632, 1634, 1638, 1640, 1643, 1645, 1647, 1649, 1652, 1654, 1656, 1658, 1660, 1662, 1666, 1668, 1672, 1674, 1676, 1678, 1683, 1685, 1691, 1694, 1543, 1541, 1687, 1302, 1315, 1697, 1699, 1650, 1315, 420, 421, 1315, 423, 424, 1572, 1576, 1078, 1578, 1581, 1585, 1078, 1587, 1600, 1709, 1711, 1611, 1609, 1620, 1624, 1078, 1715, 1669, 1680, 1687, 1088, 1088, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 1793, 1840, 1792, 396, 397, 1794, 1797, 1796, 1795, 1554, 404, 405, 1799, 407, 1841, 1840, 1800, 1563, 1802, 1803, 418, 419, 422, 1841, 1840, 1804, 428, 1843, 1843, 1805, 432, 433, 436, 1841, 1840, 1806, 440, 1843, 1843, 1807, 444, 445, 448, 1810, 1809, 1808, 1595, 1843, 1843, 1812, 456, 1813, 1815, 1840, 1814, 465, 466, 1816, 1818, 1840, 1817, 472, 1843, 1843, 1819, 476, 477, 1820, 1823, 1822, 1821, 1636, 1843, 1843, 1825, 1641, 1827, 1828, 1841, 1840, 1829, 1650, 1831, 1832, 1835, 1834, 1833, 1664, 1843, 1843, 1837, 505, 1838, 1841, 1840, 1839, 511, 1843, 1843, 1842, 515, 516, 1844, 520, 1845, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 
196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 393, 394, 395, 2052, 399, 400, 401, 402, 403, 406, 408, 409, 410, 411, 414, 417, 1855, 1858, 425, 426, 427, 429, 430, 431, 1704, 437, 438, 439, 441, 442, 443, 1706, 449, 450, 451, 452, 453, 454, 455, 459, 462, 463, 464, 2104, 468, 469, 470, 471, 473, 474, 475, 478, 479, 480, 481, 482, 483, 484, 485, 486, 487, 489, 492, 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 506, 508, 509, 510, 512, 513, 514, 1717, 519, 1719, 523, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 2305, 2310, 2315, 1856, 1859, 2323, 2326, 1705, 2330, 2333, 1707, 2337, 2341, 2345, 2350, 2353, 2357, 2361, 2367, 2373, 2377, 2381, 2384, 1718, 1720, 1695, 2061, 2059, 1852, 1851, 1870, 1869, 1712, 2114, 1713, 1713, 1876, 1876, 1716, 1716, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 2306, 2311, 2316, 2324, 2327, 2331, 2334, 2338, 2342, 2346, 2351, 2354, 2358, 2362, 2368, 2374, 2378, 2382, 2385, 526, 529, 530, 533, 534, 2564, 2563, 2567, 2570, 547, 548, 551, 554, 557, 558, 559, 560, 2584, 2583, 567, 568, 2584, 2583, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 
150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 2834, 2816, 2834, 2817, 2837, 2834, 2818, 2839, 2834, 2830, 537, 538, 2820, 2819, 541, 2822, 2821, 544, 2824, 2823, 2845, 2834, 2825, 2827, 2826, 2829, 2828, 2849, 2851, 2834, 2830, 563, 564, 2832, 2831, 2855, 2834, 2833, 571, 572, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 524, 525, 527, 528, 531, 532, 535, 536, 3083, 539, 540, 542, 543, 545, 546, 549, 550, 552, 553, 555, 556, 561, 562, 3104, 565, 566, 569, 570, 3111, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 3329, 3331, 3333, 3335, 3338, 3340, 3342, 3344, 3346, 3348, 3350, 3353, 3355, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 
127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 3594, 3596, 3592, 3589, 3590, 3584, 3585, 3587, 3591, 3586, 3588, 3593, 3595, 3594, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, 583, 584, 585, 586, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 4097, 4099, 4101, 4103, 4105, 4107, 4109, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 
119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 4353, 4355, 4357, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 4609, 4358, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 4865, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 
124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 5120, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 591, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255}; bool h_Op[]= { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; #define THREADS_PER_BLOCK 256 #define BLOCKS_PER_GRID 1 #define SIZE_OF_IN 768 #define SIZE_OF_AC 5120 __device__ void ac(float *A, const int *B, const int *C, const bool *Op, int n_iter) { int i= blockDim.x * blockIdx.x + threadIdx.x; __shared__ float R[23*THREADS_PER_BLOCK]; const int t= THREADS_PER_BLOCK; __shared__ float final; final=0; R[i + 0*t] = A[i + 0*t]; R[i + 1*t] = A[i + 1*t]; R[i + 2*t] = A[i + 2*t]; __syncthreads(); for (int iter=0; iter< n_iter; iter++) { R[i + 3*t] = Op[i + 0*t] ? R[B[i + 0*t]] * R[C[i + 0*t]] : R[B[i + 0*t]] + R[C[i + 0*t]]; __syncthreads(); R[i + 4*t] = Op[i + 1*t] ? R[B[i + 1*t]] * R[C[i + 1*t]] : R[B[i + 1*t]] + R[C[i + 1*t]]; __syncthreads(); R[i + 5*t] = Op[i + 2*t] ? R[B[i + 2*t]] * R[C[i + 2*t]] : R[B[i + 2*t]] + R[C[i + 2*t]]; __syncthreads(); R[i + 6*t] = Op[i + 3*t] ? R[B[i + 3*t]] * R[C[i + 3*t]] : R[B[i + 3*t]] + R[C[i + 3*t]]; __syncthreads(); R[i + 7*t] = Op[i + 4*t] ? R[B[i + 4*t]] * R[C[i + 4*t]] : R[B[i + 4*t]] + R[C[i + 4*t]]; __syncthreads(); R[i + 8*t] = Op[i + 5*t] ? R[B[i + 5*t]] * R[C[i + 5*t]] : R[B[i + 5*t]] + R[C[i + 5*t]]; __syncthreads(); R[i + 9*t] = Op[i + 6*t] ? R[B[i + 6*t]] * R[C[i + 6*t]] : R[B[i + 6*t]] + R[C[i + 6*t]]; __syncthreads(); R[i + 10*t] = Op[i + 7*t] ? R[B[i + 7*t]] * R[C[i + 7*t]] : R[B[i + 7*t]] + R[C[i + 7*t]]; __syncthreads(); R[i + 11*t] = Op[i + 8*t] ? R[B[i + 8*t]] * R[C[i + 8*t]] : R[B[i + 8*t]] + R[C[i + 8*t]]; __syncthreads(); R[i + 12*t] = Op[i + 9*t] ? R[B[i + 9*t]] * R[C[i + 9*t]] : R[B[i + 9*t]] + R[C[i + 9*t]]; __syncthreads(); R[i + 13*t] = Op[i + 10*t] ? R[B[i + 10*t]] * R[C[i + 10*t]] : R[B[i + 10*t]] + R[C[i + 10*t]]; __syncthreads(); R[i + 14*t] = Op[i + 11*t] ? R[B[i + 11*t]] * R[C[i + 11*t]] : R[B[i + 11*t]] + R[C[i + 11*t]]; __syncthreads(); R[i + 15*t] = Op[i + 12*t] ? R[B[i + 12*t]] * R[C[i + 12*t]] : R[B[i + 12*t]] + R[C[i + 12*t]]; __syncthreads(); R[i + 16*t] = Op[i + 13*t] ? R[B[i + 13*t]] * R[C[i + 13*t]] : R[B[i + 13*t]] + R[C[i + 13*t]]; __syncthreads(); R[i + 17*t] = Op[i + 14*t] ? R[B[i + 14*t]] * R[C[i + 14*t]] : R[B[i + 14*t]] + R[C[i + 14*t]]; __syncthreads(); R[i + 18*t] = Op[i + 15*t] ? R[B[i + 15*t]] * R[C[i + 15*t]] : R[B[i + 15*t]] + R[C[i + 15*t]]; __syncthreads(); R[i + 19*t] = Op[i + 16*t] ? R[B[i + 16*t]] * R[C[i + 16*t]] : R[B[i + 16*t]] + R[C[i + 16*t]]; __syncthreads(); R[i + 20*t] = Op[i + 17*t] ? R[B[i + 17*t]] * R[C[i + 17*t]] : R[B[i + 17*t]] + R[C[i + 17*t]]; __syncthreads(); R[i + 21*t] = Op[i + 18*t] ? R[B[i + 18*t]] * R[C[i + 18*t]] : R[B[i + 18*t]] + R[C[i + 18*t]]; __syncthreads(); R[i + 22*t] = Op[i + 19*t] ? R[B[i + 19*t]] * R[C[i + 19*t]] : R[B[i + 19*t]] + R[C[i + 19*t]]; if (i==0) { final += R[22*t]; } __syncthreads(); } if (i==0) { A[0]= final;} }
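//
// The host-side driver for ac() is not shown in this excerpt. The sketch below is a
// hypothetical example (an assumption, not code from this file): it wraps the
// __device__ function ac() in a __global__ kernel named kernelAC and launches it with
// the BLOCKS_PER_GRID x THREADS_PER_BLOCK configuration defined above, assuming the
// h_A, h_B, h_C and h_Op tables declared earlier in this file hold at least
// SIZE_OF_AC and SIZE_OF_IN elements respectively.
//
__global__ void kernelAC(float *A, const int *B, const int *C, const bool *Op, int n_iter)
{
	ac(A, B, C, Op, n_iter);
}

static void LaunchAC(int n_iter)
{
	float *d_A = NULL;
	int *d_B = NULL;
	int *d_C = NULL;
	bool *d_Op = NULL;

	// Device allocations sized by the constants defined above.
	hipMalloc((void **)&d_A, SIZE_OF_AC * sizeof(float));
	hipMalloc((void **)&d_B, SIZE_OF_IN * sizeof(int));
	hipMalloc((void **)&d_C, SIZE_OF_IN * sizeof(int));
	hipMalloc((void **)&d_Op, SIZE_OF_IN * sizeof(bool));

	// Upload the host tables declared earlier in this file.
	hipMemcpy(d_A, h_A, SIZE_OF_AC * sizeof(float), hipMemcpyHostToDevice);
	hipMemcpy(d_B, h_B, SIZE_OF_IN * sizeof(int), hipMemcpyHostToDevice);
	hipMemcpy(d_C, h_C, SIZE_OF_IN * sizeof(int), hipMemcpyHostToDevice);
	hipMemcpy(d_Op, h_Op, SIZE_OF_IN * sizeof(bool), hipMemcpyHostToDevice);

	// One grid of BLOCKS_PER_GRID blocks with THREADS_PER_BLOCK threads each,
	// matching the shared-memory layout R[23*THREADS_PER_BLOCK] used inside ac().
	kernelAC << <BLOCKS_PER_GRID, THREADS_PER_BLOCK >> > (d_A, d_B, d_C, d_Op, n_iter);
	hipDeviceSynchronize();

	// ac() reduces its result into A[0]; copy that single value back for inspection.
	float result = 0.0f;
	hipMemcpy(&result, d_A, sizeof(float), hipMemcpyDeviceToHost);
	printf("\nac result after %d iterations: %f", n_iter, result);

	hipFree(d_A);
	hipFree(d_B);
	hipFree(d_C);
	hipFree(d_Op);
}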
// 7520ad1aca8f9255f1a074aad277f665b9386507.cu
float h_A[]= { 0.7213110389323074, 0.7036072892760992, 0.7972208480899733, 0.5153771118746243, 0.9216180039321551, 0.9615308379788774, 0.6739112073326918, 0.7155256159701121, 0.79258098175816, 0.9020993892924711, 0.8466942281531666, 0.5537511206889679, 0.5438491782768006, 0.7116611841848426, 0.5345733881172183, 0.6305435920228089, 0.9277412059481013, 0.9601175756536102, 0.5446681834227135, 0.8851113168080971, 0.9213980885405118, 0.9915170026771223, 0.9587707645596766, 0.6219713016116879, 0.59276932445235, 0.8176418731352921, 0.6798959501796369, 0.7878206193846803, 0.9719590782979264, 0.9033969542114628, 0.5257703038302184, 0.5060736164359405, 0.7617378755679971, 0.6813167728709468, 0.9829966035233257, 0.9436321253947957, 0.7141845778293617, 0.8573416967736969, 0.5422510511098686, 0.8854271419940865, 0.7079571984354764, 0.5809701005539115, 0.7670813919644976, 0.5026467498089977, 0.9606221012390379, 0.881001105638563, 0.5452291376880389, 0.6132540058623579, 0.8398635939579402, 0.8950965387747699, 0.577723664133599, 0.65654378252201, 0.551610115160845, 0.7471229692397765, 0.6064047653161813, 0.5348684993169526, 0.5714834053077453, 0.5699335991556576, 0.9581613026614635, 0.5800884251550227, 0.5621315217726607, 0.7260545065405787, 0.8263375959182511, 0.7004871816808094, 0.6628612849215572, 0.7630559501530698, 0.6090032592882435, 0.5297518102839368, 0.9716773789062979, 0.9792908103361173, 0.813729030576051, 0.9358222319928466, 0.9552555022955985, 0.9659368142992655, 0.5503401170334505, 0.7983438229770574, 0.8612723782138829, 0.8954838168746426, 0.6707221843700835, 0.87132828504265, 0.6813242207001085, 0.8731189965641872, 0.8963920044873096, 0.6589936053193723, 0.5142121673426723, 0.831984826281815, 0.7639888826373817, 0.6420662927592042, 0.8954349479086924, 0.6547561830427948, 0.7015274218684611, 0.9130054177465503, 0.8337834495754337, 0.9077154271987949, 0.8910566519116231, 0.9933333958880843, 0.7208793338115973, 0.8732275221059247, 0.6499131589293037, 0.6225016536582735, 0.506024346065687, 0.6334473626646867, 0.8642540267703913, 0.6723920546622816, 0.6454028787559742, 0.7549523427254714, 0.805806790664657, 0.6303127937986285, 0.8937464695521591, 0.9274795925381079, 0.7702126603748829, 0.8055277344962463, 0.8157804696830382, 0.6219542206282047, 0.5431210508452308, 0.5110240954422681, 0.5513970040916557, 0.9426855261472222, 0.8748081215557051, 0.741094255177086, 0.8497124066557136, 0.6273164134428054, 0.7748003989421978, 0.6611962772062256, 0.7401352720701797, 0.7687019107934498, 0.7767744755636825, 0.5913898182077495, 0.5385837135035092, 0.6082693609582921, 0.8626094369370461, 0.9699138062719916, 0.6910816521277436, 0.8865828246797212, 0.7391787580562894, 0.8063511498264284, 0.7425674516704125, 0.6912159215757671, 0.6498378086468368, 0.5128719865057241, 0.7829125973602015, 0.7452102281974154, 0.9838934615177628, 0.7448248499577833, 0.7821284001877189, 0.8336625562667601, 0.7947946889587203, 0.6821792645687259, 0.6749657511955991, 0.82056385260541, 0.594456091081921, 0.5025934920429402, 0.5358054634484388, 0.7299644543146196, 0.800198263470014, 0.5200795780935461, 0.9127085069646359, 0.520189261488305, 0.5721914853245895, 0.8353448213929731, 0.6954853620284434, 0.5287393914234155, 0.7749133682929376, 0.9626332237047208, 0.504531933674538, 0.7416881835220519, 0.8029161141930703, 0.9512371692847112, 0.678590959823639, 0.5942002011283005, 0.7231946939255616, 0.8891894086624462, 0.8467433825580544, 0.8250284374644645, 0.5006569190556542, 0.7168051210436979, 0.7906952071921669, 0.8270348886306684, 
0.6098752346678269, 0.5222347961416989, 0.7929018318998295, 0.5101055813477642, 0.7169833755909871, 0.5702461928129504, 0.9023390198515662, 0.8818647192968105, 0.963439971720073, 0.5654149968549657, 0.9693649864156556, 0.8935600740999206, 0.7186220005851536, 0.7447207297667482, 0.9237355949222104, 0.9169525724065697, 0.5802019547819947, 0.9405852601832476, 0.8876001178289146, 0.7693074221942977, 0.5587374478203172, 0.94048087746272, 0.7081619194854067, 0.5577780946399669, 0.639746138541101, 0.5677279896423741, 0.7013171406572657, 0.8531962429999775, 0.9534461688042684, 0.9544454327368554, 0.7419798377038764, 0.7781598798873539, 0.9880995476366918, 0.9085277704260113, 0.8808235015558032, 0.645561561502733, 0.6756942754185749, 0.5067823378774516, 0.506378470755337, 0.810777692389024, 0.5123445978187132, 0.8708238156107807, 0.8503474958572756, 0.954442138732992, 0.9080537030698932, 0.9018808628797211, 0.9449707700689803, 0.8191690079487154, 0.6827385944940092, 0.8929245413314859, 0.5292200559271782, 0.8624408829473031, 0.6845293685637726, 0.6978517435484533, 0.7839079461033043, 0.8375322422631433, 0.9537265678949418, 0.8208246713448575, 0.9297464038826257, 0.8533886395580749, 0.6595065038741466, 0.5367401755113856, 0.5711884859357816, 0.9956268735292508, 0.5355328946765383, 0.5455838142054785, 0.9854809795755022, 0.7816238163921971, 0.5441177260680754, 0.7809034256329053, 0.8833044171043674, 0.7976267671978272, 0.6949480150237191, 0.97620470200642, 0.8229129809550366, 0.869410101611787, 0.7857192541226444, 0.9049082880373116, 0.8217825261671052, 0.5978171261180575, 0.7516740248586333, 0.835374844356755, 0.6647492038017275, 0.654372380384227, 0.533928025397404, 0.8754977563944351, 0.5272353988484589, 0.9324332907020879, 0.8092159617417161, 0.8081259134157913, 0.7270657386338355, 0.9038540598848672, 0.9711575053233915, 0.6771643891208445, 0.9579552939218283, 0.853761852888534, 0.8960824555273585, 0.5269678068509709, 0.6943165773545628, 0.5796276656005226, 0.706014162848424, 0.8372419249150096, 0.6996409865937712, 0.5716824987141625, 0.6438334075025813, 0.5260148903229024, 0.9331886427333145, 0.9323333298241967, 0.7260844671738941, 0.9270432682797352, 0.9337665703653302, 0.6315745755524134, 0.7385872282881337, 0.7804016374726153, 0.8425343267477277, 0.8992882216574574, 0.8154295497769282, 0.9454044330995546, 0.9504649658919259, 0.7552753505697389, 0.703333042647019, 0.851159545729975, 0.6219148319282171, 0.916502088917321, 0.8613541833378122, 0.9124095174944935, 0.9286629029328552, 0.612550255934398, 0.8679719411162063, 0.9693607203380965, 0.8453412431169058, 0.8725376854813824, 0.7912174040299396, 0.5888947879582849, 0.9848924865749478, 0.8174611882014551, 0.7400586120333806, 0.9892661252731871, 0.9813085520067423, 0.9573712978603379, 0.649588198345867, 0.6250235203353991, 0.5955909287888107, 0.620373053569861, 0.526029835569561, 0.7144405404754234, 0.780285293622486, 0.8666553691054684, 0.8964231072882586, 0.8591118144087425, 0.9572999862830796, 0.9572257787849424, 0.7738885343608862, 0.6271141855599663, 0.5539161885973134, 0.674124267886359, 0.8456009864511342, 0.789085976005303, 0.9179901181123216, 0.6757078684677362, 0.554766193187559, 0.8991436828308943, 0.72861832616026, 0.8437634536967329, 0.9579330569265415, 0.9850939686630349, 0.8228998187444237, 0.8388421319341619, 0.5297061437767218, 0.6094811259115935, 0.6935463743720558, 0.8634731923807555, 0.8062166169630336, 0.7894429442727542, 0.7651759682262698, 0.9731711420190147, 0.9941330255107579, 0.7341363475012623, 0.9521718941174941, 
0.9765870463957484, 0.7892140968169058, 0.7051042952693836, 0.7433897579012426, 0.7796078583896936, 0.9420146913245346, 0.5585284274921852, 0.6614237390233539, 0.8225768496189233, 0.9228783746366208, 0.5543525000804628, 0.712200484202808, 0.7458232809428115, 0.6370976984346218, 0.7614142413586191, 0.807507380810081, 0.5224827912351996, 0.6724736119732511, 0.593179868665466, 0.9306430166660243, 0.5324363741372664, 0.8889130123351461, 0.9107423542503432, 0.6550202302554903, 0.944894451539642, 0.9052118156355418, 0.5590466975750978, 0.805467627477866, 0.980986543801699, 0.5502338170531667, 0.7555823165396496, 0.5443648978107398, 0.7345272275399661, 0.5414860274201246, 0.5203901051451236, 0.5423934957350249, 0.6539933857773308, 0.9208207415813712, 0.5502128020162411, 0.6876157144549884, 0.5702853582447343, 0.5904228543623173, 0.8889623987647162, 0.8985261691881565, 0.5048154598888724, 0.6076562850468037, 0.6961908971137987, 0.6500172650564393, 0.8511948985756508, 0.66319992047087, 0.9484600768282544, 0.5089392129473143, 0.7668151252216436, 0.8848797511184859, 0.8442684141503054, 0.8293842491761947, 0.9228608712069555, 0.716848629788235, 0.9187140383200945, 0.7381151908607154, 0.9316436232594469, 0.9007970659292505, 0.8571124627981633, 0.7563461277617688, 0.513696518145864, 0.5748871483045002, 0.9520785587809371, 0.8896683038703164, 0.6998782192395402, 0.8834708853708064, 0.6918430479828297, 0.9714088838129342, 0.7358356385444389, 0.706251044986796, 0.6445022136693828, 0.6502160963722614, 0.67845490395182, 0.6989987170451886, 0.5019984250344179, 0.595813407554385, 0.7070955606691687, 0.8216025133858569, 0.6394441217980584, 0.9861550745536893, 0.5630267871638102, 0.7197774220892457, 0.5707820282987905, 0.5554174483683547, 0.5608175917649101, 0.7346401576638559, 0.6966702058717653, 0.6445646315267469, 0.8744094501599702, 0.8013922028376101, 0.8827004493426109, 0.6030222621908303, 0.8946255779552771, 0.5787758041400044, 0.7187007220827769, 0.6284769420023835, 0.7346188868724551, 0.695829407120654, 0.6386811653525657, 0.7150252903858563, 0.6293714022285715, 0.5249633990391243, 0.8647394391293555, 0.8230188310468833, 0.8976785086232437, 0.5039818379281453, 0.8211569054426457, 0.7194889824918933, 0.9289131829642242, 0.7541040312860572, 0.742051339762712, 0.5515601578515075, 0.8893119930560853, 0.7849628852472481, 0.8274409410965613, 0.9430988897880357, 0.5593617432017448, 0.9863674781085019, 0.8818683165019685, 0.5334322723076458, 0.5431232249183211, 0.956274780832098, 0.6750059580031558, 0.8033812934976199, 0.5633421647434877, 0.7588173120251935, 0.6148214442198539, 0.5306560401308527, 0.9159055555823894, 0.7829682549085655, 0.6077550001723855, 0.6768516473519923, 0.9083875292208695, 0.8238798732507031, 0.8842950293224175, 0.7509042583446859, 0.5835260581189603, 0.771432641391331, 0.8735359080421995, 0.6654501818823724, 0.8330481168914817, 0.6586063723028632, 0.5324851641008854, 0.6516233791749912, 0.8668811241368004, 0.6901352977817978, 0.8276546369593913, 0.883845084525178, 0.5816936278385099, 0.6363005501359384, 0.9231059836000148, 0.7356217224283114, 0.8089825425159602, 0.5589388507840811, 0.6306424940443877, 0.5350914779207365, 0.5810872349283129, 0.8127313666556297, 0.9468961803825973, 0.7860397530364289, 0.6428268710185303, 0.8479121114897317, 0.9319903051408973, 0.7831470045544986, 0.7486175937861625, 0.9891526397281045, 0.8517228564544181, 0.8630442177017649, 0.7009876895581848, 0.919993632123095, 0.9742906773769642, 0.5001017712541294, 0.7288059511752198, 0.5356269422575284, 
0.5009525538060497, 0.506362747718705, 0.6855898077307808, 0.8186058722239021, 0.6437350504120393, 0.718239019281738, 0.5968857591904404, 0.609524616029788, 0.7346307101193603, 0.5303081011328907, 0.8945951420123923, 0.714368980223252, 0.9270788314109553, 0.7772798233338185, 0.5043792436685017, 0.5489902958837589, 0.8161466086153648, 0.9002587902927479, 0.8594978043993857, 0.7720918657186675, 0.708471889407926, 0.8553134366489652, 0.8307261279699127, 0.9196334979451786, 0.5460130796628213, 0.5322359638794936, 0.7015642375586723, 0.9454702956809771, 0.5614611510499937, 0.9886486033452997, 0.8505700200768735, 0.7705538255754345, 0.5242606557494717, 0.5487130885383176, 0.9797450718881408, 0.7620902625223458, 0.5234501381612409, 0.6015105519508768, 0.6908773949001565, 0.8052201557728094, 0.5619060781574086, 0.8365028605718405, 0.7381675988564596, 0.5518180430531955, 0.7932454856705924, 0.8872510212340574, 0.7280059763374385, 0.978804367491308, 0.9211564036702524, 0.8495645790636963, 0.9434113274257316, 0.9900948520859476, 0.5320469764868729, 0.824062921633453, 0.6392225464923971, 0.7767066000638363, 0.5870722591297224, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0}; int h_B[]= { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 97, 99, 101, 103, 106, 108, 111, 113, 117, 119, 121, 123, 125, 127, 129, 131, 133, 135, 137, 139, 142, 144, 146, 148, 150, 152, 158, 160, 163, 165, 168, 170, 173, 175, 177, 179, 181, 183, 185, 187, 189, 191, 199, 201, 204, 206, 209, 211, 198, 116, 203, 198, 116, 198, 116, 203, 193, 215, 116, 156, 156, 193, 196, 196, 198, 215, 587, 589, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 
175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 768, 770, 772, 774, 776, 778, 780, 782, 784, 786, 788, 790, 792, 794, 796, 798, 800, 802, 804, 806, 808, 810, 812, 814, 816, 818, 820, 822, 824, 826, 828, 830, 832, 834, 836, 838, 840, 842, 844, 846, 848, 850, 852, 854, 856, 858, 860, 862, 864, 865, 866, 867, 868, 869, 870, 871, 872, 873, 874, 875, 876, 877, 878, 879, 880, 881, 882, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 162, 157, 162, 157, 167, 141, 105, 141, 110, 213, 208, 162, 157, 162, 157, 162, 157, 167, 141, 110, 105, 1065, 1072, 203, 213, 208, 162, 157, 141, 110, 105, 213, 208, 213, 208, 1075, 162, 157, 141, 1059, 162, 157, 172, 1065, 208, 162, 157, 141, 1059, 162, 157, 172, 1065, 213, 162, 157, 162, 157, 162, 157, 167, 141, 110, 105, 162, 157, 172, 1065, 213, 208, 162, 157, 162, 157, 167, 141, 105, 141, 110, 213, 208, 162, 157, 162, 157, 167, 141, 1036, 162, 157, 172, 1065, 203, 213, 208, 162, 157, 162, 157, 162, 157, 167, 141, 110, 105, 162, 157, 172, 110, 105, 213, 208, 213, 208, 162, 157, 141, 110, 105, 213, 208, 213, 208, 162, 157, 162, 157, 162, 157, 167, 141, 110, 105, 162, 157, 172, 1065, 213, 208, 162, 157, 162, 157, 162, 157, 167, 141, 1059, 162, 157, 162, 157, 167, 172, 1065, 203, 213, 208, 203, 213, 208, 1086, 1074, 1086, 1074, 1086, 1087, 1086, 1087, 1086, 1087, 1086, 1087, 1086, 1087, 1086, 1079, 1087, 1087, 1087, 1086, 1087, 1086, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 1280, 1281, 1282, 1283, 1284, 1285, 1286, 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1294, 1295, 1296, 1297, 1298, 1299, 1300, 1301, 1303, 1304, 1305, 1306, 1307, 1308, 1309, 1310, 1311, 1312, 1313, 1314, 1316, 1317, 1318, 1319, 1320, 1321, 1322, 1323, 1324, 1325, 1326, 1327, 1328, 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1344, 1345, 1346, 1347, 1348, 1349, 1350, 1351, 1352, 1353, 1354, 1355, 1356, 1357, 1358, 1359, 1360, 1361, 1362, 1363, 1364, 1365, 1366, 1367, 1368, 1369, 1370, 1371, 1372, 1373, 1374, 1375, 1376, 1377, 1378, 1379, 
1380, 1381, 1382, 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397, 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1405, 1406, 1407, 1408, 1409, 1410, 1411, 1412, 1413, 1414, 1415, 1416, 1417, 1418, 1419, 1420, 1421, 1422, 1423, 1424, 1425, 1426, 1427, 1428, 1429, 1430, 1431, 1432, 1433, 1434, 1435, 1436, 1437, 1438, 1439, 1440, 1441, 1442, 1443, 1444, 1445, 1087, 1086, 1087, 1086, 1446, 1447, 1448, 1449, 1450, 1451, 1452, 1453, 1454, 1455, 1456, 1457, 1458, 1459, 1460, 1461, 1462, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 1536, 1538, 1545, 1547, 1549, 1551, 1555, 1559, 1561, 1564, 1566, 1568, 1570, 1574, 1579, 1583, 1588, 1590, 1592, 1596, 1598, 1602, 1604, 1606, 1613, 1615, 1617, 1622, 1627, 1629, 1631, 1633, 1637, 1639, 1642, 1644, 1646, 1648, 1651, 1653, 1655, 1657, 1659, 1661, 1665, 1667, 1671, 1673, 1675, 1677, 1682, 1684, 1690, 1693, 1544, 1542, 1557, 1558, 1692, 1696, 1698, 1681, 1689, 1700, 1701, 1692, 1702, 1703, 1573, 1577, 1692, 1089, 1582, 1586, 1692, 1089, 1601, 1708, 1710, 1612, 1610, 1621, 1625, 1626, 1714, 1670, 1681, 1688, 1689, 1692, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 1540, 1084, 1083, 1846, 1847, 1085, 1553, 1084, 1083, 1798, 1848, 1849, 1085, 1850, 1679, 1084, 1083, 1801, 1080, 1081, 1853, 1854, 1857, 1679, 1084, 1083, 1860, 1686, 1084, 1083, 1861, 1862, 1863, 1679, 1084, 1083, 1864, 1686, 1084, 1083, 1865, 1866, 1867, 1594, 1084, 1083, 1811, 1686, 1084, 1083, 1868, 1085, 1608, 1084, 1083, 1871, 1872, 1085, 1619, 1084, 1083, 1873, 1686, 1084, 1083, 1874, 1875, 1085, 1635, 1084, 1083, 1824, 1686, 1084, 1083, 1826, 1085, 1089, 1679, 1084, 1083, 1830, 1080, 1081, 1663, 1084, 1083, 1836, 1686, 1084, 1083, 1877, 1085, 1679, 1084, 1083, 1878, 1686, 1084, 1083, 1879, 1880, 1085, 1881, 1089, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 
242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 2048, 2049, 2050, 2051, 2053, 2054, 2055, 2056, 2057, 2060, 2062, 2063, 2064, 2065, 2066, 2067, 2069, 2070, 2071, 2072, 2073, 2075, 2076, 2077, 2079, 2081, 2082, 2083, 2085, 2086, 2087, 2089, 2091, 2092, 2093, 2094, 2095, 2096, 2097, 2099, 2100, 2101, 2102, 2103, 2105, 2106, 2107, 2108, 2110, 2111, 2112, 2115, 2116, 2117, 2118, 2119, 2120, 2121, 2122, 2123, 2124, 2125, 2126, 2127, 2128, 2129, 2130, 2131, 2132, 2133, 2134, 2135, 2136, 2137, 2138, 2140, 2141, 2142, 2143, 2145, 2146, 2147, 2149, 2150, 2151, 2152, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 2304, 2309, 2314, 2320, 2321, 2322, 2325, 2328, 2329, 2332, 2335, 2336, 2340, 2344, 2349, 2352, 2356, 2360, 2366, 2372, 2376, 2380, 2383, 2386, 2388, 2308, 2389, 2313, 2319, 2318, 2389, 2343, 2348, 2355, 2365, 2364, 2371, 2370, 2389, 2379, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 2560, 2561, 2562, 2565, 2566, 2568, 2569, 2571, 2572, 2573, 2574, 2575, 2576, 2577, 2578, 2579, 2580, 2581, 2582, 2585, 2586, 2587, 2588, 2589, 2371, 2370, 2080, 2090, 2590, 2591, 2592, 2593, 2594, 2595, 2596, 2597, 2371, 2370, 2598, 2599, 2389, 2387, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 
177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 2148, 2307, 2058, 2312, 2836, 2148, 2317, 2838, 2148, 2068, 2840, 2841, 2078, 2074, 2842, 2088, 2084, 2843, 2098, 2339, 2844, 2148, 2347, 2113, 2109, 2363, 2359, 2848, 2850, 2148, 2369, 2852, 2853, 2139, 2375, 2854, 2148, 2144, 2856, 2857, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 3072, 3073, 3074, 3075, 3077, 3078, 3080, 3081, 3082, 3084, 3085, 3087, 3088, 3090, 3091, 3093, 3094, 3095, 3096, 3097, 3098, 3101, 3102, 3103, 3105, 3106, 3108, 3109, 3110, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 3328, 3330, 3332, 3334, 3337, 3339, 3341, 3343, 3345, 3347, 3349, 3352, 3354, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 
147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 3100, 3356, 2847, 3089, 3092, 2835, 3076, 3336, 2846, 3079, 3086, 3099, 3107, 3351, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 3840, 3841, 3842, 3843, 3844, 3845, 3846, 3847, 3848, 3849, 3850, 3851, 3852, 3853, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 4096, 4098, 4100, 4102, 4104, 4106, 4108, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 
136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 4352, 4354, 4356, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 4608, 4610, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 4864, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 
141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 1090, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 5376, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255}; int h_C[]= { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 63, 65, 67, 69, 71, 73, 75, 77, 79, 81, 83, 85, 87, 89, 91, 93, 95, 98, 100, 102, 104, 107, 109, 112, 114, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 143, 145, 147, 149, 151, 153, 159, 161, 164, 166, 169, 171, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 200, 202, 205, 207, 210, 212, 115, 115, 194, 115, 115, 115, 197, 195, 96, 96, 115, 154, 155, 214, 194, 195, 197, 214, 588, 590, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 
135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 769, 771, 773, 775, 777, 779, 781, 783, 785, 787, 789, 791, 793, 795, 797, 799, 801, 803, 805, 807, 809, 811, 813, 815, 817, 819, 821, 823, 825, 827, 829, 831, 833, 835, 837, 839, 841, 843, 845, 847, 849, 851, 853, 855, 857, 859, 861, 863, 238, 239, 250, 253, 254, 289, 334, 335, 343, 346, 363, 375, 376, 385, 386, 387, 388, 392, 883, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 1053, 1034, 1061, 1055, 1056, 1030, 1024, 1032, 1033, 1068, 1067, 1053, 1052, 1061, 1054, 1061, 1055, 1056, 1048, 1043, 1025, 1051, 1073, 1066, 1068, 1067, 1053, 1041, 1048, 1043, 1042, 1045, 1044, 1047, 1046, 1076, 1053, 1026, 1057, 1058, 1061, 1060, 1063, 1064, 1070, 1053, 1027, 1057, 1058, 1061, 1060, 1063, 1064, 1071, 1053, 1052, 1061, 1054, 1061, 1055, 1056, 1028, 1050, 1029, 1061, 1060, 1063, 1051, 1068, 1067, 1053, 1034, 1061, 1055, 1056, 1030, 1031, 1032, 1033, 1068, 1067, 1053, 1034, 1061, 1055, 1056, 1035, 1058, 1061, 1060, 1063, 1064, 1066, 1068, 1037, 1053, 1052, 1061, 1054, 1061, 1055, 1056, 1048, 1050, 1038, 1061, 1060, 1063, 1040, 1039, 1068, 1067, 1071, 1070, 1053, 1041, 1048, 1043, 1042, 1045, 1044, 1047, 1046, 1053, 1052, 1061, 1054, 1061, 1055, 1056, 1048, 1050, 1049, 1061, 1060, 1063, 1051, 1068, 1067, 1053, 1052, 1061, 1054, 1061, 1055, 1056, 1057, 1058, 1061, 1060, 1061, 1061, 1062, 1063, 1064, 1066, 1068, 1067, 1069, 1071, 1070, 1078, 1078, 1078, 1078, 1078, 1078, 1078, 1078, 1078, 1077, 1077, 1077, 1077, 1078, 1082, 1078, 1078, 1082, 1088, 1088, 1088, 1088, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 251, 252, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 
269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 336, 337, 338, 339, 340, 341, 342, 344, 345, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 377, 378, 379, 380, 381, 382, 383, 384, 389, 390, 391, 398, 412, 413, 415, 416, 1315, 1315, 1315, 1315, 434, 435, 446, 447, 457, 458, 460, 461, 467, 488, 490, 491, 507, 517, 518, 521, 522, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 1537, 1539, 1546, 1548, 1550, 1552, 1556, 1560, 1562, 1565, 1567, 1569, 1571, 1575, 1580, 1584, 1589, 1591, 1593, 1597, 1599, 1603, 1605, 1607, 1614, 1616, 1618, 1623, 1628, 1630, 1632, 1634, 1638, 1640, 1643, 1645, 1647, 1649, 1652, 1654, 1656, 1658, 1660, 1662, 1666, 1668, 1672, 1674, 1676, 1678, 1683, 1685, 1691, 1694, 1543, 1541, 1687, 1302, 1315, 1697, 1699, 1650, 1315, 420, 421, 1315, 423, 424, 1572, 1576, 1078, 1578, 1581, 1585, 1078, 1587, 1600, 1709, 1711, 1611, 1609, 1620, 1624, 1078, 1715, 1669, 1680, 1687, 1088, 1088, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 1793, 1840, 1792, 396, 397, 1794, 1797, 1796, 1795, 1554, 404, 405, 1799, 407, 1841, 1840, 1800, 1563, 1802, 1803, 418, 419, 422, 1841, 1840, 1804, 428, 1843, 1843, 1805, 432, 433, 436, 1841, 1840, 1806, 440, 1843, 1843, 1807, 444, 445, 448, 1810, 1809, 1808, 1595, 1843, 1843, 1812, 456, 1813, 1815, 1840, 1814, 465, 466, 1816, 1818, 1840, 1817, 472, 1843, 1843, 1819, 476, 477, 1820, 1823, 1822, 1821, 1636, 1843, 1843, 1825, 1641, 1827, 1828, 1841, 1840, 1829, 1650, 1831, 1832, 1835, 1834, 1833, 1664, 1843, 1843, 1837, 505, 1838, 1841, 1840, 1839, 511, 1843, 1843, 1842, 515, 516, 1844, 520, 1845, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 
215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 393, 394, 395, 2052, 399, 400, 401, 402, 403, 406, 408, 409, 410, 411, 414, 417, 1855, 1858, 425, 426, 427, 429, 430, 431, 1704, 437, 438, 439, 441, 442, 443, 1706, 449, 450, 451, 452, 453, 454, 455, 459, 462, 463, 464, 2104, 468, 469, 470, 471, 473, 474, 475, 478, 479, 480, 481, 482, 483, 484, 485, 486, 487, 489, 492, 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 506, 508, 509, 510, 512, 513, 514, 1717, 519, 1719, 523, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 2305, 2310, 2315, 1856, 1859, 2323, 2326, 1705, 2330, 2333, 1707, 2337, 2341, 2345, 2350, 2353, 2357, 2361, 2367, 2373, 2377, 2381, 2384, 1718, 1720, 1695, 2061, 2059, 1852, 1851, 1870, 1869, 1712, 2114, 1713, 1713, 1876, 1876, 1716, 1716, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 2306, 2311, 2316, 2324, 2327, 2331, 2334, 2338, 2342, 2346, 2351, 2354, 2358, 2362, 2368, 2374, 2378, 2382, 2385, 526, 529, 530, 533, 534, 2564, 2563, 2567, 2570, 547, 548, 551, 554, 557, 558, 559, 560, 2584, 2583, 567, 568, 2584, 2583, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 
169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 2834, 2816, 2834, 2817, 2837, 2834, 2818, 2839, 2834, 2830, 537, 538, 2820, 2819, 541, 2822, 2821, 544, 2824, 2823, 2845, 2834, 2825, 2827, 2826, 2829, 2828, 2849, 2851, 2834, 2830, 563, 564, 2832, 2831, 2855, 2834, 2833, 571, 572, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 524, 525, 527, 528, 531, 532, 535, 536, 3083, 539, 540, 542, 543, 545, 546, 549, 550, 552, 553, 555, 556, 561, 562, 3104, 565, 566, 569, 570, 3111, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 3329, 3331, 3333, 3335, 3338, 3340, 3342, 3344, 3346, 3348, 3350, 3353, 3355, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 
146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 3594, 3596, 3592, 3589, 3590, 3584, 3585, 3587, 3591, 3586, 3588, 3593, 3595, 3594, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, 583, 584, 585, 586, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 4097, 4099, 4101, 4103, 4105, 4107, 4109, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 
138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 4353, 4355, 4357, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 4609, 4358, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 4865, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 
143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 5120, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 591, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255}; bool h_Op[]= { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; #define THREADS_PER_BLOCK 256 #define BLOCKS_PER_GRID 1 #define SIZE_OF_IN 768 #define SIZE_OF_AC 5120 __device__ void ac(float *A, const int *B, const int *C, const bool *Op, int n_iter) { int i= blockDim.x * blockIdx.x + threadIdx.x; __shared__ float R[23*THREADS_PER_BLOCK]; const int t= THREADS_PER_BLOCK; __shared__ float final; final=0; R[i + 0*t] = A[i + 0*t]; R[i + 1*t] = A[i + 1*t]; R[i + 2*t] = A[i + 2*t]; __syncthreads(); for (int iter=0; iter< n_iter; iter++) { R[i + 3*t] = Op[i + 0*t] ? R[B[i + 0*t]] * R[C[i + 0*t]] : R[B[i + 0*t]] + R[C[i + 0*t]]; __syncthreads(); R[i + 4*t] = Op[i + 1*t] ? R[B[i + 1*t]] * R[C[i + 1*t]] : R[B[i + 1*t]] + R[C[i + 1*t]]; __syncthreads(); R[i + 5*t] = Op[i + 2*t] ? R[B[i + 2*t]] * R[C[i + 2*t]] : R[B[i + 2*t]] + R[C[i + 2*t]]; __syncthreads(); R[i + 6*t] = Op[i + 3*t] ? R[B[i + 3*t]] * R[C[i + 3*t]] : R[B[i + 3*t]] + R[C[i + 3*t]]; __syncthreads(); R[i + 7*t] = Op[i + 4*t] ? R[B[i + 4*t]] * R[C[i + 4*t]] : R[B[i + 4*t]] + R[C[i + 4*t]]; __syncthreads(); R[i + 8*t] = Op[i + 5*t] ? R[B[i + 5*t]] * R[C[i + 5*t]] : R[B[i + 5*t]] + R[C[i + 5*t]]; __syncthreads(); R[i + 9*t] = Op[i + 6*t] ? R[B[i + 6*t]] * R[C[i + 6*t]] : R[B[i + 6*t]] + R[C[i + 6*t]]; __syncthreads(); R[i + 10*t] = Op[i + 7*t] ? R[B[i + 7*t]] * R[C[i + 7*t]] : R[B[i + 7*t]] + R[C[i + 7*t]]; __syncthreads(); R[i + 11*t] = Op[i + 8*t] ? R[B[i + 8*t]] * R[C[i + 8*t]] : R[B[i + 8*t]] + R[C[i + 8*t]]; __syncthreads(); R[i + 12*t] = Op[i + 9*t] ? R[B[i + 9*t]] * R[C[i + 9*t]] : R[B[i + 9*t]] + R[C[i + 9*t]]; __syncthreads(); R[i + 13*t] = Op[i + 10*t] ? R[B[i + 10*t]] * R[C[i + 10*t]] : R[B[i + 10*t]] + R[C[i + 10*t]]; __syncthreads(); R[i + 14*t] = Op[i + 11*t] ? R[B[i + 11*t]] * R[C[i + 11*t]] : R[B[i + 11*t]] + R[C[i + 11*t]]; __syncthreads(); R[i + 15*t] = Op[i + 12*t] ? R[B[i + 12*t]] * R[C[i + 12*t]] : R[B[i + 12*t]] + R[C[i + 12*t]]; __syncthreads(); R[i + 16*t] = Op[i + 13*t] ? R[B[i + 13*t]] * R[C[i + 13*t]] : R[B[i + 13*t]] + R[C[i + 13*t]]; __syncthreads(); R[i + 17*t] = Op[i + 14*t] ? R[B[i + 14*t]] * R[C[i + 14*t]] : R[B[i + 14*t]] + R[C[i + 14*t]]; __syncthreads(); R[i + 18*t] = Op[i + 15*t] ? R[B[i + 15*t]] * R[C[i + 15*t]] : R[B[i + 15*t]] + R[C[i + 15*t]]; __syncthreads(); R[i + 19*t] = Op[i + 16*t] ? R[B[i + 16*t]] * R[C[i + 16*t]] : R[B[i + 16*t]] + R[C[i + 16*t]]; __syncthreads(); R[i + 20*t] = Op[i + 17*t] ? R[B[i + 17*t]] * R[C[i + 17*t]] : R[B[i + 17*t]] + R[C[i + 17*t]]; __syncthreads(); R[i + 21*t] = Op[i + 18*t] ? R[B[i + 18*t]] * R[C[i + 18*t]] : R[B[i + 18*t]] + R[C[i + 18*t]]; __syncthreads(); R[i + 22*t] = Op[i + 19*t] ? R[B[i + 19*t]] * R[C[i + 19*t]] : R[B[i + 19*t]] + R[C[i + 19*t]]; if (i==0) { final += R[22*t]; } __syncthreads(); } if (i==0) { A[0]= final;} }
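Editor's note: ac() above is a __device__ function with hard-coded launch assumptions (BLOCKS_PER_GRID blocks of THREADS_PER_BLOCK threads), so it cannot be launched from the host as-is. The sketch below shows one way a caller might wrap and drive it; the wrapper name acKernel, the helper launchAc, and all buffer handling are my own illustration, not part of the original file, and it assumes B and C hold valid indices into the 23*THREADS_PER_BLOCK-entry scratch array R and that the host buffers are already populated.

// Hypothetical __global__ wrapper: ac() itself is a __device__ function and
// relies on exactly BLOCKS_PER_GRID (1) block of THREADS_PER_BLOCK (256) threads.
__global__ void acKernel(float *A, const int *B, const int *C, const bool *Op, int n_iter)
{
    ac(A, B, C, Op, n_iter);
}

// Host-side driver sketch (error checking omitted for brevity).
void launchAc(const float *h_A, const int *h_B, const int *h_C, const bool *h_Op,
              int n_iter, float *h_result)
{
    float *d_A; int *d_B, *d_C; bool *d_Op;
    cudaMalloc(&d_A,  SIZE_OF_IN * sizeof(float));   // 3 * THREADS_PER_BLOCK inputs
    cudaMalloc(&d_B,  SIZE_OF_AC * sizeof(int));     // 20 * THREADS_PER_BLOCK left operand indices
    cudaMalloc(&d_C,  SIZE_OF_AC * sizeof(int));     // 20 * THREADS_PER_BLOCK right operand indices
    cudaMalloc(&d_Op, SIZE_OF_AC * sizeof(bool));    // 20 * THREADS_PER_BLOCK op selectors (mul/add)
    cudaMemcpy(d_A,  h_A,  SIZE_OF_IN * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B,  h_B,  SIZE_OF_AC * sizeof(int),   cudaMemcpyHostToDevice);
    cudaMemcpy(d_C,  h_C,  SIZE_OF_AC * sizeof(int),   cudaMemcpyHostToDevice);
    cudaMemcpy(d_Op, h_Op, SIZE_OF_AC * sizeof(bool),  cudaMemcpyHostToDevice);

    acKernel<<<BLOCKS_PER_GRID, THREADS_PER_BLOCK>>>(d_A, d_B, d_C, d_Op, n_iter);

    // ac() leaves its reduced result in A[0].
    cudaMemcpy(h_result, d_A, sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); cudaFree(d_Op);
}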
196f162f0c804b08b03239434ecaafc2dca508cb.hip
// !!! This is a file automatically generated by hipify!!! /* Ray-Triangle Intersection Test Routines */ /* Different optimizations of my and Ben Trumbore's */ /* code from journals of graphics tools (JGT) */ /* http://www.acm.org/jgt/ */ /* by Tomas Moller, May 2000 */ #include <math.h> #include <iostream> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include "RayTriangleIntersect_hip.cuh" #define EPSILON 0.000001 #define CROSS(dest,v1,v2) \ dest[0]=v1[1]*v2[2]-v1[2]*v2[1]; \ dest[1]=v1[2]*v2[0]-v1[0]*v2[2]; \ dest[2]=v1[0]*v2[1]-v1[1]*v2[0]; #define DOT(v1,v2) (v1[0]*v2[0]+v1[1]*v2[1]+v1[2]*v2[2]) #define SUB(dest,v1,v2) \ dest[0]=v1[0]-v2[0]; \ dest[1]=v1[1]-v2[1]; \ dest[2]=v1[2]-v2[2]; namespace Intersection { /* the original jgt code */ int intersect_triangle(float orig[3], float dir[3], float vert0[3], float vert1[3], float vert2[3], float* t, float* u, float* v) { double edge1[3], edge2[3], tvec[3], pvec[3], qvec[3]; double det, inv_det; /* find vectors for two edges sharing vert0 */ SUB(edge1, vert1, vert0); SUB(edge2, vert2, vert0); /* begin calculating determinant - also used to calculate U parameter */ CROSS(pvec, dir, edge2); /* if determinant is near zero, ray lies in plane of triangle */ det = DOT(edge1, pvec); if (det > -EPSILON && det < EPSILON) return 0; inv_det = 1.0 / det; /* calculate distance from vert0 to ray origin */ SUB(tvec, orig, vert0); /* calculate U parameter and test bounds */ *u = DOT(tvec, pvec) * inv_det; if (*u < 0.0 || *u > 1.0) return 0; /* prepare to test V parameter */ CROSS(qvec, tvec, edge1); /* calculate V parameter and test bounds */ *v = DOT(dir, qvec) * inv_det; if (*v < 0.0 || *u + *v > 1.0) return 0; /* calculate t, ray intersects triangle */ *t = DOT(edge2, qvec) * inv_det; return 1; } /* code rewritten to do tests on the sign of the determinant */ /* the division is at the end in the code */ int intersect_triangle1(double orig[3], double dir[3], double vert0[3], double vert1[3], double vert2[3], double* t, double* u, double* v) { double edge1[3], edge2[3], tvec[3], pvec[3], qvec[3]; double det, inv_det; /* find vectors for two edges sharing vert0 */ SUB(edge1, vert1, vert0); SUB(edge2, vert2, vert0); /* begin calculating determinant - also used to calculate U parameter */ CROSS(pvec, dir, edge2); /* if determinant is near zero, ray lies in plane of triangle */ det = DOT(edge1, pvec); if (det > EPSILON) { /* calculate distance from vert0 to ray origin */ SUB(tvec, orig, vert0); /* calculate U parameter and test bounds */ *u = DOT(tvec, pvec); if (*u < 0.0 || *u > det) return 0; /* prepare to test V parameter */ CROSS(qvec, tvec, edge1); /* calculate V parameter and test bounds */ *v = DOT(dir, qvec); if (*v < 0.0 || *u + *v > det) return 0; } else if (det < -EPSILON) { /* calculate distance from vert0 to ray origin */ SUB(tvec, orig, vert0); /* calculate U parameter and test bounds */ *u = DOT(tvec, pvec); /* printf("*u=%f\n",(float)*u); */ /* printf("det=%f\n",det); */ if (*u > 0.0 || *u < det) return 0; /* prepare to test V parameter */ CROSS(qvec, tvec, edge1); /* calculate V parameter and test bounds */ *v = DOT(dir, qvec); if (*v > 0.0 || *u + *v < det) return 0; } else return 0; /* ray is parallell to the plane of the triangle */ inv_det = 1.0 / det; /* calculate t, ray intersects triangle */ *t = DOT(edge2, qvec) * inv_det; (*u) *= inv_det; (*v) *= inv_det; return 1; } /* code rewritten to do tests on the sign of the determinant */ /* the division is before the test of the sign of the det */ int 
intersect_triangle2(double orig[3], double dir[3], double vert0[3], double vert1[3], double vert2[3], double* t, double* u, double* v) { double edge1[3], edge2[3], tvec[3], pvec[3], qvec[3]; double det, inv_det; /* find vectors for two edges sharing vert0 */ SUB(edge1, vert1, vert0); SUB(edge2, vert2, vert0); /* begin calculating determinant - also used to calculate U parameter */ CROSS(pvec, dir, edge2); /* if determinant is near zero, ray lies in plane of triangle */ det = DOT(edge1, pvec); /* calculate distance from vert0 to ray origin */ SUB(tvec, orig, vert0); inv_det = 1.0 / det; if (det > EPSILON) { /* calculate U parameter and test bounds */ *u = DOT(tvec, pvec); if (*u < 0.0 || *u > det) return 0; /* prepare to test V parameter */ CROSS(qvec, tvec, edge1); /* calculate V parameter and test bounds */ *v = DOT(dir, qvec); if (*v < 0.0 || *u + *v > det) return 0; } else if (det < -EPSILON) { /* calculate U parameter and test bounds */ *u = DOT(tvec, pvec); if (*u > 0.0 || *u < det) return 0; /* prepare to test V parameter */ CROSS(qvec, tvec, edge1); /* calculate V parameter and test bounds */ *v = DOT(dir, qvec); if (*v > 0.0 || *u + *v < det) return 0; } else return 0; /* ray is parallell to the plane of the triangle */ /* calculate t, ray intersects triangle */ *t = DOT(edge2, qvec) * inv_det; (*u) *= inv_det; (*v) *= inv_det; return 1; } /* code rewritten to do tests on the sign of the determinant */ /* the division is before the test of the sign of the det */ /* and one CROSS has been moved out from the if-else if-else */ __device__ int intersect_triangle3(float orig[3], float dir[3], float vert0[3], float vert1[3], float vert2[3], float* t, float* u, float* v) { //std::cout << "vert0 = " << vert0[0] << ", " << vert0[1] << ", " << vert0[2] << std::endl; //std::cout << "vert1 = " << vert1[0] << ", " << vert1[1] << ", " << vert1[2] << std::endl; //std::cout << "vert2 = " << vert2[0] << ", " << vert2[1] << ", " << vert2[2] << std::endl; //std::cout << "orig = " << orig[0] << ", " << orig[1] << ", " << orig[2] << std::endl; float edge1[3], edge2[3], tvec[3], pvec[3], qvec[3]; float det, inv_det; /* find vectors for two edges sharing vert0 */ SUB(edge1, vert1, vert0); SUB(edge2, vert2, vert0); /* begin calculating determinant - also used to calculate U parameter */ CROSS(pvec, dir, edge2); /* if determinant is near zero, ray lies in plane of triangle */ det = DOT(edge1, pvec); /* calculate distance from vert0 to ray origin */ SUB(tvec, orig, vert0); inv_det = 1.0 / det; CROSS(qvec, tvec, edge1); if (det > EPSILON) { *u = DOT(tvec, pvec); if (*u < 0.0 || *u > det) return 0; /* calculate V parameter and test bounds */ *v = DOT(dir, qvec); if (*v < 0.0 || *u + *v > det) return 0; } else if (det < -EPSILON) { /* calculate U parameter and test bounds */ *u = DOT(tvec, pvec); if (*u > 0.0 || *u < det) return 0; /* calculate V parameter and test bounds */ *v = DOT(dir, qvec); if (*v > 0.0 || *u + *v < det) return 0; } else return 0; /* ray is parallell to the plane of the triangle */ *t = DOT(edge2, qvec) * inv_det; (*u) *= inv_det; (*v) *= inv_det; if (*t > 0) { return 1; } else { return 0; } } /* code rewritten to do tests on the sign of the determinant */ /* the division is before the test of the sign of the det */ /* and one CROSS has been moved out from the if-else if-else */ int intersect_triangleCPU(float orig[3], float dir[3], float vert0[3], float vert1[3], float vert2[3], float* t, float* u, float* v) { //std::cout << "vert0 = " << vert0[0] << ", " << vert0[1] << ", " << vert0[2] 
<< std::endl; //std::cout << "vert1 = " << vert1[0] << ", " << vert1[1] << ", " << vert1[2] << std::endl; //std::cout << "vert2 = " << vert2[0] << ", " << vert2[1] << ", " << vert2[2] << std::endl; //std::cout << "orig = " << orig[0] << ", " << orig[1] << ", " << orig[2] << std::endl; float edge1[3], edge2[3], tvec[3], pvec[3], qvec[3]; float det, inv_det; /* find vectors for two edges sharing vert0 */ SUB(edge1, vert1, vert0); SUB(edge2, vert2, vert0); /* begin calculating determinant - also used to calculate U parameter */ CROSS(pvec, dir, edge2); /* if determinant is near zero, ray lies in plane of triangle */ det = DOT(edge1, pvec); /* calculate distance from vert0 to ray origin */ SUB(tvec, orig, vert0); inv_det = 1.0 / det; CROSS(qvec, tvec, edge1); if (det > EPSILON) { *u = DOT(tvec, pvec); if (*u < 0.0 || *u > det) return 0; /* calculate V parameter and test bounds */ *v = DOT(dir, qvec); if (*v < 0.0 || *u + *v > det) return 0; } else if (det < -EPSILON) { /* calculate U parameter and test bounds */ *u = DOT(tvec, pvec); if (*u > 0.0 || *u < det) return 0; /* calculate V parameter and test bounds */ *v = DOT(dir, qvec); if (*v > 0.0 || *u + *v < det) return 0; } else return 0; /* ray is parallell to the plane of the triangle */ *t = DOT(edge2, qvec) * inv_det; (*u) *= inv_det; (*v) *= inv_det; if (*t > 0) { return 1; } else { return 0; } } __global__ void intersect_triangle4(float orig[3], float dir[3], int* triangles, float* vertices, int* result, int* numberOfCalculations) //hier bepaalde waarden nog eens uitprinten voor eenvoudig voorbeeld om te kijken of wel degelijk gebeurt wat je verwacht { int tid = threadIdx.x + blockIdx.x * blockDim.x; /*for (int i = 0; i < 24; i++) { printf("vertices = %f\n", vertices[i]); } for (int i = 0; i < 36; i++) { printf("triangles = %d\n", triangles[i]); }*/ /*if (tid == 0) { printf("numberOfCalculations: %d", *numberOfCalculations); }*/ if (tid < *numberOfCalculations) { float vert0[3] = { vertices[triangles[tid * 3] * 3], vertices[triangles[tid * 3] * 3 + 1], vertices[triangles[tid * 3] * 3 + 2] }; float vert1[3] = { vertices[triangles[(tid * 3) + 1] * 3], vertices[triangles[(tid * 3) + 1] * 3 + 1], vertices[triangles[(tid * 3) + 1] * 3 + 2] }; float vert2[3] = { vertices[triangles[(tid * 3) + 2] * 3], vertices[triangles[(tid * 3) + 2] * 3 + 1], vertices[triangles[(tid * 3) + 2] * 3 + 2] }; //printf("vert0 = %f, %f, %f\n", vert0[0], vert0[1], vert0[2]); //printf("vert1 = %f, %f, %f\n", vert1[0], vert1[1], vert1[2]); //printf("vert2 = %f, %f, %f\n", vert2[0], vert2[1], vert2[2]); //float vert0[3] = { 1.0, 0.0, 0.0 }; //float vert1[3] = { 0.0, 1.0, 0.0 }; //float vert2[3] = { 0.0, 0.0, 1.0 }; /*float newDir[3]; newDir[0] = dir[0] - orig[0]; newDir[1] = dir[1] - orig[1]; newDir[2] = dir[2] - orig[2];*/ float edge1[3], edge2[3], tvec[3], pvec[3], qvec[3]; float det, inv_det; float t, u, v; /* find vectors for two edges sharing vert0 */ SUB(edge1, vert1, vert0); SUB(edge2, vert2, vert0); /* begin calculating determinant - also used to calculate U parameter */ //CROSS(pvec, newDir, edge2); CROSS(pvec, dir, edge2); /* if determinant is near zero, ray lies in plane of triangle */ det = DOT(edge1, pvec); /* calculate distance from vert0 to ray origin */ SUB(tvec, orig, vert0); inv_det = 1.0 / det; CROSS(qvec, tvec, edge1); if (det > EPSILON) { u = DOT(tvec, pvec); if (u < 0.0 || u > det) { result[tid] = 0; return; } /* calculate V parameter and test bounds */ //v = DOT(newDir, qvec); v = DOT(dir, qvec); if (v < 0.0 || u + v > det) { result[tid] = 0; 
return; } } else if (det < -EPSILON) { /* calculate U parameter and test bounds */ u = DOT(tvec, pvec); if (u > 0.0 || u < det) { result[tid] = 0; return; } /* calculate V parameter and test bounds */ //v = DOT(newDir, qvec); v = DOT(dir, qvec); if (v > 0.0 || u + v < det) { result[tid] = 0; return; } } else { result[tid] = 0; /* ray is parallell to the plane of the triangle */ return; } t = DOT(edge2, qvec) * inv_det; (u) *= inv_det; (v) *= inv_det; if (t > 0) { result[tid] = 1; return; } else { result[tid] = 0; return; } } } //block per origin __global__ void intersect_triangleGPU(float3* origins, float dir[3], int3* triangles, float3* vertices, int numberOfTriangles, bool* inside, float3* outsideVertices) // , int* intersectionsPerOrigin, float3* outsideVertices { int threadidx = threadIdx.x; float orig[3] = { origins[blockIdx.x].x, origins[blockIdx.x].y, origins[blockIdx.x].z }; __shared__ int intersectionsPerBlock[128]; //!!!Threads per block moet een macht van 2 zijn!!! //zoniet krijg je problemen met lijn 494 (i /= 2) int numberOfIntersections = 0; int punt1; int punt2; int punt3; while (threadidx < numberOfTriangles) { //if (*inside) { punt1 = triangles[threadidx].x; punt2 = triangles[threadidx].y; punt3 = triangles[threadidx].z; float vert0[3] = { vertices[punt1].x, vertices[punt1].y, vertices[punt1].z }; float vert1[3] = { vertices[punt2].x, vertices[punt2].y, vertices[punt2].z }; float vert2[3] = { vertices[punt3].x, vertices[punt3].y, vertices[punt3].z }; float t, u, v; if (intersect_triangle3(orig, dir, vert0, vert1, vert2, &t, &u, &v) == 1) { numberOfIntersections += 1; } threadidx += 128; /*} else { return; }*/ } threadidx = threadIdx.x; intersectionsPerBlock[threadidx] = numberOfIntersections; __syncthreads(); int i = blockDim.x / 2; while (i != 0) { if (threadidx < i) { intersectionsPerBlock[threadidx] += intersectionsPerBlock[threadidx + i]; } __syncthreads(); i /= 2; } if (threadidx == 0) { //intersectionsPerOrigin[blockIdx.x] = intersectionsPerBlock[0]; if (intersectionsPerBlock[0] % 2 == 0) { *inside = false; //return; outsideVertices[blockIdx.x].x = orig[0]; outsideVertices[blockIdx.x].y = orig[1]; outsideVertices[blockIdx.x].z = orig[2]; } } } }
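Editor's note: the intersect_triangleCPU fallback in this file can be sanity-checked entirely on the host. The snippet below is a minimal check I added for illustration, not part of the original file: a unit right triangle in the z = 0 plane hit by a ray pointing straight down, chosen so the expected output (hit, t = 1, u = v = 0.25) is easy to verify by hand. It assumes the Intersection namespace declarations (e.g. from the RayTriangleIntersect header) are visible to the translation unit.

// Minimal host-only check (illustrative values only).
#include <cstdio>

int main()
{
    float orig[3] = { 0.25f, 0.25f, 1.0f };   // ray origin above the triangle
    float dir[3]  = { 0.0f, 0.0f, -1.0f };    // pointing straight down
    float v0[3] = { 0.0f, 0.0f, 0.0f };
    float v1[3] = { 1.0f, 0.0f, 0.0f };
    float v2[3] = { 0.0f, 1.0f, 0.0f };
    float t, u, v;
    int hit = Intersection::intersect_triangleCPU(orig, dir, v0, v1, v2, &t, &u, &v);
    // Expected: hit == 1, t == 1.0, u == 0.25, v == 0.25.
    printf("hit=%d t=%f u=%f v=%f\n", hit, t, u, v);
    return 0;
}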
196f162f0c804b08b03239434ecaafc2dca508cb.cu
/* Ray-Triangle Intersection Test Routines */ /* Different optimizations of my and Ben Trumbore's */ /* code from journals of graphics tools (JGT) */ /* http://www.acm.org/jgt/ */ /* by Tomas Moller, May 2000 */ #include <math.h> #include <iostream> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include "RayTriangleIntersect.cuh" #define EPSILON 0.000001 #define CROSS(dest,v1,v2) \ dest[0]=v1[1]*v2[2]-v1[2]*v2[1]; \ dest[1]=v1[2]*v2[0]-v1[0]*v2[2]; \ dest[2]=v1[0]*v2[1]-v1[1]*v2[0]; #define DOT(v1,v2) (v1[0]*v2[0]+v1[1]*v2[1]+v1[2]*v2[2]) #define SUB(dest,v1,v2) \ dest[0]=v1[0]-v2[0]; \ dest[1]=v1[1]-v2[1]; \ dest[2]=v1[2]-v2[2]; namespace Intersection { /* the original jgt code */ int intersect_triangle(float orig[3], float dir[3], float vert0[3], float vert1[3], float vert2[3], float* t, float* u, float* v) { double edge1[3], edge2[3], tvec[3], pvec[3], qvec[3]; double det, inv_det; /* find vectors for two edges sharing vert0 */ SUB(edge1, vert1, vert0); SUB(edge2, vert2, vert0); /* begin calculating determinant - also used to calculate U parameter */ CROSS(pvec, dir, edge2); /* if determinant is near zero, ray lies in plane of triangle */ det = DOT(edge1, pvec); if (det > -EPSILON && det < EPSILON) return 0; inv_det = 1.0 / det; /* calculate distance from vert0 to ray origin */ SUB(tvec, orig, vert0); /* calculate U parameter and test bounds */ *u = DOT(tvec, pvec) * inv_det; if (*u < 0.0 || *u > 1.0) return 0; /* prepare to test V parameter */ CROSS(qvec, tvec, edge1); /* calculate V parameter and test bounds */ *v = DOT(dir, qvec) * inv_det; if (*v < 0.0 || *u + *v > 1.0) return 0; /* calculate t, ray intersects triangle */ *t = DOT(edge2, qvec) * inv_det; return 1; } /* code rewritten to do tests on the sign of the determinant */ /* the division is at the end in the code */ int intersect_triangle1(double orig[3], double dir[3], double vert0[3], double vert1[3], double vert2[3], double* t, double* u, double* v) { double edge1[3], edge2[3], tvec[3], pvec[3], qvec[3]; double det, inv_det; /* find vectors for two edges sharing vert0 */ SUB(edge1, vert1, vert0); SUB(edge2, vert2, vert0); /* begin calculating determinant - also used to calculate U parameter */ CROSS(pvec, dir, edge2); /* if determinant is near zero, ray lies in plane of triangle */ det = DOT(edge1, pvec); if (det > EPSILON) { /* calculate distance from vert0 to ray origin */ SUB(tvec, orig, vert0); /* calculate U parameter and test bounds */ *u = DOT(tvec, pvec); if (*u < 0.0 || *u > det) return 0; /* prepare to test V parameter */ CROSS(qvec, tvec, edge1); /* calculate V parameter and test bounds */ *v = DOT(dir, qvec); if (*v < 0.0 || *u + *v > det) return 0; } else if (det < -EPSILON) { /* calculate distance from vert0 to ray origin */ SUB(tvec, orig, vert0); /* calculate U parameter and test bounds */ *u = DOT(tvec, pvec); /* printf("*u=%f\n",(float)*u); */ /* printf("det=%f\n",det); */ if (*u > 0.0 || *u < det) return 0; /* prepare to test V parameter */ CROSS(qvec, tvec, edge1); /* calculate V parameter and test bounds */ *v = DOT(dir, qvec); if (*v > 0.0 || *u + *v < det) return 0; } else return 0; /* ray is parallell to the plane of the triangle */ inv_det = 1.0 / det; /* calculate t, ray intersects triangle */ *t = DOT(edge2, qvec) * inv_det; (*u) *= inv_det; (*v) *= inv_det; return 1; } /* code rewritten to do tests on the sign of the determinant */ /* the division is before the test of the sign of the det */ int intersect_triangle2(double orig[3], double dir[3], double vert0[3], double 
vert1[3], double vert2[3], double* t, double* u, double* v) { double edge1[3], edge2[3], tvec[3], pvec[3], qvec[3]; double det, inv_det; /* find vectors for two edges sharing vert0 */ SUB(edge1, vert1, vert0); SUB(edge2, vert2, vert0); /* begin calculating determinant - also used to calculate U parameter */ CROSS(pvec, dir, edge2); /* if determinant is near zero, ray lies in plane of triangle */ det = DOT(edge1, pvec); /* calculate distance from vert0 to ray origin */ SUB(tvec, orig, vert0); inv_det = 1.0 / det; if (det > EPSILON) { /* calculate U parameter and test bounds */ *u = DOT(tvec, pvec); if (*u < 0.0 || *u > det) return 0; /* prepare to test V parameter */ CROSS(qvec, tvec, edge1); /* calculate V parameter and test bounds */ *v = DOT(dir, qvec); if (*v < 0.0 || *u + *v > det) return 0; } else if (det < -EPSILON) { /* calculate U parameter and test bounds */ *u = DOT(tvec, pvec); if (*u > 0.0 || *u < det) return 0; /* prepare to test V parameter */ CROSS(qvec, tvec, edge1); /* calculate V parameter and test bounds */ *v = DOT(dir, qvec); if (*v > 0.0 || *u + *v < det) return 0; } else return 0; /* ray is parallell to the plane of the triangle */ /* calculate t, ray intersects triangle */ *t = DOT(edge2, qvec) * inv_det; (*u) *= inv_det; (*v) *= inv_det; return 1; } /* code rewritten to do tests on the sign of the determinant */ /* the division is before the test of the sign of the det */ /* and one CROSS has been moved out from the if-else if-else */ __device__ int intersect_triangle3(float orig[3], float dir[3], float vert0[3], float vert1[3], float vert2[3], float* t, float* u, float* v) { //std::cout << "vert0 = " << vert0[0] << ", " << vert0[1] << ", " << vert0[2] << std::endl; //std::cout << "vert1 = " << vert1[0] << ", " << vert1[1] << ", " << vert1[2] << std::endl; //std::cout << "vert2 = " << vert2[0] << ", " << vert2[1] << ", " << vert2[2] << std::endl; //std::cout << "orig = " << orig[0] << ", " << orig[1] << ", " << orig[2] << std::endl; float edge1[3], edge2[3], tvec[3], pvec[3], qvec[3]; float det, inv_det; /* find vectors for two edges sharing vert0 */ SUB(edge1, vert1, vert0); SUB(edge2, vert2, vert0); /* begin calculating determinant - also used to calculate U parameter */ CROSS(pvec, dir, edge2); /* if determinant is near zero, ray lies in plane of triangle */ det = DOT(edge1, pvec); /* calculate distance from vert0 to ray origin */ SUB(tvec, orig, vert0); inv_det = 1.0 / det; CROSS(qvec, tvec, edge1); if (det > EPSILON) { *u = DOT(tvec, pvec); if (*u < 0.0 || *u > det) return 0; /* calculate V parameter and test bounds */ *v = DOT(dir, qvec); if (*v < 0.0 || *u + *v > det) return 0; } else if (det < -EPSILON) { /* calculate U parameter and test bounds */ *u = DOT(tvec, pvec); if (*u > 0.0 || *u < det) return 0; /* calculate V parameter and test bounds */ *v = DOT(dir, qvec); if (*v > 0.0 || *u + *v < det) return 0; } else return 0; /* ray is parallell to the plane of the triangle */ *t = DOT(edge2, qvec) * inv_det; (*u) *= inv_det; (*v) *= inv_det; if (*t > 0) { return 1; } else { return 0; } } /* code rewritten to do tests on the sign of the determinant */ /* the division is before the test of the sign of the det */ /* and one CROSS has been moved out from the if-else if-else */ int intersect_triangleCPU(float orig[3], float dir[3], float vert0[3], float vert1[3], float vert2[3], float* t, float* u, float* v) { //std::cout << "vert0 = " << vert0[0] << ", " << vert0[1] << ", " << vert0[2] << std::endl; //std::cout << "vert1 = " << vert1[0] << ", " << vert1[1] << 
", " << vert1[2] << std::endl; //std::cout << "vert2 = " << vert2[0] << ", " << vert2[1] << ", " << vert2[2] << std::endl; //std::cout << "orig = " << orig[0] << ", " << orig[1] << ", " << orig[2] << std::endl; float edge1[3], edge2[3], tvec[3], pvec[3], qvec[3]; float det, inv_det; /* find vectors for two edges sharing vert0 */ SUB(edge1, vert1, vert0); SUB(edge2, vert2, vert0); /* begin calculating determinant - also used to calculate U parameter */ CROSS(pvec, dir, edge2); /* if determinant is near zero, ray lies in plane of triangle */ det = DOT(edge1, pvec); /* calculate distance from vert0 to ray origin */ SUB(tvec, orig, vert0); inv_det = 1.0 / det; CROSS(qvec, tvec, edge1); if (det > EPSILON) { *u = DOT(tvec, pvec); if (*u < 0.0 || *u > det) return 0; /* calculate V parameter and test bounds */ *v = DOT(dir, qvec); if (*v < 0.0 || *u + *v > det) return 0; } else if (det < -EPSILON) { /* calculate U parameter and test bounds */ *u = DOT(tvec, pvec); if (*u > 0.0 || *u < det) return 0; /* calculate V parameter and test bounds */ *v = DOT(dir, qvec); if (*v > 0.0 || *u + *v < det) return 0; } else return 0; /* ray is parallell to the plane of the triangle */ *t = DOT(edge2, qvec) * inv_det; (*u) *= inv_det; (*v) *= inv_det; if (*t > 0) { return 1; } else { return 0; } } __global__ void intersect_triangle4(float orig[3], float dir[3], int* triangles, float* vertices, int* result, int* numberOfCalculations) //hier bepaalde waarden nog eens uitprinten voor eenvoudig voorbeeld om te kijken of wel degelijk gebeurt wat je verwacht { int tid = threadIdx.x + blockIdx.x * blockDim.x; /*for (int i = 0; i < 24; i++) { printf("vertices = %f\n", vertices[i]); } for (int i = 0; i < 36; i++) { printf("triangles = %d\n", triangles[i]); }*/ /*if (tid == 0) { printf("numberOfCalculations: %d", *numberOfCalculations); }*/ if (tid < *numberOfCalculations) { float vert0[3] = { vertices[triangles[tid * 3] * 3], vertices[triangles[tid * 3] * 3 + 1], vertices[triangles[tid * 3] * 3 + 2] }; float vert1[3] = { vertices[triangles[(tid * 3) + 1] * 3], vertices[triangles[(tid * 3) + 1] * 3 + 1], vertices[triangles[(tid * 3) + 1] * 3 + 2] }; float vert2[3] = { vertices[triangles[(tid * 3) + 2] * 3], vertices[triangles[(tid * 3) + 2] * 3 + 1], vertices[triangles[(tid * 3) + 2] * 3 + 2] }; //printf("vert0 = %f, %f, %f\n", vert0[0], vert0[1], vert0[2]); //printf("vert1 = %f, %f, %f\n", vert1[0], vert1[1], vert1[2]); //printf("vert2 = %f, %f, %f\n", vert2[0], vert2[1], vert2[2]); //float vert0[3] = { 1.0, 0.0, 0.0 }; //float vert1[3] = { 0.0, 1.0, 0.0 }; //float vert2[3] = { 0.0, 0.0, 1.0 }; /*float newDir[3]; newDir[0] = dir[0] - orig[0]; newDir[1] = dir[1] - orig[1]; newDir[2] = dir[2] - orig[2];*/ float edge1[3], edge2[3], tvec[3], pvec[3], qvec[3]; float det, inv_det; float t, u, v; /* find vectors for two edges sharing vert0 */ SUB(edge1, vert1, vert0); SUB(edge2, vert2, vert0); /* begin calculating determinant - also used to calculate U parameter */ //CROSS(pvec, newDir, edge2); CROSS(pvec, dir, edge2); /* if determinant is near zero, ray lies in plane of triangle */ det = DOT(edge1, pvec); /* calculate distance from vert0 to ray origin */ SUB(tvec, orig, vert0); inv_det = 1.0 / det; CROSS(qvec, tvec, edge1); if (det > EPSILON) { u = DOT(tvec, pvec); if (u < 0.0 || u > det) { result[tid] = 0; return; } /* calculate V parameter and test bounds */ //v = DOT(newDir, qvec); v = DOT(dir, qvec); if (v < 0.0 || u + v > det) { result[tid] = 0; return; } } else if (det < -EPSILON) { /* calculate U parameter and test 
bounds */ u = DOT(tvec, pvec); if (u > 0.0 || u < det) { result[tid] = 0; return; } /* calculate V parameter and test bounds */ //v = DOT(newDir, qvec); v = DOT(dir, qvec); if (v > 0.0 || u + v < det) { result[tid] = 0; return; } } else { result[tid] = 0; /* ray is parallell to the plane of the triangle */ return; } t = DOT(edge2, qvec) * inv_det; (u) *= inv_det; (v) *= inv_det; if (t > 0) { result[tid] = 1; return; } else { result[tid] = 0; return; } } } //block per origin __global__ void intersect_triangleGPU(float3* origins, float dir[3], int3* triangles, float3* vertices, int numberOfTriangles, bool* inside, float3* outsideVertices) // , int* intersectionsPerOrigin, float3* outsideVertices { int threadidx = threadIdx.x; float orig[3] = { origins[blockIdx.x].x, origins[blockIdx.x].y, origins[blockIdx.x].z }; __shared__ int intersectionsPerBlock[128]; //!!!Threads per block moet een macht van 2 zijn!!! //zoniet krijg je problemen met lijn 494 (i /= 2) int numberOfIntersections = 0; int punt1; int punt2; int punt3; while (threadidx < numberOfTriangles) { //if (*inside) { punt1 = triangles[threadidx].x; punt2 = triangles[threadidx].y; punt3 = triangles[threadidx].z; float vert0[3] = { vertices[punt1].x, vertices[punt1].y, vertices[punt1].z }; float vert1[3] = { vertices[punt2].x, vertices[punt2].y, vertices[punt2].z }; float vert2[3] = { vertices[punt3].x, vertices[punt3].y, vertices[punt3].z }; float t, u, v; if (intersect_triangle3(orig, dir, vert0, vert1, vert2, &t, &u, &v) == 1) { numberOfIntersections += 1; } threadidx += 128; /*} else { return; }*/ } threadidx = threadIdx.x; intersectionsPerBlock[threadidx] = numberOfIntersections; __syncthreads(); int i = blockDim.x / 2; while (i != 0) { if (threadidx < i) { intersectionsPerBlock[threadidx] += intersectionsPerBlock[threadidx + i]; } __syncthreads(); i /= 2; } if (threadidx == 0) { //intersectionsPerOrigin[blockIdx.x] = intersectionsPerBlock[0]; if (intersectionsPerBlock[0] % 2 == 0) { *inside = false; //return; outsideVertices[blockIdx.x].x = orig[0]; outsideVertices[blockIdx.x].y = orig[1]; outsideVertices[blockIdx.x].z = orig[2]; } } } }
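Editor's note: intersect_triangleGPU expects one block per ray origin and exactly 128 threads per block; both the shared intersectionsPerBlock[128] array and the threadidx += 128 stride hard-code that. A possible host-side launch is sketched below. The countParity wrapper name, the buffer sizes, and the choice to initialise *inside to true before the launch are my assumptions about intended use, not something this file specifies.

// Sketch: classify numOrigins points against a closed mesh with one ray each.
// Assumes the declarations from the RayTriangleIntersect header are visible
// and that the h_* host buffers are already filled.
void countParity(const float3 *h_origins, int numOrigins, const float h_dir[3],
                 const int3 *h_triangles, int numTriangles,
                 const float3 *h_vertices, int numVertices)
{
    float3 *d_origins, *d_vertices, *d_outside;
    int3   *d_triangles;
    float  *d_dir;
    bool   *d_inside;
    cudaMalloc(&d_origins,   numOrigins   * sizeof(float3));
    cudaMalloc(&d_vertices,  numVertices  * sizeof(float3));
    cudaMalloc(&d_triangles, numTriangles * sizeof(int3));
    cudaMalloc(&d_outside,   numOrigins   * sizeof(float3));
    cudaMalloc(&d_dir, 3 * sizeof(float));
    cudaMalloc(&d_inside, sizeof(bool));
    cudaMemcpy(d_origins,   h_origins,   numOrigins   * sizeof(float3), cudaMemcpyHostToDevice);
    cudaMemcpy(d_vertices,  h_vertices,  numVertices  * sizeof(float3), cudaMemcpyHostToDevice);
    cudaMemcpy(d_triangles, h_triangles, numTriangles * sizeof(int3),   cudaMemcpyHostToDevice);
    cudaMemcpy(d_dir, h_dir, 3 * sizeof(float), cudaMemcpyHostToDevice);
    bool initInside = true;                          // assumed starting state
    cudaMemcpy(d_inside, &initInside, sizeof(bool), cudaMemcpyHostToDevice);

    // One block per origin, 128 threads per block (required by the kernel).
    Intersection::intersect_triangleGPU<<<numOrigins, 128>>>(
        d_origins, d_dir, d_triangles, d_vertices, numTriangles, d_inside, d_outside);
    cudaDeviceSynchronize();

    bool inside = false;
    cudaMemcpy(&inside, d_inside, sizeof(bool), cudaMemcpyDeviceToHost);
    // Any origin with an even intersection count clears *inside and records
    // itself in d_outside at its block index.

    cudaFree(d_origins); cudaFree(d_vertices); cudaFree(d_triangles);
    cudaFree(d_outside); cudaFree(d_dir); cudaFree(d_inside);
}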
5e7f37253a59e7ce55c049dc33a35464da2a49d9.hip
// !!! This is a file automatically generated by hipify!!! /* ============================================================================ Name : MatrixInverse.cu Author : Yingliang Version : Copyright : Shanghaitech Description : CUDA compute reciprocals ============================================================================ */ #include <iostream> #include <numeric> #include <hip/hip_runtime.h> #include <stdlib.h> #include <ctime> using namespace std; #define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value) #define random(x) (rand()%x) /** * Check the return value of the CUDA runtime API call and exit * the application if the call has failed. */ static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, hipError_t err) { if (err == hipSuccess) return; std::cerr << statement<<" returned " << hipGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl; exit (1); } void printMatrix(double* inputMatrix, const int rows, const int cols) { for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { std::cout << inputMatrix[i * cols + j] << "\t"; } std::cout << std::endl; } } /** * CUDA kernel that computes reciprocal values for a given vector */ __global__ void harnessZeroKernel(double *d_augmentedMatrix, const int rowId1, const int rowId2, const int size) { __shared__ double blockR1[512]; __shared__ double blockR2[512]; const int tIdx = threadIdx.x; const int bIdx = blockIdx.x; const int colI = blockIdx.x * blockDim.x + threadIdx.x; if (colI < size * 2) { blockR1[tIdx] = d_augmentedMatrix[rowId1 * 2 * size + blockDim.x * bIdx + tIdx]; blockR2[tIdx] = d_augmentedMatrix[rowId2 * 2 * size + blockDim.x * bIdx + tIdx]; __syncthreads(); d_augmentedMatrix[rowId1 * 2 * size + blockDim.x * bIdx + tIdx] = blockR1[tIdx] + blockR2[tIdx]; } } __global__ void computeRowsKernel(double *d_augmentedMatrix, const int rowId, const int size) { __shared__ double blockR[512]; __shared__ double Aii; const int tIdx = threadIdx.x; const int bIdx = blockIdx.x; const int colI = blockIdx.x * blockDim.x + threadIdx.x; if (colI < size * 2) { blockR[tIdx] = d_augmentedMatrix[rowId * 2 * size + blockDim.x * bIdx + tIdx]; Aii = d_augmentedMatrix[rowId * 2 * size + rowId]; __syncthreads(); blockR[tIdx] = blockR[tIdx] / Aii; d_augmentedMatrix[rowId * 2 * size + blockDim.x * bIdx + tIdx] = blockR[tIdx]; } } __global__ void computeColsKernel(double *d_augmentedMatrix, const int colId, const int size) { __shared__ double blockC[16][16]; // which col need to be zero __shared__ double blockCCurent[16][16]; // which col is the current col __shared__ double ARow[16]; // the pivot row const int tIdx = threadIdx.x; const int tIdy = threadIdx.y; const int rowI = blockIdx.y * blockDim.y + threadIdx.y; const int colI = blockIdx.x * blockDim.x + threadIdx.x; if (colI < size * 2 && rowI < size) { blockC[tIdy][tIdx] = d_augmentedMatrix[rowI * size * 2 + colId]; if (blockC[tIdy][tIdx] != 0) { blockCCurent[tIdy][tIdx] = d_augmentedMatrix[rowI * size * 2 + colI]; ARow[tIdx] = d_augmentedMatrix[colId * size * 2 + colI]; __syncthreads(); if (rowI != colId) { // current row can't sub by current row blockCCurent[tIdy][tIdx] = blockCCurent[tIdy][tIdx] - blockC[tIdy][tIdx] * ARow[tIdx]; } d_augmentedMatrix[rowI * size * 2 + colI] = blockCCurent[tIdy][tIdx]; //d_augmentedMatrix[rowI * size * 2 + colI] = ARow[tIdx]; } } } __global__ void augmentMatrixKernel(double *d_augmentedMatrix, double *d_inputMatrix, const int rows, const int cols) { const int rowI = blockIdx.y * 
blockDim.y + threadIdx.y; const int colI = blockIdx.x * blockDim.x + threadIdx.x; if (colI < cols && rowI < rows) { // initialize augmentedMatrix if (colI < cols / 2) { d_augmentedMatrix[rowI * cols + colI] = d_inputMatrix[rowI * cols / 2 + colI]; } else if (colI - cols / 2 == rowI) { d_augmentedMatrix[rowI * cols + colI] = 1; } else { d_augmentedMatrix[rowI * cols + colI] = 0; } } } __global__ void getInverseMatrixKernel(double *d_augmentedMatrix, double *d_inverseMatrix, const int rows, const int cols) { const int rowI = blockIdx.y * blockDim.y + threadIdx.y; const int colI = blockIdx.x * blockDim.x + threadIdx.x; if (colI < cols / 2 && rowI < rows) { // initialize augmentedMatrix d_inverseMatrix[rowI * cols / 2 + colI] = d_augmentedMatrix[rowI * cols + colI + cols / 2]; } } /** * Host function that copies the data and launches the work on GPU */ double *gpuMatrixInverse(double *inputMatrix, const int rows, const int cols) { double *h_inverseMatrix; //double *h_augmentedMatrix; double *d_inputMatrix; double *d_inverseMatrix; double *d_augmentedMatrix; const int length = rows * cols; const int size = rows; //printMatrix(inputMatrix, rows, cols); cout << endl; // initialization h_inverseMatrix = (double *)malloc(length * sizeof(double)); //h_augmentedMatrix = (double *)malloc(length * 2 * sizeof(double)); CUDA_CHECK_RETURN(hipMalloc((void **)&d_augmentedMatrix, sizeof(double) * length * 2)); CUDA_CHECK_RETURN(hipMalloc((void **)&d_inputMatrix, sizeof(double) * length)); CUDA_CHECK_RETURN(hipMalloc((void **)&d_inverseMatrix, sizeof(double) * length)); CUDA_CHECK_RETURN(hipMemcpy(d_inputMatrix, inputMatrix, sizeof(double) * length, hipMemcpyHostToDevice)); dim3 blockSize1(16, 16); dim3 gridSize1(cols * 2.0 / blockSize1.x + 1, rows * 1.0 / blockSize1.y + 1); hipLaunchKernelGGL(( augmentMatrixKernel), dim3(gridSize1), dim3(blockSize1), 0, 0, d_augmentedMatrix, d_inputMatrix, rows, cols * 2); hipDeviceSynchronize(); int i = 0; while (i < size) { if (inputMatrix[i * size + i] != 0) { dim3 blockSize2(256); dim3 gridSize2(cols * 2.0 / blockSize2.x + 1, 1); hipLaunchKernelGGL(( computeRowsKernel), dim3(gridSize2), dim3(blockSize2), 0, 0, d_augmentedMatrix, i, size); hipDeviceSynchronize(); } else { int nonZeroRowIndex = 0; for (int j = 0; j < size; j++) { if (inputMatrix[j * size + i] != 0) { nonZeroRowIndex = j; break; } } dim3 blockSize3(256); dim3 gridSize3(cols * 2.0 / blockSize3.x + 1, 1); hipLaunchKernelGGL(( harnessZeroKernel), dim3(gridSize3), dim3(blockSize3), 0, 0, d_augmentedMatrix, i, nonZeroRowIndex, size); hipDeviceSynchronize(); dim3 blockSize4(256); dim3 gridSize4(cols * 2.0 / blockSize4.x + 1, 1); hipLaunchKernelGGL(( computeRowsKernel), dim3(gridSize4), dim3(blockSize4), 0, 0, d_augmentedMatrix, i, size); hipDeviceSynchronize(); } dim3 blockSize5(16, 16); dim3 gridSize5(cols * 2.0 / blockSize5.x + 1, rows * 1.0 / blockSize5.y + 1); hipLaunchKernelGGL(( computeColsKernel), dim3(gridSize5), dim3(blockSize5), 0, 0, d_augmentedMatrix, i, size); hipDeviceSynchronize(); i++; } dim3 blockSize6(16, 16); dim3 gridSize6(cols * 2.0 / blockSize6.x + 1, rows * 1.0 / blockSize6.y + 1); hipLaunchKernelGGL(( getInverseMatrixKernel), dim3(gridSize1), dim3(blockSize1), 0, 0, d_augmentedMatrix, d_inverseMatrix, rows, cols * 2); CUDA_CHECK_RETURN(hipMemcpy(h_inverseMatrix, d_inverseMatrix, sizeof(double) * length, hipMemcpyDeviceToHost)); //CUDA_CHECK_RETURN(hipMemcpy(h_augmentedMatrix, d_augmentedMatrix, sizeof(double) * length * 2, hipMemcpyDeviceToHost)); 
CUDA_CHECK_RETURN(hipFree(d_augmentedMatrix)); CUDA_CHECK_RETURN(hipFree(d_inverseMatrix)); CUDA_CHECK_RETURN(hipFree(d_inputMatrix)); return h_inverseMatrix; } double *createTest(const int rows, const int cols) { double *data = (double *)malloc(rows * cols * sizeof(double)); for (int i = 0; i < rows * cols; i++) { data[i] = random(100); } return data; } int main(void) { const int rows = 1000; const int cols = 1000; double *testMatrix = createTest(rows, cols); double *inverseMatrixGPU; // GPU code clock_t start1,end1; start1 = clock(); inverseMatrixGPU = gpuMatrixInverse(testMatrix, rows, cols); end1 = clock(); double dur1 = (double)(end1 - start1); cout << "\n running time on GPU is " << dur1 / CLOCKS_PER_SEC << " secs\n" << endl; if (rows < 20) { printMatrix(inverseMatrixGPU, rows, cols); double *resultMatrix = (double *)malloc(cols * rows * sizeof(double)); for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { resultMatrix[i * cols + j] = 0; } } for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { for (int k = 0; k < cols; k++) { resultMatrix[i * cols + j] += testMatrix[i * cols + k] * inverseMatrixGPU[k * cols + j]; } } } cout << "\nTest the result from GPU\n" << endl; printMatrix(resultMatrix, rows, cols); } /* Free memory */ delete[] inverseMatrixGPU; return 0; }
5e7f37253a59e7ce55c049dc33a35464da2a49d9.cu
/* ============================================================================ Name : MatrixInverse.cu Author : Yingliang Version : Copyright : Shanghaitech Description : CUDA compute reciprocals ============================================================================ */ #include <iostream> #include <numeric> #include <cuda_runtime.h> #include <stdlib.h> #include <ctime> using namespace std; #define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value) #define random(x) (rand()%x) /** * Check the return value of the CUDA runtime API call and exit * the application if the call has failed. */ static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err) { if (err == cudaSuccess) return; std::cerr << statement<<" returned " << cudaGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl; exit (1); } void printMatrix(double* inputMatrix, const int rows, const int cols) { for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { std::cout << inputMatrix[i * cols + j] << "\t"; } std::cout << std::endl; } } /** * CUDA kernel that computes reciprocal values for a given vector */ __global__ void harnessZeroKernel(double *d_augmentedMatrix, const int rowId1, const int rowId2, const int size) { __shared__ double blockR1[512]; __shared__ double blockR2[512]; const int tIdx = threadIdx.x; const int bIdx = blockIdx.x; const int colI = blockIdx.x * blockDim.x + threadIdx.x; if (colI < size * 2) { blockR1[tIdx] = d_augmentedMatrix[rowId1 * 2 * size + blockDim.x * bIdx + tIdx]; blockR2[tIdx] = d_augmentedMatrix[rowId2 * 2 * size + blockDim.x * bIdx + tIdx]; __syncthreads(); d_augmentedMatrix[rowId1 * 2 * size + blockDim.x * bIdx + tIdx] = blockR1[tIdx] + blockR2[tIdx]; } } __global__ void computeRowsKernel(double *d_augmentedMatrix, const int rowId, const int size) { __shared__ double blockR[512]; __shared__ double Aii; const int tIdx = threadIdx.x; const int bIdx = blockIdx.x; const int colI = blockIdx.x * blockDim.x + threadIdx.x; if (colI < size * 2) { blockR[tIdx] = d_augmentedMatrix[rowId * 2 * size + blockDim.x * bIdx + tIdx]; Aii = d_augmentedMatrix[rowId * 2 * size + rowId]; __syncthreads(); blockR[tIdx] = blockR[tIdx] / Aii; d_augmentedMatrix[rowId * 2 * size + blockDim.x * bIdx + tIdx] = blockR[tIdx]; } } __global__ void computeColsKernel(double *d_augmentedMatrix, const int colId, const int size) { __shared__ double blockC[16][16]; // which col need to be zero __shared__ double blockCCurent[16][16]; // which col is the current col __shared__ double ARow[16]; // the pivot row const int tIdx = threadIdx.x; const int tIdy = threadIdx.y; const int rowI = blockIdx.y * blockDim.y + threadIdx.y; const int colI = blockIdx.x * blockDim.x + threadIdx.x; if (colI < size * 2 && rowI < size) { blockC[tIdy][tIdx] = d_augmentedMatrix[rowI * size * 2 + colId]; if (blockC[tIdy][tIdx] != 0) { blockCCurent[tIdy][tIdx] = d_augmentedMatrix[rowI * size * 2 + colI]; ARow[tIdx] = d_augmentedMatrix[colId * size * 2 + colI]; __syncthreads(); if (rowI != colId) { // current row can't sub by current row blockCCurent[tIdy][tIdx] = blockCCurent[tIdy][tIdx] - blockC[tIdy][tIdx] * ARow[tIdx]; } d_augmentedMatrix[rowI * size * 2 + colI] = blockCCurent[tIdy][tIdx]; //d_augmentedMatrix[rowI * size * 2 + colI] = ARow[tIdx]; } } } __global__ void augmentMatrixKernel(double *d_augmentedMatrix, double *d_inputMatrix, const int rows, const int cols) { const int rowI = blockIdx.y * blockDim.y + threadIdx.y; const int colI = blockIdx.x * 
blockDim.x + threadIdx.x; if (colI < cols && rowI < rows) { // initialize augmentedMatrix if (colI < cols / 2) { d_augmentedMatrix[rowI * cols + colI] = d_inputMatrix[rowI * cols / 2 + colI]; } else if (colI - cols / 2 == rowI) { d_augmentedMatrix[rowI * cols + colI] = 1; } else { d_augmentedMatrix[rowI * cols + colI] = 0; } } } __global__ void getInverseMatrixKernel(double *d_augmentedMatrix, double *d_inverseMatrix, const int rows, const int cols) { const int rowI = blockIdx.y * blockDim.y + threadIdx.y; const int colI = blockIdx.x * blockDim.x + threadIdx.x; if (colI < cols / 2 && rowI < rows) { // initialize augmentedMatrix d_inverseMatrix[rowI * cols / 2 + colI] = d_augmentedMatrix[rowI * cols + colI + cols / 2]; } } /** * Host function that copies the data and launches the work on GPU */ double *gpuMatrixInverse(double *inputMatrix, const int rows, const int cols) { double *h_inverseMatrix; //double *h_augmentedMatrix; double *d_inputMatrix; double *d_inverseMatrix; double *d_augmentedMatrix; const int length = rows * cols; const int size = rows; //printMatrix(inputMatrix, rows, cols); cout << endl; // initialization h_inverseMatrix = (double *)malloc(length * sizeof(double)); //h_augmentedMatrix = (double *)malloc(length * 2 * sizeof(double)); CUDA_CHECK_RETURN(cudaMalloc((void **)&d_augmentedMatrix, sizeof(double) * length * 2)); CUDA_CHECK_RETURN(cudaMalloc((void **)&d_inputMatrix, sizeof(double) * length)); CUDA_CHECK_RETURN(cudaMalloc((void **)&d_inverseMatrix, sizeof(double) * length)); CUDA_CHECK_RETURN(cudaMemcpy(d_inputMatrix, inputMatrix, sizeof(double) * length, cudaMemcpyHostToDevice)); dim3 blockSize1(16, 16); dim3 gridSize1(cols * 2.0 / blockSize1.x + 1, rows * 1.0 / blockSize1.y + 1); augmentMatrixKernel<<<gridSize1, blockSize1>>>(d_augmentedMatrix, d_inputMatrix, rows, cols * 2); cudaDeviceSynchronize(); int i = 0; while (i < size) { if (inputMatrix[i * size + i] != 0) { dim3 blockSize2(256); dim3 gridSize2(cols * 2.0 / blockSize2.x + 1, 1); computeRowsKernel<<<gridSize2, blockSize2>>>(d_augmentedMatrix, i, size); cudaDeviceSynchronize(); } else { int nonZeroRowIndex = 0; for (int j = 0; j < size; j++) { if (inputMatrix[j * size + i] != 0) { nonZeroRowIndex = j; break; } } dim3 blockSize3(256); dim3 gridSize3(cols * 2.0 / blockSize3.x + 1, 1); harnessZeroKernel<<<gridSize3, blockSize3>>>(d_augmentedMatrix, i, nonZeroRowIndex, size); cudaDeviceSynchronize(); dim3 blockSize4(256); dim3 gridSize4(cols * 2.0 / blockSize4.x + 1, 1); computeRowsKernel<<<gridSize4, blockSize4>>>(d_augmentedMatrix, i, size); cudaDeviceSynchronize(); } dim3 blockSize5(16, 16); dim3 gridSize5(cols * 2.0 / blockSize5.x + 1, rows * 1.0 / blockSize5.y + 1); computeColsKernel<<<gridSize5, blockSize5>>>(d_augmentedMatrix, i, size); cudaDeviceSynchronize(); i++; } dim3 blockSize6(16, 16); dim3 gridSize6(cols * 2.0 / blockSize6.x + 1, rows * 1.0 / blockSize6.y + 1); getInverseMatrixKernel<<<gridSize1, blockSize1>>>(d_augmentedMatrix, d_inverseMatrix, rows, cols * 2); CUDA_CHECK_RETURN(cudaMemcpy(h_inverseMatrix, d_inverseMatrix, sizeof(double) * length, cudaMemcpyDeviceToHost)); //CUDA_CHECK_RETURN(cudaMemcpy(h_augmentedMatrix, d_augmentedMatrix, sizeof(double) * length * 2, cudaMemcpyDeviceToHost)); CUDA_CHECK_RETURN(cudaFree(d_augmentedMatrix)); CUDA_CHECK_RETURN(cudaFree(d_inverseMatrix)); CUDA_CHECK_RETURN(cudaFree(d_inputMatrix)); return h_inverseMatrix; } double *createTest(const int rows, const int cols) { double *data = (double *)malloc(rows * cols * sizeof(double)); for (int i = 0; i < rows * 
cols; i++) { data[i] = random(100); } return data; } int main(void) { const int rows = 1000; const int cols = 1000; double *testMatrix = createTest(rows, cols); double *inverseMatrixGPU; // GPU code clock_t start1,end1; start1 = clock(); inverseMatrixGPU = gpuMatrixInverse(testMatrix, rows, cols); end1 = clock(); double dur1 = (double)(end1 - start1); cout << "\n running time on GPU is " << dur1 / CLOCKS_PER_SEC << " secs!\n" << endl; if (rows < 20) { printMatrix(inverseMatrixGPU, rows, cols); double *resultMatrix = (double *)malloc(cols * rows * sizeof(double)); for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { resultMatrix[i * cols + j] = 0; } } for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { for (int k = 0; k < cols; k++) { resultMatrix[i * cols + j] += testMatrix[i * cols + k] * inverseMatrixGPU[k * cols + j]; } } } cout << "\nTest the result from GPU\n" << endl; printMatrix(resultMatrix, rows, cols); } /* Free memory */ delete[] inverseMatrixGPU; return 0; }
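Editor's note: in both versions of this file, CUDA_CHECK_RETURN guards the memory-management calls, but none of the kernel launches are checked, so a bad launch configuration or an in-kernel fault would only surface at the next checked API call. A small pattern like the following (my suggestion, reusing the file's own macro and its first launch as the example) can be dropped in after each launch.

// Pattern sketch, reusing the CUDA_CHECK_RETURN macro defined in this file:
augmentMatrixKernel<<<gridSize1, blockSize1>>>(d_augmentedMatrix, d_inputMatrix, rows, cols * 2);
CUDA_CHECK_RETURN(cudaGetLastError());       // launch-configuration errors
CUDA_CHECK_RETURN(cudaDeviceSynchronize());  // faults raised while the kernel runs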
3020e3c0335f688e984395728b446ec4c633c840.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef USE_CUDNN #include <vector> #include "caffe/layers/cudnn_cmp_conv_layer.hpp" namespace caffe { __global__ void sync_cmp_conv_groups() { } template <typename Dtype> void CuDNNCmpConvolutionLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Dtype* muweight = this->blobs_[0]->mutable_cpu_data(); int count = this->blobs_[0]->count(); for (int i = 0; i < count; ++i) { muweight[i] *= this->masks_[i] ; } if(this->quantize_term_) { Dtype* muweight = this->blobs_[0]->mutable_cpu_data(); int count = this->blobs_[0]->count(); for (int i = 0; i < count; ++i) { if (this->masks_[i]) muweight[i] = this->centroids_[this->indices_[i]]; } } const Dtype* weight = this->blobs_[0]->gpu_data(); for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); // Forward through cuDNN in parallel over groups. for (int g = 0; g < this->group_; g++) { // Filters. CUDNN_CHECK(cudnnConvolutionForward(handle_[g], cudnn::dataType<Dtype>::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, filter_desc_, weight + this->weight_offset_ * g, conv_descs_[i], fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i], cudnn::dataType<Dtype>::zero, top_descs_[i], top_data + top_offset_ * g)); // Bias. if (this->bias_term_) { const Dtype* bias_data = this->blobs_[1]->gpu_data(); CUDNN_CHECK(cudnnAddTensor(handle_[g], cudnn::dataType<Dtype>::one, bias_desc_, bias_data + bias_offset_ * g, cudnn::dataType<Dtype>::one, top_descs_[i], top_data + top_offset_ * g)); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sync_cmp_conv_groups), dim3(1), dim3(1), 0, 0, ); } } template <typename Dtype> void CuDNNCmpConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { //LOG(INFO) << "CONV BACKWARD" ; const Dtype* weight = NULL; Dtype* weight_diff = NULL; int count = 0 ; if (this->param_propagate_down_[0]) { weight = this->blobs_[0]->gpu_data(); weight_diff = this->blobs_[0]->mutable_gpu_diff(); count = this->blobs_[0]->count(); } Dtype* bias_diff = NULL; if (this->bias_term_ && this->param_propagate_down_[1]) { bias_diff = this->blobs_[1]->mutable_gpu_diff(); } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); // Backward through cuDNN in parallel over groups and gradients. for (int g = 0; g < this->group_; g++) { // Gradient w.r.t. bias. if (this->bias_term_ && this->param_propagate_down_[1]) { CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g], cudnn::dataType<Dtype>::one, top_descs_[i], top_diff + top_offset_ * g, cudnn::dataType<Dtype>::one, bias_desc_, bias_diff + bias_offset_ * g)); } // Gradient w.r.t. weights. 
if (this->param_propagate_down_[0]) { const Dtype* bottom_data = bottom[i]->gpu_data(); CUDNN_CHECK(cudnnConvolutionBackwardFilter( handle_[1*this->group_ + g], cudnn::dataType<Dtype>::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], bwd_filter_algo_[i], workspace[1*this->group_ + g], workspace_bwd_filter_sizes_[i], cudnn::dataType<Dtype>::one, filter_desc_, weight_diff + this->weight_offset_ * g)); Dtype* cpu_weight_diff = this->blobs_[0]->mutable_cpu_diff(); for(int j = 0; j < count; ++j) { cpu_weight_diff[j] *= this->masks_[j]; } if(this->quantize_term_) { vector<Dtype> tmpDiff(this->class_num_); vector<int> freq(this->class_num_); for (int j = 0; j < count; ++j) { if (this->masks_[j]) { tmpDiff[this->indices_[j]] += cpu_weight_diff[j]; freq[this->indices_[j]]++; } } for(int j = 0; j < count; ++j) { if (this->masks_[j]) { cpu_weight_diff[j] = tmpDiff[this->indices_[j]]/freq[this->indices_[j]] ; } } } } // Gradient w.r.t. bottom data. if (propagate_down[i]) { if (weight == NULL) { weight = this->blobs_[0]->gpu_data(); } Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); CUDNN_CHECK(cudnnConvolutionBackwardData( handle_[2*this->group_ + g], cudnn::dataType<Dtype>::one, filter_desc_, weight + this->weight_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], bwd_data_algo_[i], workspace[2*this->group_ + g], workspace_bwd_data_sizes_[i], cudnn::dataType<Dtype>::zero, bottom_descs_[i], bottom_diff + bottom_offset_ * g)); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sync_cmp_conv_groups), dim3(1), dim3(1), 0, 0, ); } } INSTANTIATE_LAYER_GPU_FUNCS(CuDNNCmpConvolutionLayer); } // namespace caffe #endif
3020e3c0335f688e984395728b446ec4c633c840.cu
#ifdef USE_CUDNN #include <vector> #include "caffe/layers/cudnn_cmp_conv_layer.hpp" namespace caffe { __global__ void sync_cmp_conv_groups() { } template <typename Dtype> void CuDNNCmpConvolutionLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Dtype* muweight = this->blobs_[0]->mutable_cpu_data(); int count = this->blobs_[0]->count(); for (int i = 0; i < count; ++i) { muweight[i] *= this->masks_[i] ; } if(this->quantize_term_) { Dtype* muweight = this->blobs_[0]->mutable_cpu_data(); int count = this->blobs_[0]->count(); for (int i = 0; i < count; ++i) { if (this->masks_[i]) muweight[i] = this->centroids_[this->indices_[i]]; } } const Dtype* weight = this->blobs_[0]->gpu_data(); for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); // Forward through cuDNN in parallel over groups. for (int g = 0; g < this->group_; g++) { // Filters. CUDNN_CHECK(cudnnConvolutionForward(handle_[g], cudnn::dataType<Dtype>::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, filter_desc_, weight + this->weight_offset_ * g, conv_descs_[i], fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i], cudnn::dataType<Dtype>::zero, top_descs_[i], top_data + top_offset_ * g)); // Bias. if (this->bias_term_) { const Dtype* bias_data = this->blobs_[1]->gpu_data(); CUDNN_CHECK(cudnnAddTensor(handle_[g], cudnn::dataType<Dtype>::one, bias_desc_, bias_data + bias_offset_ * g, cudnn::dataType<Dtype>::one, top_descs_[i], top_data + top_offset_ * g)); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) sync_cmp_conv_groups<<<1, 1>>>(); } } template <typename Dtype> void CuDNNCmpConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { //LOG(INFO) << "CONV BACKWARD" ; const Dtype* weight = NULL; Dtype* weight_diff = NULL; int count = 0 ; if (this->param_propagate_down_[0]) { weight = this->blobs_[0]->gpu_data(); weight_diff = this->blobs_[0]->mutable_gpu_diff(); count = this->blobs_[0]->count(); } Dtype* bias_diff = NULL; if (this->bias_term_ && this->param_propagate_down_[1]) { bias_diff = this->blobs_[1]->mutable_gpu_diff(); } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); // Backward through cuDNN in parallel over groups and gradients. for (int g = 0; g < this->group_; g++) { // Gradient w.r.t. bias. if (this->bias_term_ && this->param_propagate_down_[1]) { CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g], cudnn::dataType<Dtype>::one, top_descs_[i], top_diff + top_offset_ * g, cudnn::dataType<Dtype>::one, bias_desc_, bias_diff + bias_offset_ * g)); } // Gradient w.r.t. weights. 
if (this->param_propagate_down_[0]) { const Dtype* bottom_data = bottom[i]->gpu_data(); CUDNN_CHECK(cudnnConvolutionBackwardFilter( handle_[1*this->group_ + g], cudnn::dataType<Dtype>::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], bwd_filter_algo_[i], workspace[1*this->group_ + g], workspace_bwd_filter_sizes_[i], cudnn::dataType<Dtype>::one, filter_desc_, weight_diff + this->weight_offset_ * g)); Dtype* cpu_weight_diff = this->blobs_[0]->mutable_cpu_diff(); for(int j = 0; j < count; ++j) { cpu_weight_diff[j] *= this->masks_[j]; } if(this->quantize_term_) { vector<Dtype> tmpDiff(this->class_num_); vector<int> freq(this->class_num_); for (int j = 0; j < count; ++j) { if (this->masks_[j]) { tmpDiff[this->indices_[j]] += cpu_weight_diff[j]; freq[this->indices_[j]]++; } } for(int j = 0; j < count; ++j) { if (this->masks_[j]) { cpu_weight_diff[j] = tmpDiff[this->indices_[j]]/freq[this->indices_[j]] ; } } } } // Gradient w.r.t. bottom data. if (propagate_down[i]) { if (weight == NULL) { weight = this->blobs_[0]->gpu_data(); } Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); CUDNN_CHECK(cudnnConvolutionBackwardData( handle_[2*this->group_ + g], cudnn::dataType<Dtype>::one, filter_desc_, weight + this->weight_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], bwd_data_algo_[i], workspace[2*this->group_ + g], workspace_bwd_data_sizes_[i], cudnn::dataType<Dtype>::zero, bottom_descs_[i], bottom_diff + bottom_offset_ * g)); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) sync_cmp_conv_groups<<<1, 1>>>(); } } INSTANTIATE_LAYER_GPU_FUNCS(CuDNNCmpConvolutionLayer); } // namespace caffe #endif
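Editor's note: the sync_cmp_conv_groups<<<1, 1>>>() launches in this layer rely on legacy default-stream semantics: an empty kernel issued into the null stream waits for work previously queued on other blocking streams, which is what serializes the per-group cuDNN streams, exactly as the in-code comments say. Below is a stripped-down, framework-free illustration of that pattern (kernel and buffer names are mine); it only behaves this way when the code is not built with --default-stream per-thread and the streams are created without cudaStreamNonBlocking.

#include <cuda_runtime.h>

__global__ void emptyKernel() {}                           // used purely as a barrier
__global__ void work(float *p) { p[threadIdx.x] += 1.0f; }

int main()
{
    const int n = 256;
    float *d;
    cudaMalloc(&d, 2 * n * sizeof(float));
    cudaMemset(d, 0, 2 * n * sizeof(float));

    cudaStream_t s0, s1;
    cudaStreamCreate(&s0);                                 // blocking (legacy) streams
    cudaStreamCreate(&s1);

    work<<<1, n, 0, s0>>>(d);                              // independent work on two streams,
    work<<<1, n, 0, s1>>>(d + n);                          // touching disjoint halves of d
    emptyKernel<<<1, 1>>>();                               // null-stream launch: waits for both,
                                                           // and later null-stream work waits on it
    cudaDeviceSynchronize();
    cudaStreamDestroy(s0);
    cudaStreamDestroy(s1);
    cudaFree(d);
    return 0;
}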
5ac3c969945f6502c40243447b0ec3f64c7b51d7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void transposeNaiveCol(float *out, float *in, const int nx, const int ny) { unsigned int ix = blockDim.x * blockIdx.x + threadIdx.x; unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y; if (ix < nx && iy < ny) { out[iy * nx + ix] = in[ix * ny + iy]; } }
5ac3c969945f6502c40243447b0ec3f64c7b51d7.cu
#include "includes.h" __global__ void transposeNaiveCol(float *out, float *in, const int nx, const int ny) { unsigned int ix = blockDim.x * blockIdx.x + threadIdx.x; unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y; if (ix < nx && iy < ny) { out[iy * nx + ix] = in[ix * ny + iy]; } }
dd66b572261eecaf5d023a10ff6a1aea81a67ff5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef USE_ROCM #include "dragon/core/context_cuda.h" #include "dragon/utils/math_functions.h" #include "dragon/utils/op_kernels.h" namespace dragon { namespace kernels { namespace { #define LDG(x, i) convert::To<AccT>(__ldg(x + i)) template <typename T, typename AccT> __global__ void _MaxPool2dNCHW( const int nthreads, const int C, const int H, const int W, const int out_h, const int out_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* x, int* mask, T* y) { CUDA_1D_KERNEL_LOOP(yi, nthreads) { const int w_out = yi % out_w; const int h_out = (yi / out_w) % out_h; const int c = (yi / out_w / out_h) % C; const int n = yi / out_w / out_h / C; int hstart = h_out * stride_h - pad_h; int wstart = w_out * stride_w - pad_w; const int hend = min(hstart + kernel_h, H); const int wend = min(wstart + kernel_w, W); hstart = max(hstart, 0); wstart = max(wstart, 0); const T* offset_x = x + (n * C + c) * H * W; int mask_val = -1; AccT val = AccT(-FLT_MAX); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (LDG(offset_x, h * W + w) > val) { mask_val = h * W + w; val = LDG(offset_x, mask_val); } } } y[yi] = convert::To<T>(val); mask[yi] = mask_val; } } template <typename T, typename AccT> __global__ void _MaxPool2dNHWC( const int nthreads, const int C, const int H, const int W, const int out_h, const int out_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* x, int* mask, T* y) { CUDA_1D_KERNEL_LOOP(yi, nthreads) { const int c = yi % C; const int w_out = (yi / C) % out_w; const int h_out = (yi / C / out_w) % out_h; const int n = yi / C / out_w / out_h; int hstart = h_out * stride_h - pad_h; int wstart = w_out * stride_w - pad_w; const int hend = min(hstart + kernel_h, H); const int wend = min(wstart + kernel_w, W); hstart = max(hstart, 0); wstart = max(wstart, 0); const int x_offset = n * H * W * C + c; int mask_val = -1; AccT val = T(-FLT_MAX); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { const int xi = x_offset + (h * W + w) * C; if (LDG(x, xi) > val) { mask_val = xi; val = LDG(x, xi); } } } y[yi] = convert::To<T>(val); mask[yi] = mask_val; } } template <typename T, typename AccT> __global__ void _MaxPool2dGradNCHW( const int nthreads, const int C, const int H, const int W, const int out_h, const int out_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* dy, const int* mask, T* dx) { CUDA_1D_KERNEL_LOOP(xi, nthreads) { const int w = xi % W; const int h = (xi / W) % H; const int c = (xi / W / H) % C; const int n = xi / W / H / C; const int out_hstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int out_wstart = (w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1; const int out_hend = min((h + pad_h) / stride_h + 1, out_h); const int out_wend = min((w + pad_w) / stride_w + 1, out_w); const int y_offset = (n * C + c) * out_h * out_w; AccT val = AccT(0); for (int h_out = out_hstart; h_out < out_hend; ++h_out) { for (int w_out = out_wstart; w_out < out_wend; ++w_out) { const int yi = y_offset + h_out * out_w + w_out; if (mask[yi] == (h * W + w)) { val += LDG(dy, yi); } } } dx[xi] = convert::To<T>(val); } } template <typename T, typename AccT> __global__ void _MaxPool2dGradNHWC( const int nthreads, const int C, const int H, const int W, const int out_h, const int out_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* dy, const int* mask, T* dx) { CUDA_1D_KERNEL_LOOP(xi, nthreads) { const int c = xi % C; const int w = (xi / C) % W; const int h = (xi / C / W) % H; const int n = xi / C / W / H; const int out_hstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int out_wstart = (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; const int out_hend = min((h + pad_h) / stride_h + 1, out_h); const int out_wend = min((w + pad_w) / stride_w + 1, out_w); const int y_offset = n * out_h * out_w * C + c; AccT val = AccT(0); for (int h_out = out_hstart; h_out < out_hend; ++h_out) { for (int w_out = out_wstart; w_out < out_wend; ++w_out) { const int yi = y_offset + (h_out * out_w + w_out) * C; if (mask[yi] == xi) { val += LDG(dy, yi); } } } dx[xi] = convert::To<T>(val); } } template <typename T, typename AccT> __global__ void _MaxPool3dNCHW( const int nthreads, const int C, const int D, const int H, const int W, const int out_d, const int out_h, const int out_w, const int kernel_d, const int kernel_h, const int kernel_w, const int stride_d, const int stride_h, const int stride_w, const int pad_d, const int pad_h, const int pad_w, const T* x, int* mask, T* y) { CUDA_1D_KERNEL_LOOP(yi, nthreads) { int tmp = yi / out_w; const int w_out = yi % out_w; const int h_out = tmp % out_h; tmp /= out_h; const int d_out = tmp % out_d; tmp /= out_d; const int c = tmp % C; const int n = tmp / C; int dstart = d_out * stride_d - pad_d; int hstart = h_out * stride_h - pad_h; int wstart = w_out * stride_w - pad_w; const int dend = min(dstart + kernel_d, D); const int hend = min(hstart + kernel_h, H); const int wend = min(wstart + kernel_w, W); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); const T* offset_x = x + (n * C + c) * D * H * W; int mask_val = -1; AccT val = AccT(-FLT_MAX); for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { tmp = (d * H + h) * W + w; if (LDG(offset_x, tmp) > val) { mask_val = tmp; val = LDG(offset_x, mask_val); } } } } y[yi] = convert::To<T>(val); mask[yi] = mask_val; } } template <typename T, typename AccT> __global__ void _MaxPool3dNHWC( const int nthreads, const int C, const int D, const int H, const int W, const int out_d, const int out_h, const int out_w, const int kernel_d, const int kernel_h, const int kernel_w, const int stride_d, const int stride_h, const int stride_w, const int pad_d, const int pad_h, const int pad_w, const T* x, int* mask, T* y) { CUDA_1D_KERNEL_LOOP(yi, nthreads) { int tmp = yi / C; const int c = yi % C; const int w_out = tmp % out_w; tmp /= out_w; const int h_out = tmp % out_h; tmp /= out_h; const int d_out = tmp % out_d; const int n = tmp / out_d; int dstart = d_out * stride_d - 
pad_d; int hstart = h_out * stride_h - pad_h; int wstart = w_out * stride_w - pad_w; const int dend = min(dstart + kernel_d, D); const int hend = min(hstart + kernel_h, H); const int wend = min(wstart + kernel_w, W); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); const int x_offset = n * D * H * W * C + c; int mask_val = -1; AccT val = AccT(-FLT_MAX); for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { tmp = x_offset + ((d * H + h) * W + w) * C; if (LDG(x, tmp) > val) { mask_val = tmp; val = LDG(x, tmp); } } } } y[yi] = convert::To<T>(val); mask[yi] = mask_val; } } template <typename T, typename AccT> __global__ void _MaxPool3dGradNCHW( const int nthreads, const int C, const int D, const int H, const int W, const int out_d, const int out_h, const int out_w, const int kernel_d, const int kernel_h, const int kernel_w, const int stride_d, const int stride_h, const int stride_w, const int pad_d, const int pad_h, const int pad_w, const T* dy, const int* mask, T* dx) { CUDA_1D_KERNEL_LOOP(xi, nthreads) { int tmp = xi / W; const int w = xi % W; const int h = tmp % H; tmp /= H; const int d = tmp % D; tmp /= D; const int c = tmp % C; const int n = tmp / C; const int out_dstart = (d + pad_d < kernel_d) ? 0 : (d + pad_d - kernel_d) / stride_d + 1; const int out_hstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int out_wstart = (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; const int out_dend = min((d + pad_d) / stride_d + 1, out_d); const int out_hend = min((h + pad_h) / stride_h + 1, out_h); const int out_wend = min((w + pad_w) / stride_w + 1, out_w); const int y_offset = (n * C + c) * out_d * out_h * out_w; AccT val = AccT(0); for (int d_out = out_dstart; d_out < out_dend; ++d_out) { for (int h_out = out_hstart; h_out < out_hend; ++h_out) { for (int w_out = out_wstart; w_out < out_wend; ++w_out) { tmp = y_offset + (d_out * out_h + h_out) * out_w + w_out; if (mask[tmp] == ((d * H + h) * W + w)) { val += LDG(dy, tmp); } } } } dx[xi] = convert::To<T>(val); } } template <typename T, typename AccT> __global__ void _MaxPool3dGradNHWC( const int nthreads, const int C, const int D, const int H, const int W, const int out_d, const int out_h, const int out_w, const int kernel_d, const int kernel_h, const int kernel_w, const int stride_d, const int stride_h, const int stride_w, const int pad_d, const int pad_h, const int pad_w, const T* dy, const int* mask, T* dx) { CUDA_1D_KERNEL_LOOP(xi, nthreads) { int tmp = xi / C; const int c = xi % C; const int w = tmp % W; tmp /= W; const int h = tmp % H; tmp /= H; const int d = tmp % D; const int n = tmp / D; const int out_dstart = (d + pad_d < kernel_d) ? 0 : (d + pad_d - kernel_d) / stride_d + 1; const int out_hstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int out_wstart = (w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1; const int out_dend = min((d + pad_d) / stride_d + 1, out_d); const int out_hend = min((h + pad_h) / stride_h + 1, out_h); const int out_wend = min((w + pad_w) / stride_w + 1, out_w); const int y_offset = n * out_d * out_h * out_w * C + c; AccT val = AccT(0); for (int d_out = out_dstart; d_out < out_dend; ++d_out) { for (int h_out = out_hstart; h_out < out_hend; ++h_out) { for (int w_out = out_wstart; w_out < out_wend; ++w_out) { tmp = y_offset + ((d_out * out_h + h_out) * out_w + w_out) * C; if (mask[tmp] == xi) { val += LDG(dy, tmp); } } } } dx[xi] = convert::To<T>(val); } } #undef LDG } // namespace /* ------------------- Launcher Separator ------------------- */ #define DISPATCH_POOL_KERNEL(name, T, AccT, kBlocks, kThreads, ...) \ if (data_format == "NCHW") { \ hipLaunchKernelGGL(( name##NCHW<T, AccT>) \ , dim3(kBlocks), dim3(kThreads), 0, ctx->cuda_stream(), __VA_ARGS__); \ } else if (data_format == "NHWC") { \ hipLaunchKernelGGL(( name##NHWC<T, AccT>) \ , dim3(kBlocks), dim3(kThreads), 0, ctx->cuda_stream(), __VA_ARGS__); \ } else { \ LOG(FATAL) << "Unknown DataFormat: " << data_format; \ } #define DEFINE_KERNEL_LAUNCHER(name, T, out_dim) \ template <> \ void name<T, CUDAContext>( \ const int N, \ const int C, \ const int H, \ const int W, \ const int out_h, \ const int out_w, \ const int kernel_h, \ const int kernel_w, \ const int stride_h, \ const int stride_w, \ const int pad_h, \ const int pad_w, \ const string& data_format, \ const T* x, \ int* mask, \ T* y, \ CUDAContext* ctx) { \ const int nthreads = N * C * out_dim; \ DISPATCH_POOL_KERNEL( \ _##name, \ math::ScalarType<T>::type, \ math::AccmulatorType<T>::type, \ CUDA_BLOCKS(nthreads), \ CUDA_THREADS, \ nthreads, \ C, \ H, \ W, \ out_h, \ out_w, \ kernel_h, \ kernel_w, \ stride_h, \ stride_w, \ pad_h, \ pad_w, \ reinterpret_cast<const math::ScalarType<T>::type*>(x), \ mask, \ reinterpret_cast<math::ScalarType<T>::type*>(y)); \ } DEFINE_KERNEL_LAUNCHER(MaxPool2d, float16, (out_h * out_w)); DEFINE_KERNEL_LAUNCHER(MaxPool2d, float, (out_h * out_w)); DEFINE_KERNEL_LAUNCHER(MaxPool2d, double, (out_h * out_w)); DEFINE_KERNEL_LAUNCHER(MaxPool2dGrad, float16, (H * W)); // MaxPool2dGrad DEFINE_KERNEL_LAUNCHER(MaxPool2dGrad, float, (H * W)); // MaxPool2dGrad DEFINE_KERNEL_LAUNCHER(MaxPool2dGrad, double, (H * W)); // MaxPool2dGrad #undef DEFINE_KERNEL_LAUNCHER #define DEFINE_KERNEL_LAUNCHER(name, T, out_dim) \ template <> \ void name<T, CUDAContext>( \ const int N, \ const int C, \ const int D, \ const int H, \ const int W, \ const int out_d, \ const int out_h, \ const int out_w, \ const int kernel_d, \ const int kernel_h, \ const int kernel_w, \ const int stride_d, \ const int stride_h, \ const int stride_w, \ const int pad_d, \ const int pad_h, \ const int pad_w, \ const string& data_format, \ const T* x, \ int* mask, \ T* y, \ CUDAContext* ctx) { \ const int nthreads = N * C * out_dim; \ DISPATCH_POOL_KERNEL( \ _##name, \ math::ScalarType<T>::type, \ math::AccmulatorType<T>::type, \ CUDA_BLOCKS(nthreads), \ CUDA_THREADS, \ nthreads, \ C, \ D, \ H, \ W, \ out_d, \ out_h, \ out_w, \ kernel_d, \ kernel_h, \ kernel_w, \ stride_d, \ stride_h, \ stride_w, \ pad_d, \ pad_h, \ pad_w, \ reinterpret_cast<const math::ScalarType<T>::type*>(x), \ mask, \ reinterpret_cast<math::ScalarType<T>::type*>(y)); \ } DEFINE_KERNEL_LAUNCHER(MaxPool3d, float16, (out_d * out_h * out_w)); DEFINE_KERNEL_LAUNCHER(MaxPool3d, float, (out_d * out_h * out_w)); DEFINE_KERNEL_LAUNCHER(MaxPool3d, double, (out_d * out_h * 
out_w)); DEFINE_KERNEL_LAUNCHER(MaxPool3dGrad, float16, (D * H * W)); // MaxPool3dGrad DEFINE_KERNEL_LAUNCHER(MaxPool3dGrad, float, (D * H * W)); // MaxPool3dGrad DEFINE_KERNEL_LAUNCHER(MaxPool3dGrad, double, (D * H * W)); // MaxPool3dGrad #undef DEFINE_KERNEL_LAUNCHER #undef DISPATCH_POOL_KERNEL } // namespace kernels } // namespace dragon #endif // USE_ROCM
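The pooling kernels above (e.g. _MaxPool2dNCHW) flatten an (n, c, h_out, w_out) coordinate into a single loop index and recover it with a div/mod chain. The following host-only sketch is not part of the file above and all names are local to it; it round-trips that decomposition for the NCHW layout.

#include <cassert>

// Recover (n, c, h_out, w_out) from a flat NCHW index, mirroring the
// div/mod chain used in the 2d NCHW pooling kernel.
static void decode_nchw(int yi, int C, int out_h, int out_w,
                        int& n, int& c, int& h_out, int& w_out)
{
    w_out = yi % out_w;
    h_out = (yi / out_w) % out_h;
    c     = (yi / out_w / out_h) % C;
    n     = yi / out_w / out_h / C;
}

int main()
{
    const int N = 2, C = 3, out_h = 4, out_w = 5;  // example sizes only
    for (int n = 0; n < N; ++n)
        for (int c = 0; c < C; ++c)
            for (int h = 0; h < out_h; ++h)
                for (int w = 0; w < out_w; ++w) {
                    const int yi = ((n * C + c) * out_h + h) * out_w + w;
                    int n2, c2, h2, w2;
                    decode_nchw(yi, C, out_h, out_w, n2, c2, h2, w2);
                    assert(n2 == n && c2 == c && h2 == h && w2 == w);
                }
    return 0;
}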
dd66b572261eecaf5d023a10ff6a1aea81a67ff5.cu
#ifdef USE_CUDA #include "dragon/core/context_cuda.h" #include "dragon/utils/math_functions.h" #include "dragon/utils/op_kernels.h" namespace dragon { namespace kernels { namespace { #define LDG(x, i) convert::To<AccT>(__ldg(x + i)) template <typename T, typename AccT> __global__ void _MaxPool2dNCHW( const int nthreads, const int C, const int H, const int W, const int out_h, const int out_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* x, int* mask, T* y) { CUDA_1D_KERNEL_LOOP(yi, nthreads) { const int w_out = yi % out_w; const int h_out = (yi / out_w) % out_h; const int c = (yi / out_w / out_h) % C; const int n = yi / out_w / out_h / C; int hstart = h_out * stride_h - pad_h; int wstart = w_out * stride_w - pad_w; const int hend = min(hstart + kernel_h, H); const int wend = min(wstart + kernel_w, W); hstart = max(hstart, 0); wstart = max(wstart, 0); const T* offset_x = x + (n * C + c) * H * W; int mask_val = -1; AccT val = AccT(-FLT_MAX); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (LDG(offset_x, h * W + w) > val) { mask_val = h * W + w; val = LDG(offset_x, mask_val); } } } y[yi] = convert::To<T>(val); mask[yi] = mask_val; } } template <typename T, typename AccT> __global__ void _MaxPool2dNHWC( const int nthreads, const int C, const int H, const int W, const int out_h, const int out_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* x, int* mask, T* y) { CUDA_1D_KERNEL_LOOP(yi, nthreads) { const int c = yi % C; const int w_out = (yi / C) % out_w; const int h_out = (yi / C / out_w) % out_h; const int n = yi / C / out_w / out_h; int hstart = h_out * stride_h - pad_h; int wstart = w_out * stride_w - pad_w; const int hend = min(hstart + kernel_h, H); const int wend = min(wstart + kernel_w, W); hstart = max(hstart, 0); wstart = max(wstart, 0); const int x_offset = n * H * W * C + c; int mask_val = -1; AccT val = T(-FLT_MAX); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { const int xi = x_offset + (h * W + w) * C; if (LDG(x, xi) > val) { mask_val = xi; val = LDG(x, xi); } } } y[yi] = convert::To<T>(val); mask[yi] = mask_val; } } template <typename T, typename AccT> __global__ void _MaxPool2dGradNCHW( const int nthreads, const int C, const int H, const int W, const int out_h, const int out_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* dy, const int* mask, T* dx) { CUDA_1D_KERNEL_LOOP(xi, nthreads) { const int w = xi % W; const int h = (xi / W) % H; const int c = (xi / W / H) % C; const int n = xi / W / H / C; const int out_hstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int out_wstart = (w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1; const int out_hend = min((h + pad_h) / stride_h + 1, out_h); const int out_wend = min((w + pad_w) / stride_w + 1, out_w); const int y_offset = (n * C + c) * out_h * out_w; AccT val = AccT(0); for (int h_out = out_hstart; h_out < out_hend; ++h_out) { for (int w_out = out_wstart; w_out < out_wend; ++w_out) { const int yi = y_offset + h_out * out_w + w_out; if (mask[yi] == (h * W + w)) { val += LDG(dy, yi); } } } dx[xi] = convert::To<T>(val); } } template <typename T, typename AccT> __global__ void _MaxPool2dGradNHWC( const int nthreads, const int C, const int H, const int W, const int out_h, const int out_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* dy, const int* mask, T* dx) { CUDA_1D_KERNEL_LOOP(xi, nthreads) { const int c = xi % C; const int w = (xi / C) % W; const int h = (xi / C / W) % H; const int n = xi / C / W / H; const int out_hstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int out_wstart = (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; const int out_hend = min((h + pad_h) / stride_h + 1, out_h); const int out_wend = min((w + pad_w) / stride_w + 1, out_w); const int y_offset = n * out_h * out_w * C + c; AccT val = AccT(0); for (int h_out = out_hstart; h_out < out_hend; ++h_out) { for (int w_out = out_wstart; w_out < out_wend; ++w_out) { const int yi = y_offset + (h_out * out_w + w_out) * C; if (mask[yi] == xi) { val += LDG(dy, yi); } } } dx[xi] = convert::To<T>(val); } } template <typename T, typename AccT> __global__ void _MaxPool3dNCHW( const int nthreads, const int C, const int D, const int H, const int W, const int out_d, const int out_h, const int out_w, const int kernel_d, const int kernel_h, const int kernel_w, const int stride_d, const int stride_h, const int stride_w, const int pad_d, const int pad_h, const int pad_w, const T* x, int* mask, T* y) { CUDA_1D_KERNEL_LOOP(yi, nthreads) { int tmp = yi / out_w; const int w_out = yi % out_w; const int h_out = tmp % out_h; tmp /= out_h; const int d_out = tmp % out_d; tmp /= out_d; const int c = tmp % C; const int n = tmp / C; int dstart = d_out * stride_d - pad_d; int hstart = h_out * stride_h - pad_h; int wstart = w_out * stride_w - pad_w; const int dend = min(dstart + kernel_d, D); const int hend = min(hstart + kernel_h, H); const int wend = min(wstart + kernel_w, W); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); const T* offset_x = x + (n * C + c) * D * H * W; int mask_val = -1; AccT val = AccT(-FLT_MAX); for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { tmp = (d * H + h) * W + w; if (LDG(offset_x, tmp) > val) { mask_val = tmp; val = LDG(offset_x, mask_val); } } } } y[yi] = convert::To<T>(val); mask[yi] = mask_val; } } template <typename T, typename AccT> __global__ void _MaxPool3dNHWC( const int nthreads, const int C, const int D, const int H, const int W, const int out_d, const int out_h, const int out_w, const int kernel_d, const int kernel_h, const int kernel_w, const int stride_d, const int stride_h, const int stride_w, const int pad_d, const int pad_h, const int pad_w, const T* x, int* mask, T* y) { CUDA_1D_KERNEL_LOOP(yi, nthreads) { int tmp = yi / C; const int c = yi % C; const int w_out = tmp % out_w; tmp /= out_w; const int h_out = tmp % out_h; tmp /= out_h; const int d_out = tmp % out_d; const int n = tmp / out_d; int dstart = d_out * stride_d - 
pad_d; int hstart = h_out * stride_h - pad_h; int wstart = w_out * stride_w - pad_w; const int dend = min(dstart + kernel_d, D); const int hend = min(hstart + kernel_h, H); const int wend = min(wstart + kernel_w, W); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); const int x_offset = n * D * H * W * C + c; int mask_val = -1; AccT val = AccT(-FLT_MAX); for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { tmp = x_offset + ((d * H + h) * W + w) * C; if (LDG(x, tmp) > val) { mask_val = tmp; val = LDG(x, tmp); } } } } y[yi] = convert::To<T>(val); mask[yi] = mask_val; } } template <typename T, typename AccT> __global__ void _MaxPool3dGradNCHW( const int nthreads, const int C, const int D, const int H, const int W, const int out_d, const int out_h, const int out_w, const int kernel_d, const int kernel_h, const int kernel_w, const int stride_d, const int stride_h, const int stride_w, const int pad_d, const int pad_h, const int pad_w, const T* dy, const int* mask, T* dx) { CUDA_1D_KERNEL_LOOP(xi, nthreads) { int tmp = xi / W; const int w = xi % W; const int h = tmp % H; tmp /= H; const int d = tmp % D; tmp /= D; const int c = tmp % C; const int n = tmp / C; const int out_dstart = (d + pad_d < kernel_d) ? 0 : (d + pad_d - kernel_d) / stride_d + 1; const int out_hstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int out_wstart = (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; const int out_dend = min((d + pad_d) / stride_d + 1, out_d); const int out_hend = min((h + pad_h) / stride_h + 1, out_h); const int out_wend = min((w + pad_w) / stride_w + 1, out_w); const int y_offset = (n * C + c) * out_d * out_h * out_w; AccT val = AccT(0); for (int d_out = out_dstart; d_out < out_dend; ++d_out) { for (int h_out = out_hstart; h_out < out_hend; ++h_out) { for (int w_out = out_wstart; w_out < out_wend; ++w_out) { tmp = y_offset + (d_out * out_h + h_out) * out_w + w_out; if (mask[tmp] == ((d * H + h) * W + w)) { val += LDG(dy, tmp); } } } } dx[xi] = convert::To<T>(val); } } template <typename T, typename AccT> __global__ void _MaxPool3dGradNHWC( const int nthreads, const int C, const int D, const int H, const int W, const int out_d, const int out_h, const int out_w, const int kernel_d, const int kernel_h, const int kernel_w, const int stride_d, const int stride_h, const int stride_w, const int pad_d, const int pad_h, const int pad_w, const T* dy, const int* mask, T* dx) { CUDA_1D_KERNEL_LOOP(xi, nthreads) { int tmp = xi / C; const int c = xi % C; const int w = tmp % W; tmp /= W; const int h = tmp % H; tmp /= H; const int d = tmp % D; const int n = tmp / D; const int out_dstart = (d + pad_d < kernel_d) ? 0 : (d + pad_d - kernel_d) / stride_d + 1; const int out_hstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int out_wstart = (w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1; const int out_dend = min((d + pad_d) / stride_d + 1, out_d); const int out_hend = min((h + pad_h) / stride_h + 1, out_h); const int out_wend = min((w + pad_w) / stride_w + 1, out_w); const int y_offset = n * out_d * out_h * out_w * C + c; AccT val = AccT(0); for (int d_out = out_dstart; d_out < out_dend; ++d_out) { for (int h_out = out_hstart; h_out < out_hend; ++h_out) { for (int w_out = out_wstart; w_out < out_wend; ++w_out) { tmp = y_offset + ((d_out * out_h + h_out) * out_w + w_out) * C; if (mask[tmp] == xi) { val += LDG(dy, tmp); } } } } dx[xi] = convert::To<T>(val); } } #undef LDG } // namespace /* ------------------- Launcher Separator ------------------- */ #define DISPATCH_POOL_KERNEL(name, T, AccT, kBlocks, kThreads, ...) \ if (data_format == "NCHW") { \ name##NCHW<T, AccT> \ <<<kBlocks, kThreads, 0, ctx->cuda_stream()>>>(__VA_ARGS__); \ } else if (data_format == "NHWC") { \ name##NHWC<T, AccT> \ <<<kBlocks, kThreads, 0, ctx->cuda_stream()>>>(__VA_ARGS__); \ } else { \ LOG(FATAL) << "Unknown DataFormat: " << data_format; \ } #define DEFINE_KERNEL_LAUNCHER(name, T, out_dim) \ template <> \ void name<T, CUDAContext>( \ const int N, \ const int C, \ const int H, \ const int W, \ const int out_h, \ const int out_w, \ const int kernel_h, \ const int kernel_w, \ const int stride_h, \ const int stride_w, \ const int pad_h, \ const int pad_w, \ const string& data_format, \ const T* x, \ int* mask, \ T* y, \ CUDAContext* ctx) { \ const int nthreads = N * C * out_dim; \ DISPATCH_POOL_KERNEL( \ _##name, \ math::ScalarType<T>::type, \ math::AccmulatorType<T>::type, \ CUDA_BLOCKS(nthreads), \ CUDA_THREADS, \ nthreads, \ C, \ H, \ W, \ out_h, \ out_w, \ kernel_h, \ kernel_w, \ stride_h, \ stride_w, \ pad_h, \ pad_w, \ reinterpret_cast<const math::ScalarType<T>::type*>(x), \ mask, \ reinterpret_cast<math::ScalarType<T>::type*>(y)); \ } DEFINE_KERNEL_LAUNCHER(MaxPool2d, float16, (out_h * out_w)); DEFINE_KERNEL_LAUNCHER(MaxPool2d, float, (out_h * out_w)); DEFINE_KERNEL_LAUNCHER(MaxPool2d, double, (out_h * out_w)); DEFINE_KERNEL_LAUNCHER(MaxPool2dGrad, float16, (H * W)); // MaxPool2dGrad DEFINE_KERNEL_LAUNCHER(MaxPool2dGrad, float, (H * W)); // MaxPool2dGrad DEFINE_KERNEL_LAUNCHER(MaxPool2dGrad, double, (H * W)); // MaxPool2dGrad #undef DEFINE_KERNEL_LAUNCHER #define DEFINE_KERNEL_LAUNCHER(name, T, out_dim) \ template <> \ void name<T, CUDAContext>( \ const int N, \ const int C, \ const int D, \ const int H, \ const int W, \ const int out_d, \ const int out_h, \ const int out_w, \ const int kernel_d, \ const int kernel_h, \ const int kernel_w, \ const int stride_d, \ const int stride_h, \ const int stride_w, \ const int pad_d, \ const int pad_h, \ const int pad_w, \ const string& data_format, \ const T* x, \ int* mask, \ T* y, \ CUDAContext* ctx) { \ const int nthreads = N * C * out_dim; \ DISPATCH_POOL_KERNEL( \ _##name, \ math::ScalarType<T>::type, \ math::AccmulatorType<T>::type, \ CUDA_BLOCKS(nthreads), \ CUDA_THREADS, \ nthreads, \ C, \ D, \ H, \ W, \ out_d, \ out_h, \ out_w, \ kernel_d, \ kernel_h, \ kernel_w, \ stride_d, \ stride_h, \ stride_w, \ pad_d, \ pad_h, \ pad_w, \ reinterpret_cast<const math::ScalarType<T>::type*>(x), \ mask, \ reinterpret_cast<math::ScalarType<T>::type*>(y)); \ } DEFINE_KERNEL_LAUNCHER(MaxPool3d, float16, (out_d * out_h * out_w)); DEFINE_KERNEL_LAUNCHER(MaxPool3d, float, (out_d * out_h * out_w)); DEFINE_KERNEL_LAUNCHER(MaxPool3d, double, (out_d * out_h * out_w)); DEFINE_KERNEL_LAUNCHER(MaxPool3dGrad, float16, (D * H * 
W)); // MaxPool3dGrad DEFINE_KERNEL_LAUNCHER(MaxPool3dGrad, float, (D * H * W)); // MaxPool3dGrad DEFINE_KERNEL_LAUNCHER(MaxPool3dGrad, double, (D * H * W)); // MaxPool3dGrad #undef DEFINE_KERNEL_LAUNCHER #undef DISPATCH_POOL_KERNEL } // namespace kernels } // namespace dragon #endif // USE_CUDA
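The MaxPool2dGrad / MaxPool3dGrad kernels in both versions above invert the pooling map: for an input row h they visit only the output rows whose windows cover h, using the out_hstart/out_hend closed form. Below is a self-contained, host-only brute-force check of that range formula; the sizes are arbitrary example values and the names are local to this sketch.

#include <algorithm>
#include <cassert>

int main()
{
    const int H = 13, kernel_h = 3, stride_h = 2, pad_h = 1;   // example sizes only
    const int out_h = (H + 2 * pad_h - kernel_h) / stride_h + 1;
    for (int h = 0; h < H; ++h) {
        // Closed-form range used by the Grad kernels above.
        const int out_hstart = (h + pad_h < kernel_h)
            ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
        const int out_hend = std::min((h + pad_h) / stride_h + 1, out_h);
        // Brute force: which output rows actually cover input row h?
        for (int ho = 0; ho < out_h; ++ho) {
            const int hstart = ho * stride_h - pad_h;
            const bool covers   = (h >= hstart) && (h < hstart + kernel_h);
            const bool in_range = (ho >= out_hstart) && (ho < out_hend);
            assert(covers == in_range);
        }
    }
    return 0;
}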
8590f7ab414a4834f7aea55f0fbedef8550f4fc5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include"rgb2gray.hpp" #include"cuda_error_check.hpp" __global__ void rgb_2_gray(unsigned char* d_grayImage, const unsigned char* const d_rgbImage) { int rgb_x = blockIdx.x*blockDim.x + threadIdx.x; int rgb_y = blockIdx.y*blockDim.y + threadIdx.y; if((rgb_x >= IMAGE_WIDTH_RGB) || (rgb_y >=IMAGE_HEIGHT_RGB)) { return; } unsigned char blue = float(*(d_rgbImage + 3*IMAGE_WIDTH_RGB*rgb_y + 3*rgb_x))*0.114f; unsigned char green = float(*(d_rgbImage + 3*IMAGE_WIDTH_RGB*rgb_y + 3*rgb_x +1))*0.587f; unsigned char red = float(*(d_rgbImage + 3*IMAGE_WIDTH_RGB*rgb_y + 3*rgb_x +2))*0.299f; *(d_grayImage + rgb_y*IMAGE_WIDTH_RGB + rgb_x) = uchar(blue + green + red); } unsigned char* rgb2gray(const unsigned char* const rgbImage) { unsigned char* h_grayImage = (unsigned char*)malloc(NUMPIX_RGB*sizeof(unsigned char)); unsigned char* d_rgbImage; unsigned char* d_grayImage; hipMalloc((void**)&d_rgbImage, 3*NUMPIX_RGB*sizeof(unsigned char)); CudaCheckError(); hipMalloc((void**)&d_grayImage, NUMPIX_RGB*sizeof(unsigned char)); CudaCheckError(); hipMemset(d_grayImage, 0, sizeof(unsigned char)*NUMPIX_RGB); CudaCheckError(); hipMemcpy(d_rgbImage, rgbImage, 3*sizeof(unsigned char)*NUMPIX_RGB,hipMemcpyHostToDevice); CudaCheckError(); dim3 blockSize(THREAD_X_RGB, THREAD_Y_RGB); dim3 gridSize((IMAGE_WIDTH_RGB + blockSize.x -1)/blockSize.x, (IMAGE_HEIGHT_RGB +blockSize.y -1)/blockSize.y); hipLaunchKernelGGL(( rgb_2_gray), dim3(gridSize), dim3(blockSize), 0, 0, d_grayImage, d_rgbImage); hipDeviceSynchronize(); CudaCheckError(); hipMemcpy(h_grayImage, d_grayImage, sizeof(unsigned char)*NUMPIX_RGB,hipMemcpyDeviceToHost); CudaCheckError(); return h_grayImage; }
8590f7ab414a4834f7aea55f0fbedef8550f4fc5.cu
#include"rgb2gray.hpp" #include"cuda_error_check.hpp" __global__ void rgb_2_gray(unsigned char* d_grayImage, const unsigned char* const d_rgbImage) { int rgb_x = blockIdx.x*blockDim.x + threadIdx.x; int rgb_y = blockIdx.y*blockDim.y + threadIdx.y; if((rgb_x >= IMAGE_WIDTH_RGB) || (rgb_y >=IMAGE_HEIGHT_RGB)) { return; } unsigned char blue = float(*(d_rgbImage + 3*IMAGE_WIDTH_RGB*rgb_y + 3*rgb_x))*0.114f; unsigned char green = float(*(d_rgbImage + 3*IMAGE_WIDTH_RGB*rgb_y + 3*rgb_x +1))*0.587f; unsigned char red = float(*(d_rgbImage + 3*IMAGE_WIDTH_RGB*rgb_y + 3*rgb_x +2))*0.299f; *(d_grayImage + rgb_y*IMAGE_WIDTH_RGB + rgb_x) = uchar(blue + green + red); } unsigned char* rgb2gray(const unsigned char* const rgbImage) { unsigned char* h_grayImage = (unsigned char*)malloc(NUMPIX_RGB*sizeof(unsigned char)); unsigned char* d_rgbImage; unsigned char* d_grayImage; cudaMalloc((void**)&d_rgbImage, 3*NUMPIX_RGB*sizeof(unsigned char)); CudaCheckError(); cudaMalloc((void**)&d_grayImage, NUMPIX_RGB*sizeof(unsigned char)); CudaCheckError(); cudaMemset(d_grayImage, 0, sizeof(unsigned char)*NUMPIX_RGB); CudaCheckError(); cudaMemcpy(d_rgbImage, rgbImage, 3*sizeof(unsigned char)*NUMPIX_RGB,cudaMemcpyHostToDevice); CudaCheckError(); dim3 blockSize(THREAD_X_RGB, THREAD_Y_RGB); dim3 gridSize((IMAGE_WIDTH_RGB + blockSize.x -1)/blockSize.x, (IMAGE_HEIGHT_RGB +blockSize.y -1)/blockSize.y); rgb_2_gray<<<gridSize, blockSize>>>(d_grayImage, d_rgbImage); cudaDeviceSynchronize(); CudaCheckError(); cudaMemcpy(h_grayImage, d_grayImage, sizeof(unsigned char)*NUMPIX_RGB,cudaMemcpyDeviceToHost); CudaCheckError(); return h_grayImage; }
0caf409e63a8579960a7196460e0ccebc2512dc7.hip
// !!! This is a file automatically generated by hipify!!! // Copyright Contributors to the Open Shading Language project. // SPDX-License-Identifier: BSD-3-Clause // https://github.com/AcademySoftwareFoundation/OpenShadingLanguage #include <optix.h> #include <optix_device.h> #define OPTIX_COMPATIBILITY 7 #include <OSL/oslclosure.h> #include <hip/hip_runtime.h> #include <optix_device.h> #include "rend_lib.h" OSL_NAMESPACE_ENTER namespace pvt { __device__ hipDeviceptr_t s_color_system = 0; __device__ hipDeviceptr_t osl_printf_buffer_start = 0; __device__ hipDeviceptr_t osl_printf_buffer_end = 0; __device__ uint64_t test_str_1 = 0; __device__ uint64_t test_str_2 = 0; __device__ uint64_t num_named_xforms = 0; __device__ hipDeviceptr_t xform_name_buffer = 0; __device__ hipDeviceptr_t xform_buffer = 0; } // namespace pvt OSL_NAMESPACE_EXIT // Taken from the SimplePool class __device__ static inline size_t alignment_offset_calc(void* ptr, size_t alignment) { uintptr_t ptrbits = reinterpret_cast<uintptr_t>(ptr); uintptr_t offset = ((ptrbits + alignment - 1) & -alignment) - ptrbits; return offset; } // These functions are declared extern to prevent name mangling. extern "C" { // add OptiX entry point to prevent OptiX from discarding the module __global__ void __direct_callable__dummy_rend_lib() { } __device__ void* closure_component_allot(void* pool, int id, size_t prim_size, const OSL::Color3& w) { ((OSL::ClosureComponent*)pool)->id = id; ((OSL::ClosureComponent*)pool)->w = w; size_t needed = (sizeof(OSL::ClosureComponent) - sizeof(void*) + prim_size + (alignof(OSL::ClosureComponent) - 1)) & ~(alignof(OSL::ClosureComponent) - 1); char* char_ptr = (char*)pool; return (void*)&char_ptr[needed]; } __device__ void* closure_mul_allot(void* pool, const OSL::Color3& w, OSL::ClosureColor* c) { ((OSL::ClosureMul*)pool)->id = OSL::ClosureColor::MUL; ((OSL::ClosureMul*)pool)->weight = w; ((OSL::ClosureMul*)pool)->closure = c; size_t needed = (sizeof(OSL::ClosureMul) + (alignof(OSL::ClosureComponent) - 1)) & ~(alignof(OSL::ClosureComponent) - 1); char* char_ptr = (char*)pool; return &char_ptr[needed]; } __device__ void* closure_mul_float_allot(void* pool, const float& w, OSL::ClosureColor* c) { ((OSL::ClosureMul*)pool)->id = OSL::ClosureColor::MUL; ((OSL::ClosureMul*)pool)->weight.x = w; ((OSL::ClosureMul*)pool)->weight.y = w; ((OSL::ClosureMul*)pool)->weight.z = w; ((OSL::ClosureMul*)pool)->closure = c; size_t needed = (sizeof(OSL::ClosureMul) + (alignof(OSL::ClosureComponent) - 1)) & ~(alignof(OSL::ClosureComponent) - 1); char* char_ptr = (char*)pool; return &char_ptr[needed]; } __device__ void* closure_add_allot(void* pool, OSL::ClosureColor* a, OSL::ClosureColor* b) { ((OSL::ClosureAdd*)pool)->id = OSL::ClosureColor::ADD; ((OSL::ClosureAdd*)pool)->closureA = a; ((OSL::ClosureAdd*)pool)->closureB = b; size_t needed = (sizeof(OSL::ClosureAdd) + (alignof(OSL::ClosureComponent) - 1)) & ~(alignof(OSL::ClosureComponent) - 1); char* char_ptr = (char*)pool; return &char_ptr[needed]; } __device__ void* osl_allocate_closure_component(void* sg_, int id, int size) { ShaderGlobals* sg_ptr = (ShaderGlobals*)sg_; OSL::Color3 w = OSL::Color3(1, 1, 1); // Fix up the alignment void* ret = ((char*)sg_ptr->renderstate) + alignment_offset_calc(sg_ptr->renderstate, alignof(OSL::ClosureComponent)); size = max(4, size); sg_ptr->renderstate = closure_component_allot(ret, id, size, w); return ret; } __device__ void* osl_allocate_weighted_closure_component(void* sg_, int id, int size, const OSL::Color3* w) { ShaderGlobals* sg_ptr = 
(ShaderGlobals*)sg_; if (w->x == 0.0f && w->y == 0.0f && w->z == 0.0f) { return NULL; } size = max(4, size); // Fix up the alignment void* ret = ((char*)sg_ptr->renderstate) + alignment_offset_calc(sg_ptr->renderstate, alignof(OSL::ClosureComponent)); sg_ptr->renderstate = closure_component_allot(ret, id, size, *w); return ret; } __device__ void* osl_mul_closure_color(void* sg_, OSL::ClosureColor* a, const OSL::Color3* w) { ShaderGlobals* sg_ptr = (ShaderGlobals*)sg_; if (a == NULL) { return NULL; } if (w->x == 0.0f && w->y == 0.0f && w->z == 0.0f) { return NULL; } if (w->x == 1.0f && w->y == 1.0f && w->z == 1.0f) { return a; } // Fix up the alignment void* ret = ((char*)sg_ptr->renderstate) + alignment_offset_calc(sg_ptr->renderstate, alignof(OSL::ClosureComponent)); sg_ptr->renderstate = closure_mul_allot(ret, *w, a); return ret; } __device__ void* osl_mul_closure_float(void* sg_, OSL::ClosureColor* a, float w) { ShaderGlobals* sg_ptr = (ShaderGlobals*)sg_; if (a == NULL || w == 0.0f) { return NULL; } if (w == 1.0f) { return a; } // Fix up the alignment void* ret = ((char*)sg_ptr->renderstate) + alignment_offset_calc(sg_ptr->renderstate, alignof(OSL::ClosureComponent)); sg_ptr->renderstate = closure_mul_float_allot(ret, w, a); return ret; } __device__ void* osl_add_closure_closure(void* sg_, OSL::ClosureColor* a, OSL::ClosureColor* b) { ShaderGlobals* sg_ptr = (ShaderGlobals*)sg_; if (a == NULL) { return b; } if (b == NULL) { return a; } // Fix up the alignment void* ret = ((char*)sg_ptr->renderstate) + alignment_offset_calc(sg_ptr->renderstate, alignof(OSL::ClosureComponent)); sg_ptr->renderstate = closure_add_allot(ret, a, b); return ret; } #define IS_STRING(type) (type.basetype == OSL::TypeDesc::STRING) #define IS_PTR(type) (type.basetype == OSL::TypeDesc::PTR) #define IS_COLOR(type) (type.vecsemantics == OSL::TypeDesc::COLOR) __device__ bool rend_get_userdata(OSL::StringParam name, void* data, int data_size, const OSL::TypeDesc& type, int index) { // Perform a userdata lookup using the parameter name, type, and // userdata index. If there is a match, memcpy the value into data and // return 1. if (IS_PTR(type) && name.hash() == STRING_PARAMS(colorsystem)) { *(void**)data = *reinterpret_cast<void**>(&OSL::pvt::s_color_system); return true; } // TODO: This is temporary code for initial testing and demonstration. if (IS_STRING(type) && name == HDSTR(OSL::pvt::test_str_1)) { memcpy(data, &OSL::pvt::test_str_2, 8); return true; } return false; } #undef IS_COLOR #undef IS_STRING #undef IS_PTR __device__ int osl_bind_interpolated_param(void* sg_, const char* name, long long type, int userdata_has_derivs, void* userdata_data, int symbol_has_derivs, void* symbol_data, int symbol_data_size, char* userdata_initialized, int userdata_index) { char status = *userdata_initialized; if (status == 0) { bool ok = rend_get_userdata(HDSTR(name), userdata_data, symbol_data_size, (*(OSL::TypeDesc*)&type), userdata_index); *userdata_initialized = status = 1 + ok; } if (status == 2) { memcpy(symbol_data, userdata_data, symbol_data_size); return 1; } return 0; } __device__ int osl_strlen_is(const char* str) { //return HDSTR(str).length(); return 0; } __device__ int osl_hash_is(const char* str) { return HDSTR(str); } __device__ int osl_getchar_isi(const char* str, int index) { // return (str && unsigned(index) < HDSTR(str).length()) // ? str[index] : 0; return 0; } // Printing is handled by the host. Copy format string's hash and // all the arguments to our print buffer. 
// Note: the first element of 'args' is the size of the argument list __device__ void osl_printf(void* sg_, char* fmt_str, void* args) { uint64_t fmt_str_hash = HDSTR(fmt_str).hash(); uint64_t args_size = reinterpret_cast<uint64_t*>(args)[0]; // This can be used to limit printing to one Cuda thread for debugging // if (launch_index.x == 0 && launch_index.y == 0) hipDeviceptr_t copy_start = atomicAdd(&OSL::pvt::osl_printf_buffer_start, args_size + sizeof(args_size) + sizeof(fmt_str_hash)); // Only perform copy if there's enough space if (copy_start + args_size + sizeof(args_size) + sizeof(fmt_str_hash) < OSL::pvt::osl_printf_buffer_end) { memcpy(reinterpret_cast<void*>(copy_start), &fmt_str_hash, sizeof(fmt_str_hash)); memcpy(reinterpret_cast<void*>(copy_start + sizeof(fmt_str_hash)), &args_size, sizeof(args_size)); memcpy(reinterpret_cast<void*>(copy_start + sizeof(fmt_str_hash) + sizeof(args_size)), reinterpret_cast<char*>(args) + sizeof(args_size), args_size); } } __device__ void* osl_get_noise_options(void* sg_) { ShaderGlobals* sg = ((ShaderGlobals*)sg_); NoiseOptCUDA* opt = (NoiseOptCUDA*)((ShadingContextCUDA*)sg->context)->noise_options_ptr(); new (opt) NoiseOptCUDA; return opt; } __device__ void* osl_get_texture_options(void* sg_) { return 0; } __device__ void osl_texture_set_interp_code(void* opt, int mode) { // ((TextureOpt *)opt)->interpmode = (TextureOpt::InterpMode)mode; } __device__ void osl_texture_set_stwrap_code(void* opt, int mode) { //((TextureOpt *)opt)->swrap = (TextureOpt::Wrap)mode; //((TextureOpt *)opt)->twrap = (TextureOpt::Wrap)mode; } __forceinline__ __device__ float3 make_float3(const float4& a) { return make_float3(a.x, a.y, a.z); } // FIXME: // clang++ 9.0 seems to have trouble with tex2d<float4>() look-ups, // so we'll declare this external and implement texture lookups in // CUDA files compiled by nvcc (optix_grid_renderer.cu and // optix_raytrace.cu). // (clang++ 9.0 error 'undefined __nv_tex_surf_handler') extern __device__ float4 osl_tex2DLookup(void* handle, float s, float t); __device__ int osl_texture(void* sg_, const char* name, void* handle, void* opt_, float s, float t, float dsdx, float dtdx, float dsdy, float dtdy, int chans, void* result, void* dresultdx, void* dresultdy, void* alpha, void* dalphadx, void* dalphady, void* ustring_errormessage) { if (!handle) return 0; // hipTextureObject_t texID = hipTextureObject_t(handle); float4 fromTexture = osl_tex2DLookup(handle, s, t); // see note above // float4 fromTexture = tex2D<float4>(texID, s, t); *((float3*)result) = make_float3(fromTexture.x, fromTexture.y, fromTexture.z); return 1; } __device__ int osl_range_check_err(int indexvalue, int length, OSL::ustring_pod symname, void* sg, OSL::ustring_pod sourcefile, int sourceline, OSL::ustring_pod groupname, int layer, OSL::ustring_pod layername, OSL::ustring_pod shadername) { if (indexvalue < 0 || indexvalue >= length) { return indexvalue < 0 ? 
0 : length - 1; } return indexvalue; } __device__ int osl_range_check(int indexvalue, int length, OSL::ustring_pod symname, void* sg, OSL::ustring_pod sourcefile, int sourceline, OSL::ustring_pod groupname, int layer, OSL::ustring_pod layername, OSL::ustring_pod shadername) { if (indexvalue < 0 || indexvalue >= length) { indexvalue = osl_range_check_err(indexvalue, length, symname, sg, sourcefile, sourceline, groupname, layer, layername, shadername); } return indexvalue; } #define MAT(m) (*(OSL::Matrix44*)m) __device__ int osl_get_matrix(void* sg_, void* r, const char* from) { ShaderGlobals* sg = (ShaderGlobals*)sg_; if (HDSTR(from) == STRING_PARAMS(common)) { MAT(r).makeIdentity(); return true; } if (HDSTR(from) == STRING_PARAMS(object)) { MAT(r) = MAT(sg->object2common); return true; } if (HDSTR(from) == STRING_PARAMS(shader)) { MAT(r) = MAT(sg->shader2common); return true; } // Find the index of the named transform in the transform list int match_idx = -1; for (size_t idx = 0; idx < OSL::pvt::num_named_xforms; ++idx) { if (HDSTR(from) == HDSTR(((uint64_t*)OSL::pvt::xform_name_buffer)[idx])) { match_idx = static_cast<int>(idx); break; } } // Return the transform if there is a match if (match_idx >= 0) { MAT(r) = reinterpret_cast<OSL::Matrix44*>( OSL::pvt::xform_buffer)[match_idx]; return true; } int ok = false; // TODO: Implement transform if (!ok) { MAT(r).makeIdentity(); // TBR: OSL would throw an error here, what should we do? } return ok; } __device__ int osl_get_inverse_matrix(void* sg_, void* r, const char* to) { ShaderGlobals* sg = (ShaderGlobals*)sg_; if (HDSTR(to) == STRING_PARAMS(common)) { MAT(r).makeIdentity(); return true; } if (HDSTR(to) == STRING_PARAMS(object)) { MAT(r) = MAT(sg->object2common); MAT(r).invert(); return true; } if (HDSTR(to) == STRING_PARAMS(shader)) { MAT(r) = MAT(sg->shader2common); MAT(r).invert(); return true; } // Find the index of the named transform in the transform list int match_idx = -1; for (size_t idx = 0; idx < OSL::pvt::num_named_xforms; ++idx) { if (HDSTR(to) == HDSTR(((uint64_t*)OSL::pvt::xform_name_buffer)[idx])) { match_idx = static_cast<int>(idx); break; } } // Return the transform if there is a match if (match_idx >= 0) { MAT(r) = reinterpret_cast<OSL::Matrix44*>( OSL::pvt::xform_buffer)[match_idx]; MAT(r).invert(); return true; } int ok = false; // TODO: Implement transform if (!ok) { MAT(r).makeIdentity(); // TBR: OSL would throw an error here, what should we do? } return ok; } #undef MAT }
0caf409e63a8579960a7196460e0ccebc2512dc7.cu
// Copyright Contributors to the Open Shading Language project. // SPDX-License-Identifier: BSD-3-Clause // https://github.com/AcademySoftwareFoundation/OpenShadingLanguage #include <optix.h> #include <optix_device.h> #define OPTIX_COMPATIBILITY 7 #include <OSL/oslclosure.h> #include <cuda_runtime.h> #include <optix_device.h> #include "rend_lib.h" OSL_NAMESPACE_ENTER namespace pvt { __device__ CUdeviceptr s_color_system = 0; __device__ CUdeviceptr osl_printf_buffer_start = 0; __device__ CUdeviceptr osl_printf_buffer_end = 0; __device__ uint64_t test_str_1 = 0; __device__ uint64_t test_str_2 = 0; __device__ uint64_t num_named_xforms = 0; __device__ CUdeviceptr xform_name_buffer = 0; __device__ CUdeviceptr xform_buffer = 0; } // namespace pvt OSL_NAMESPACE_EXIT // Taken from the SimplePool class __device__ static inline size_t alignment_offset_calc(void* ptr, size_t alignment) { uintptr_t ptrbits = reinterpret_cast<uintptr_t>(ptr); uintptr_t offset = ((ptrbits + alignment - 1) & -alignment) - ptrbits; return offset; } // These functions are declared extern to prevent name mangling. extern "C" { // add OptiX entry point to prevent OptiX from discarding the module __global__ void __direct_callable__dummy_rend_lib() { } __device__ void* closure_component_allot(void* pool, int id, size_t prim_size, const OSL::Color3& w) { ((OSL::ClosureComponent*)pool)->id = id; ((OSL::ClosureComponent*)pool)->w = w; size_t needed = (sizeof(OSL::ClosureComponent) - sizeof(void*) + prim_size + (alignof(OSL::ClosureComponent) - 1)) & ~(alignof(OSL::ClosureComponent) - 1); char* char_ptr = (char*)pool; return (void*)&char_ptr[needed]; } __device__ void* closure_mul_allot(void* pool, const OSL::Color3& w, OSL::ClosureColor* c) { ((OSL::ClosureMul*)pool)->id = OSL::ClosureColor::MUL; ((OSL::ClosureMul*)pool)->weight = w; ((OSL::ClosureMul*)pool)->closure = c; size_t needed = (sizeof(OSL::ClosureMul) + (alignof(OSL::ClosureComponent) - 1)) & ~(alignof(OSL::ClosureComponent) - 1); char* char_ptr = (char*)pool; return &char_ptr[needed]; } __device__ void* closure_mul_float_allot(void* pool, const float& w, OSL::ClosureColor* c) { ((OSL::ClosureMul*)pool)->id = OSL::ClosureColor::MUL; ((OSL::ClosureMul*)pool)->weight.x = w; ((OSL::ClosureMul*)pool)->weight.y = w; ((OSL::ClosureMul*)pool)->weight.z = w; ((OSL::ClosureMul*)pool)->closure = c; size_t needed = (sizeof(OSL::ClosureMul) + (alignof(OSL::ClosureComponent) - 1)) & ~(alignof(OSL::ClosureComponent) - 1); char* char_ptr = (char*)pool; return &char_ptr[needed]; } __device__ void* closure_add_allot(void* pool, OSL::ClosureColor* a, OSL::ClosureColor* b) { ((OSL::ClosureAdd*)pool)->id = OSL::ClosureColor::ADD; ((OSL::ClosureAdd*)pool)->closureA = a; ((OSL::ClosureAdd*)pool)->closureB = b; size_t needed = (sizeof(OSL::ClosureAdd) + (alignof(OSL::ClosureComponent) - 1)) & ~(alignof(OSL::ClosureComponent) - 1); char* char_ptr = (char*)pool; return &char_ptr[needed]; } __device__ void* osl_allocate_closure_component(void* sg_, int id, int size) { ShaderGlobals* sg_ptr = (ShaderGlobals*)sg_; OSL::Color3 w = OSL::Color3(1, 1, 1); // Fix up the alignment void* ret = ((char*)sg_ptr->renderstate) + alignment_offset_calc(sg_ptr->renderstate, alignof(OSL::ClosureComponent)); size = max(4, size); sg_ptr->renderstate = closure_component_allot(ret, id, size, w); return ret; } __device__ void* osl_allocate_weighted_closure_component(void* sg_, int id, int size, const OSL::Color3* w) { ShaderGlobals* sg_ptr = (ShaderGlobals*)sg_; if (w->x == 0.0f && w->y == 0.0f && w->z == 0.0f) { 
return NULL; } size = max(4, size); // Fix up the alignment void* ret = ((char*)sg_ptr->renderstate) + alignment_offset_calc(sg_ptr->renderstate, alignof(OSL::ClosureComponent)); sg_ptr->renderstate = closure_component_allot(ret, id, size, *w); return ret; } __device__ void* osl_mul_closure_color(void* sg_, OSL::ClosureColor* a, const OSL::Color3* w) { ShaderGlobals* sg_ptr = (ShaderGlobals*)sg_; if (a == NULL) { return NULL; } if (w->x == 0.0f && w->y == 0.0f && w->z == 0.0f) { return NULL; } if (w->x == 1.0f && w->y == 1.0f && w->z == 1.0f) { return a; } // Fix up the alignment void* ret = ((char*)sg_ptr->renderstate) + alignment_offset_calc(sg_ptr->renderstate, alignof(OSL::ClosureComponent)); sg_ptr->renderstate = closure_mul_allot(ret, *w, a); return ret; } __device__ void* osl_mul_closure_float(void* sg_, OSL::ClosureColor* a, float w) { ShaderGlobals* sg_ptr = (ShaderGlobals*)sg_; if (a == NULL || w == 0.0f) { return NULL; } if (w == 1.0f) { return a; } // Fix up the alignment void* ret = ((char*)sg_ptr->renderstate) + alignment_offset_calc(sg_ptr->renderstate, alignof(OSL::ClosureComponent)); sg_ptr->renderstate = closure_mul_float_allot(ret, w, a); return ret; } __device__ void* osl_add_closure_closure(void* sg_, OSL::ClosureColor* a, OSL::ClosureColor* b) { ShaderGlobals* sg_ptr = (ShaderGlobals*)sg_; if (a == NULL) { return b; } if (b == NULL) { return a; } // Fix up the alignment void* ret = ((char*)sg_ptr->renderstate) + alignment_offset_calc(sg_ptr->renderstate, alignof(OSL::ClosureComponent)); sg_ptr->renderstate = closure_add_allot(ret, a, b); return ret; } #define IS_STRING(type) (type.basetype == OSL::TypeDesc::STRING) #define IS_PTR(type) (type.basetype == OSL::TypeDesc::PTR) #define IS_COLOR(type) (type.vecsemantics == OSL::TypeDesc::COLOR) __device__ bool rend_get_userdata(OSL::StringParam name, void* data, int data_size, const OSL::TypeDesc& type, int index) { // Perform a userdata lookup using the parameter name, type, and // userdata index. If there is a match, memcpy the value into data and // return 1. if (IS_PTR(type) && name.hash() == STRING_PARAMS(colorsystem)) { *(void**)data = *reinterpret_cast<void**>(&OSL::pvt::s_color_system); return true; } // TODO: This is temporary code for initial testing and demonstration. if (IS_STRING(type) && name == HDSTR(OSL::pvt::test_str_1)) { memcpy(data, &OSL::pvt::test_str_2, 8); return true; } return false; } #undef IS_COLOR #undef IS_STRING #undef IS_PTR __device__ int osl_bind_interpolated_param(void* sg_, const char* name, long long type, int userdata_has_derivs, void* userdata_data, int symbol_has_derivs, void* symbol_data, int symbol_data_size, char* userdata_initialized, int userdata_index) { char status = *userdata_initialized; if (status == 0) { bool ok = rend_get_userdata(HDSTR(name), userdata_data, symbol_data_size, (*(OSL::TypeDesc*)&type), userdata_index); *userdata_initialized = status = 1 + ok; } if (status == 2) { memcpy(symbol_data, userdata_data, symbol_data_size); return 1; } return 0; } __device__ int osl_strlen_is(const char* str) { //return HDSTR(str).length(); return 0; } __device__ int osl_hash_is(const char* str) { return HDSTR(str); } __device__ int osl_getchar_isi(const char* str, int index) { // return (str && unsigned(index) < HDSTR(str).length()) // ? str[index] : 0; return 0; } // Printing is handled by the host. Copy format string's hash and // all the arguments to our print buffer. 
// Note: the first element of 'args' is the size of the argument list __device__ void osl_printf(void* sg_, char* fmt_str, void* args) { uint64_t fmt_str_hash = HDSTR(fmt_str).hash(); uint64_t args_size = reinterpret_cast<uint64_t*>(args)[0]; // This can be used to limit printing to one Cuda thread for debugging // if (launch_index.x == 0 && launch_index.y == 0) CUdeviceptr copy_start = atomicAdd(&OSL::pvt::osl_printf_buffer_start, args_size + sizeof(args_size) + sizeof(fmt_str_hash)); // Only perform copy if there's enough space if (copy_start + args_size + sizeof(args_size) + sizeof(fmt_str_hash) < OSL::pvt::osl_printf_buffer_end) { memcpy(reinterpret_cast<void*>(copy_start), &fmt_str_hash, sizeof(fmt_str_hash)); memcpy(reinterpret_cast<void*>(copy_start + sizeof(fmt_str_hash)), &args_size, sizeof(args_size)); memcpy(reinterpret_cast<void*>(copy_start + sizeof(fmt_str_hash) + sizeof(args_size)), reinterpret_cast<char*>(args) + sizeof(args_size), args_size); } } __device__ void* osl_get_noise_options(void* sg_) { ShaderGlobals* sg = ((ShaderGlobals*)sg_); NoiseOptCUDA* opt = (NoiseOptCUDA*)((ShadingContextCUDA*)sg->context)->noise_options_ptr(); new (opt) NoiseOptCUDA; return opt; } __device__ void* osl_get_texture_options(void* sg_) { return 0; } __device__ void osl_texture_set_interp_code(void* opt, int mode) { // ((TextureOpt *)opt)->interpmode = (TextureOpt::InterpMode)mode; } __device__ void osl_texture_set_stwrap_code(void* opt, int mode) { //((TextureOpt *)opt)->swrap = (TextureOpt::Wrap)mode; //((TextureOpt *)opt)->twrap = (TextureOpt::Wrap)mode; } __forceinline__ __device__ float3 make_float3(const float4& a) { return make_float3(a.x, a.y, a.z); } // FIXME: // clang++ 9.0 seems to have trouble with tex2d<float4>() look-ups, // so we'll declare this external and implement texture lookups in // CUDA files compiled by nvcc (optix_grid_renderer.cu and // optix_raytrace.cu). // (clang++ 9.0 error 'undefined __nv_tex_surf_handler') extern __device__ float4 osl_tex2DLookup(void* handle, float s, float t); __device__ int osl_texture(void* sg_, const char* name, void* handle, void* opt_, float s, float t, float dsdx, float dtdx, float dsdy, float dtdy, int chans, void* result, void* dresultdx, void* dresultdy, void* alpha, void* dalphadx, void* dalphady, void* ustring_errormessage) { if (!handle) return 0; // cudaTextureObject_t texID = cudaTextureObject_t(handle); float4 fromTexture = osl_tex2DLookup(handle, s, t); // see note above // float4 fromTexture = tex2D<float4>(texID, s, t); *((float3*)result) = make_float3(fromTexture.x, fromTexture.y, fromTexture.z); return 1; } __device__ int osl_range_check_err(int indexvalue, int length, OSL::ustring_pod symname, void* sg, OSL::ustring_pod sourcefile, int sourceline, OSL::ustring_pod groupname, int layer, OSL::ustring_pod layername, OSL::ustring_pod shadername) { if (indexvalue < 0 || indexvalue >= length) { return indexvalue < 0 ? 
0 : length - 1; } return indexvalue; } __device__ int osl_range_check(int indexvalue, int length, OSL::ustring_pod symname, void* sg, OSL::ustring_pod sourcefile, int sourceline, OSL::ustring_pod groupname, int layer, OSL::ustring_pod layername, OSL::ustring_pod shadername) { if (indexvalue < 0 || indexvalue >= length) { indexvalue = osl_range_check_err(indexvalue, length, symname, sg, sourcefile, sourceline, groupname, layer, layername, shadername); } return indexvalue; } #define MAT(m) (*(OSL::Matrix44*)m) __device__ int osl_get_matrix(void* sg_, void* r, const char* from) { ShaderGlobals* sg = (ShaderGlobals*)sg_; if (HDSTR(from) == STRING_PARAMS(common)) { MAT(r).makeIdentity(); return true; } if (HDSTR(from) == STRING_PARAMS(object)) { MAT(r) = MAT(sg->object2common); return true; } if (HDSTR(from) == STRING_PARAMS(shader)) { MAT(r) = MAT(sg->shader2common); return true; } // Find the index of the named transform in the transform list int match_idx = -1; for (size_t idx = 0; idx < OSL::pvt::num_named_xforms; ++idx) { if (HDSTR(from) == HDSTR(((uint64_t*)OSL::pvt::xform_name_buffer)[idx])) { match_idx = static_cast<int>(idx); break; } } // Return the transform if there is a match if (match_idx >= 0) { MAT(r) = reinterpret_cast<OSL::Matrix44*>( OSL::pvt::xform_buffer)[match_idx]; return true; } int ok = false; // TODO: Implement transform if (!ok) { MAT(r).makeIdentity(); // TBR: OSL would throw an error here, what should we do? } return ok; } __device__ int osl_get_inverse_matrix(void* sg_, void* r, const char* to) { ShaderGlobals* sg = (ShaderGlobals*)sg_; if (HDSTR(to) == STRING_PARAMS(common)) { MAT(r).makeIdentity(); return true; } if (HDSTR(to) == STRING_PARAMS(object)) { MAT(r) = MAT(sg->object2common); MAT(r).invert(); return true; } if (HDSTR(to) == STRING_PARAMS(shader)) { MAT(r) = MAT(sg->shader2common); MAT(r).invert(); return true; } // Find the index of the named transform in the transform list int match_idx = -1; for (size_t idx = 0; idx < OSL::pvt::num_named_xforms; ++idx) { if (HDSTR(to) == HDSTR(((uint64_t*)OSL::pvt::xform_name_buffer)[idx])) { match_idx = static_cast<int>(idx); break; } } // Return the transform if there is a match if (match_idx >= 0) { MAT(r) = reinterpret_cast<OSL::Matrix44*>( OSL::pvt::xform_buffer)[match_idx]; MAT(r).invert(); return true; } int ok = false; // TODO: Implement transform if (!ok) { MAT(r).makeIdentity(); // TBR: OSL would throw an error here, what should we do? } return ok; } #undef MAT }
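The closure allocators in both versions above rely on alignment_offset_calc to bump the renderstate pool pointer to the next alignof(OSL::ClosureComponent) boundary. Below is a host-only, self-contained check of that rounding trick, written with the equivalent ~(a-1) mask; all names are local to this sketch.

#include <cassert>
#include <cstdint>

// Same rounding as alignment_offset_calc above, on raw integers:
// round p up to a multiple of a (a power of two), return the distance moved.
static size_t align_offset(uintptr_t p, size_t a)
{
    return ((p + a - 1) & ~(uintptr_t)(a - 1)) - p;
}

int main()
{
    for (size_t a = 1; a <= 64; a <<= 1) {             // power-of-two alignments
        for (uintptr_t p = 0; p < 1024; ++p) {
            const size_t want = (a - p % a) % a;       // straightforward modulo form
            assert(align_offset(p, a) == want);
            assert((p + align_offset(p, a)) % a == 0); // result is aligned
        }
    }
    return 0;
}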
774d8ba4f928e36c96091f7878efe487c4f562fc.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2011-2014 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "sigmoid_layer_updater_cuda.h" #include <hip/hip_runtime.h> #include "../neural_network_exception.h" #include "../nn_types.h" #include "util_cuda.h" static __forceinline__ __device__ float sigmoid(float x) { return __fdividef(1.0F, 1.0F + __expf(-x)); } __global__ void sigmoid_upd_kernel( const float4 * __restrict input, float4 * __restrict output, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { float4 val = input[elem_id]; val.x = sigmoid(val.x); val.y = sigmoid(val.y); val.z = sigmoid(val.z); val.w = sigmoid(val.w); output[elem_id] = val; } } static __forceinline__ __device__ float sigmoid_derivative(float x) { return x * (1.0F - x); } __global__ void sigmoid_backprop_upd_kernel( float4 * __restrict errors, const float4 * __restrict output_neurons, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { float4 val = output_neurons[elem_id]; float4 current_error = errors[elem_id]; val.x = sigmoid_derivative(val.x); val.y = sigmoid_derivative(val.y); val.z = sigmoid_derivative(val.z); val.w = sigmoid_derivative(val.w); current_error.x *= val.x; current_error.y *= val.y; current_error.z *= val.z; current_error.w *= val.w; errors[elem_id] = current_error; } } namespace nnforge { namespace cuda { sigmoid_layer_updater_cuda::sigmoid_layer_updater_cuda() { } sigmoid_layer_updater_cuda::~sigmoid_layer_updater_cuda() { } void sigmoid_layer_updater_cuda::enqueue_test( unsigned int offset_input_entry_id, hipStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count) { if (offset_input_entry_id > 0) throw neural_network_exception("sigmoid_layer_updater_cuda is not able to run using offset"); int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); hipLaunchKernelGGL(( sigmoid_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_neurons_buffer, *output_neurons_buffer, elem_count); } void sigmoid_layer_updater_cuda::enqueue_backprop( hipStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_errors_buffer, 
cuda_linear_buffer_device_smart_ptr input_errors_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count) { int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); hipLaunchKernelGGL(( sigmoid_backprop_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *output_errors_buffer, *output_neurons_buffer, elem_count); } bool sigmoid_layer_updater_cuda::is_in_place_backprop() const { return true; } } }
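Both enqueue methods above size the launch as elem_count = (input_elem_count_per_entry * entry_count + 3) / 4 float4 elements, which covers every float provided the underlying buffers are padded to whole float4s (an assumption of this note, taken from how the kernels index). A trivial host-only check of the rounding arithmetic:

#include <cassert>

int main()
{
    for (int n = 1; n <= 1000; ++n) {
        const int elem_count = (n + 3) / 4;   // float4 elements needed to cover n floats
        assert(4 * elem_count >= n);          // covers every float
        assert(4 * (elem_count - 1) < n);     // uses no more float4s than necessary
    }
    return 0;
}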
774d8ba4f928e36c96091f7878efe487c4f562fc.cu
/* * Copyright 2011-2014 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "sigmoid_layer_updater_cuda.h" #include <cuda_runtime.h> #include "../neural_network_exception.h" #include "../nn_types.h" #include "util_cuda.h" static __forceinline__ __device__ float sigmoid(float x) { return __fdividef(1.0F, 1.0F + __expf(-x)); } __global__ void sigmoid_upd_kernel( const float4 * __restrict input, float4 * __restrict output, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { float4 val = input[elem_id]; val.x = sigmoid(val.x); val.y = sigmoid(val.y); val.z = sigmoid(val.z); val.w = sigmoid(val.w); output[elem_id] = val; } } static __forceinline__ __device__ float sigmoid_derivative(float x) { return x * (1.0F - x); } __global__ void sigmoid_backprop_upd_kernel( float4 * __restrict errors, const float4 * __restrict output_neurons, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { float4 val = output_neurons[elem_id]; float4 current_error = errors[elem_id]; val.x = sigmoid_derivative(val.x); val.y = sigmoid_derivative(val.y); val.z = sigmoid_derivative(val.z); val.w = sigmoid_derivative(val.w); current_error.x *= val.x; current_error.y *= val.y; current_error.z *= val.z; current_error.w *= val.w; errors[elem_id] = current_error; } } namespace nnforge { namespace cuda { sigmoid_layer_updater_cuda::sigmoid_layer_updater_cuda() { } sigmoid_layer_updater_cuda::~sigmoid_layer_updater_cuda() { } void sigmoid_layer_updater_cuda::enqueue_test( unsigned int offset_input_entry_id, cudaStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count) { if (offset_input_entry_id > 0) throw neural_network_exception("sigmoid_layer_updater_cuda is not able to run using offset"); int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); sigmoid_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *input_neurons_buffer, *output_neurons_buffer, elem_count); } void sigmoid_layer_updater_cuda::enqueue_backprop( cudaStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_errors_buffer, cuda_linear_buffer_device_smart_ptr input_errors_buffer, const 
std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count) { int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); sigmoid_backprop_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *output_errors_buffer, *output_neurons_buffer, elem_count); } bool sigmoid_layer_updater_cuda::is_in_place_backprop() const { return true; } } }
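The sigmoid updater above processes four activations per thread by treating the buffers as float4 and rounding the element count up with (input_elem_count_per_entry * entry_count + 3) / 4; its backward pass needs only the saved outputs, since d/dx sigmoid(x) = s * (1 - s) can be evaluated from s alone. The standalone sketch below isolates just that float4 vectorization pattern outside nnforge; the kernel name, sizes, launch configuration and zero-initialized input are illustrative assumptions, not part of the library.

// Minimal sketch of the float4-vectorized sigmoid (assumed names and sizes).
#include <cstdio>
#include <cuda_runtime.h>

__device__ __forceinline__ float fast_sigmoid(float x)
{
    // Same fast-math intrinsics as the updater kernel above.
    return __fdividef(1.0f, 1.0f + __expf(-x));
}

__global__ void sigmoid_f4(const float4* in, float4* out, int elem_count)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < elem_count) {
        float4 v = in[i];
        v.x = fast_sigmoid(v.x);
        v.y = fast_sigmoid(v.y);
        v.z = fast_sigmoid(v.z);
        v.w = fast_sigmoid(v.w);
        out[i] = v;
    }
}

int main()
{
    const int n = 1000;                                // logical float count
    const int vec_count = (n + 3) / 4;                 // float4 count, as in enqueue_test
    const size_t bytes = vec_count * sizeof(float4);   // allocation padded to a multiple of 4 floats

    float4 *d_in = NULL, *d_out = NULL;
    cudaMalloc((void**)&d_in, bytes);
    cudaMalloc((void**)&d_out, bytes);
    cudaMemset(d_in, 0, bytes);

    const int block = 256;
    sigmoid_f4<<<(vec_count + block - 1) / block, block>>>(d_in, d_out, vec_count);
    cudaDeviceSynchronize();
    printf("launch status: %s\n", cudaGetErrorString(cudaGetLastError()));

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}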
1bf3cde4be5fd0e441883a2d00c775f06b841aa3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2017 Stanford, NVIDIA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" Tensor FFModel::concat(int n, const Tensor* tensors, int axis, const char *name) { Concat *cat = new Concat(*this, n, tensors, axis, name); layers.push_back(cat); return cat->outputs[0]; } Concat::Concat(FFModel& model, int _n, const Tensor* _tensors, int _axis, const char* name) : Op(model, OP_CONCAT, name, _n, _tensors), axis(_axis) { //TODO: swich to use the Legion dim ordering int num_dim = inputs[0].numDim; outputs[0].numDim = num_dim; for (int i = 0; i < num_dim; i++) outputs[0].adim[i] = inputs[0].adim[i]; for (int i = 1; i < numInputs; i++) for (int j = 0; j < num_dim; j++) { if (j != num_dim - 1 - axis) assert(inputs[i].adim[j] == outputs[0].adim[j]); else outputs[0].adim[j] += inputs[i].adim[j]; } numOutputs = 1; numWeights = 0; } void Concat::create_weights(FFModel& model) { // DO nothing } void Concat::create_output_and_partition(FFModel& model) { // Retrive the task indexspace for the op std::string pcname = name; task_is = model.get_or_create_task_is(inputs[0].numDim, pcname); Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; Domain domain = runtime->get_index_space_domain(ctx, task_is); int dims[MAX_TENSOR_DIM], num_dim = inputs[0].numDim; assert(num_dim == domain.get_dim()); for (int i = 0; i < num_dim; i++) dims[i] = inputs[0].adim[num_dim-1-i]; for (int i = 1; i < numInputs; i++) for (int j = 0; j < num_dim; j++) { if (j != axis) assert(inputs[i].adim[num_dim-1-j] == dims[j]); else dims[j] += inputs[i].adim[num_dim-1-j]; } //for (int i = 0; i < num_dim; i++) //printf("concat: dim[%d] = %d\n", i, dims[i]); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> part_rect = domain; \ outputs[0] = model.create_tensor<DIM>(dims, DT_FLOAT, this); \ outputs[0].owner_op = this; \ outputs[0].owner_idx = 0; \ for (int i = 0; i < numInputs; i++) { \ Rect<DIM> input_rect = runtime->get_index_partition_color_space( \ ctx, inputs[i].part.get_index_partition()); \ if (input_rect == part_rect) { \ input_lps[i] = inputs[i].part; \ input_grad_lps[i] = inputs[i].part_grad; \ } else { \ model.create_disjoint_partition<DIM>(inputs[i], \ IndexSpaceT<DIM>(task_is), input_lps[i], input_grad_lps[i]); \ } \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: { fprintf(stderr, "Unsupported concat dimension number"); assert(false); } } } void Concat::init_meta(ConcatMeta *m) const { m->axis = this->outputs[0].numDim - 1 - this->axis; } __host__ OpMeta* Concat::init_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { Concat* cc = (Concat*) task->args; FFHandler handler = *((const FFHandler*) task->local_args); ConcatMeta* m = new ConcatMeta(handler); // Note that our internal axis index ordering is opposite to other frameworks cc->init_meta(m); m->profiling = cc->profiling; return m; } void 
Concat::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Domain domain = runtime->get_index_space_domain(ctx, task_is); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ ParallelConfig pc; \ std::string pcname = name; \ ff.config.find_parallel_config(DIM, pcname, pc); \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ FFHandler handle = ff.handlers[pc.device_ids[idx++]]; \ argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(CONCAT_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(Concat)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(0, FID_DATA); for (int i = 0; i < numInputs; i++) { launcher.add_region_requirement( RegionRequirement(input_lps[i], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[i].region)); launcher.add_field(i + 1, FID_DATA); } for (int i = 0; i < numInputs; i++) { launcher.add_region_requirement( RegionRequirement(input_grad_lps[i], 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, inputs[i].region_grad)); launcher.add_field(i + numInputs + 1, FID_DATA); } FutureMap fm = runtime->execute_index_space(ctx, launcher); fm.wait_all_results(); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ meta[idx++] = fm.get_result<OpMeta*>(*it); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } template<int N> void calc_blk_size(coord_t& num_blocks, coord_t& blk_size, Rect<N> rect, int axis) { num_blocks = 1; blk_size = 1; for (int d = 0; d < N; d++) { if (d <= axis) blk_size *= (rect.hi[d] - rect.lo[d] + 1); else num_blocks *= (rect.hi[d] - rect.lo[d] + 1); } } /*static*/ void Concat::forward_kernel(float* output, float const * const *inputs, int num_inputs, int axis, const Domain& out_domain, const Domain* in_domain, hipStream_t stream) { coord_t num_blocks = 1, output_blk_size = 1, input_blk_sizes[MAX_NUM_INPUTS]; assert(num_inputs <= MAX_NUM_INPUTS); switch (out_domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = out_domain; \ calc_blk_size<DIM>(num_blocks, output_blk_size, rect, axis); \ for (int i = 0; i < num_inputs; i++) { \ rect = in_domain[i]; \ coord_t input_num_blocks = 1; \ calc_blk_size<DIM>(input_num_blocks, input_blk_sizes[i], rect, axis); \ assert(input_num_blocks == num_blocks); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: fprintf(stderr, "Unsupported concat dimension number"); assert(false); } for (int i = 0; i < num_inputs; i++) { hipLaunchKernelGGL(( copy_with_stride), dim3(GET_BLOCKS(input_blk_sizes[i]*num_blocks)), dim3(CUDA_NUM_THREADS), 0, stream, output, inputs[i], num_blocks, output_blk_size, input_blk_sizes[i]); //printf("output = %x num_blocks=%d output_blk_size=%d input_blk_size[%d]=%d\n", // output, num_blocks, output_blk_size, i, input_blk_sizes[i]); output += input_blk_sizes[i]; } } /* regions[0](O): output regions[1..numInputs](I): inputs */ void Concat::forward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { const Concat* cc = (Concat*) task->args; // Note that 
our internal axis index ordering is opposite to other frameworks int axis = cc->outputs[0].numDim - 1 - cc->axis; assert(regions.size() == cc->numInputs + 1); assert(task->regions.size() == cc->numInputs + 1); Domain out_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); assert(out_domain.get_dim() == cc->outputs[0].numDim); Domain in_domain[MAX_NUM_INPUTS]; for (int i = 0; i < cc->numInputs; i++) in_domain[i] = runtime->get_index_space_domain( ctx, task->regions[i+1].region.get_index_space()); float *output = helperGetTensorPointerWO<float>( regions[0], task->regions[0], FID_DATA, ctx, runtime); const float *inputs[MAX_NUM_INPUTS]; for (int i = 0; i < cc->numInputs; i++) inputs[i] = helperGetTensorPointerRO<float>( regions[i+1], task->regions[i+1], FID_DATA, ctx, runtime); hipStream_t stream; checkCUDA(get_legion_stream(&stream)); hipEvent_t t_start, t_end; if (cc->profiling) { hipEventCreate(&t_start); hipEventCreate(&t_end); hipEventRecord(t_start, stream); } forward_kernel(output, inputs, cc->numInputs, axis, out_domain, in_domain, stream); if (cc->profiling) { hipEventRecord(t_end, stream); checkCUDA(hipEventSynchronize(t_end)); //print_tensor<4, float>(output - output_blk_size, output_rect, "[Concat:forward:output]"); //printf("output_blk_size=%zu\n", output_blk_size); //print_tensor<4, float>(inputs[0], input_rect[0], "[Concat:forward:input0]"); //print_tensor<4, float>(inputs[1], input_rect[1], "[Concat:forward:input1]"); float elapsed = 0; checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end)); printf("[%s] forward time = %.4f ms\n", cc->name, elapsed); hipEventDestroy(t_start); hipEventDestroy(t_end); } } void Concat::forward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; IndexLauncher launcher(CONCAT_FWD_TASK_ID, task_is, TaskArgument(this, sizeof(Concat)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(0, FID_DATA); for (int i = 0; i < numInputs; i++) { launcher.add_region_requirement( RegionRequirement(input_lps[i], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[i].region)); launcher.add_field(i + 1, FID_DATA); } runtime->execute_index_space(ctx, launcher); } void Concat::backward_kernel(const float* output_grad, float** input_grads, int num_inputs, int axis, const Domain& out_grad_domain, const Domain* in_grad_domain, hipStream_t stream) { coord_t num_blocks = 1, output_blk_size = 1, input_blk_sizes[MAX_NUM_INPUTS]; assert(num_inputs <= MAX_NUM_INPUTS); switch (out_grad_domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = out_grad_domain; \ calc_blk_size<DIM>(num_blocks, output_blk_size, rect, axis); \ for (int i = 0; i < num_inputs; i++) { \ rect = in_grad_domain[i]; \ coord_t input_num_blocks = 1; \ calc_blk_size<DIM>(input_num_blocks, input_blk_sizes[i], rect, axis); \ assert(input_num_blocks == num_blocks); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: fprintf(stderr, "Unsupported concat dimension number"); assert(false); } for (int i = 0; i < num_inputs; i++) { hipLaunchKernelGGL(( add_with_stride), dim3(GET_BLOCKS(input_blk_sizes[i]*num_blocks)), dim3(CUDA_NUM_THREADS), 0, stream, input_grads[i], output_grad, num_blocks, input_blk_sizes[i], output_blk_size); output_grad += input_blk_sizes[i]; } //Rect<2> 
output_rect(Point<2>(0, 0), Point<2>(output_blk_size-1, batch_size - 1)); //Rect<2> input_rect(Point<2>(0, 0), Point<2>(input_blk_sizes[0]-1, batch_size - 1)); //print_tensor<2, float>(output_grad - output_blk_size, output_rect, "[Concat:backward:output]"); //print_tensor<2, float>(input_grads[0], input_rect, "[Concat:backward:input0]"); } /* regions[0](I): output_grad regions[1..numInputs](I/O): input_grad */ void Concat::backward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { const Concat* cc = (Concat*) task->args; // Note that our internal axis index ordering is opposite to other frameworks int axis = cc->outputs[0].numDim - 1 - cc->axis; assert(regions.size() == cc->numInputs + 1); assert(task->regions.size() == cc->numInputs + 1); assert(cc->numInputs <= MAX_NUM_INPUTS); Domain out_grad_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); assert(out_grad_domain.get_dim() == cc->outputs[0].numDim); Domain in_grad_domains[MAX_NUM_INPUTS]; for (int i = 0; i < cc->numInputs; i++) in_grad_domains[i] = runtime->get_index_space_domain( ctx, task->regions[i+1].region.get_index_space()); const float *output_grad = helperGetTensorPointerRO<float>( regions[0], task->regions[0], FID_DATA, ctx, runtime); float *input_grads[MAX_NUM_INPUTS]; for (int i = 0; i < cc->numInputs; i++) input_grads[i] = helperGetTensorPointerRW<float>( regions[i+1], task->regions[i+1], FID_DATA, ctx, runtime); hipStream_t stream; checkCUDA(get_legion_stream(&stream)); hipEvent_t t_start, t_end; if (cc->profiling) { hipEventCreate(&t_start); hipEventCreate(&t_end); hipEventRecord(t_start, stream); } backward_kernel(output_grad, input_grads, cc->numInputs, axis, out_grad_domain, in_grad_domains, stream); if (cc->profiling) { hipEventRecord(t_end, stream); checkCUDA(hipEventSynchronize(t_end)); float elapsed = 0; checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end)); printf("[%s] forward time = %.4f ms\n", cc->name, elapsed); hipEventDestroy(t_start); hipEventDestroy(t_end); } } void Concat::backward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; IndexLauncher launcher(CONCAT_BWD_TASK_ID, task_is, TaskArgument(this, sizeof(Concat)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(outputs[0].part_grad, 0/*projection id*/, READ_ONLY, EXCLUSIVE, outputs[0].region_grad)); launcher.add_field(0, FID_DATA); for (int i = 0; i < numInputs; i++) { launcher.add_region_requirement( RegionRequirement(input_grad_lps[i], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[i].region_grad)); //LogicalRegion lr = inputs[i].region_grad; //printf("concat[%d]: region(%d,%d,%d)\n", i+1, lr.get_index_space().get_id(), lr.get_field_space().get_id(), lr.get_tree_id()); launcher.add_field(i + 1, FID_DATA); } runtime->execute_index_space(ctx, launcher); } bool Concat::measure_operator_cost(Simulator* sim, const ParallelConfig& pc, CostMetrics& cost_metrics) { assert (numInputs <= MAX_NUM_INPUTS); Tensor sub_inputs[MAX_NUM_INPUTS], sub_output; if (!outputs[0].get_output_sub_tensor(pc, sub_output, op_type)) { return false; } for (int i = 0; i < numInputs; i++) { if (!inputs[i].get_input_sub_tensor(pc, sub_inputs[i], op_type)) { return false; } } ConcatMeta *m = sim->concat_meta; this->init_meta(m); sim->free_all(); float *input_ptrs[MAX_NUM_INPUTS]; float *input_grad_ptrs[MAX_NUM_INPUTS]; for 
(int i = 0; i < numInputs; i++) { input_ptrs[i] = (float *)sim->allocate(sub_inputs[i].get_volume(), DT_FLOAT); assert (input_ptrs[i] != NULL); } float *output_ptr = (float *)sim->allocate(sub_output.get_volume(), DT_FLOAT); assert (output_ptr != NULL); int axis = outputs[0].numDim - 1 - this->axis; Domain out_domain = sub_output.get_domain(); Domain in_domains[MAX_NUM_INPUTS]; for (int i = 0; i < numInputs; i++) { in_domains[i] = sub_inputs[i].get_domain(); } hipStream_t stream; checkCUDA(get_legion_stream(&stream)); std::function<void()> forward, backward; forward = [&] { forward_kernel(output_ptr, input_ptrs, numInputs, axis, out_domain, in_domains, stream); }; if (sim->computationMode == COMP_MODE_TRAINING) { for (int i = 0; i < numInputs; i++) { input_grad_ptrs[i] = (float *)sim->allocate(sub_inputs[i].get_volume(), DT_FLOAT); assert (input_grad_ptrs[i] != NULL); } float *output_grad_ptr = (float *)sim->allocate(sub_output.get_volume(), DT_FLOAT); assert (output_grad_ptr != NULL); backward = [&] { backward_kernel(output_grad_ptr, input_grad_ptrs, numInputs, axis, out_domain, in_domains, stream); }; } inner_measure_operator_cost(sim, forward, backward, cost_metrics); if (sim->computationMode == COMP_MODE_TRAINING) { printf("[Measure Concat] name(%s) forward_time(%.4lf) backward_time(%.4lf)\n", name, cost_metrics.forward_time, cost_metrics.backward_time); } else { printf("[Measure Concat] name(%s) forward_time(%.4lf)\n", name, cost_metrics.forward_time); } return true; }
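In the Concat forward path above, calc_blk_size folds every dimension at or below the internal concat axis into one contiguous block size and every dimension above it into a block count, so each input owns a contiguous slab of input_blk_sizes[i] floats inside each of the num_blocks output blocks; the copy kernel is launched once per input and the output pointer then advances by that slab size, while the backward path mirrors this by accumulating slabs of output_grad back into the input gradients. The sketch below shows the indexing such a strided copy presumably performs; it is an illustration of the pattern, not the copy_with_stride helper from cuda_helper.h.

// Illustrative strided-copy kernel (assumption: mirrors what a copy_with_stride-style
// helper does). Every output block receives `in_blk` contiguous floats from this
// input; the caller offsets `output` by the slab sizes of the preceding inputs.
#include <cuda_runtime.h>

__global__ void concat_copy_sketch(float* output, const float* input,
                                   long num_blocks, long out_blk, long in_blk)
{
    long total = num_blocks * in_blk;
    for (long i = (long)blockIdx.x * blockDim.x + threadIdx.x;
         i < total;
         i += (long)gridDim.x * blockDim.x) {
        long blk = i / in_blk;                 // which block along the non-concat dims
        long off = i % in_blk;                 // offset inside this input's slab
        output[blk * out_blk + off] = input[i];
    }
}

With two inputs of slab sizes b0 and b1, out_blk is b0 + b1, the first call writes at offset 0 of each output block, and the second call receives output + b0.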
1bf3cde4be5fd0e441883a2d00c775f06b841aa3.cu
/* Copyright 2017 Stanford, NVIDIA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" Tensor FFModel::concat(int n, const Tensor* tensors, int axis, const char *name) { Concat *cat = new Concat(*this, n, tensors, axis, name); layers.push_back(cat); return cat->outputs[0]; } Concat::Concat(FFModel& model, int _n, const Tensor* _tensors, int _axis, const char* name) : Op(model, OP_CONCAT, name, _n, _tensors), axis(_axis) { //TODO: swich to use the Legion dim ordering int num_dim = inputs[0].numDim; outputs[0].numDim = num_dim; for (int i = 0; i < num_dim; i++) outputs[0].adim[i] = inputs[0].adim[i]; for (int i = 1; i < numInputs; i++) for (int j = 0; j < num_dim; j++) { if (j != num_dim - 1 - axis) assert(inputs[i].adim[j] == outputs[0].adim[j]); else outputs[0].adim[j] += inputs[i].adim[j]; } numOutputs = 1; numWeights = 0; } void Concat::create_weights(FFModel& model) { // DO nothing } void Concat::create_output_and_partition(FFModel& model) { // Retrive the task indexspace for the op std::string pcname = name; task_is = model.get_or_create_task_is(inputs[0].numDim, pcname); Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; Domain domain = runtime->get_index_space_domain(ctx, task_is); int dims[MAX_TENSOR_DIM], num_dim = inputs[0].numDim; assert(num_dim == domain.get_dim()); for (int i = 0; i < num_dim; i++) dims[i] = inputs[0].adim[num_dim-1-i]; for (int i = 1; i < numInputs; i++) for (int j = 0; j < num_dim; j++) { if (j != axis) assert(inputs[i].adim[num_dim-1-j] == dims[j]); else dims[j] += inputs[i].adim[num_dim-1-j]; } //for (int i = 0; i < num_dim; i++) //printf("concat: dim[%d] = %d\n", i, dims[i]); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> part_rect = domain; \ outputs[0] = model.create_tensor<DIM>(dims, DT_FLOAT, this); \ outputs[0].owner_op = this; \ outputs[0].owner_idx = 0; \ for (int i = 0; i < numInputs; i++) { \ Rect<DIM> input_rect = runtime->get_index_partition_color_space( \ ctx, inputs[i].part.get_index_partition()); \ if (input_rect == part_rect) { \ input_lps[i] = inputs[i].part; \ input_grad_lps[i] = inputs[i].part_grad; \ } else { \ model.create_disjoint_partition<DIM>(inputs[i], \ IndexSpaceT<DIM>(task_is), input_lps[i], input_grad_lps[i]); \ } \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: { fprintf(stderr, "Unsupported concat dimension number"); assert(false); } } } void Concat::init_meta(ConcatMeta *m) const { m->axis = this->outputs[0].numDim - 1 - this->axis; } __host__ OpMeta* Concat::init_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { Concat* cc = (Concat*) task->args; FFHandler handler = *((const FFHandler*) task->local_args); ConcatMeta* m = new ConcatMeta(handler); // Note that our internal axis index ordering is opposite to other frameworks cc->init_meta(m); m->profiling = cc->profiling; return m; } void Concat::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; 
Runtime* runtime = ff.config.lg_hlr; Domain domain = runtime->get_index_space_domain(ctx, task_is); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ ParallelConfig pc; \ std::string pcname = name; \ ff.config.find_parallel_config(DIM, pcname, pc); \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ FFHandler handle = ff.handlers[pc.device_ids[idx++]]; \ argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(CONCAT_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(Concat)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(0, FID_DATA); for (int i = 0; i < numInputs; i++) { launcher.add_region_requirement( RegionRequirement(input_lps[i], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[i].region)); launcher.add_field(i + 1, FID_DATA); } for (int i = 0; i < numInputs; i++) { launcher.add_region_requirement( RegionRequirement(input_grad_lps[i], 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, inputs[i].region_grad)); launcher.add_field(i + numInputs + 1, FID_DATA); } FutureMap fm = runtime->execute_index_space(ctx, launcher); fm.wait_all_results(); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ meta[idx++] = fm.get_result<OpMeta*>(*it); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } template<int N> void calc_blk_size(coord_t& num_blocks, coord_t& blk_size, Rect<N> rect, int axis) { num_blocks = 1; blk_size = 1; for (int d = 0; d < N; d++) { if (d <= axis) blk_size *= (rect.hi[d] - rect.lo[d] + 1); else num_blocks *= (rect.hi[d] - rect.lo[d] + 1); } } /*static*/ void Concat::forward_kernel(float* output, float const * const *inputs, int num_inputs, int axis, const Domain& out_domain, const Domain* in_domain, cudaStream_t stream) { coord_t num_blocks = 1, output_blk_size = 1, input_blk_sizes[MAX_NUM_INPUTS]; assert(num_inputs <= MAX_NUM_INPUTS); switch (out_domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = out_domain; \ calc_blk_size<DIM>(num_blocks, output_blk_size, rect, axis); \ for (int i = 0; i < num_inputs; i++) { \ rect = in_domain[i]; \ coord_t input_num_blocks = 1; \ calc_blk_size<DIM>(input_num_blocks, input_blk_sizes[i], rect, axis); \ assert(input_num_blocks == num_blocks); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: fprintf(stderr, "Unsupported concat dimension number"); assert(false); } for (int i = 0; i < num_inputs; i++) { copy_with_stride<<<GET_BLOCKS(input_blk_sizes[i]*num_blocks), CUDA_NUM_THREADS, 0, stream>>>( output, inputs[i], num_blocks, output_blk_size, input_blk_sizes[i]); //printf("output = %x num_blocks=%d output_blk_size=%d input_blk_size[%d]=%d\n", // output, num_blocks, output_blk_size, i, input_blk_sizes[i]); output += input_blk_sizes[i]; } } /* regions[0](O): output regions[1..numInputs](I): inputs */ void Concat::forward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { const Concat* cc = (Concat*) task->args; // Note that our internal axis index ordering is opposite to other frameworks int axis = cc->outputs[0].numDim - 1 - cc->axis; 
assert(regions.size() == cc->numInputs + 1); assert(task->regions.size() == cc->numInputs + 1); Domain out_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); assert(out_domain.get_dim() == cc->outputs[0].numDim); Domain in_domain[MAX_NUM_INPUTS]; for (int i = 0; i < cc->numInputs; i++) in_domain[i] = runtime->get_index_space_domain( ctx, task->regions[i+1].region.get_index_space()); float *output = helperGetTensorPointerWO<float>( regions[0], task->regions[0], FID_DATA, ctx, runtime); const float *inputs[MAX_NUM_INPUTS]; for (int i = 0; i < cc->numInputs; i++) inputs[i] = helperGetTensorPointerRO<float>( regions[i+1], task->regions[i+1], FID_DATA, ctx, runtime); cudaStream_t stream; checkCUDA(get_legion_stream(&stream)); cudaEvent_t t_start, t_end; if (cc->profiling) { cudaEventCreate(&t_start); cudaEventCreate(&t_end); cudaEventRecord(t_start, stream); } forward_kernel(output, inputs, cc->numInputs, axis, out_domain, in_domain, stream); if (cc->profiling) { cudaEventRecord(t_end, stream); checkCUDA(cudaEventSynchronize(t_end)); //print_tensor<4, float>(output - output_blk_size, output_rect, "[Concat:forward:output]"); //printf("output_blk_size=%zu\n", output_blk_size); //print_tensor<4, float>(inputs[0], input_rect[0], "[Concat:forward:input0]"); //print_tensor<4, float>(inputs[1], input_rect[1], "[Concat:forward:input1]"); float elapsed = 0; checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end)); printf("[%s] forward time = %.4f ms\n", cc->name, elapsed); cudaEventDestroy(t_start); cudaEventDestroy(t_end); } } void Concat::forward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; IndexLauncher launcher(CONCAT_FWD_TASK_ID, task_is, TaskArgument(this, sizeof(Concat)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(0, FID_DATA); for (int i = 0; i < numInputs; i++) { launcher.add_region_requirement( RegionRequirement(input_lps[i], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[i].region)); launcher.add_field(i + 1, FID_DATA); } runtime->execute_index_space(ctx, launcher); } void Concat::backward_kernel(const float* output_grad, float** input_grads, int num_inputs, int axis, const Domain& out_grad_domain, const Domain* in_grad_domain, cudaStream_t stream) { coord_t num_blocks = 1, output_blk_size = 1, input_blk_sizes[MAX_NUM_INPUTS]; assert(num_inputs <= MAX_NUM_INPUTS); switch (out_grad_domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = out_grad_domain; \ calc_blk_size<DIM>(num_blocks, output_blk_size, rect, axis); \ for (int i = 0; i < num_inputs; i++) { \ rect = in_grad_domain[i]; \ coord_t input_num_blocks = 1; \ calc_blk_size<DIM>(input_num_blocks, input_blk_sizes[i], rect, axis); \ assert(input_num_blocks == num_blocks); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: fprintf(stderr, "Unsupported concat dimension number"); assert(false); } for (int i = 0; i < num_inputs; i++) { add_with_stride<<<GET_BLOCKS(input_blk_sizes[i]*num_blocks), CUDA_NUM_THREADS, 0, stream>>>( input_grads[i], output_grad, num_blocks, input_blk_sizes[i], output_blk_size); output_grad += input_blk_sizes[i]; } //Rect<2> output_rect(Point<2>(0, 0), Point<2>(output_blk_size-1, batch_size - 1)); //Rect<2> input_rect(Point<2>(0, 0), Point<2>(input_blk_sizes[0]-1, 
batch_size - 1)); //print_tensor<2, float>(output_grad - output_blk_size, output_rect, "[Concat:backward:output]"); //print_tensor<2, float>(input_grads[0], input_rect, "[Concat:backward:input0]"); } /* regions[0](I): output_grad regions[1..numInputs](I/O): input_grad */ void Concat::backward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { const Concat* cc = (Concat*) task->args; // Note that our internal axis index ordering is opposite to other frameworks int axis = cc->outputs[0].numDim - 1 - cc->axis; assert(regions.size() == cc->numInputs + 1); assert(task->regions.size() == cc->numInputs + 1); assert(cc->numInputs <= MAX_NUM_INPUTS); Domain out_grad_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); assert(out_grad_domain.get_dim() == cc->outputs[0].numDim); Domain in_grad_domains[MAX_NUM_INPUTS]; for (int i = 0; i < cc->numInputs; i++) in_grad_domains[i] = runtime->get_index_space_domain( ctx, task->regions[i+1].region.get_index_space()); const float *output_grad = helperGetTensorPointerRO<float>( regions[0], task->regions[0], FID_DATA, ctx, runtime); float *input_grads[MAX_NUM_INPUTS]; for (int i = 0; i < cc->numInputs; i++) input_grads[i] = helperGetTensorPointerRW<float>( regions[i+1], task->regions[i+1], FID_DATA, ctx, runtime); cudaStream_t stream; checkCUDA(get_legion_stream(&stream)); cudaEvent_t t_start, t_end; if (cc->profiling) { cudaEventCreate(&t_start); cudaEventCreate(&t_end); cudaEventRecord(t_start, stream); } backward_kernel(output_grad, input_grads, cc->numInputs, axis, out_grad_domain, in_grad_domains, stream); if (cc->profiling) { cudaEventRecord(t_end, stream); checkCUDA(cudaEventSynchronize(t_end)); float elapsed = 0; checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end)); printf("[%s] forward time = %.4f ms\n", cc->name, elapsed); cudaEventDestroy(t_start); cudaEventDestroy(t_end); } } void Concat::backward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; IndexLauncher launcher(CONCAT_BWD_TASK_ID, task_is, TaskArgument(this, sizeof(Concat)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(outputs[0].part_grad, 0/*projection id*/, READ_ONLY, EXCLUSIVE, outputs[0].region_grad)); launcher.add_field(0, FID_DATA); for (int i = 0; i < numInputs; i++) { launcher.add_region_requirement( RegionRequirement(input_grad_lps[i], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[i].region_grad)); //LogicalRegion lr = inputs[i].region_grad; //printf("concat[%d]: region(%d,%d,%d)\n", i+1, lr.get_index_space().get_id(), lr.get_field_space().get_id(), lr.get_tree_id()); launcher.add_field(i + 1, FID_DATA); } runtime->execute_index_space(ctx, launcher); } bool Concat::measure_operator_cost(Simulator* sim, const ParallelConfig& pc, CostMetrics& cost_metrics) { assert (numInputs <= MAX_NUM_INPUTS); Tensor sub_inputs[MAX_NUM_INPUTS], sub_output; if (!outputs[0].get_output_sub_tensor(pc, sub_output, op_type)) { return false; } for (int i = 0; i < numInputs; i++) { if (!inputs[i].get_input_sub_tensor(pc, sub_inputs[i], op_type)) { return false; } } ConcatMeta *m = sim->concat_meta; this->init_meta(m); sim->free_all(); float *input_ptrs[MAX_NUM_INPUTS]; float *input_grad_ptrs[MAX_NUM_INPUTS]; for (int i = 0; i < numInputs; i++) { input_ptrs[i] = (float *)sim->allocate(sub_inputs[i].get_volume(), DT_FLOAT); assert (input_ptrs[i] 
!= NULL); } float *output_ptr = (float *)sim->allocate(sub_output.get_volume(), DT_FLOAT); assert (output_ptr != NULL); int axis = outputs[0].numDim - 1 - this->axis; Domain out_domain = sub_output.get_domain(); Domain in_domains[MAX_NUM_INPUTS]; for (int i = 0; i < numInputs; i++) { in_domains[i] = sub_inputs[i].get_domain(); } cudaStream_t stream; checkCUDA(get_legion_stream(&stream)); std::function<void()> forward, backward; forward = [&] { forward_kernel(output_ptr, input_ptrs, numInputs, axis, out_domain, in_domains, stream); }; if (sim->computationMode == COMP_MODE_TRAINING) { for (int i = 0; i < numInputs; i++) { input_grad_ptrs[i] = (float *)sim->allocate(sub_inputs[i].get_volume(), DT_FLOAT); assert (input_grad_ptrs[i] != NULL); } float *output_grad_ptr = (float *)sim->allocate(sub_output.get_volume(), DT_FLOAT); assert (output_grad_ptr != NULL); backward = [&] { backward_kernel(output_grad_ptr, input_grad_ptrs, numInputs, axis, out_domain, in_domains, stream); }; } inner_measure_operator_cost(sim, forward, backward, cost_metrics); if (sim->computationMode == COMP_MODE_TRAINING) { printf("[Measure Concat] name(%s) forward_time(%.4lf) backward_time(%.4lf)\n", name, cost_metrics.forward_time, cost_metrics.backward_time); } else { printf("[Measure Concat] name(%s) forward_time(%.4lf)\n", name, cost_metrics.forward_time); } return true; }
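Both forward_task and backward_task above wrap their kernel call in the same profiling pattern: create two events, record them on the Legion-provided stream around the work, synchronize on the end event, and read the elapsed milliseconds. A minimal standalone sketch of that pattern follows; the dummy kernel, sizes and explicit stream creation are assumptions for illustration and are not taken from FlexFlow.

// Event-based timing sketch (assumed kernel and sizes). Events are recorded on the
// same stream as the work, so cudaEventElapsedTime measures only the enqueued
// kernel, not unrelated host activity.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy_kernel(float* p, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) p[i] += 1.0f;
}

int main()
{
    const int n = 1 << 20;
    float* d = NULL;
    cudaMalloc((void**)&d, n * sizeof(float));
    cudaMemset(d, 0, n * sizeof(float));

    cudaStream_t stream;
    cudaStreamCreate(&stream);
    cudaEvent_t t_start, t_end;
    cudaEventCreate(&t_start);
    cudaEventCreate(&t_end);

    cudaEventRecord(t_start, stream);
    dummy_kernel<<<(n + 255) / 256, 256, 0, stream>>>(d, n);
    cudaEventRecord(t_end, stream);
    cudaEventSynchronize(t_end);

    float elapsed = 0.0f;
    cudaEventElapsedTime(&elapsed, t_start, t_end);
    printf("kernel time = %.4f ms\n", elapsed);

    cudaEventDestroy(t_start);
    cudaEventDestroy(t_end);
    cudaStreamDestroy(stream);
    cudaFree(d);
    return 0;
}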
8ed1c376411009e520d339d4fc4ea4324f95fb68.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <fstream> #include <iomanip> #include "image.h" #define BUF_SIZE 256 using namespace std; class errorPNM { }; struct Color { unsigned char r; unsigned char g; unsigned char b; }; void readPNM(ifstream &file, char* buf); image<unsigned char>* loadPGM(const char* name); void savePPM(image<Color>* im, const char* name); Color randomColor(); __global__ void evolveContour(unsigned char* intensityDev, unsigned char* labelsDev, signed char* speedDev, signed char* phiDev, int HEIGHT, int WIDTH, int* targetLabels, int kernelID, int numLabels, int* lowerIntensityBounds, int* upperIntensityBounds); __global__ void initSpeedPhi(unsigned char* intensity, unsigned char* labels, signed char* speed, signed char* phi, int HEIGHT, int WIDTH, int targetLabel, int lowerIntensityBound, int upperIntensityBound); __global__ void switchIn(signed char* speed, signed char* phi, int HEIGHT, int WIDTH); __global__ void switchOut(signed char* speed, signed char* phi, int HEIGHT, int WIDTH); __global__ void checkStopCondition(signed char* speed, signed char* phi, int parentThreadID, int HEIGHT, int WIDTH); __device__ volatile int stopCondition[1024]; void usage() { cout<<"Usage: ./lss <Input intensities path> <Input labels path> <Input params path> <GOLD output path> <#repetitions (HyperQ)>" << endl; } int main(int argc, char* argv[]) { // Parse command line arguments if(argc < 6) { usage(); exit(0); } char* imageFile = argv[1]; char* labelFile = argv[2]; char* paramFile = argv[3]; char* outputFile = argv[4]; int numRepetitions = atoi(argv[5]); // Initialize timers, start the runtime timer hipEvent_t startTime1, startTime2, stopTime1, stopTime2; hipEventCreate(&startTime1); hipEventCreate(&startTime2); hipEventCreate(&stopTime1); hipEventCreate(&stopTime2); float elapsedTime1, elapsedTime2; hipEventRecord(startTime1, 0); // Load image, send to GPU image<unsigned char>* input = loadPGM(imageFile); const int HEIGHT = input->height(); const int WIDTH = input->width(); const int SIZE = HEIGHT*WIDTH*sizeof(char); unsigned char* intensity = new unsigned char[numRepetitions*HEIGHT*WIDTH]; for(int i=0; i<numRepetitions; i++) memcpy(&intensity[i*HEIGHT*WIDTH], input->data, SIZE); unsigned char* intensityDev = NULL; hipMalloc((void**)&intensityDev, numRepetitions*SIZE); hipMemcpyAsync(intensityDev, intensity, numRepetitions*SIZE, hipMemcpyHostToDevice); // Load connected component labels, send to GPU input = loadPGM(labelFile); unsigned char* labels = new unsigned char[numRepetitions*HEIGHT*WIDTH]; for(int i=0; i<numRepetitions; i++) memcpy(&labels[i*HEIGHT*WIDTH], input->data, SIZE); unsigned char* labelsDev = NULL; hipMalloc((void **)&labelsDev, numRepetitions*SIZE); hipMemcpyAsync(labelsDev, labels, numRepetitions*SIZE, hipMemcpyHostToDevice); // Load parameters, send to GPU ifstream paramStream; paramStream.open(paramFile); if(paramStream.is_open() != true) { cerr << "Could not open '" << paramFile << "'." 
<< endl; exit(1); } int targetLabels[1024]; int lowerIntensityBounds[1024]; int upperIntensityBounds[1024]; int numLabels = 0; while(paramStream.eof() == false) { char line[16]; paramStream.getline(line, 16); if(paramStream.eof() == true) break; if(numLabels % 3 == 0) targetLabels[numLabels/3] = strtol(line, NULL, 10); else if(numLabels % 3 == 1) lowerIntensityBounds[numLabels/3] = strtol(line, NULL, 10); else upperIntensityBounds[numLabels/3] = strtol(line, NULL, 10); numLabels++; } if(numLabels % 3 == 0) numLabels /= 3; else { cerr << "Number of lines in " << paramFile << " is not divisible by 3. Try '" << argv[0] << " --help' for additional information." << endl; exit(1); } paramStream.close(); int* targetLabelsDev = NULL; hipMalloc((void**)&targetLabelsDev, numLabels*sizeof(int)); hipMemcpyAsync(targetLabelsDev, targetLabels, numLabels*sizeof(int), hipMemcpyHostToDevice); int* lowerIntensityBoundsDev = NULL; hipMalloc((void**)&lowerIntensityBoundsDev, numLabels*sizeof(int)); hipMemcpyAsync(lowerIntensityBoundsDev, lowerIntensityBounds, numLabels*sizeof(int), hipMemcpyHostToDevice); int* upperIntensityBoundsDev = NULL; hipMalloc((void**)&upperIntensityBoundsDev, numLabels*sizeof(int)); hipMemcpyAsync(upperIntensityBoundsDev, upperIntensityBounds, numLabels*sizeof(int), hipMemcpyHostToDevice); // Allocate arrays for speed and phi in GPU memory signed char* speedDev = NULL; signed char* phiDev = NULL; hipMalloc((void**)&speedDev, numRepetitions*numLabels*SIZE); hipMalloc((void**)&phiDev, numRepetitions*numLabels*SIZE); hipDeviceSynchronize(); // Start the segmentation timer hipEventRecord(startTime2, 0); // Launch kernel to begin image segmenation for(int i=0; i<numRepetitions; i++) { hipLaunchKernelGGL(( evolveContour), dim3(1), dim3(numLabels), 0, 0, intensityDev, labelsDev, speedDev, phiDev, HEIGHT, WIDTH, targetLabelsDev, i, numLabels, lowerIntensityBoundsDev, upperIntensityBoundsDev); } hipDeviceSynchronize(); // Stop the segmentation timer hipEventRecord(stopTime2, 0); // Retrieve results from the GPU signed char* phi = new signed char[numRepetitions*numLabels*HEIGHT*WIDTH]; hipMemcpy(phi, phiDev, numRepetitions*numLabels*SIZE, hipMemcpyDeviceToHost); // Stop the runtime timer hipEventRecord(stopTime1, 0); // Caio: Output: DEV FILE *fout; fout = fopen(outputFile, "wb"); if (!fout) { printf("Could not open output file. %s\n", outputFile); exit(0); } fwrite(phi, numRepetitions*numLabels*SIZE, 1, fout); fclose(fout); printf("GOLD written to file.\n"); // Stop runtime timer and print times hipEventElapsedTime(&elapsedTime1, startTime1, stopTime1); hipEventElapsedTime(&elapsedTime2, startTime2, stopTime2); cout << "Computation time: " << setprecision(6) << elapsedTime2 << " ms"<< endl; cout << "Total time: " << setprecision(6) << elapsedTime1 << " ms"<< endl; // Free resources and end the program hipEventDestroy(startTime1); hipEventDestroy(stopTime1); hipEventDestroy(startTime2); hipEventDestroy(stopTime2); hipFree(intensityDev); hipFree(labelsDev); hipFree(speedDev); hipFree(phiDev); hipFree(targetLabelsDev); hipFree(lowerIntensityBoundsDev); hipFree(upperIntensityBoundsDev); return 0; } image<unsigned char>* loadPGM(const char* name) { char buf[BUF_SIZE]; // Read header ifstream file(name, ios::in | ios::binary); readPNM(file, buf); if(strncmp(buf, "P5", 2)) { cerr << "Unable to open '" << name << "'." 
<< endl; throw errorPNM(); } readPNM(file, buf); int width = atoi(buf); readPNM(file, buf); int height = atoi(buf); readPNM(file, buf); if(atoi(buf) > UCHAR_MAX) { cerr << "Unable to open '" << name << "'." << endl; throw errorPNM(); } // Read data image<unsigned char>* im = new image<unsigned char>(width, height); file.read((char*)imPtr(im, 0, 0), width*height*sizeof(unsigned char)); return im; } void readPNM(ifstream &file, char* buf) { char doc[BUF_SIZE]; char c; file >> c; while (c == '#') { file.getline(doc, BUF_SIZE); file >> c; } file.putback(c); file.width(BUF_SIZE); file >> buf; file.ignore(); } void savePPM(image<Color>* im, const char* name) { int width = im->width(); int height = im->height(); ofstream file(name, ios::out | ios::binary); file << "P6\n" << width << " " << height << "\n" << UCHAR_MAX << "\n"; file.write((char*)imPtr(im, 0, 0), width*height*sizeof(Color)); } Color randomColor() { Color c; c.r = (unsigned char) rand(); c.g = (unsigned char) rand(); c.b = (unsigned char) rand(); return c; } __global__ void evolveContour(unsigned char* intensity, unsigned char* labels, signed char* speed, signed char* phi, int HEIGHT, int WIDTH, int* targetLabels, int kernelID, int numLabels, int* lowerIntensityBounds, int* upperIntensityBounds) { int tid = threadIdx.x; intensity = &intensity[kernelID*HEIGHT*WIDTH]; labels = &labels[kernelID*HEIGHT*WIDTH]; speed = &speed[(kernelID*numLabels+tid)*HEIGHT*WIDTH]; phi = &phi[(kernelID*numLabels+tid)*HEIGHT*WIDTH]; dim3 dimGrid(WIDTH/30+1, HEIGHT/30+1); dim3 dimBlock(32, 32); hipLaunchKernelGGL(( initSpeedPhi), dim3(dimGrid), dim3(dimBlock), 0, 0, intensity, labels, speed, phi, HEIGHT, WIDTH, targetLabels[tid], lowerIntensityBounds[tid], upperIntensityBounds[tid]); int numIterations = 0; stopCondition[tid] = 1; while(stopCondition[tid]) { stopCondition[tid] = 0; numIterations++; dimGrid.x = WIDTH/30+1; dimGrid.y = HEIGHT/30+1; // Outward evolution hipLaunchKernelGGL(( switchIn), dim3(dimGrid), dim3(dimBlock), 0, 0, speed, phi, HEIGHT, WIDTH); // Inward evolution hipLaunchKernelGGL(( switchOut), dim3(dimGrid), dim3(dimBlock), 0, 0, speed, phi, HEIGHT, WIDTH); // Check stopping condition on every third iteration if(numIterations % 3 == 0) { dimGrid.x = WIDTH/32+1; dimGrid.y = HEIGHT/32+1; hipLaunchKernelGGL(( checkStopCondition), dim3(dimGrid), dim3(dimBlock), 0, 0, speed, phi, tid, HEIGHT, WIDTH); hipDeviceSynchronize(); } else stopCondition[tid] = 1; if(stopCondition[tid] == 0) printf("Target label %d (intensities: %d-%d) converged in %d iterations.\n", targetLabels[tid], lowerIntensityBounds[tid], upperIntensityBounds[tid], numIterations); } } __global__ void initSpeedPhi(unsigned char* intensity, unsigned char* labels, signed char* speed, signed char* phi, int HEIGHT, int WIDTH, int targetLabel, int lowerIntensityBound, int upperIntensityBound) { int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int xPos = 30*bx + tx; int yPos = 30*by + ty; int intensityReg; int speedReg; int phiReg; __shared__ int labelsTile[32][32]; // Load data into shared memory and registers if(xPos < WIDTH && yPos < HEIGHT) { labelsTile[ty][tx] = labels[yPos*WIDTH+xPos]; intensityReg = intensity[yPos*WIDTH+xPos]; } // Initialization if(tx > 0 && tx < 31 && ty > 0 && ty < 31 && xPos < WIDTH-1 && yPos < HEIGHT-1) { // Phi if(labelsTile[ty][tx] != targetLabel) { if(labelsTile[ty][tx-1] != targetLabel && labelsTile[ty][tx+1] != targetLabel && labelsTile[ty-1][tx] != targetLabel && labelsTile[ty+1][tx] != targetLabel) phiReg = 3; 
else phiReg = 1; } else { if(labelsTile[ty][tx-1] != targetLabel || labelsTile[ty][tx+1] != targetLabel || labelsTile[ty-1][tx] != targetLabel || labelsTile[ty+1][tx] != targetLabel) phiReg = -1; else phiReg = -3; } // Speed if(intensityReg >= lowerIntensityBound && intensityReg <= upperIntensityBound) speedReg = 1; else speedReg = -1; // Load data back into global memory speed[yPos*WIDTH+xPos] = speedReg; phi[yPos*WIDTH+xPos] = phiReg; } } __global__ void switchIn(signed char* speed, signed char* phi, int HEIGHT, int WIDTH) { int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int xPos = 30*bx + tx; int yPos = 30*by + ty; int speedReg; __shared__ int phiTile[32][32]; // Load data into shared memory and registers if(xPos < WIDTH && yPos < HEIGHT) { speedReg = speed[yPos*WIDTH+xPos]; phiTile[ty][tx] = phi[yPos*WIDTH+xPos]; } if(xPos > 0 && xPos < WIDTH-1 && yPos > 0 && yPos < HEIGHT-1) { // Delete points from Lout and add them to Lin if(phiTile[ty][tx] == 1 && speedReg > 0) phiTile[ty][tx] = -1; if(tx > 0 && tx < 31 && ty > 0 && ty < 31) { // Update neighborhood if(phiTile[ty][tx] == 3) { if(phiTile[ty][tx-1] == -1 || phiTile[ty][tx+1] == -1 || phiTile[ty-1][tx] == -1 || phiTile[ty+1][tx] == -1) phiTile[ty][tx] = 1; } // Eliminate redundant points in Lin if(phiTile[ty][tx] == -1) { if(phiTile[ty][tx-1] < 0 && phiTile[ty][tx+1] < 0 && phiTile[ty-1][tx] < 0 && phiTile[ty+1][tx] < 0) phiTile[ty][tx] = -3; } // Load data back into global memory phi[yPos*WIDTH+xPos] = phiTile[ty][tx]; } } } __global__ void switchOut(signed char* speed, signed char* phi, int HEIGHT, int WIDTH) { int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int xPos = 30*bx + tx; int yPos = 30*by + ty; int speedReg; __shared__ int phiTile[32][32]; // Load data into shared memory and registers if(xPos < WIDTH && yPos < HEIGHT) { speedReg = speed[yPos*WIDTH+xPos]; phiTile[ty][tx] = phi[yPos*WIDTH+xPos]; } if(xPos > 0 && xPos < WIDTH-1 && yPos > 0 && yPos < HEIGHT-1) { // Delete points from Lin and add them to Lout if(phiTile[ty][tx] == -1 && speedReg < 0) phiTile[ty][tx] = 1; if(tx > 0 && tx < 31 && ty > 0 && ty < 31) { // Update neighborhood if(phiTile[ty][tx] == -3) { if(phiTile[ty][tx-1] == 1 || phiTile[ty][tx+1] == 1 || phiTile[ty-1][tx] == 1 || phiTile[ty+1][tx] == 1) phiTile[ty][tx] = -1; } // Eliminate redundant points if(phiTile[ty][tx] == 1) { if(phiTile[ty][tx-1] > 0 && phiTile[ty][tx+1] > 0 && phiTile[ty-1][tx] > 0 && phiTile[ty+1][tx] > 0) phiTile[ty][tx] = 3; } // Load data back into global memory phi[yPos*WIDTH+xPos] = phiTile[ty][tx]; } } } __global__ void checkStopCondition(signed char* speed, signed char* phi, int parentThreadID, int HEIGHT, int WIDTH) { int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int xPos = 32*bx + tx; int yPos = 32*by + ty; int speedReg; int phiReg; // Load data into shared memory and registers if(xPos < WIDTH && yPos < HEIGHT) { speedReg = speed[yPos*WIDTH+xPos]; phiReg = phi[yPos*WIDTH+xPos]; } // Falsify stop condition if criteria are not met if(phiReg == 1 && speedReg > 0) stopCondition[parentThreadID]=0; else if(phiReg == -1 && speedReg < 0) stopCondition[parentThreadID]=1; }
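The level-set segmentation above relies on dynamic parallelism: evolveContour is itself a kernel, launched from the host once per repetition with one thread per target label, and each of those parent threads launches its own grids for initSpeedPhi, switchIn, switchOut and checkStopCondition until its entry in the __device__ volatile stopCondition array stays zero. The sketch below shows only that parent/child launch structure with a trivial child kernel; building it requires relocatable device code (for example nvcc -rdc=true -lcudadevrt), and the kernel names and label values are assumptions for illustration.

// Minimal dynamic-parallelism sketch: a parent kernel launches one child grid per
// thread, as evolveContour does once per target label.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void child(int label)
{
    if (threadIdx.x == 0)
        printf("child grid running for label %d\n", label);
}

__global__ void parent(const int* labels, int num_labels)
{
    int tid = threadIdx.x;
    if (tid < num_labels)
        child<<<1, 32>>>(labels[tid]);   // device-side launch, one child per label
}

int main()
{
    int h_labels[3] = {7, 11, 42};
    int* d_labels = NULL;
    cudaMalloc((void**)&d_labels, sizeof(h_labels));
    cudaMemcpy(d_labels, h_labels, sizeof(h_labels), cudaMemcpyHostToDevice);

    parent<<<1, 3>>>(d_labels, 3);
    cudaDeviceSynchronize();
    printf("launch status: %s\n", cudaGetErrorString(cudaGetLastError()));

    cudaFree(d_labels);
    return 0;
}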
8ed1c376411009e520d339d4fc4ea4324f95fb68.cu
#include <iostream> #include <fstream> #include <iomanip> #include "image.h" #define BUF_SIZE 256 using namespace std; class errorPNM { }; struct Color { unsigned char r; unsigned char g; unsigned char b; }; void readPNM(ifstream &file, char* buf); image<unsigned char>* loadPGM(const char* name); void savePPM(image<Color>* im, const char* name); Color randomColor(); __global__ void evolveContour(unsigned char* intensityDev, unsigned char* labelsDev, signed char* speedDev, signed char* phiDev, int HEIGHT, int WIDTH, int* targetLabels, int kernelID, int numLabels, int* lowerIntensityBounds, int* upperIntensityBounds); __global__ void initSpeedPhi(unsigned char* intensity, unsigned char* labels, signed char* speed, signed char* phi, int HEIGHT, int WIDTH, int targetLabel, int lowerIntensityBound, int upperIntensityBound); __global__ void switchIn(signed char* speed, signed char* phi, int HEIGHT, int WIDTH); __global__ void switchOut(signed char* speed, signed char* phi, int HEIGHT, int WIDTH); __global__ void checkStopCondition(signed char* speed, signed char* phi, int parentThreadID, int HEIGHT, int WIDTH); __device__ volatile int stopCondition[1024]; void usage() { cout<<"Usage: ./lss <Input intensities path> <Input labels path> <Input params path> <GOLD output path> <#repetitions (HyperQ)>" << endl; } int main(int argc, char* argv[]) { // Parse command line arguments if(argc < 6) { usage(); exit(0); } char* imageFile = argv[1]; char* labelFile = argv[2]; char* paramFile = argv[3]; char* outputFile = argv[4]; int numRepetitions = atoi(argv[5]); // Initialize timers, start the runtime timer cudaEvent_t startTime1, startTime2, stopTime1, stopTime2; cudaEventCreate(&startTime1); cudaEventCreate(&startTime2); cudaEventCreate(&stopTime1); cudaEventCreate(&stopTime2); float elapsedTime1, elapsedTime2; cudaEventRecord(startTime1, 0); // Load image, send to GPU image<unsigned char>* input = loadPGM(imageFile); const int HEIGHT = input->height(); const int WIDTH = input->width(); const int SIZE = HEIGHT*WIDTH*sizeof(char); unsigned char* intensity = new unsigned char[numRepetitions*HEIGHT*WIDTH]; for(int i=0; i<numRepetitions; i++) memcpy(&intensity[i*HEIGHT*WIDTH], input->data, SIZE); unsigned char* intensityDev = NULL; cudaMalloc((void**)&intensityDev, numRepetitions*SIZE); cudaMemcpyAsync(intensityDev, intensity, numRepetitions*SIZE, cudaMemcpyHostToDevice); // Load connected component labels, send to GPU input = loadPGM(labelFile); unsigned char* labels = new unsigned char[numRepetitions*HEIGHT*WIDTH]; for(int i=0; i<numRepetitions; i++) memcpy(&labels[i*HEIGHT*WIDTH], input->data, SIZE); unsigned char* labelsDev = NULL; cudaMalloc((void **)&labelsDev, numRepetitions*SIZE); cudaMemcpyAsync(labelsDev, labels, numRepetitions*SIZE, cudaMemcpyHostToDevice); // Load parameters, send to GPU ifstream paramStream; paramStream.open(paramFile); if(paramStream.is_open() != true) { cerr << "Could not open '" << paramFile << "'." 
<< endl; exit(1); } int targetLabels[1024]; int lowerIntensityBounds[1024]; int upperIntensityBounds[1024]; int numLabels = 0; while(paramStream.eof() == false) { char line[16]; paramStream.getline(line, 16); if(paramStream.eof() == true) break; if(numLabels % 3 == 0) targetLabels[numLabels/3] = strtol(line, NULL, 10); else if(numLabels % 3 == 1) lowerIntensityBounds[numLabels/3] = strtol(line, NULL, 10); else upperIntensityBounds[numLabels/3] = strtol(line, NULL, 10); numLabels++; } if(numLabels % 3 == 0) numLabels /= 3; else { cerr << "Number of lines in " << paramFile << " is not divisible by 3. Try '" << argv[0] << " --help' for additional information." << endl; exit(1); } paramStream.close(); int* targetLabelsDev = NULL; cudaMalloc((void**)&targetLabelsDev, numLabels*sizeof(int)); cudaMemcpyAsync(targetLabelsDev, targetLabels, numLabels*sizeof(int), cudaMemcpyHostToDevice); int* lowerIntensityBoundsDev = NULL; cudaMalloc((void**)&lowerIntensityBoundsDev, numLabels*sizeof(int)); cudaMemcpyAsync(lowerIntensityBoundsDev, lowerIntensityBounds, numLabels*sizeof(int), cudaMemcpyHostToDevice); int* upperIntensityBoundsDev = NULL; cudaMalloc((void**)&upperIntensityBoundsDev, numLabels*sizeof(int)); cudaMemcpyAsync(upperIntensityBoundsDev, upperIntensityBounds, numLabels*sizeof(int), cudaMemcpyHostToDevice); // Allocate arrays for speed and phi in GPU memory signed char* speedDev = NULL; signed char* phiDev = NULL; cudaMalloc((void**)&speedDev, numRepetitions*numLabels*SIZE); cudaMalloc((void**)&phiDev, numRepetitions*numLabels*SIZE); cudaDeviceSynchronize(); // Start the segmentation timer cudaEventRecord(startTime2, 0); // Launch kernel to begin image segmenation for(int i=0; i<numRepetitions; i++) { evolveContour<<<1, numLabels>>>(intensityDev, labelsDev, speedDev, phiDev, HEIGHT, WIDTH, targetLabelsDev, i, numLabels, lowerIntensityBoundsDev, upperIntensityBoundsDev); } cudaDeviceSynchronize(); // Stop the segmentation timer cudaEventRecord(stopTime2, 0); // Retrieve results from the GPU signed char* phi = new signed char[numRepetitions*numLabels*HEIGHT*WIDTH]; cudaMemcpy(phi, phiDev, numRepetitions*numLabels*SIZE, cudaMemcpyDeviceToHost); // Stop the runtime timer cudaEventRecord(stopTime1, 0); // Caio: Output: DEV FILE *fout; fout = fopen(outputFile, "wb"); if (!fout) { printf("Could not open output file. %s\n", outputFile); exit(0); } fwrite(phi, numRepetitions*numLabels*SIZE, 1, fout); fclose(fout); printf("GOLD written to file.\n"); // Stop runtime timer and print times cudaEventElapsedTime(&elapsedTime1, startTime1, stopTime1); cudaEventElapsedTime(&elapsedTime2, startTime2, stopTime2); cout << "Computation time: " << setprecision(6) << elapsedTime2 << " ms"<< endl; cout << "Total time: " << setprecision(6) << elapsedTime1 << " ms"<< endl; // Free resources and end the program cudaEventDestroy(startTime1); cudaEventDestroy(stopTime1); cudaEventDestroy(startTime2); cudaEventDestroy(stopTime2); cudaFree(intensityDev); cudaFree(labelsDev); cudaFree(speedDev); cudaFree(phiDev); cudaFree(targetLabelsDev); cudaFree(lowerIntensityBoundsDev); cudaFree(upperIntensityBoundsDev); return 0; } image<unsigned char>* loadPGM(const char* name) { char buf[BUF_SIZE]; // Read header ifstream file(name, ios::in | ios::binary); readPNM(file, buf); if(strncmp(buf, "P5", 2)) { cerr << "Unable to open '" << name << "'." 
<< endl; throw errorPNM(); } readPNM(file, buf); int width = atoi(buf); readPNM(file, buf); int height = atoi(buf); readPNM(file, buf); if(atoi(buf) > UCHAR_MAX) { cerr << "Unable to open '" << name << "'." << endl; throw errorPNM(); } // Read data image<unsigned char>* im = new image<unsigned char>(width, height); file.read((char*)imPtr(im, 0, 0), width*height*sizeof(unsigned char)); return im; } void readPNM(ifstream &file, char* buf) { char doc[BUF_SIZE]; char c; file >> c; while (c == '#') { file.getline(doc, BUF_SIZE); file >> c; } file.putback(c); file.width(BUF_SIZE); file >> buf; file.ignore(); } void savePPM(image<Color>* im, const char* name) { int width = im->width(); int height = im->height(); ofstream file(name, ios::out | ios::binary); file << "P6\n" << width << " " << height << "\n" << UCHAR_MAX << "\n"; file.write((char*)imPtr(im, 0, 0), width*height*sizeof(Color)); } Color randomColor() { Color c; c.r = (unsigned char) rand(); c.g = (unsigned char) rand(); c.b = (unsigned char) rand(); return c; } __global__ void evolveContour(unsigned char* intensity, unsigned char* labels, signed char* speed, signed char* phi, int HEIGHT, int WIDTH, int* targetLabels, int kernelID, int numLabels, int* lowerIntensityBounds, int* upperIntensityBounds) { int tid = threadIdx.x; intensity = &intensity[kernelID*HEIGHT*WIDTH]; labels = &labels[kernelID*HEIGHT*WIDTH]; speed = &speed[(kernelID*numLabels+tid)*HEIGHT*WIDTH]; phi = &phi[(kernelID*numLabels+tid)*HEIGHT*WIDTH]; dim3 dimGrid(WIDTH/30+1, HEIGHT/30+1); dim3 dimBlock(32, 32); initSpeedPhi<<<dimGrid, dimBlock>>>(intensity, labels, speed, phi, HEIGHT, WIDTH, targetLabels[tid], lowerIntensityBounds[tid], upperIntensityBounds[tid]); int numIterations = 0; stopCondition[tid] = 1; while(stopCondition[tid]) { stopCondition[tid] = 0; numIterations++; dimGrid.x = WIDTH/30+1; dimGrid.y = HEIGHT/30+1; // Outward evolution switchIn<<<dimGrid, dimBlock>>>(speed, phi, HEIGHT, WIDTH); // Inward evolution switchOut<<<dimGrid, dimBlock>>>(speed, phi, HEIGHT, WIDTH); // Check stopping condition on every third iteration if(numIterations % 3 == 0) { dimGrid.x = WIDTH/32+1; dimGrid.y = HEIGHT/32+1; checkStopCondition<<<dimGrid, dimBlock>>>(speed, phi, tid, HEIGHT, WIDTH); cudaDeviceSynchronize(); } else stopCondition[tid] = 1; if(stopCondition[tid] == 0) printf("Target label %d (intensities: %d-%d) converged in %d iterations.\n", targetLabels[tid], lowerIntensityBounds[tid], upperIntensityBounds[tid], numIterations); } } __global__ void initSpeedPhi(unsigned char* intensity, unsigned char* labels, signed char* speed, signed char* phi, int HEIGHT, int WIDTH, int targetLabel, int lowerIntensityBound, int upperIntensityBound) { int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int xPos = 30*bx + tx; int yPos = 30*by + ty; int intensityReg; int speedReg; int phiReg; __shared__ int labelsTile[32][32]; // Load data into shared memory and registers if(xPos < WIDTH && yPos < HEIGHT) { labelsTile[ty][tx] = labels[yPos*WIDTH+xPos]; intensityReg = intensity[yPos*WIDTH+xPos]; } // Initialization if(tx > 0 && tx < 31 && ty > 0 && ty < 31 && xPos < WIDTH-1 && yPos < HEIGHT-1) { // Phi if(labelsTile[ty][tx] != targetLabel) { if(labelsTile[ty][tx-1] != targetLabel && labelsTile[ty][tx+1] != targetLabel && labelsTile[ty-1][tx] != targetLabel && labelsTile[ty+1][tx] != targetLabel) phiReg = 3; else phiReg = 1; } else { if(labelsTile[ty][tx-1] != targetLabel || labelsTile[ty][tx+1] != targetLabel || labelsTile[ty-1][tx] != targetLabel || 
labelsTile[ty+1][tx] != targetLabel) phiReg = -1; else phiReg = -3; } // Speed if(intensityReg >= lowerIntensityBound && intensityReg <= upperIntensityBound) speedReg = 1; else speedReg = -1; // Load data back into global memory speed[yPos*WIDTH+xPos] = speedReg; phi[yPos*WIDTH+xPos] = phiReg; } } __global__ void switchIn(signed char* speed, signed char* phi, int HEIGHT, int WIDTH) { int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int xPos = 30*bx + tx; int yPos = 30*by + ty; int speedReg; __shared__ int phiTile[32][32]; // Load data into shared memory and registers if(xPos < WIDTH && yPos < HEIGHT) { speedReg = speed[yPos*WIDTH+xPos]; phiTile[ty][tx] = phi[yPos*WIDTH+xPos]; } if(xPos > 0 && xPos < WIDTH-1 && yPos > 0 && yPos < HEIGHT-1) { // Delete points from Lout and add them to Lin if(phiTile[ty][tx] == 1 && speedReg > 0) phiTile[ty][tx] = -1; if(tx > 0 && tx < 31 && ty > 0 && ty < 31) { // Update neighborhood if(phiTile[ty][tx] == 3) { if(phiTile[ty][tx-1] == -1 || phiTile[ty][tx+1] == -1 || phiTile[ty-1][tx] == -1 || phiTile[ty+1][tx] == -1) phiTile[ty][tx] = 1; } // Eliminate redundant points in Lin if(phiTile[ty][tx] == -1) { if(phiTile[ty][tx-1] < 0 && phiTile[ty][tx+1] < 0 && phiTile[ty-1][tx] < 0 && phiTile[ty+1][tx] < 0) phiTile[ty][tx] = -3; } // Load data back into global memory phi[yPos*WIDTH+xPos] = phiTile[ty][tx]; } } } __global__ void switchOut(signed char* speed, signed char* phi, int HEIGHT, int WIDTH) { int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int xPos = 30*bx + tx; int yPos = 30*by + ty; int speedReg; __shared__ int phiTile[32][32]; // Load data into shared memory and registers if(xPos < WIDTH && yPos < HEIGHT) { speedReg = speed[yPos*WIDTH+xPos]; phiTile[ty][tx] = phi[yPos*WIDTH+xPos]; } if(xPos > 0 && xPos < WIDTH-1 && yPos > 0 && yPos < HEIGHT-1) { // Delete points from Lin and add them to Lout if(phiTile[ty][tx] == -1 && speedReg < 0) phiTile[ty][tx] = 1; if(tx > 0 && tx < 31 && ty > 0 && ty < 31) { // Update neighborhood if(phiTile[ty][tx] == -3) { if(phiTile[ty][tx-1] == 1 || phiTile[ty][tx+1] == 1 || phiTile[ty-1][tx] == 1 || phiTile[ty+1][tx] == 1) phiTile[ty][tx] = -1; } // Eliminate redundant points if(phiTile[ty][tx] == 1) { if(phiTile[ty][tx-1] > 0 && phiTile[ty][tx+1] > 0 && phiTile[ty-1][tx] > 0 && phiTile[ty+1][tx] > 0) phiTile[ty][tx] = 3; } // Load data back into global memory phi[yPos*WIDTH+xPos] = phiTile[ty][tx]; } } } __global__ void checkStopCondition(signed char* speed, signed char* phi, int parentThreadID, int HEIGHT, int WIDTH) { int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int xPos = 32*bx + tx; int yPos = 32*by + ty; int speedReg; int phiReg; // Load data into shared memory and registers if(xPos < WIDTH && yPos < HEIGHT) { speedReg = speed[yPos*WIDTH+xPos]; phiReg = phi[yPos*WIDTH+xPos]; } // Falsify stop condition if criteria are not met if(phiReg == 1 && speedReg > 0) stopCondition[parentThreadID]=0; else if(phiReg == -1 && speedReg < 0) stopCondition[parentThreadID]=1; }
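initSpeedPhi, switchIn and switchOut above all use the same tiling scheme: 32x32 thread blocks load a full 32x32 window of the image into shared memory, but only the 30x30 interior threads produce output, because the outer ring is a one-pixel halo needed for the 4-neighborhood tests; blocks are therefore spaced 30 pixels apart (xPos = 30*bx + tx, with dimGrid = WIDTH/30+1 by HEIGHT/30+1). The sketch below isolates that indexing with a placeholder label-boundary test and adds a __syncthreads() barrier between the tile load and the neighborhood reads; it is an illustration, not code from the benchmark.

// Halo-tile indexing sketch: 32x32 block, 30x30 useful interior, 30-pixel block stride.
#include <cuda_runtime.h>

__global__ void halo_stencil_sketch(const unsigned char* in, unsigned char* out,
                                    int height, int width)
{
    __shared__ unsigned char tile[32][32];

    int tx = threadIdx.x, ty = threadIdx.y;
    int x = 30 * blockIdx.x + tx;          // 30-pixel stride between blocks
    int y = 30 * blockIdx.y + ty;

    if (x < width && y < height)
        tile[ty][tx] = in[y * width + x];
    __syncthreads();                       // whole tile loaded before neighbors are read

    // Only interior threads have a full 4-neighborhood inside the tile.
    if (tx > 0 && tx < 31 && ty > 0 && ty < 31 && x < width - 1 && y < height - 1) {
        unsigned char c = tile[ty][tx];
        bool boundary = tile[ty][tx - 1] != c || tile[ty][tx + 1] != c ||
                        tile[ty - 1][tx] != c || tile[ty + 1][tx] != c;
        out[y * width + x] = boundary ? 255 : 0;
    }
}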
46db83cb539c1346bbcf9ca7c8f22dd6d22fd738.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> __global__ void add(int *a, int *b, int *c) { c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x]; } #define N 32 int main() { int *a, *b, *c; int *d_a, *d_b, *d_c; int size = N * sizeof( int ); /* allocate space for device copies of a, b, c */ hipMalloc( (void **) &d_a, size ); hipMalloc( (void **) &d_b, size ); hipMalloc( (void **) &d_c, size ); /* allocate space for host copies of a, b, c and setup input values */ a = (int *)malloc( size ); b = (int *)malloc( size ); c = (int *)malloc( size ); for( int i = 0; i < N; i++ ) { a[i] = b[i] = i; c[i] = 0; } /* copy inputs to device */ hipMemcpy( d_a, a, size, hipMemcpyHostToDevice ); hipMemcpy( d_b, b, size, hipMemcpyHostToDevice ); /* launch the kernel on the GPU */ hipLaunchKernelGGL(( add), dim3(N), dim3(1) , 0, 0, d_a, d_b, d_c ); /* copy result back to host */ hipMemcpy( c, d_c, size, hipMemcpyDeviceToHost ); for( int i = 0; i < N; i++ ) { printf("c[%d] = %d\n",i,c[i]); } /* end for */ /* clean up */ free(a); free(b); free(c); hipFree( d_a ); hipFree( d_b ); hipFree( d_c ); return 0; } /* end main */
46db83cb539c1346bbcf9ca7c8f22dd6d22fd738.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> __global__ void add(int *a, int *b, int *c) { c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x]; } #define N 32 int main() { int *a, *b, *c; int *d_a, *d_b, *d_c; int size = N * sizeof( int ); /* allocate space for device copies of a, b, c */ cudaMalloc( (void **) &d_a, size ); cudaMalloc( (void **) &d_b, size ); cudaMalloc( (void **) &d_c, size ); /* allocate space for host copies of a, b, c and setup input values */ a = (int *)malloc( size ); b = (int *)malloc( size ); c = (int *)malloc( size ); for( int i = 0; i < N; i++ ) { a[i] = b[i] = i; c[i] = 0; } /* copy inputs to device */ cudaMemcpy( d_a, a, size, cudaMemcpyHostToDevice ); cudaMemcpy( d_b, b, size, cudaMemcpyHostToDevice ); /* launch the kernel on the GPU */ add<<< N, 1 >>>( d_a, d_b, d_c ); /* copy result back to host */ cudaMemcpy( c, d_c, size, cudaMemcpyDeviceToHost ); for( int i = 0; i < N; i++ ) { printf("c[%d] = %d\n",i,c[i]); } /* end for */ /* clean up */ free(a); free(b); free(c); cudaFree( d_a ); cudaFree( d_b ); cudaFree( d_c ); return 0; } /* end main */
cca5b61ae760f850b502fd37d7c6d51dc3b242c6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/NativeFunctions.h> #include <ATen/NumericUtils.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/TensorCompare.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/hip/HIPApplyUtils.cuh> namespace at { namespace native { namespace { void where_kernel_impl(TensorIterator &iter, ScalarType condition_type) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBFloat16, kBool, iter.dtype(), "where_cuda", [&] { if (condition_type == at::ScalarType::Byte) { gpu_kernel( iter, [=] GPU_LAMBDA (uint8_t cond_val, scalar_t self_val, scalar_t other_val) -> scalar_t { return cond_val ? self_val : other_val; }); } else { gpu_kernel( iter, [=] GPU_LAMBDA (bool cond_val, scalar_t self_val, scalar_t other_val) -> scalar_t { return cond_val ? self_val : other_val; }); } }); } void isposinf_kernel_impl(TensorIterator &iter) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.input_dtype(), "isposinf_cuda", [&]() { gpu_kernel( iter, [] GPU_LAMBDA (scalar_t a) -> bool { return a == std::numeric_limits<scalar_t>::infinity(); } ); }); } void isneginf_kernel_impl(TensorIterator &iter) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.input_dtype(), "isneginf_cuda", [&]() { gpu_kernel( iter, [] GPU_LAMBDA (scalar_t a) -> bool { return a == -std::numeric_limits<scalar_t>::infinity(); } ); }); } void clamp_kernel_impl(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_cuda", [&] { gpu_kernel(iter, []GPU_LAMBDA(scalar_t v, scalar_t lower, scalar_t upper) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (at::_isnan(v)) { return v; } else { return ::min(::max(v, lower), upper); } }); }); } void clamp_scalar_kernel_impl(TensorIterator& iter, Scalar min, Scalar max) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_scalar_cuda", [&] { const auto lower = min.to<scalar_t>(); const auto upper = max.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (at::_isnan(v)) { return v; } else { return ::min(::max(v, lower), upper); } }); }); } void clamp_min_kernel_impl(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_min_cuda", [&] { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t v, scalar_t lower) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::max(v, lower); } }); }); } void clamp_min_scalar_kernel_impl(TensorIterator& iter, Scalar min) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_min_scalar_cuda", [&] { auto lower = min.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::max(v, lower); } }); }); } void clamp_max_kernel_impl(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_max_cuda", [&] { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t v, scalar_t upper) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::min(v, upper); } }); }); } void clamp_max_scalar_kernel_impl(TensorIterator& iter, Scalar max) { 
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_max_scalar_cuda", [&] { const auto upper = max.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::min(v, upper); } }); }); } // Composite op implementation for simplicity. This materializes the cross product of elements and test elements, // so it is not very memory efficient, but it is fast on CUDA. void isin_default_kernel_gpu(const Tensor& elements, const Tensor& test_elements, bool invert, const Tensor& out) { std::vector<int64_t> bc_shape(elements.dim(), 1); bc_shape.push_back(-1); out.copy_(invert ? elements.unsqueeze(-1).ne(test_elements.view(bc_shape)).all(-1) : elements.unsqueeze(-1).eq(test_elements.view(bc_shape)).any(-1)); } } // anonymous namespace REGISTER_DISPATCH(where_kernel, &where_kernel_impl); REGISTER_DISPATCH(isposinf_stub, &isposinf_kernel_impl); REGISTER_DISPATCH(isneginf_stub, &isneginf_kernel_impl); REGISTER_DISPATCH(clamp_stub, &clamp_kernel_impl); REGISTER_DISPATCH(clamp_min_stub, &clamp_min_kernel_impl); REGISTER_DISPATCH(clamp_max_stub, &clamp_max_kernel_impl); REGISTER_DISPATCH(clamp_scalar_stub, &clamp_scalar_kernel_impl); REGISTER_DISPATCH(clamp_min_scalar_stub, &clamp_min_scalar_kernel_impl); REGISTER_DISPATCH(clamp_max_scalar_stub, &clamp_max_scalar_kernel_impl); REGISTER_DISPATCH(isin_default_stub, &isin_default_kernel_gpu); template <typename scalar_t> __global__ void _assert_async_cuda_kernel(scalar_t* input) { CUDA_KERNEL_ASSERT(input[0] != 0); } __global__ void _assert_async_cuda_kernel(c10::complex<float>* input) { CUDA_KERNEL_ASSERT(input[0] != c10::complex<float>(0, 0)); } __global__ void _assert_async_cuda_kernel(c10::complex<double>* input) { CUDA_KERNEL_ASSERT(input[0] != c10::complex<double>(0, 0)); } void _assert_async_cuda(const Tensor& self) { auto n = self.numel(); TORCH_CHECK(n != 0, "Boolean value of Tensor with no values is ambiguous"); TORCH_CHECK(n < 2, "Boolean value of Tensor with more than one value is ambiguous"); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, self.scalar_type(), "_assert_async_cuda", [&] { hipLaunchKernelGGL(( _assert_async_cuda_kernel), dim3(1), dim3(1), 0, stream, self.data_ptr<scalar_t>()); C10_HIP_KERNEL_LAUNCH_CHECK(); }); } }} // namespace at::native
cca5b61ae760f850b502fd37d7c6d51dc3b242c6.cu
#include <ATen/NativeFunctions.h> #include <ATen/NumericUtils.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/TensorCompare.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/cuda/CUDAApplyUtils.cuh> namespace at { namespace native { namespace { void where_kernel_impl(TensorIterator &iter, ScalarType condition_type) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBFloat16, kBool, iter.dtype(), "where_cuda", [&] { if (condition_type == at::ScalarType::Byte) { gpu_kernel( iter, [=] GPU_LAMBDA (uint8_t cond_val, scalar_t self_val, scalar_t other_val) -> scalar_t { return cond_val ? self_val : other_val; }); } else { gpu_kernel( iter, [=] GPU_LAMBDA (bool cond_val, scalar_t self_val, scalar_t other_val) -> scalar_t { return cond_val ? self_val : other_val; }); } }); } void isposinf_kernel_impl(TensorIterator &iter) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.input_dtype(), "isposinf_cuda", [&]() { gpu_kernel( iter, [] GPU_LAMBDA (scalar_t a) -> bool { return a == std::numeric_limits<scalar_t>::infinity(); } ); }); } void isneginf_kernel_impl(TensorIterator &iter) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.input_dtype(), "isneginf_cuda", [&]() { gpu_kernel( iter, [] GPU_LAMBDA (scalar_t a) -> bool { return a == -std::numeric_limits<scalar_t>::infinity(); } ); }); } void clamp_kernel_impl(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_cuda", [&] { gpu_kernel(iter, []GPU_LAMBDA(scalar_t v, scalar_t lower, scalar_t upper) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (at::_isnan(v)) { return v; } else { return ::min(::max(v, lower), upper); } }); }); } void clamp_scalar_kernel_impl(TensorIterator& iter, Scalar min, Scalar max) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_scalar_cuda", [&] { const auto lower = min.to<scalar_t>(); const auto upper = max.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (at::_isnan(v)) { return v; } else { return ::min(::max(v, lower), upper); } }); }); } void clamp_min_kernel_impl(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_min_cuda", [&] { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t v, scalar_t lower) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::max(v, lower); } }); }); } void clamp_min_scalar_kernel_impl(TensorIterator& iter, Scalar min) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_min_scalar_cuda", [&] { auto lower = min.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::max(v, lower); } }); }); } void clamp_max_kernel_impl(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_max_cuda", [&] { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t v, scalar_t upper) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::min(v, upper); } }); }); } void clamp_max_scalar_kernel_impl(TensorIterator& iter, Scalar max) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_max_scalar_cuda", [&] { const auto 
upper = max.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t { // Propagate nan, which doesn't propagate automatically for ROCm if (_isnan(v)) { return v; } else { return ::min(v, upper); } }); }); } // Composite op implementation for simplicity. This materializes the cross product of elements and test elements, // so it is not very memory efficient, but it is fast on CUDA. void isin_default_kernel_gpu(const Tensor& elements, const Tensor& test_elements, bool invert, const Tensor& out) { std::vector<int64_t> bc_shape(elements.dim(), 1); bc_shape.push_back(-1); out.copy_(invert ? elements.unsqueeze(-1).ne(test_elements.view(bc_shape)).all(-1) : elements.unsqueeze(-1).eq(test_elements.view(bc_shape)).any(-1)); } } // anonymous namespace REGISTER_DISPATCH(where_kernel, &where_kernel_impl); REGISTER_DISPATCH(isposinf_stub, &isposinf_kernel_impl); REGISTER_DISPATCH(isneginf_stub, &isneginf_kernel_impl); REGISTER_DISPATCH(clamp_stub, &clamp_kernel_impl); REGISTER_DISPATCH(clamp_min_stub, &clamp_min_kernel_impl); REGISTER_DISPATCH(clamp_max_stub, &clamp_max_kernel_impl); REGISTER_DISPATCH(clamp_scalar_stub, &clamp_scalar_kernel_impl); REGISTER_DISPATCH(clamp_min_scalar_stub, &clamp_min_scalar_kernel_impl); REGISTER_DISPATCH(clamp_max_scalar_stub, &clamp_max_scalar_kernel_impl); REGISTER_DISPATCH(isin_default_stub, &isin_default_kernel_gpu); template <typename scalar_t> __global__ void _assert_async_cuda_kernel(scalar_t* input) { CUDA_KERNEL_ASSERT(input[0] != 0); } __global__ void _assert_async_cuda_kernel(c10::complex<float>* input) { CUDA_KERNEL_ASSERT(input[0] != c10::complex<float>(0, 0)); } __global__ void _assert_async_cuda_kernel(c10::complex<double>* input) { CUDA_KERNEL_ASSERT(input[0] != c10::complex<double>(0, 0)); } void _assert_async_cuda(const Tensor& self) { auto n = self.numel(); TORCH_CHECK(n != 0, "Boolean value of Tensor with no values is ambiguous"); TORCH_CHECK(n < 2, "Boolean value of Tensor with more than one value is ambiguous"); auto stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, self.scalar_type(), "_assert_async_cuda", [&] { _assert_async_cuda_kernel<<<1, 1, 0, stream>>>(self.data_ptr<scalar_t>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); } }} // namespace at::native
9d079f3c68625374c3f8183b9b487644bb5011c3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "dcn_v2_im2col_cuda.h" #include <cstdio> #include <algorithm> #include <cstring> const int CUDA_NUM_THREADS = 512; // for jetson TX2: 512 dim3 GET_BLOCKS(uint n) { uint k = (n - 1) /CUDA_NUM_THREADS + 1; uint x = k ; uint y = 1 ; if (x > 65535 ) { x = ceil(sqrt(x)); y = (n - 1 )/(x*CUDA_NUM_THREADS) + 1; } dim3 d = {x,y,1} ; return d; } __device__ float dmcn_im2col_bilinear(const float *bottom_data, const int data_width, const int height, const int width, float h, float w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; float lh = h - h_low; float lw = w - w_low; float hh = 1 - lh, hw = 1 - lw; float v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; float v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; float v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; float v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; float w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } __global__ void modulated_deformable_im2col_gpu_kernel(const int n, const float *data_im, const float *data_offset, const float *data_mask, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, float *data_col) { int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (index >= n) return; // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; float *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; //const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in; const float *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const float *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const float *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col; const float offset_h = data_offset_ptr[data_offset_h_ptr]; const float offset_w = data_offset_ptr[data_offset_w_ptr]; const float mask = data_mask_ptr[data_mask_hw_ptr]; float val = static_cast<float>(0); const float h_im = 
h_in + i * dilation_h + offset_h; const float w_im = w_in + j * dilation_w + offset_w; //if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { //const float map_h = i * dilation_h + offset_h; //const float map_w = j * dilation_w + offset_w; //const int cur_height = height - h_in; //const int cur_width = width - w_in; //val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val * mask; data_col_ptr += batch_size * height_col * width_col; //data_col_ptr += height_col * width_col; } } } void modulated_deformable_im2col_cuda(hipStream_t stream, const float* data_im, const float* data_offset, const float* data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kenerl_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, float* data_col) { // num_axes should be smaller than block size const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * batch_size * height_col * width_col; hipLaunchKernelGGL(( modulated_deformable_im2col_gpu_kernel) , dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_im, data_offset, data_mask, height_im, width_im, kernel_h, kenerl_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, channels, deformable_group, height_col, width_col, data_col); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in modulated_deformable_im2col_cuda: %s\n", hipGetErrorString(err)); } }
9d079f3c68625374c3f8183b9b487644bb5011c3.cu
#include "dcn_v2_im2col_cuda.h" #include <cstdio> #include <algorithm> #include <cstring> const int CUDA_NUM_THREADS = 512; // for jetson TX2: 512 dim3 GET_BLOCKS(uint n) { uint k = (n - 1) /CUDA_NUM_THREADS + 1; uint x = k ; uint y = 1 ; if (x > 65535 ) { x = ceil(sqrt(x)); y = (n - 1 )/(x*CUDA_NUM_THREADS) + 1; } dim3 d = {x,y,1} ; return d; } __device__ float dmcn_im2col_bilinear(const float *bottom_data, const int data_width, const int height, const int width, float h, float w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; float lh = h - h_low; float lw = w - w_low; float hh = 1 - lh, hw = 1 - lw; float v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; float v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; float v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; float v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; float w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } __global__ void modulated_deformable_im2col_gpu_kernel(const int n, const float *data_im, const float *data_offset, const float *data_mask, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, float *data_col) { int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (index >= n) return; // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; float *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; //const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in; const float *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const float *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const float *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col; const float offset_h = data_offset_ptr[data_offset_h_ptr]; const float offset_w = data_offset_ptr[data_offset_w_ptr]; const float mask = data_mask_ptr[data_mask_hw_ptr]; float val = static_cast<float>(0); const float h_im = h_in + i * dilation_h + offset_h; const float w_im = w_in + j * dilation_w + offset_w; //if 
(h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { //const float map_h = i * dilation_h + offset_h; //const float map_w = j * dilation_w + offset_w; //const int cur_height = height - h_in; //const int cur_width = width - w_in; //val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val * mask; data_col_ptr += batch_size * height_col * width_col; //data_col_ptr += height_col * width_col; } } } void modulated_deformable_im2col_cuda(cudaStream_t stream, const float* data_im, const float* data_offset, const float* data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kenerl_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, float* data_col) { // num_axes should be smaller than block size const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * batch_size * height_col * width_col; modulated_deformable_im2col_gpu_kernel <<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_im, data_offset, data_mask, height_im, width_im, kernel_h, kenerl_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, channels, deformable_group, height_col, width_col, data_col); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in modulated_deformable_im2col_cuda: %s\n", cudaGetErrorString(err)); } }
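GET_BLOCKS in the two im2col files above folds the one-dimensional block count into a two-dimensional grid whenever more than 65535 blocks would be needed, and the kernel flattens blockIdx.x/blockIdx.y back into a single element index that is checked against n. A worked example of that arithmetic (the element count is invented for illustration):

// num_kernels = 40000000, CUDA_NUM_THREADS = 512
//   k = (40000000 - 1) / 512 + 1          = 78125   (more than 65535 blocks)
//   x = ceil(sqrt(78125))                 = 280
//   y = (40000000 - 1) / (280 * 512) + 1  = 280
// so the launch uses a 280 x 280 grid (78400 blocks); threads whose flattened
// index (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x
// is >= n return immediately.
dim3 grid = GET_BLOCKS(40000000);   // {280, 280, 1}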
888af95025f28635d5acfd1811a8d24b01da822a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ****************************************************** */ /* Wilson fermion solver in C language */ /* */ /* OpenACC benchmark [5 May 0216 H.Matsufuru] */ /* */ /* Copyright(c) Hideo Matsufuru 2016 */ /* ****************************************************** */ #include "lattice.h" int left, right, up, down; real_t *vt; real_t *tmp_QCDSpinor_s, *tmp_QCDSpinor_r; real_t *tmp_QCDMatrix_s, *tmp_QCDMatrix_r; MPI_Request req_u[8], req_mat[2], req_w[4][4], req_spr[4], req_vt[4]; real_t *p, *x; static real_t u[NDF*NST2*4]; static real_t corr[LT]; #ifdef _PROF double prof_t[PROF_NUMS]; #endif MPI_Comm comm_ud, comm_lr; double dtime() { struct timeval tv; gettimeofday(&tv, NULL); return ((double)(tv.tv_sec) + (double)(tv.tv_usec) * 1.0e-6); } static void create_newcomm(const int pt, const int pz, const int me) { /* 0 4 1 5 2 6 3 7 PT = 2, PZ = 4 */ MPI_Comm_split(MPI_COMM_WORLD, me%pz, me/pz, &comm_lr); // color, key MPI_Comm_split(MPI_COMM_WORLD, me/pz, me%pz, &comm_ud); // color, key } static void uinit(const int me, const int pz, real_t *u) { int Nx2 = 8; int Ny2 = 8; int Nz2 = 8; int Nt2 = 16; int Nst2 = Nx2 * Ny2 * Nz2 * Nt2; FILE *fp; fp = fopen("conf_08080816.txt","r"); double *ur = (double*)malloc(sizeof(double) * NDF * 4 * Nst2); for(int ist = 0; ist < Nst2; ist++){ for(int idir = 0; idir < 4; idir++){ for(int idf = 0; idf < NDF; idf++){ int i = idf + ist*NDF + idir*NDF*Nst2; int ret = fscanf(fp, "%lf", &ur[i]); if(!ret){ fprintf(stderr, "Read Error!\n"); MPI_Finalize(); exit(0); } } } } fclose(fp); int idir, it, iz, iy, ix; for(idir = 0; idir < 4; idir++){ for(it = 1; it < LT2-1; it++){ for(iz = 1; iz < LZ2-1; iz++){ for(iy = 0; iy < NY; iy++){ for(ix = 0; ix < NX; ix++){ int ist = ix + NX*(iy + NY*(iz + LZ2*it)); int ix2 = ix % Nx2; int iy2 = iy % Ny2; int iz2 = ((iz-1)+((me%pz)*(LZ2-2))) % Nz2; int it2 = ((it-1)+((me/pz)*(LT2-2))) % Nt2; int ist2 = ix2 + Nx2*(iy2 + Ny2*(iz2 + Nz2*it2)); for(int idf = 0; idf < NDF; idf++){ int i = idf + NDF*(ist + idir*NX*NY*LZ2*LT2); int i2 = idf + NDF*(ist2 + idir*Nst2); u[i] = (real_t)ur[i2]; } } } } } } free(ur); } __device__ static void setconst(real_t *v, const real_t a) { int i = IDXV(threadIdx.x, blockIdx.x, blockDim.x); while(i < (LT2-2)*(LZ2-2)*yx_Spinor){ int t = i / ((LZ2-2)*yx_Spinor); int z = (i - t * (LZ2-2)*yx_Spinor)/yx_Spinor; // (i % ((LZ2-2)*yx_Spinor)) / yx_Spinor; int offset = i % yx_Spinor; v[(t+1)*LZ2*yx_Spinor + (z+1)*yx_Spinor + offset] = a; i += blockDim.x * gridDim.x; } } __global__ static void set_src(const int me, const int ic, const int id, const int ix, const int iy, const int iz, const int it, real_t *v) { setconst(v, 0.0); if(me == 0){ // fix me if(threadIdx.x == 0 && blockIdx.x == 0){ int i = 2*ic + id*NVC + NVC*ND*(ix + iy*NX + (iz+1)*NX*NY + (it+1)*NX*NY*LZ2); v[i] = 1.0; } } } static void test_mult(const int me, real_t *u) { int nrepeat = 100; real_t *bq2, *xq2; HANDLE_ERROR( hipMalloc( (void**)&bq2, NVST2*sizeof(real_t) ) ); HANDLE_ERROR( hipMalloc( (void**)&xq2, NVST2*sizeof(real_t) ) ); hipLaunchKernelGGL(( set_src) , dim3(NUM_GANGS), dim3(VECTOR_LENGTH) , 0, 0, me, 0, 0, 0, 0, 0, 0, bq2); int QCDSpinor_zyxvec = LZ * yx_Spinor; MPI_Recv_init(bq2 + ((LT2-1)*LZ2 + 1)*yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, right, 10, comm_lr, &req_w[2][0]); MPI_Recv_init(bq2 + yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, left, 11, comm_lr, &req_w[2][1]); MPI_Send_init(bq2 + (LZ2 + 1)*yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, left, 10, comm_lr, 
&req_w[2][2]); MPI_Send_init(bq2 + ((LT2-2)*LZ2 + 1)*yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, right, 11, comm_lr, &req_w[2][3]); MPI_Recv_init(xq2 + ((LT2-1)*LZ2 + 1)*yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, right, 10, comm_lr, &req_w[3][0]); MPI_Recv_init(xq2 + yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, left, 11, comm_lr, &req_w[3][1]); MPI_Send_init(xq2 + (LZ2 + 1)*yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, left, 10, comm_lr, &req_w[3][2]); MPI_Send_init(xq2 + ((LT2-2)*LZ2 + 1)*yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, right, 11, comm_lr, &req_w[3][3]); MPI_Barrier(MPI_COMM_WORLD); double time0 = dtime(); for(int i=0; i<nrepeat; i++){ opr_DdagD_alt(xq2, u, bq2, 2); opr_DdagD_alt(bq2, u, xq2, 3); } MPI_Barrier(MPI_COMM_WORLD); double time_tot = dtime() - time0; double fop_mult1 = 2.0 * 1392.0 * (double)(NST); double fop_mult = (double)nrepeat * 2.0 * fop_mult1; if(me == 0){ printf("\nperformance of mult on Host:\n"); printf(" elapsed time for solver = %f\n", time_tot); printf(" floating point operations = %f\n", fop_mult); printf(" performance of mult = %f GFlops\n", fop_mult/time_tot * 1.0e-9); } HANDLE_ERROR( hipFree(bq2) ); HANDLE_ERROR( hipFree(xq2) ); } int main(int argc, char *argv[]) { real_t enorm = 1.E-16; real_t diff; int nconv; int namelen, me, nprocs; char processor_name[MPI_MAX_PROCESSOR_NAME]; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &nprocs); MPI_Comm_rank(MPI_COMM_WORLD, &me); MPI_Get_processor_name(processor_name, &namelen); printf("Process %d of %d is on %s\n", me, nprocs, processor_name); // hipSetDevice(me%NGPUS); HANDLE_ERROR( hipMalloc((void**)&vt, NVST2*sizeof(real_t)) ); HANDLE_ERROR( hipMalloc((void**)&tmp_QCDSpinor_s, 2*LT*yx_Spinor * sizeof(real_t)) ); HANDLE_ERROR( hipMalloc((void**)&tmp_QCDSpinor_r, 2*LT*yx_Spinor * sizeof(real_t)) ); HANDLE_ERROR( hipMalloc((void**)&tmp_QCDMatrix_s, 4*LT*yx_Matrix * sizeof(real_t)) ); HANDLE_ERROR( hipMalloc((void**)&tmp_QCDMatrix_r, 4*LT*yx_Matrix * sizeof(real_t)) ); if(me == 0){ printf("Simple Wilson solver\n\n"); printf("NX = %3d, NY = %3d, NZ = %3d, NT = %3d\n", NX, NY, NZ, NT); printf("LX = %3d, LY = %3d, LZ = %3d, LT = %3d\n", NX, NY, LZ, LT); printf("(PT x PZ) = (%d x %d)\n", PT, PZ); printf("CKs = %10.6f\n", CKs); printf("enorm = %12.4e\n", enorm); printf("NUM=%d LEN=%d\n", NUM_GANGS, VECTOR_LENGTH); } for(int it = 0; it < LT; it++) corr[it] = 0.0; real_t *u_dev, *xq_dev, *bq_dev; HANDLE_ERROR( hipMalloc( (void**)&u_dev, 4*LT2*LZ2*yx_Matrix*sizeof(real_t) ) ); HANDLE_ERROR( hipMalloc( (void**)&xq_dev, NVST2*sizeof(real_t) ) ); HANDLE_ERROR( hipMalloc( (void**)&bq_dev, NVST2*sizeof(real_t) ) ); create_newcomm(PT, PZ, me); create_cart(PT, PZ, me); int QCDSpinor_zyxvec = LZ * yx_Spinor; int QCDMatrix_zyxvec = LZ * yx_Matrix; int QCDSpinor_tyxvec = LT * yx_Spinor; int QCDMatrix_tyxvec = 4*LT*yx_Matrix; HANDLE_ERROR( hipMalloc((void**)&x, NVST2*sizeof(real_t)) ); HANDLE_ERROR( hipMalloc((void**)&p, NVST2*sizeof(real_t)) ); // Sleeve exchange for(int i=0;i<4;i++){ MPI_Recv_init(u_dev + (i*LT2*LZ2 + 1)*yx_Matrix, QCDMatrix_zyxvec, MPI_DOUBLE, left, i, comm_lr, &req_u[i*2]); MPI_Send_init(u_dev + (i*LT2*LZ2 + (LT2-2)*LZ2 + 1)*yx_Matrix, QCDMatrix_zyxvec, MPI_DOUBLE, right, i, comm_lr, &req_u[1+i*2]); } MPI_Recv_init(tmp_QCDMatrix_r, QCDMatrix_tyxvec, MPI_DOUBLE, up, 5, comm_ud, &req_mat[0]); MPI_Send_init(tmp_QCDMatrix_s, QCDMatrix_tyxvec, MPI_DOUBLE, down, 5, comm_ud, &req_mat[1]); MPI_Recv_init(x + ((LT2-1)*LZ2 + 1)*yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, right, 10, comm_lr, &req_w[0][0]); MPI_Recv_init(x + 
yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, left, 11, comm_lr, &req_w[0][1]); MPI_Send_init(x + (LZ2 + 1)*yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, left, 10, comm_lr, &req_w[0][2]); MPI_Send_init(x + ((LT2-2)*LZ2 + 1)*yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, right, 11, comm_lr, &req_w[0][3]); MPI_Recv_init(p + ((LT2-1)*LZ2 + 1)*yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, right, 10, comm_lr, &req_w[1][0]); MPI_Recv_init(p + yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, left, 11, comm_lr, &req_w[1][1]); MPI_Send_init(p + (LZ2 + 1)*yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, left, 10, comm_lr, &req_w[1][2]); MPI_Send_init(p + ((LT2-2)*LZ2 + 1)*yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, right, 11, comm_lr, &req_w[1][3]); MPI_Recv_init(tmp_QCDSpinor_r + LT*yx_Spinor, QCDSpinor_tyxvec, MPI_DOUBLE, down, 12, comm_ud, &req_spr[0]); MPI_Recv_init(tmp_QCDSpinor_r, QCDSpinor_tyxvec, MPI_DOUBLE, up, 13, comm_ud, &req_spr[1]); MPI_Send_init(tmp_QCDSpinor_s, QCDSpinor_tyxvec, MPI_DOUBLE, up, 12, comm_ud, &req_spr[2]); MPI_Send_init(tmp_QCDSpinor_s + LT*yx_Spinor, QCDSpinor_tyxvec, MPI_DOUBLE, down, 13, comm_ud, &req_spr[3]); MPI_Recv_init(vt + ((LT2-1)*LZ2 + 1)*yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, right, 10, comm_lr, &req_vt[0]); MPI_Recv_init(vt + yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, left, 11, comm_lr, &req_vt[1]); MPI_Send_init(vt + (LZ2 + 1)*yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, left, 10, comm_lr, &req_vt[2]); MPI_Send_init(vt + ((LT2-2)*LZ2 + 1)*yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, right, 11, comm_lr, &req_vt[3]); uinit(me, PZ, u); HANDLE_ERROR( hipMemcpy(u_dev, u, 4*LT2*LZ2*yx_Matrix*sizeof(real_t), hipMemcpyHostToDevice) ); test_mult(me, u_dev); if(me == 0){ printf("Solver:\n"); printf(" ic id nconv diff\n"); } double time_tot = 0.0; double fop_tot = 0.0; for(int ic = 0; ic < NCOL; ic++){ for(int id = 0; id < ND; id++){ hipLaunchKernelGGL(( set_src), dim3(NUM_GANGS),dim3(VECTOR_LENGTH) , 0, 0, me, ic, id, 0, 0, 0, 0, bq_dev); MPI_Barrier(MPI_COMM_WORLD); double time0 = dtime(); solve_CG(enorm, &nconv, &diff, xq_dev, u_dev, bq_dev); MPI_Barrier(MPI_COMM_WORLD); double time1 = dtime(); time_tot += time1 - time0; if(me == 0) printf(" %3d %3d %6d %12.4e\n", ic, id, nconv, diff); double fop_mult1 = 2.0 * 1392.0 * (double)(NST); double fop_mult = (double)(nconv+2) * fop_mult1; double fop_lin = (double)(4+(nconv+1)*11) * (double)(NVST); fop_tot += fop_lin + fop_mult; norm2_t(corr, xq_dev); } } real_t corr2[NT]; if(PZ != 1) MPI_Allreduce(MPI_IN_PLACE, corr, LT, MPI_DOUBLE, MPI_SUM, comm_ud); if(PT != 1) MPI_Allgather(corr, LT, MPI_DOUBLE, corr2, LT, MPI_DOUBLE, comm_lr); else memcpy(corr2, corr, sizeof(real_t)*LT); if(me == 0){ printf("\nperformance of solver:\n"); printf(" elapsed time for solver = %f\n", time_tot); printf(" floating point operations = %f\n", fop_tot); printf(" performance of solver = %f GFlops\n", fop_tot/time_tot * 1.0e-9); printf("\nsolution squared at each time slice:\n"); for(int it = 0; it < NT; it++) printf(" %6d %16.8e\n", it, corr2[it]); } #ifdef _PROF double prof_t_max[PROF_NUMS], prof_t_min[PROF_NUMS], prof_t_ave[PROF_NUMS]; MPI_Allreduce(prof_t, prof_t_max, PROF_NUMS, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); MPI_Allreduce(prof_t, prof_t_min, PROF_NUMS, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD); MPI_Allreduce(prof_t, prof_t_ave, PROF_NUMS, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); for(int i=0;i<PROF_NUMS;i++) prof_t_ave[i] /= nprocs; if(me == 0) { printf("MAX: PACK %f COMM %f OPR %f COPY %f AXPY %f NORM %f DOT %f SCAL %f\n", prof_t_max[PACK], prof_t_max[COMM], prof_t_max[OPR], 
prof_t_max[COPY], prof_t_max[AXPY], prof_t_max[NORM], prof_t_max[DOT], prof_t_max[SCAL]); printf("MIN: PACK %f COMM %f OPR %f COPY %f AXPY %f NORM %f DOT %f SCAL %f\n", prof_t_min[PACK], prof_t_min[COMM], prof_t_min[OPR], prof_t_min[COPY], prof_t_min[AXPY], prof_t_min[NORM], prof_t_min[DOT], prof_t_min[SCAL]); printf("AVE: PACK %f COMM %f OPR %f COPY %f AXPY %f NORM %f DOT %f SCAL %f\n", prof_t_ave[PACK], prof_t_ave[COMM], prof_t_ave[OPR], prof_t_ave[COPY], prof_t_ave[AXPY], prof_t_ave[NORM], prof_t_ave[DOT], prof_t_ave[SCAL]); } #endif MPI_Finalize(); return 0; }
888af95025f28635d5acfd1811a8d24b01da822a.cu
/* ****************************************************** */ /* Wilson fermion solver in C language */ /* */ /* OpenACC benchmark [5 May 0216 H.Matsufuru] */ /* */ /* Copyright(c) Hideo Matsufuru 2016 */ /* ****************************************************** */ #include "lattice.h" int left, right, up, down; real_t *vt; real_t *tmp_QCDSpinor_s, *tmp_QCDSpinor_r; real_t *tmp_QCDMatrix_s, *tmp_QCDMatrix_r; MPI_Request req_u[8], req_mat[2], req_w[4][4], req_spr[4], req_vt[4]; real_t *p, *x; static real_t u[NDF*NST2*4]; static real_t corr[LT]; #ifdef _PROF double prof_t[PROF_NUMS]; #endif MPI_Comm comm_ud, comm_lr; double dtime() { struct timeval tv; gettimeofday(&tv, NULL); return ((double)(tv.tv_sec) + (double)(tv.tv_usec) * 1.0e-6); } static void create_newcomm(const int pt, const int pz, const int me) { /* 0 4 1 5 2 6 3 7 PT = 2, PZ = 4 */ MPI_Comm_split(MPI_COMM_WORLD, me%pz, me/pz, &comm_lr); // color, key MPI_Comm_split(MPI_COMM_WORLD, me/pz, me%pz, &comm_ud); // color, key } static void uinit(const int me, const int pz, real_t *u) { int Nx2 = 8; int Ny2 = 8; int Nz2 = 8; int Nt2 = 16; int Nst2 = Nx2 * Ny2 * Nz2 * Nt2; FILE *fp; fp = fopen("conf_08080816.txt","r"); double *ur = (double*)malloc(sizeof(double) * NDF * 4 * Nst2); for(int ist = 0; ist < Nst2; ist++){ for(int idir = 0; idir < 4; idir++){ for(int idf = 0; idf < NDF; idf++){ int i = idf + ist*NDF + idir*NDF*Nst2; int ret = fscanf(fp, "%lf", &ur[i]); if(!ret){ fprintf(stderr, "Read Error!\n"); MPI_Finalize(); exit(0); } } } } fclose(fp); int idir, it, iz, iy, ix; for(idir = 0; idir < 4; idir++){ for(it = 1; it < LT2-1; it++){ for(iz = 1; iz < LZ2-1; iz++){ for(iy = 0; iy < NY; iy++){ for(ix = 0; ix < NX; ix++){ int ist = ix + NX*(iy + NY*(iz + LZ2*it)); int ix2 = ix % Nx2; int iy2 = iy % Ny2; int iz2 = ((iz-1)+((me%pz)*(LZ2-2))) % Nz2; int it2 = ((it-1)+((me/pz)*(LT2-2))) % Nt2; int ist2 = ix2 + Nx2*(iy2 + Ny2*(iz2 + Nz2*it2)); for(int idf = 0; idf < NDF; idf++){ int i = idf + NDF*(ist + idir*NX*NY*LZ2*LT2); int i2 = idf + NDF*(ist2 + idir*Nst2); u[i] = (real_t)ur[i2]; } } } } } } free(ur); } __device__ static void setconst(real_t *v, const real_t a) { int i = IDXV(threadIdx.x, blockIdx.x, blockDim.x); while(i < (LT2-2)*(LZ2-2)*yx_Spinor){ int t = i / ((LZ2-2)*yx_Spinor); int z = (i - t * (LZ2-2)*yx_Spinor)/yx_Spinor; // (i % ((LZ2-2)*yx_Spinor)) / yx_Spinor; int offset = i % yx_Spinor; v[(t+1)*LZ2*yx_Spinor + (z+1)*yx_Spinor + offset] = a; i += blockDim.x * gridDim.x; } } __global__ static void set_src(const int me, const int ic, const int id, const int ix, const int iy, const int iz, const int it, real_t *v) { setconst(v, 0.0); if(me == 0){ // fix me if(threadIdx.x == 0 && blockIdx.x == 0){ int i = 2*ic + id*NVC + NVC*ND*(ix + iy*NX + (iz+1)*NX*NY + (it+1)*NX*NY*LZ2); v[i] = 1.0; } } } static void test_mult(const int me, real_t *u) { int nrepeat = 100; real_t *bq2, *xq2; HANDLE_ERROR( cudaMalloc( (void**)&bq2, NVST2*sizeof(real_t) ) ); HANDLE_ERROR( cudaMalloc( (void**)&xq2, NVST2*sizeof(real_t) ) ); set_src <<< NUM_GANGS, VECTOR_LENGTH >>> (me, 0, 0, 0, 0, 0, 0, bq2); int QCDSpinor_zyxvec = LZ * yx_Spinor; MPI_Recv_init(bq2 + ((LT2-1)*LZ2 + 1)*yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, right, 10, comm_lr, &req_w[2][0]); MPI_Recv_init(bq2 + yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, left, 11, comm_lr, &req_w[2][1]); MPI_Send_init(bq2 + (LZ2 + 1)*yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, left, 10, comm_lr, &req_w[2][2]); MPI_Send_init(bq2 + ((LT2-2)*LZ2 + 1)*yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, right, 11, comm_lr, 
&req_w[2][3]); MPI_Recv_init(xq2 + ((LT2-1)*LZ2 + 1)*yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, right, 10, comm_lr, &req_w[3][0]); MPI_Recv_init(xq2 + yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, left, 11, comm_lr, &req_w[3][1]); MPI_Send_init(xq2 + (LZ2 + 1)*yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, left, 10, comm_lr, &req_w[3][2]); MPI_Send_init(xq2 + ((LT2-2)*LZ2 + 1)*yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, right, 11, comm_lr, &req_w[3][3]); MPI_Barrier(MPI_COMM_WORLD); double time0 = dtime(); for(int i=0; i<nrepeat; i++){ opr_DdagD_alt(xq2, u, bq2, 2); opr_DdagD_alt(bq2, u, xq2, 3); } MPI_Barrier(MPI_COMM_WORLD); double time_tot = dtime() - time0; double fop_mult1 = 2.0 * 1392.0 * (double)(NST); double fop_mult = (double)nrepeat * 2.0 * fop_mult1; if(me == 0){ printf("\nperformance of mult on Host:\n"); printf(" elapsed time for solver = %f\n", time_tot); printf(" floating point operations = %f\n", fop_mult); printf(" performance of mult = %f GFlops\n", fop_mult/time_tot * 1.0e-9); } HANDLE_ERROR( cudaFree(bq2) ); HANDLE_ERROR( cudaFree(xq2) ); } int main(int argc, char *argv[]) { real_t enorm = 1.E-16; real_t diff; int nconv; int namelen, me, nprocs; char processor_name[MPI_MAX_PROCESSOR_NAME]; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &nprocs); MPI_Comm_rank(MPI_COMM_WORLD, &me); MPI_Get_processor_name(processor_name, &namelen); printf("Process %d of %d is on %s\n", me, nprocs, processor_name); // cudaSetDevice(me%NGPUS); HANDLE_ERROR( cudaMalloc((void**)&vt, NVST2*sizeof(real_t)) ); HANDLE_ERROR( cudaMalloc((void**)&tmp_QCDSpinor_s, 2*LT*yx_Spinor * sizeof(real_t)) ); HANDLE_ERROR( cudaMalloc((void**)&tmp_QCDSpinor_r, 2*LT*yx_Spinor * sizeof(real_t)) ); HANDLE_ERROR( cudaMalloc((void**)&tmp_QCDMatrix_s, 4*LT*yx_Matrix * sizeof(real_t)) ); HANDLE_ERROR( cudaMalloc((void**)&tmp_QCDMatrix_r, 4*LT*yx_Matrix * sizeof(real_t)) ); if(me == 0){ printf("Simple Wilson solver\n\n"); printf("NX = %3d, NY = %3d, NZ = %3d, NT = %3d\n", NX, NY, NZ, NT); printf("LX = %3d, LY = %3d, LZ = %3d, LT = %3d\n", NX, NY, LZ, LT); printf("(PT x PZ) = (%d x %d)\n", PT, PZ); printf("CKs = %10.6f\n", CKs); printf("enorm = %12.4e\n", enorm); printf("NUM=%d LEN=%d\n", NUM_GANGS, VECTOR_LENGTH); } for(int it = 0; it < LT; it++) corr[it] = 0.0; real_t *u_dev, *xq_dev, *bq_dev; HANDLE_ERROR( cudaMalloc( (void**)&u_dev, 4*LT2*LZ2*yx_Matrix*sizeof(real_t) ) ); HANDLE_ERROR( cudaMalloc( (void**)&xq_dev, NVST2*sizeof(real_t) ) ); HANDLE_ERROR( cudaMalloc( (void**)&bq_dev, NVST2*sizeof(real_t) ) ); create_newcomm(PT, PZ, me); create_cart(PT, PZ, me); int QCDSpinor_zyxvec = LZ * yx_Spinor; int QCDMatrix_zyxvec = LZ * yx_Matrix; int QCDSpinor_tyxvec = LT * yx_Spinor; int QCDMatrix_tyxvec = 4*LT*yx_Matrix; HANDLE_ERROR( cudaMalloc((void**)&x, NVST2*sizeof(real_t)) ); HANDLE_ERROR( cudaMalloc((void**)&p, NVST2*sizeof(real_t)) ); // Sleeve exchange for(int i=0;i<4;i++){ MPI_Recv_init(u_dev + (i*LT2*LZ2 + 1)*yx_Matrix, QCDMatrix_zyxvec, MPI_DOUBLE, left, i, comm_lr, &req_u[i*2]); MPI_Send_init(u_dev + (i*LT2*LZ2 + (LT2-2)*LZ2 + 1)*yx_Matrix, QCDMatrix_zyxvec, MPI_DOUBLE, right, i, comm_lr, &req_u[1+i*2]); } MPI_Recv_init(tmp_QCDMatrix_r, QCDMatrix_tyxvec, MPI_DOUBLE, up, 5, comm_ud, &req_mat[0]); MPI_Send_init(tmp_QCDMatrix_s, QCDMatrix_tyxvec, MPI_DOUBLE, down, 5, comm_ud, &req_mat[1]); MPI_Recv_init(x + ((LT2-1)*LZ2 + 1)*yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, right, 10, comm_lr, &req_w[0][0]); MPI_Recv_init(x + yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, left, 11, comm_lr, &req_w[0][1]); MPI_Send_init(x + (LZ2 + 
1)*yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, left, 10, comm_lr, &req_w[0][2]); MPI_Send_init(x + ((LT2-2)*LZ2 + 1)*yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, right, 11, comm_lr, &req_w[0][3]); MPI_Recv_init(p + ((LT2-1)*LZ2 + 1)*yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, right, 10, comm_lr, &req_w[1][0]); MPI_Recv_init(p + yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, left, 11, comm_lr, &req_w[1][1]); MPI_Send_init(p + (LZ2 + 1)*yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, left, 10, comm_lr, &req_w[1][2]); MPI_Send_init(p + ((LT2-2)*LZ2 + 1)*yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, right, 11, comm_lr, &req_w[1][3]); MPI_Recv_init(tmp_QCDSpinor_r + LT*yx_Spinor, QCDSpinor_tyxvec, MPI_DOUBLE, down, 12, comm_ud, &req_spr[0]); MPI_Recv_init(tmp_QCDSpinor_r, QCDSpinor_tyxvec, MPI_DOUBLE, up, 13, comm_ud, &req_spr[1]); MPI_Send_init(tmp_QCDSpinor_s, QCDSpinor_tyxvec, MPI_DOUBLE, up, 12, comm_ud, &req_spr[2]); MPI_Send_init(tmp_QCDSpinor_s + LT*yx_Spinor, QCDSpinor_tyxvec, MPI_DOUBLE, down, 13, comm_ud, &req_spr[3]); MPI_Recv_init(vt + ((LT2-1)*LZ2 + 1)*yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, right, 10, comm_lr, &req_vt[0]); MPI_Recv_init(vt + yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, left, 11, comm_lr, &req_vt[1]); MPI_Send_init(vt + (LZ2 + 1)*yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, left, 10, comm_lr, &req_vt[2]); MPI_Send_init(vt + ((LT2-2)*LZ2 + 1)*yx_Spinor, QCDSpinor_zyxvec, MPI_DOUBLE, right, 11, comm_lr, &req_vt[3]); uinit(me, PZ, u); HANDLE_ERROR( cudaMemcpy(u_dev, u, 4*LT2*LZ2*yx_Matrix*sizeof(real_t), cudaMemcpyHostToDevice) ); test_mult(me, u_dev); if(me == 0){ printf("Solver:\n"); printf(" ic id nconv diff\n"); } double time_tot = 0.0; double fop_tot = 0.0; for(int ic = 0; ic < NCOL; ic++){ for(int id = 0; id < ND; id++){ set_src<<< NUM_GANGS,VECTOR_LENGTH >>>(me, ic, id, 0, 0, 0, 0, bq_dev); MPI_Barrier(MPI_COMM_WORLD); double time0 = dtime(); solve_CG(enorm, &nconv, &diff, xq_dev, u_dev, bq_dev); MPI_Barrier(MPI_COMM_WORLD); double time1 = dtime(); time_tot += time1 - time0; if(me == 0) printf(" %3d %3d %6d %12.4e\n", ic, id, nconv, diff); double fop_mult1 = 2.0 * 1392.0 * (double)(NST); double fop_mult = (double)(nconv+2) * fop_mult1; double fop_lin = (double)(4+(nconv+1)*11) * (double)(NVST); fop_tot += fop_lin + fop_mult; norm2_t(corr, xq_dev); } } real_t corr2[NT]; if(PZ != 1) MPI_Allreduce(MPI_IN_PLACE, corr, LT, MPI_DOUBLE, MPI_SUM, comm_ud); if(PT != 1) MPI_Allgather(corr, LT, MPI_DOUBLE, corr2, LT, MPI_DOUBLE, comm_lr); else memcpy(corr2, corr, sizeof(real_t)*LT); if(me == 0){ printf("\nperformance of solver:\n"); printf(" elapsed time for solver = %f\n", time_tot); printf(" floating point operations = %f\n", fop_tot); printf(" performance of solver = %f GFlops\n", fop_tot/time_tot * 1.0e-9); printf("\nsolution squared at each time slice:\n"); for(int it = 0; it < NT; it++) printf(" %6d %16.8e\n", it, corr2[it]); } #ifdef _PROF double prof_t_max[PROF_NUMS], prof_t_min[PROF_NUMS], prof_t_ave[PROF_NUMS]; MPI_Allreduce(prof_t, prof_t_max, PROF_NUMS, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); MPI_Allreduce(prof_t, prof_t_min, PROF_NUMS, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD); MPI_Allreduce(prof_t, prof_t_ave, PROF_NUMS, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); for(int i=0;i<PROF_NUMS;i++) prof_t_ave[i] /= nprocs; if(me == 0) { printf("MAX: PACK %f COMM %f OPR %f COPY %f AXPY %f NORM %f DOT %f SCAL %f\n", prof_t_max[PACK], prof_t_max[COMM], prof_t_max[OPR], prof_t_max[COPY], prof_t_max[AXPY], prof_t_max[NORM], prof_t_max[DOT], prof_t_max[SCAL]); printf("MIN: PACK %f COMM %f OPR %f COPY %f AXPY %f NORM 
%f DOT %f SCAL %f\n", prof_t_min[PACK], prof_t_min[COMM], prof_t_min[OPR], prof_t_min[COPY], prof_t_min[AXPY], prof_t_min[NORM], prof_t_min[DOT], prof_t_min[SCAL]); printf("AVE: PACK %f COMM %f OPR %f COPY %f AXPY %f NORM %f DOT %f SCAL %f\n", prof_t_ave[PACK], prof_t_ave[COMM], prof_t_ave[OPR], prof_t_ave[COPY], prof_t_ave[AXPY], prof_t_ave[NORM], prof_t_ave[DOT], prof_t_ave[SCAL]); } #endif MPI_Finalize(); return 0; }
02f3a5a1c5129e1529d0e12417496a89cd2ff607.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <stdio.h> #include <cmath> #include <cstdlib> #include "add.h" bool isSquare(int num){ return (floor (sqrt(num)) == sqrt(num));} int main (int argc, char* argv[]){ //variables int matDim, blockDim, threadDim; // get inputs if (argc < 4){ std::cout << "Not enough arguments. <<matrix dimension>> << block dimension>> << thread dimension>>" << std::endl; return 1; } else{ matDim = atoi (argv [1]); blockDim = atoi(argv [2]); threadDim = atoi(argv [3]); } hipDeviceProp_t prop; hipGetDeviceProperties( &prop, 0 ); // bounds checking if ( matDim <=0 || matDim >= 32000){ std::cout << "Matrix dimension not valid. Must be between 0 and 32000." << std::endl; return 1; } if ( blockDim <=0 || blockDim >= 25000 ){ std::cout << "Block dimension not valid. Must be between 0 and 25000." << std::endl; return 1; } if ( threadDim <=0 || threadDim > sqrt(prop.maxThreadsPerBlock) ){ std::cout << "Thread dimension not valid. Must be between 0 and " << sqrt(prop.maxThreadsPerBlock) << "." << std::endl; return 1; } if ( blockDim * threadDim != matDim){ std::cout << "Not enough/too many blocks and threads for given matrix dimensions" << std::endl; return 1; } // initialize more variables dim3 grid (blockDim, blockDim); dim3 block (threadDim, threadDim); //create arrays float *MatA = new float [(int)pow(matDim, 2)]; float *MatB = new float [(int)pow(matDim, 2)]; float *MatC = new float [(int)pow(matDim, 2)]; for (int i=0; i < (int)pow(matDim, 2); i++) { MatA[i] = i; MatB[i] = i; } //alloc memory float *a, *b, *c; hipMalloc( (void**)&a,(float)pow(matDim, 2) * sizeof(float) ); hipMalloc( (void**)&b, (float)pow(matDim, 2) * sizeof(float) ); hipMalloc( (void**)&c, (float)pow(matDim, 2) * sizeof(float) ); // begin timing hipEvent_t start, end; hipEventCreate(&start); hipEventCreate(&end); hipEventRecord( start, 0 ); //send to GPU hipMemcpy (a, MatA, (float)pow(matDim, 2) * sizeof(float), hipMemcpyHostToDevice); hipMemcpy (b, MatB, (float)pow(matDim, 2) * sizeof(float), hipMemcpyHostToDevice); //add hipLaunchKernelGGL(( add) , dim3(grid), dim3(block), 0, 0, a, b, c); // get result from GPU hipMemcpy (MatC, c, (float)pow(matDim, 2) * sizeof(float), hipMemcpyDeviceToHost ); //end time hipEventRecord( end, 0 ); hipEventSynchronize( end ); //for testing output /*for (int i = 0; i < matDim; i++){ for (int j = 0; j < matDim; j++){ std::cout << MatC[(i*matDim)+j] << " "; } std::cout << std::endl; }*/ float elapsedTime; hipEventElapsedTime( &elapsedTime, start, end ); std::cout << "Time: " << elapsedTime << " ms." << std::endl; //dealloc memory hipEventDestroy( start ); hipEventDestroy( end ); hipFree (a); hipFree (b); hipFree (c); delete [] MatA; MatA = NULL; delete [] MatB; MatB = NULL; delete [] MatC; MatC = NULL; }
02f3a5a1c5129e1529d0e12417496a89cd2ff607.cu
#include <iostream> #include <stdio.h> #include <cmath> #include <cstdlib> #include "add.h" bool isSquare(int num){ return (floor (sqrt(num)) == sqrt(num));} int main (int argc, char* argv[]){ //variables int matDim, blockDim, threadDim; // get inputs if (argc < 4){ std::cout << "Not enough arguments. <<matrix dimension>> << block dimension>> << thread dimension>>" << std::endl; return 1; } else{ matDim = atoi (argv [1]); blockDim = atoi(argv [2]); threadDim = atoi(argv [3]); } cudaDeviceProp prop; cudaGetDeviceProperties( &prop, 0 ); // bounds checking if ( matDim <=0 || matDim >= 32000){ std::cout << "Matrix dimension not valid. Must be between 0 and 32000." << std::endl; return 1; } if ( blockDim <=0 || blockDim >= 25000 ){ std::cout << "Block dimension not valid. Must be between 0 and 25000." << std::endl; return 1; } if ( threadDim <=0 || threadDim > sqrt(prop.maxThreadsPerBlock) ){ std::cout << "Thread dimension not valid. Must be between 0 and " << sqrt(prop.maxThreadsPerBlock) << "." << std::endl; return 1; } if ( blockDim * threadDim != matDim){ std::cout << "Not enough/too many blocks and threads for given matrix dimensions" << std::endl; return 1; } // initialize more variables dim3 grid (blockDim, blockDim); dim3 block (threadDim, threadDim); //create arrays float *MatA = new float [(int)pow(matDim, 2)]; float *MatB = new float [(int)pow(matDim, 2)]; float *MatC = new float [(int)pow(matDim, 2)]; for (int i=0; i < (int)pow(matDim, 2); i++) { MatA[i] = i; MatB[i] = i; } //alloc memory float *a, *b, *c; cudaMalloc( (void**)&a,(float)pow(matDim, 2) * sizeof(float) ); cudaMalloc( (void**)&b, (float)pow(matDim, 2) * sizeof(float) ); cudaMalloc( (void**)&c, (float)pow(matDim, 2) * sizeof(float) ); // begin timing cudaEvent_t start, end; cudaEventCreate(&start); cudaEventCreate(&end); cudaEventRecord( start, 0 ); //send to GPU cudaMemcpy (a, MatA, (float)pow(matDim, 2) * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy (b, MatB, (float)pow(matDim, 2) * sizeof(float), cudaMemcpyHostToDevice); //add add <<<grid, block>>> (a, b, c); // get result from GPU cudaMemcpy (MatC, c, (float)pow(matDim, 2) * sizeof(float), cudaMemcpyDeviceToHost ); //end time cudaEventRecord( end, 0 ); cudaEventSynchronize( end ); //for testing output /*for (int i = 0; i < matDim; i++){ for (int j = 0; j < matDim; j++){ std::cout << MatC[(i*matDim)+j] << " "; } std::cout << std::endl; }*/ float elapsedTime; cudaEventElapsedTime( &elapsedTime, start, end ); std::cout << "Time: " << elapsedTime << " ms." << std::endl; //dealloc memory cudaEventDestroy( start ); cudaEventDestroy( end ); cudaFree (a); cudaFree (b); cudaFree (c); delete [] MatA; MatA = NULL; delete [] MatB; MatB = NULL; delete [] MatC; MatC = NULL; }
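Neither version of this timing harness checks whether the add launch actually succeeded, so an invalid block/grid configuration would simply report a near-zero elapsed time. A small check that could sit right after the launch (a sketch in the CUDA spelling; the error-handling style is an assumption, not part of the original):

add <<<grid, block>>> (a, b, c);
cudaError_t launchErr = cudaGetLastError();   // reports invalid launch configurations
if (launchErr != cudaSuccess) {
    std::cerr << "Kernel launch failed: " << cudaGetErrorString(launchErr) << std::endl;
    return 1;
}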
d6cd31c4909806489c8e73ee9d6a0e1b771f7203.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #define W 500 #define H 500 #define TX 32 #define TY 32 __device__ unsigned char clip(int n){ return n > 255 ? 255 : (n < 0 ? 0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos){ const int c = blockIdx.x * blockDim.x + threadIdx.x; const int r = blockIdx.y * blockDim.y + threadIdx.y; const int i = r * w + c; if ((c >= w) || (r >= h)) return; // Compute the distance of pixel (c, r) from pos const float d = sqrtf((c - pos.x) * (c - pos.x) + (r - pos.y) * (r - pos.y)); // Convert distance to intensity value on interval [0, 255] const unsigned char intensity = clip(255 - d); d_out[i].x = intensity; d_out[i].y = intensity; d_out[i].z = 0; d_out[i].w = 255; //fully opaque } int main() { uchar4 *out = (uchar4*) calloc(W*H, sizeof(uchar4)); uchar4 *d_out; hipMalloc(&d_out, W*H*sizeof(uchar4)); const int2 pos = {0, 0}; const dim3 blockSize(TX, TY); const int bx = (W + TX - 1) / TX; const int by = (H + TY - 1) / TY; const dim3 gridSize = dim3(bx, by); hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, W, H, pos); // Copy results to host hipMemcpy(out, d_out, W*H*sizeof(uchar4), hipMemcpyDeviceToHost); // Free Memory hipFree(d_out); free(out); return 0; }
d6cd31c4909806489c8e73ee9d6a0e1b771f7203.cu
#include <stdlib.h> #define W 500 #define H 500 #define TX 32 #define TY 32 __device__ unsigned char clip(int n){ return n > 255 ? 255 : (n < 0 ? 0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos){ const int c = blockIdx.x * blockDim.x + threadIdx.x; const int r = blockIdx.y * blockDim.y + threadIdx.y; const int i = r * w + c; if ((c >= w) || (r >= h)) return; // Compute the distance of pixel (c, r) from pos const float d = sqrtf((c - pos.x) * (c - pos.x) + (r - pos.y) * (r - pos.y)); // Convert distance to intensity value on interval [0, 255] const unsigned char intensity = clip(255 - d); d_out[i].x = intensity; d_out[i].y = intensity; d_out[i].z = 0; d_out[i].w = 255; //fully opaque } int main() { uchar4 *out = (uchar4*) calloc(W*H, sizeof(uchar4)); uchar4 *d_out; cudaMalloc(&d_out, W*H*sizeof(uchar4)); const int2 pos = {0, 0}; const dim3 blockSize(TX, TY); const int bx = (W + TX - 1) / TX; const int by = (H + TY - 1) / TY; const dim3 gridSize = dim3(bx, by); distanceKernel<<<gridSize, blockSize>>> (d_out, W, H, pos); // Copy results to host cudaMemcpy(out, d_out, W*H*sizeof(uchar4), cudaMemcpyDeviceToHost); // Free Memory cudaFree(d_out); free(out); return 0; }
06d6522cda82068d88aa3f09926e5bdfbbea993f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "csv_common.h" #include "csv_gpu.h" #include "datetime.cuh" #include <io/utilities/block_utils.cuh> #include <io/utilities/parsing_utils.cuh> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/fixed_point/fixed_point.hpp> #include <cudf/lists/list_view.cuh> #include <cudf/null_mask.hpp> #include <cudf/strings/string_view.cuh> #include <cudf/structs/struct_view.hpp> #include <cudf/utilities/bit.hpp> #include <cudf/utilities/error.hpp> #include <cudf/utilities/span.hpp> #include <cudf/utilities/traits.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <io/utilities/trie.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/detail/copy.h> #include <thrust/transform.h> #include <type_traits> using namespace ::cudf::io; using cudf::device_span; namespace cudf { namespace io { namespace csv { namespace gpu { /// Block dimension for dtype detection and conversion kernels constexpr uint32_t csvparse_block_dim = 128; /* * @brief Returns true is the input character is a valid digit. * Supports both decimal and hexadecimal digits (uppercase and lowercase). * * @param c Character to check * @param is_hex Whether to check as a hexadecimal * * @return `true` if it is digit-like, `false` otherwise */ __device__ __inline__ bool is_digit(char c, bool is_hex = false) { if (c >= '0' && c <= '9') return true; if (is_hex) { if (c >= 'A' && c <= 'F') return true; if (c >= 'a' && c <= 'f') return true; } return false; } /* * @brief Checks whether the given character counters indicate a potentially * valid date and/or time field. * * For performance and simplicity, we detect only the most common date * formats. Example formats that are detectable: * * `2001/02/30` * `2001-02-30 00:00:00` * `2/30/2001 T04:05:60.7` * `2 / 1 / 2011` * `02/January` * * @param len Number of non special-symbol or numeric characters * @param decimal_count Number of '.' characters * @param colon_count Number of ':' characters * @param dash_count Number of '-' characters * @param slash_count Number of '/' characters * * @return `true` if it is date-like, `false` otherwise */ __device__ __inline__ bool is_datetime( long len, long decimal_count, long colon_count, long dash_count, long slash_count) { // Must not exceed count of longest month (September) plus `T` time indicator if (len > 10) { return false; } // Must not exceed more than one decimals or more than two time separators if (decimal_count > 1 || colon_count > 2) { return false; } // Must have one or two '-' or '/' but not both as date separators if ((dash_count > 0 && dash_count < 3 && slash_count == 0) || (dash_count == 0 && slash_count > 0 && slash_count < 3)) { return true; } return false; } /* * @brief Returns true if the counters indicate a potentially valid float. 
* False positives are possible because positions are not taken into account. * For example, field "e.123-" would match the pattern. * * @param len Number of non special-symbol or numeric characters * @param digit_count Number of digits characters * @param decimal_count Number of occurrences of the decimal point character * @param thousands_count Number of occurrences of the thousands separator character * @param dash_count Number of '-' characters * @param exponent_count Number of 'e or E' characters * * @return `true` if it is floating point-like, `false` otherwise */ __device__ __inline__ bool is_floatingpoint(long len, long digit_count, long decimal_count, long thousands_count, long dash_count, long exponent_count) { // Can't have more than one exponent and one decimal point if (decimal_count > 1) return false; if (exponent_count > 1) return false; // Without the exponent or a decimal point, this is an integer, not a float if (decimal_count == 0 && exponent_count == 0) return false; // Can only have one '-' per component if (dash_count > 1 + exponent_count) return false; // If anything other than these characters is present, it's not a float if (digit_count + decimal_count + dash_count + exponent_count + thousands_count != len) { return false; } // Needs at least 1 digit, 2 if exponent is present if (digit_count < 1 + exponent_count) return false; return true; } /* * @brief CUDA kernel that parses and converts CSV data into cuDF column data. * * Data is processed in one row/record at a time, so the number of total * threads (tid) is equal to the number of rows. * * @param opts A set of parsing options * @param csv_text The entire CSV data to read * @param column_flags Per-column parsing behavior flags * @param row_offsets The start the CSV data of interest * @param d_column_data The count for each column data type */ __global__ void __launch_bounds__(csvparse_block_dim) data_type_detection(parse_options_view const opts, device_span<char const> csv_text, device_span<column_parse::flags const> const column_flags, device_span<uint64_t const> const row_offsets, device_span<column_type_histogram> d_column_data) { auto const raw_csv = csv_text.data(); // ThreadIds range per block, so also need the blockId // This is entry into the fields; threadId is an element within `num_records` long const rec_id = threadIdx.x + (blockDim.x * blockIdx.x); long const rec_id_next = rec_id + 1; // we can have more threads than data, make sure we are not past the end of // the data if (rec_id_next >= row_offsets.size()) { return; } auto field_start = raw_csv + row_offsets[rec_id]; auto const row_end = raw_csv + row_offsets[rec_id_next]; auto next_field = field_start; int col = 0; int actual_col = 0; // Going through all the columns of a given record while (col < column_flags.size() && field_start <= row_end) { auto next_delimiter = cudf::io::gpu::seek_field_end(field_start, row_end, opts); // Checking if this is a column that the user wants --- user can filter columns if (column_flags[col] & column_parse::enabled) { // points to last character in the field auto const field_len = static_cast<size_t>(next_delimiter - field_start); if (serialized_trie_contains(opts.trie_na, {field_start, field_len})) { atomicAdd(&d_column_data[actual_col].null_count, 1); } else if (serialized_trie_contains(opts.trie_true, {field_start, field_len}) || serialized_trie_contains(opts.trie_false, {field_start, field_len})) { atomicAdd(&d_column_data[actual_col].bool_count, 1); } else if (cudf::io::is_infinity(field_start, 
next_delimiter)) { atomicAdd(&d_column_data[actual_col].float_count, 1); } else { long count_number = 0; long count_decimal = 0; long count_thousands = 0; long count_slash = 0; long count_dash = 0; long count_plus = 0; long count_colon = 0; long count_string = 0; long count_exponent = 0; // Modify field_start & end to ignore whitespace and quotechars // This could possibly result in additional empty fields auto const trimmed_field_range = trim_whitespaces_quotes(field_start, next_delimiter); auto const trimmed_field_len = trimmed_field_range.second - trimmed_field_range.first; for (auto cur = trimmed_field_range.first; cur < trimmed_field_range.second; ++cur) { if (is_digit(*cur)) { count_number++; continue; } if (*cur == opts.decimal) { count_decimal++; continue; } if (*cur == opts.thousands) { count_thousands++; continue; } // Looking for unique characters that will help identify column types. switch (*cur) { case '-': count_dash++; break; case '+': count_plus++; break; case '/': count_slash++; break; case ':': count_colon++; break; case 'e': case 'E': if (cur > trimmed_field_range.first && cur < trimmed_field_range.second - 1) count_exponent++; break; default: count_string++; break; } } // Integers have to have the length of the string // Off by one if they start with a minus sign auto const int_req_number_cnt = trimmed_field_len - count_thousands - ((*trimmed_field_range.first == '-' || *trimmed_field_range.first == '+') && trimmed_field_len > 1); if (column_flags[col] & column_parse::as_datetime) { // PANDAS uses `object` dtype if the date is unparseable if (is_datetime(count_string, count_decimal, count_colon, count_dash, count_slash)) { atomicAdd(&d_column_data[actual_col].datetime_count, 1); } else { atomicAdd(&d_column_data[actual_col].string_count, 1); } } else if (count_number == int_req_number_cnt) { auto const is_negative = (*trimmed_field_range.first == '-'); auto const data_begin = trimmed_field_range.first + (is_negative || (*trimmed_field_range.first == '+')); cudf::size_type *ptr = cudf::io::gpu::infer_integral_field_counter( data_begin, data_begin + count_number, is_negative, d_column_data[actual_col]); atomicAdd(ptr, 1); } else if (is_floatingpoint(trimmed_field_len, count_number, count_decimal, count_thousands, count_dash + count_plus, count_exponent)) { atomicAdd(&d_column_data[actual_col].float_count, 1); } else { atomicAdd(&d_column_data[actual_col].string_count, 1); } } actual_col++; } next_field = next_delimiter + 1; field_start = next_field; col++; } } template <typename T, int base> __inline__ __device__ T decode_value(char const *begin, char const *end, parse_options_view const &opts) { return cudf::io::parse_numeric<T, base>(begin, end, opts); } template <typename T> __inline__ __device__ T decode_value(char const *begin, char const *end, parse_options_view const &opts) { return cudf::io::parse_numeric<T>(begin, end, opts); } template <> __inline__ __device__ cudf::timestamp_D decode_value(char const *begin, char const *end, parse_options_view const &opts) { return timestamp_D{cudf::duration_D{to_date(begin, end, opts.dayfirst)}}; } template <> __inline__ __device__ cudf::timestamp_s decode_value(char const *begin, char const *end, parse_options_view const &opts) { auto milli = to_date_time(begin, end, opts.dayfirst); if (milli == -1) { return timestamp_s{cudf::duration_s{to_non_negative_integer<int64_t>(begin, end)}}; } else { return timestamp_s{cudf::duration_s{milli / 1000}}; } } template <> __inline__ __device__ cudf::timestamp_ms decode_value(char const 
*begin, char const *end, parse_options_view const &opts) { auto milli = to_date_time(begin, end, opts.dayfirst); if (milli == -1) { return timestamp_ms{cudf::duration_ms{to_non_negative_integer<int64_t>(begin, end)}}; } else { return timestamp_ms{cudf::duration_ms{milli}}; } } template <> __inline__ __device__ cudf::timestamp_us decode_value(char const *begin, char const *end, parse_options_view const &opts) { auto milli = to_date_time(begin, end, opts.dayfirst); if (milli == -1) { return timestamp_us{cudf::duration_us{to_non_negative_integer<int64_t>(begin, end)}}; } else { return timestamp_us{cudf::duration_us{milli * 1000}}; } } template <> __inline__ __device__ cudf::timestamp_ns decode_value(char const *begin, char const *end, parse_options_view const &opts) { auto milli = to_date_time(begin, end, opts.dayfirst); if (milli == -1) { return timestamp_ns{cudf::duration_ns{to_non_negative_integer<int64_t>(begin, end)}}; } else { return timestamp_ns{cudf::duration_ns{milli * 1000000}}; } } #ifndef DURATION_DECODE_VALUE #define DURATION_DECODE_VALUE(Type) \ template <> \ __inline__ __device__ Type decode_value( \ const char *begin, const char *end, parse_options_view const &opts) \ { \ return Type{to_time_delta<Type>(begin, end)}; \ } #endif DURATION_DECODE_VALUE(duration_D) DURATION_DECODE_VALUE(duration_s) DURATION_DECODE_VALUE(duration_ms) DURATION_DECODE_VALUE(duration_us) DURATION_DECODE_VALUE(duration_ns) // The purpose of this is merely to allow compilation ONLY // TODO : make this work for csv template <> __inline__ __device__ cudf::string_view decode_value(char const *begin, char const *end, parse_options_view const &opts) { return cudf::string_view{}; } // The purpose of this is merely to allow compilation ONLY template <> __inline__ __device__ cudf::dictionary32 decode_value(char const *begin, char const *end, parse_options_view const &opts) { return cudf::dictionary32{}; } // The purpose of this is merely to allow compilation ONLY // TODO : make this work for csv template <> __inline__ __device__ cudf::list_view decode_value(char const *begin, char const *end, parse_options_view const &opts) { return cudf::list_view{}; } // The purpose of this is merely to allow compilation ONLY // TODO : make this work for csv template <> __inline__ __device__ numeric::decimal32 decode_value(char const *begin, char const *end, parse_options_view const &opts) { return numeric::decimal32{}; } // The purpose of this is merely to allow compilation ONLY // TODO : make this work for csv template <> __inline__ __device__ numeric::decimal64 decode_value(char const *begin, char const *end, parse_options_view const &opts) { return numeric::decimal64{}; } // The purpose of this is merely to allow compilation ONLY // TODO : make this work for csv template <> __inline__ __device__ cudf::struct_view decode_value(char const *begin, char const *end, parse_options_view const &opts) { return cudf::struct_view{}; } /** * @brief Functor for converting CSV raw data to typed value. */ struct decode_op { /** * @brief Dispatch for numeric types whose values can be convertible to * 0 or 1 to represent boolean false/true, based upon checking against a * true/false values list. * * @return bool Whether the parsed value is valid. 
*/ template <typename T, typename std::enable_if_t<std::is_integral<T>::value and !std::is_same<T, bool>::value> * = nullptr> __host__ __device__ __forceinline__ bool operator()(void *out_buffer, size_t row, char const *begin, char const *end, parse_options_view const &opts, column_parse::flags flags) { static_cast<T *>(out_buffer)[row] = [&flags, &opts, begin, end]() -> T { // Check for user-specified true/false values auto const field_len = static_cast<size_t>(end - begin); if (serialized_trie_contains(opts.trie_true, {begin, field_len})) { return 1; } if (serialized_trie_contains(opts.trie_false, {begin, field_len})) { return 0; } return flags & column_parse::as_hexadecimal ? decode_value<T, 16>(begin, end, opts) : decode_value<T>(begin, end, opts); }(); return true; } /** * @brief Dispatch for boolean type types. */ template <typename T, typename std::enable_if_t<std::is_same<T, bool>::value> * = nullptr> __host__ __device__ __forceinline__ bool operator()(void *out_buffer, size_t row, char const *begin, char const *end, parse_options_view const &opts, column_parse::flags flags) { static_cast<T *>(out_buffer)[row] = [&opts, begin, end]() { // Check for user-specified true/false values auto const field_len = static_cast<size_t>(end - begin); if (serialized_trie_contains(opts.trie_true, {begin, field_len})) { return true; } if (serialized_trie_contains(opts.trie_false, {begin, field_len})) { return false; } return decode_value<T>(begin, end, opts); }(); return true; } /** * @brief Dispatch for floating points, which are set to NaN if the input * is not valid. In such case, the validity mask is set to zero too. */ template <typename T, typename std::enable_if_t<std::is_floating_point<T>::value> * = nullptr> __host__ __device__ __forceinline__ bool operator()(void *out_buffer, size_t row, char const *begin, char const *end, parse_options_view const &opts, column_parse::flags flags) { T const value = decode_value<T>(begin, end, opts); static_cast<T *>(out_buffer)[row] = value; return !std::isnan(value); } /** * @brief Dispatch for all other types. */ template <typename T, typename std::enable_if_t<!std::is_integral<T>::value and !std::is_floating_point<T>::value> * = nullptr> __host__ __device__ __forceinline__ bool operator()(void *out_buffer, size_t row, char const *begin, char const *end, parse_options_view const &opts, column_parse::flags flags) { static_cast<T *>(out_buffer)[row] = decode_value<T>(begin, end, opts); return true; } }; /** * @brief CUDA kernel that parses and converts CSV data into cuDF column data. * * Data is processed one record at a time * * @param[in] options A set of parsing options * @param[in] data The entire CSV data to read * @param[in] column_flags Per-column parsing behavior flags * @param[in] row_offsets The start the CSV data of interest * @param[in] dtypes The data type of the column * @param[out] columns The output column data * @param[out] valids The bitmaps indicating whether column fields are valid */ __global__ void __launch_bounds__(csvparse_block_dim) convert_csv_to_cudf(cudf::io::parse_options_view options, device_span<char const> data, device_span<column_parse::flags const> column_flags, device_span<uint64_t const> row_offsets, device_span<cudf::data_type const> dtypes, device_span<void *const> columns, device_span<cudf::bitmask_type *const> valids) { auto const raw_csv = data.data(); // thread IDs range per block, so also need the block id. 
// this is entry into the field array - tid is an elements within the num_entries array long const rec_id = threadIdx.x + (blockDim.x * blockIdx.x); long const rec_id_next = rec_id + 1; // we can have more threads than data, make sure we are not past the end of // the data if (rec_id_next >= row_offsets.size()) return; auto field_start = raw_csv + row_offsets[rec_id]; auto const row_end = raw_csv + row_offsets[rec_id_next]; auto next_field = field_start; int col = 0; int actual_col = 0; while (col < column_flags.size() && field_start <= row_end) { auto next_delimiter = cudf::io::gpu::seek_field_end(next_field, row_end, options); if (column_flags[col] & column_parse::enabled) { // check if the entire field is a NaN string - consistent with pandas auto const is_valid = !serialized_trie_contains( options.trie_na, {field_start, static_cast<size_t>(next_delimiter - field_start)}); // Modify field_start & end to ignore whitespace and quotechars auto field_end = next_delimiter; if (is_valid && dtypes[actual_col].id() != cudf::type_id::STRING) { auto const trimmed_field = trim_whitespaces_quotes(field_start, field_end, options.quotechar); field_start = trimmed_field.first; field_end = trimmed_field.second; } if (is_valid) { // Type dispatcher does not handle STRING if (dtypes[actual_col].id() == cudf::type_id::STRING) { auto end = next_delimiter; if (options.keepquotes == false) { if ((*field_start == options.quotechar) && (*(end - 1) == options.quotechar)) { ++field_start; --end; } } auto str_list = static_cast<std::pair<const char *, size_t> *>(columns[actual_col]); str_list[rec_id].first = field_start; str_list[rec_id].second = end - field_start; } else { if (cudf::type_dispatcher(dtypes[actual_col], decode_op{}, columns[actual_col], rec_id, field_start, field_end, options, column_flags[col])) { // set the valid bitmap - all bits were set to 0 to start set_bit(valids[actual_col], rec_id); } } } else if (dtypes[actual_col].id() == cudf::type_id::STRING) { auto str_list = static_cast<std::pair<const char *, size_t> *>(columns[actual_col]); str_list[rec_id].first = nullptr; str_list[rec_id].second = 0; } ++actual_col; } next_field = next_delimiter + 1; field_start = next_field; ++col; } } /* * @brief Merge two packed row contexts (each corresponding to a block of characters) * and return the packed row context corresponding to the merged character block */ inline __device__ packed_rowctx_t merge_row_contexts(packed_rowctx_t first_ctx, packed_rowctx_t second_ctx) { uint32_t id0 = get_row_context(first_ctx, ROW_CTX_NONE) & 3; uint32_t id1 = get_row_context(first_ctx, ROW_CTX_QUOTE) & 3; uint32_t id2 = get_row_context(first_ctx, ROW_CTX_COMMENT) & 3; return (first_ctx & ~pack_row_contexts(3, 3, 3)) + pack_row_contexts(get_row_context(second_ctx, id0), get_row_context(second_ctx, id1), get_row_context(second_ctx, id2)); } /* * @brief Per-character context: * 1-bit count (0 or 1) per context in the lower 4 bits * 2-bit output context id per input context in bits 8..15 */ constexpr __device__ uint32_t make_char_context(uint32_t id0, uint32_t id1, uint32_t id2 = ROW_CTX_COMMENT, uint32_t c0 = 0, uint32_t c1 = 0, uint32_t c2 = 0) { return (id0 << 8) | (id1 << 10) | (id2 << 12) | (ROW_CTX_EOF << 14) | (c0) | (c1 << 1) | (c2 << 2); } /* * @brief Merge a 1-character context to keep track of bitmasks where new rows occur * Merges a single-character "block" row context at position pos with the current * block's row context (the current block contains 32-pos characters) * * @param ctx Current block context and 
new rows bitmaps * @param char_ctx state transitions associated with new character * @param pos Position within the current 32-character block * * NOTE: This is probably the most performance-critical piece of the row gathering kernel. * The char_ctx value should be created via make_char_context, and its value should * have been evaluated at compile-time. */ inline __device__ void merge_char_context(uint4 &ctx, uint32_t char_ctx, uint32_t pos) { uint32_t id0 = (ctx.w >> 0) & 3; uint32_t id1 = (ctx.w >> 2) & 3; uint32_t id2 = (ctx.w >> 4) & 3; // Set the newrow bit in the bitmap at the corresponding position ctx.x |= ((char_ctx >> id0) & 1) << pos; ctx.y |= ((char_ctx >> id1) & 1) << pos; ctx.z |= ((char_ctx >> id2) & 1) << pos; // Update the output context ids ctx.w = ((char_ctx >> (8 + id0 * 2)) & 0x03) | ((char_ctx >> (6 + id1 * 2)) & 0x0c) | ((char_ctx >> (4 + id2 * 2)) & 0x30) | (ROW_CTX_EOF << 6); } /* * Convert the context-with-row-bitmaps version to a packed row context */ inline __device__ packed_rowctx_t pack_rowmaps(uint4 ctx_map) { return pack_row_contexts(make_row_context(__popc(ctx_map.x), (ctx_map.w >> 0) & 3), make_row_context(__popc(ctx_map.y), (ctx_map.w >> 2) & 3), make_row_context(__popc(ctx_map.z), (ctx_map.w >> 4) & 3)); } /* * Selects the row bitmap corresponding to the given parser state */ inline __device__ uint32_t select_rowmap(uint4 ctx_map, uint32_t ctxid) { return (ctxid == ROW_CTX_NONE) ? ctx_map.x : (ctxid == ROW_CTX_QUOTE) ? ctx_map.y : (ctxid == ROW_CTX_COMMENT) ? ctx_map.z : 0; } /** * @brief Single pair-wise 512-wide row context merge transform * * Merge row context blocks and record the merge operation in a context * tree so that the transform is reversible. * The tree is organized such that the left and right children of node n * are located at indices n*2 and n*2+1, the root node starting at index 1 * * @tparam lanemask mask to specify source of packed row context * @tparam tmask mask to specify principle thread for merging row context * @tparam base start location for writing into packed row context tree * @tparam level_scale level of the node in the tree * @param ctxtree[out] packed row context tree * @param ctxb[in] packed row context for the current character block * @param t thread id (leaf node id) */ template <uint32_t lanemask, uint32_t tmask, uint32_t base, uint32_t level_scale> inline __device__ void ctx_merge(uint64_t *ctxtree, packed_rowctx_t *ctxb, uint32_t t) { uint64_t tmp = shuffle_xor(*ctxb, lanemask); if (!(t & tmask)) { *ctxb = merge_row_contexts(*ctxb, tmp); ctxtree[base + (t >> level_scale)] = *ctxb; } } /** * @brief Single 512-wide row context inverse merge transform * * Walks the context tree starting from a root node * * @tparam rmask Mask to specify which threads write input row context * @param[in] base Start read location of the merge transform tree * @param[in] ctxtree Merge transform tree * @param[in] ctx Input context * @param[in] brow4 output row in block *4 * @param[in] t thread id (leaf node id) */ template <uint32_t rmask> inline __device__ void ctx_unmerge( uint32_t base, uint64_t *ctxtree, uint32_t *ctx, uint32_t *brow4, uint32_t t) { rowctx32_t ctxb_left, ctxb_right, ctxb_sum; ctxb_sum = get_row_context(ctxtree[base], *ctx); ctxb_left = get_row_context(ctxtree[(base)*2 + 0], *ctx); ctxb_right = get_row_context(ctxtree[(base)*2 + 1], ctxb_left & 3); if (t & (rmask)) { *brow4 += (ctxb_sum & ~3) - (ctxb_right & ~3); *ctx = ctxb_left & 3; } } /* * @brief 512-wide row context merge transform * * Repeatedly merge row 
context blocks, keeping track of each merge operation * in a context tree so that the transform is reversible * The tree is organized such that the left and right children of node n * are located at indices n*2 and n*2+1, the root node starting at index 1 * * Each node contains the counts and output contexts corresponding to the * possible input contexts. * Each parent node's count is obtained by adding the corresponding counts * from the left child node with the right child node's count selected from * the left child node's output context: * parent.count[k] = left.count[k] + right.count[left.outctx[k]] * parent.outctx[k] = right.outctx[left.outctx[k]] * * @param ctxtree[out] packed row context tree * @param ctxb[in] packed row context for the current character block * @param t thread id (leaf node id) */ static inline __device__ void rowctx_merge_transform(uint64_t ctxtree[1024], packed_rowctx_t ctxb, uint32_t t) { ctxtree[512 + t] = ctxb; ctx_merge<1, 0x1, 256, 1>(ctxtree, &ctxb, t); ctx_merge<2, 0x3, 128, 2>(ctxtree, &ctxb, t); ctx_merge<4, 0x7, 64, 3>(ctxtree, &ctxb, t); ctx_merge<8, 0xf, 32, 4>(ctxtree, &ctxb, t); __syncthreads(); if (t < 32) { ctxb = ctxtree[32 + t]; ctx_merge<1, 0x1, 16, 1>(ctxtree, &ctxb, t); ctx_merge<2, 0x3, 8, 2>(ctxtree, &ctxb, t); ctx_merge<4, 0x7, 4, 3>(ctxtree, &ctxb, t); ctx_merge<8, 0xf, 2, 4>(ctxtree, &ctxb, t); // Final stage uint64_t tmp = shuffle_xor(ctxb, 16); if (t == 0) { ctxtree[1] = merge_row_contexts(ctxb, tmp); } } } /* * @brief 512-wide row context inverse merge transform * * Walks the context tree starting from the root node (index 1) using * the starting context in node index 0. * The return value is the starting row and input context for the given leaf node * * @param[in] ctxtree Merge transform tree * @param[in] t thread id (leaf node id) * * @return Final row context and count (row_position*4 + context_id format) */ static inline __device__ rowctx32_t rowctx_inverse_merge_transform(uint64_t ctxtree[1024], uint32_t t) { uint32_t ctx = ctxtree[0] & 3; // Starting input context rowctx32_t brow4 = 0; // output row in block *4 ctx_unmerge<256>(1, ctxtree, &ctx, &brow4, t); ctx_unmerge<128>(2 + (t >> 8), ctxtree, &ctx, &brow4, t); ctx_unmerge<64>(4 + (t >> 7), ctxtree, &ctx, &brow4, t); ctx_unmerge<32>(8 + (t >> 6), ctxtree, &ctx, &brow4, t); ctx_unmerge<16>(16 + (t >> 5), ctxtree, &ctx, &brow4, t); ctx_unmerge<8>(32 + (t >> 4), ctxtree, &ctx, &brow4, t); ctx_unmerge<4>(64 + (t >> 3), ctxtree, &ctx, &brow4, t); ctx_unmerge<2>(128 + (t >> 2), ctxtree, &ctx, &brow4, t); ctx_unmerge<1>(256 + (t >> 1), ctxtree, &ctx, &brow4, t); return brow4 + ctx; } /** * @brief Gather row offsets from CSV character data split into 16KB chunks * * This is done in two phases: the first phase returns the possible row counts * per 16K character block for each possible parsing context at the start of the block, * along with the resulting parsing context at the end of the block. * The caller can then compute the actual parsing context at the beginning of each * individual block and total row count. * The second phase outputs the location of each row in the block, using the parsing * context and initial row counter accumulated from the results of the previous phase. * Row parsing context will be updated after phase 2 such that the value contains * the number of rows starting at byte_range_end or beyond. 
* * @param row_ctx Row parsing context (output of phase 1 or input to phase 2) * @param offsets_out Row offsets (nullptr for phase1, non-null indicates phase 2) * @param data Base pointer of character data (all row offsets are relative to this) * @param chunk_size Total number of characters to parse * @param parse_pos Current parsing position in the file * @param start_offset Position of the start of the character buffer in the file * @param data_size CSV file size * @param byte_range_start Ignore rows starting before this position in the file * @param byte_range_end In phase 2, store the number of rows beyond range in row_ctx * @param skip_rows Number of rows to skip (ignored in phase 1) * @param terminator Line terminator character * @param delimiter Column delimiter character * @param quotechar Quote character * @param escapechar Delimiter escape character * @param commentchar Comment line character (skip rows starting with this character) */ __global__ void __launch_bounds__(rowofs_block_dim) gather_row_offsets_gpu(uint64_t *row_ctx, device_span<uint64_t> offsets_out, device_span<char const> const data, size_t chunk_size, size_t parse_pos, size_t start_offset, size_t data_size, size_t byte_range_start, size_t byte_range_end, size_t skip_rows, int terminator, int delimiter, int quotechar, int escapechar, int commentchar) { auto start = data.begin(); using block_reduce = typename hipcub::BlockReduce<uint32_t, rowofs_block_dim>; __shared__ union { typename block_reduce::TempStorage bk_storage; __align__(8) uint64_t ctxtree[rowofs_block_dim * 2]; } temp_storage; const char *end = start + (min(parse_pos + chunk_size, data_size) - start_offset); uint32_t t = threadIdx.x; size_t block_pos = (parse_pos - start_offset) + blockIdx.x * static_cast<size_t>(rowofs_block_bytes) + t * 32; const char *cur = start + block_pos; // Initial state is neutral context (no state transitions), zero rows uint4 ctx_map = { .x = 0, .y = 0, .z = 0, .w = (ROW_CTX_NONE << 0) | (ROW_CTX_QUOTE << 2) | (ROW_CTX_COMMENT << 4) | (ROW_CTX_EOF << 6)}; int c, c_prev = (cur > start && cur <= end) ? 
cur[-1] : terminator; // Loop through all 32 bytes and keep a bitmask of row starts for each possible input context for (uint32_t pos = 0; pos < 32; pos++, cur++, c_prev = c) { uint32_t ctx; if (cur < end) { c = cur[0]; if (c_prev == terminator) { if (c == commentchar) { // Start of a new comment row ctx = make_char_context(ROW_CTX_COMMENT, ROW_CTX_QUOTE, ROW_CTX_COMMENT, 1, 0, 1); } else if (c == quotechar) { // Quoted string on newrow, or quoted string ending in terminator ctx = make_char_context(ROW_CTX_QUOTE, ROW_CTX_NONE, ROW_CTX_QUOTE, 1, 0, 1); } else { // Start of a new row unless within a quote ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_QUOTE, ROW_CTX_NONE, 1, 0, 1); } } else if (c == quotechar) { if (c_prev == delimiter || c_prev == quotechar) { // Quoted string after delimiter, quoted string ending in delimiter, or double-quote ctx = make_char_context(ROW_CTX_QUOTE, ROW_CTX_NONE); } else { // Closing or ignored quote ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_NONE); } } else { // Neutral character ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_QUOTE); } } else { const char *data_end = start + data_size - start_offset; if (cur <= end && cur == data_end) { // Add a newline at data end (need the extra row offset to infer length of previous row) ctx = make_char_context(ROW_CTX_EOF, ROW_CTX_EOF, ROW_CTX_EOF, 1, 1, 1); } else { // Pass-through context (beyond chunk_size or data_end) ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_QUOTE, ROW_CTX_COMMENT); } } // Merge with current context, keeping track of where new rows occur merge_char_context(ctx_map, ctx, pos); } // Eliminate rows that start before byte_range_start if (start_offset + block_pos < byte_range_start) { uint32_t dist_minus1 = min(byte_range_start - (start_offset + block_pos) - 1, UINT64_C(31)); uint32_t mask = 0xfffffffe << dist_minus1; ctx_map.x &= mask; ctx_map.y &= mask; ctx_map.z &= mask; } // Convert the long-form {rowmap,outctx}[inctx] version into packed version // {rowcount,ouctx}[inctx], then merge the row contexts of the 32-character blocks into // a single 16K-character block context rowctx_merge_transform(temp_storage.ctxtree, pack_rowmaps(ctx_map), t); // If this is the second phase, get the block's initial parser state and row counter if (offsets_out.data()) { if (t == 0) { temp_storage.ctxtree[0] = row_ctx[blockIdx.x]; } __syncthreads(); // Walk back the transform tree with the known initial parser state rowctx32_t ctx = rowctx_inverse_merge_transform(temp_storage.ctxtree, t); uint64_t row = (temp_storage.ctxtree[0] >> 2) + (ctx >> 2); uint32_t rows_out_of_range = 0; uint32_t rowmap = select_rowmap(ctx_map, ctx & 3); // Output row positions while (rowmap != 0) { uint32_t pos = __ffs(rowmap); block_pos += pos; if (row >= skip_rows && row - skip_rows < offsets_out.size()) { // Output byte offsets are relative to the base of the input buffer offsets_out[row - skip_rows] = block_pos - 1; rows_out_of_range += (start_offset + block_pos - 1 >= byte_range_end); } row++; rowmap >>= pos; } __syncthreads(); // Return the number of rows out of range rows_out_of_range = block_reduce(temp_storage.bk_storage).Sum(rows_out_of_range); if (t == 0) { row_ctx[blockIdx.x] = rows_out_of_range; } } else { // Just store the row counts and output contexts if (t == 0) { row_ctx[blockIdx.x] = temp_storage.ctxtree[1]; } } } size_t __host__ count_blank_rows(const cudf::io::parse_options_view &opts, device_span<char const> data, device_span<uint64_t const> row_offsets, rmm::cuda_stream_view stream) { const auto newline = 
opts.skipblanklines ? opts.terminator : opts.comment; const auto comment = opts.comment != '\0' ? opts.comment : newline; const auto carriage = (opts.skipblanklines && opts.terminator == '\n') ? '\r' : comment; return thrust::count_if( rmm::exec_policy(stream), row_offsets.begin(), row_offsets.end(), [data = data, newline, comment, carriage] __device__(const uint64_t pos) { return ((pos != data.size()) && (data[pos] == newline || data[pos] == comment || data[pos] == carriage)); }); } device_span<uint64_t> __host__ remove_blank_rows(cudf::io::parse_options_view const &options, device_span<char const> data, device_span<uint64_t> row_offsets, rmm::cuda_stream_view stream) { size_t d_size = data.size(); const auto newline = options.skipblanklines ? options.terminator : options.comment; const auto comment = options.comment != '\0' ? options.comment : newline; const auto carriage = (options.skipblanklines && options.terminator == '\n') ? '\r' : comment; auto new_end = thrust::remove_if( rmm::exec_policy(stream), row_offsets.begin(), row_offsets.end(), [data = data, d_size, newline, comment, carriage] __device__(const uint64_t pos) { return ((pos != d_size) && (data[pos] == newline || data[pos] == comment || data[pos] == carriage)); }); return row_offsets.subspan(0, new_end - row_offsets.begin()); } std::vector<column_type_histogram> detect_column_types( cudf::io::parse_options_view const &options, device_span<char const> const data, device_span<column_parse::flags const> const column_flags, device_span<uint64_t const> const row_starts, size_t const num_active_columns, rmm::cuda_stream_view stream) { // Calculate actual block count to use based on records count const int block_size = csvparse_block_dim; const int grid_size = (row_starts.size() + block_size - 1) / block_size; auto d_stats = detail::make_zeroed_device_uvector_async<column_type_histogram>(num_active_columns, stream); hipLaunchKernelGGL(( data_type_detection), dim3(grid_size), dim3(block_size), 0, stream.value(), options, data, column_flags, row_starts, d_stats); return detail::make_std_vector_sync(d_stats, stream); } void __host__ decode_row_column_data(cudf::io::parse_options_view const &options, device_span<char const> data, device_span<column_parse::flags const> column_flags, device_span<uint64_t const> row_offsets, device_span<cudf::data_type const> dtypes, device_span<void *const> columns, device_span<cudf::bitmask_type *const> valids, rmm::cuda_stream_view stream) { // Calculate actual block count to use based on records count auto const block_size = csvparse_block_dim; auto const num_rows = row_offsets.size() - 1; auto const grid_size = (num_rows + block_size - 1) / block_size; hipLaunchKernelGGL(( convert_csv_to_cudf), dim3(grid_size), dim3(block_size), 0, stream.value(), options, data, column_flags, row_offsets, dtypes, columns, valids); } uint32_t __host__ gather_row_offsets(const parse_options_view &options, uint64_t *row_ctx, device_span<uint64_t> const offsets_out, device_span<char const> const data, size_t chunk_size, size_t parse_pos, size_t start_offset, size_t data_size, size_t byte_range_start, size_t byte_range_end, size_t skip_rows, rmm::cuda_stream_view stream) { uint32_t dim_grid = 1 + (chunk_size / rowofs_block_bytes); hipLaunchKernelGGL(( gather_row_offsets_gpu), dim3(dim_grid), dim3(rowofs_block_dim), 0, stream.value(), row_ctx, offsets_out, data, chunk_size, parse_pos, start_offset, data_size, byte_range_start, byte_range_end, skip_rows, options.terminator, options.delimiter, (options.quotechar) ? 
options.quotechar : 0x100, /*(options.escapechar) ? options.escapechar :*/ 0x100, (options.comment) ? options.comment : 0x100); return dim_grid; } } // namespace gpu } // namespace csv } // namespace io } // namespace cudf
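/*
 * Host-side illustration (a sketch, not part of the original file) of the
 * is_floatingpoint() heuristic documented above. The counters are evaluated
 * without positional checks, so "e.123-" is the documented false positive.
 * The counting loop below is a simplified stand-in for the per-character
 * switch in data_type_detection(): it assumes '.' as the decimal character,
 * no thousands separator, and folds '+' handling away.
 */
#include <cstdio>
#include <cstring>

static bool is_floatingpoint_host(long len, long digits, long decimals,
                                  long thousands, long dashes, long exponents)
{
  if (decimals > 1) return false;
  if (exponents > 1) return false;
  if (decimals == 0 && exponents == 0) return false;            // plain integer
  if (dashes > 1 + exponents) return false;                      // one '-' per component
  if (digits + decimals + dashes + exponents + thousands != len) return false;
  return digits >= 1 + exponents;                                // enough digits
}

int main()
{
  const char *samples[] = {"3.14", "-1.2e-3", "1234", "e.123-"};
  for (const char *s : samples) {
    long len = std::strlen(s), digits = 0, decimals = 0, dashes = 0, exponents = 0;
    for (const char *p = s; *p; ++p) {
      if (*p >= '0' && *p <= '9') digits++;
      else if (*p == '.') decimals++;
      else if (*p == '-') dashes++;
      else if (*p == 'e' || *p == 'E') exponents++;
    }
    std::printf("%-8s -> %s\n", s,
                is_floatingpoint_host(len, digits, decimals, 0, dashes, exponents)
                  ? "float-like" : "not float");
  }
  return 0;
}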
06d6522cda82068d88aa3f09926e5bdfbbea993f.cu
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "csv_common.h" #include "csv_gpu.h" #include "datetime.cuh" #include <io/utilities/block_utils.cuh> #include <io/utilities/parsing_utils.cuh> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/fixed_point/fixed_point.hpp> #include <cudf/lists/list_view.cuh> #include <cudf/null_mask.hpp> #include <cudf/strings/string_view.cuh> #include <cudf/structs/struct_view.hpp> #include <cudf/utilities/bit.hpp> #include <cudf/utilities/error.hpp> #include <cudf/utilities/span.hpp> #include <cudf/utilities/traits.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <io/utilities/trie.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/detail/copy.h> #include <thrust/transform.h> #include <type_traits> using namespace ::cudf::io; using cudf::device_span; namespace cudf { namespace io { namespace csv { namespace gpu { /// Block dimension for dtype detection and conversion kernels constexpr uint32_t csvparse_block_dim = 128; /* * @brief Returns true is the input character is a valid digit. * Supports both decimal and hexadecimal digits (uppercase and lowercase). * * @param c Character to check * @param is_hex Whether to check as a hexadecimal * * @return `true` if it is digit-like, `false` otherwise */ __device__ __inline__ bool is_digit(char c, bool is_hex = false) { if (c >= '0' && c <= '9') return true; if (is_hex) { if (c >= 'A' && c <= 'F') return true; if (c >= 'a' && c <= 'f') return true; } return false; } /* * @brief Checks whether the given character counters indicate a potentially * valid date and/or time field. * * For performance and simplicity, we detect only the most common date * formats. Example formats that are detectable: * * `2001/02/30` * `2001-02-30 00:00:00` * `2/30/2001 T04:05:60.7` * `2 / 1 / 2011` * `02/January` * * @param len Number of non special-symbol or numeric characters * @param decimal_count Number of '.' characters * @param colon_count Number of ':' characters * @param dash_count Number of '-' characters * @param slash_count Number of '/' characters * * @return `true` if it is date-like, `false` otherwise */ __device__ __inline__ bool is_datetime( long len, long decimal_count, long colon_count, long dash_count, long slash_count) { // Must not exceed count of longest month (September) plus `T` time indicator if (len > 10) { return false; } // Must not exceed more than one decimals or more than two time separators if (decimal_count > 1 || colon_count > 2) { return false; } // Must have one or two '-' or '/' but not both as date separators if ((dash_count > 0 && dash_count < 3 && slash_count == 0) || (dash_count == 0 && slash_count > 0 && slash_count < 3)) { return true; } return false; } /* * @brief Returns true if the counters indicate a potentially valid float. * False positives are possible because positions are not taken into account. * For example, field "e.123-" would match the pattern. 
* * @param len Number of non special-symbol or numeric characters * @param digit_count Number of digits characters * @param decimal_count Number of occurrences of the decimal point character * @param thousands_count Number of occurrences of the thousands separator character * @param dash_count Number of '-' characters * @param exponent_count Number of 'e or E' characters * * @return `true` if it is floating point-like, `false` otherwise */ __device__ __inline__ bool is_floatingpoint(long len, long digit_count, long decimal_count, long thousands_count, long dash_count, long exponent_count) { // Can't have more than one exponent and one decimal point if (decimal_count > 1) return false; if (exponent_count > 1) return false; // Without the exponent or a decimal point, this is an integer, not a float if (decimal_count == 0 && exponent_count == 0) return false; // Can only have one '-' per component if (dash_count > 1 + exponent_count) return false; // If anything other than these characters is present, it's not a float if (digit_count + decimal_count + dash_count + exponent_count + thousands_count != len) { return false; } // Needs at least 1 digit, 2 if exponent is present if (digit_count < 1 + exponent_count) return false; return true; } /* * @brief CUDA kernel that parses and converts CSV data into cuDF column data. * * Data is processed in one row/record at a time, so the number of total * threads (tid) is equal to the number of rows. * * @param opts A set of parsing options * @param csv_text The entire CSV data to read * @param column_flags Per-column parsing behavior flags * @param row_offsets The start the CSV data of interest * @param d_column_data The count for each column data type */ __global__ void __launch_bounds__(csvparse_block_dim) data_type_detection(parse_options_view const opts, device_span<char const> csv_text, device_span<column_parse::flags const> const column_flags, device_span<uint64_t const> const row_offsets, device_span<column_type_histogram> d_column_data) { auto const raw_csv = csv_text.data(); // ThreadIds range per block, so also need the blockId // This is entry into the fields; threadId is an element within `num_records` long const rec_id = threadIdx.x + (blockDim.x * blockIdx.x); long const rec_id_next = rec_id + 1; // we can have more threads than data, make sure we are not past the end of // the data if (rec_id_next >= row_offsets.size()) { return; } auto field_start = raw_csv + row_offsets[rec_id]; auto const row_end = raw_csv + row_offsets[rec_id_next]; auto next_field = field_start; int col = 0; int actual_col = 0; // Going through all the columns of a given record while (col < column_flags.size() && field_start <= row_end) { auto next_delimiter = cudf::io::gpu::seek_field_end(field_start, row_end, opts); // Checking if this is a column that the user wants --- user can filter columns if (column_flags[col] & column_parse::enabled) { // points to last character in the field auto const field_len = static_cast<size_t>(next_delimiter - field_start); if (serialized_trie_contains(opts.trie_na, {field_start, field_len})) { atomicAdd(&d_column_data[actual_col].null_count, 1); } else if (serialized_trie_contains(opts.trie_true, {field_start, field_len}) || serialized_trie_contains(opts.trie_false, {field_start, field_len})) { atomicAdd(&d_column_data[actual_col].bool_count, 1); } else if (cudf::io::is_infinity(field_start, next_delimiter)) { atomicAdd(&d_column_data[actual_col].float_count, 1); } else { long count_number = 0; long count_decimal = 0; long 
count_thousands = 0; long count_slash = 0; long count_dash = 0; long count_plus = 0; long count_colon = 0; long count_string = 0; long count_exponent = 0; // Modify field_start & end to ignore whitespace and quotechars // This could possibly result in additional empty fields auto const trimmed_field_range = trim_whitespaces_quotes(field_start, next_delimiter); auto const trimmed_field_len = trimmed_field_range.second - trimmed_field_range.first; for (auto cur = trimmed_field_range.first; cur < trimmed_field_range.second; ++cur) { if (is_digit(*cur)) { count_number++; continue; } if (*cur == opts.decimal) { count_decimal++; continue; } if (*cur == opts.thousands) { count_thousands++; continue; } // Looking for unique characters that will help identify column types. switch (*cur) { case '-': count_dash++; break; case '+': count_plus++; break; case '/': count_slash++; break; case ':': count_colon++; break; case 'e': case 'E': if (cur > trimmed_field_range.first && cur < trimmed_field_range.second - 1) count_exponent++; break; default: count_string++; break; } } // Integers have to have the length of the string // Off by one if they start with a minus sign auto const int_req_number_cnt = trimmed_field_len - count_thousands - ((*trimmed_field_range.first == '-' || *trimmed_field_range.first == '+') && trimmed_field_len > 1); if (column_flags[col] & column_parse::as_datetime) { // PANDAS uses `object` dtype if the date is unparseable if (is_datetime(count_string, count_decimal, count_colon, count_dash, count_slash)) { atomicAdd(&d_column_data[actual_col].datetime_count, 1); } else { atomicAdd(&d_column_data[actual_col].string_count, 1); } } else if (count_number == int_req_number_cnt) { auto const is_negative = (*trimmed_field_range.first == '-'); auto const data_begin = trimmed_field_range.first + (is_negative || (*trimmed_field_range.first == '+')); cudf::size_type *ptr = cudf::io::gpu::infer_integral_field_counter( data_begin, data_begin + count_number, is_negative, d_column_data[actual_col]); atomicAdd(ptr, 1); } else if (is_floatingpoint(trimmed_field_len, count_number, count_decimal, count_thousands, count_dash + count_plus, count_exponent)) { atomicAdd(&d_column_data[actual_col].float_count, 1); } else { atomicAdd(&d_column_data[actual_col].string_count, 1); } } actual_col++; } next_field = next_delimiter + 1; field_start = next_field; col++; } } template <typename T, int base> __inline__ __device__ T decode_value(char const *begin, char const *end, parse_options_view const &opts) { return cudf::io::parse_numeric<T, base>(begin, end, opts); } template <typename T> __inline__ __device__ T decode_value(char const *begin, char const *end, parse_options_view const &opts) { return cudf::io::parse_numeric<T>(begin, end, opts); } template <> __inline__ __device__ cudf::timestamp_D decode_value(char const *begin, char const *end, parse_options_view const &opts) { return timestamp_D{cudf::duration_D{to_date(begin, end, opts.dayfirst)}}; } template <> __inline__ __device__ cudf::timestamp_s decode_value(char const *begin, char const *end, parse_options_view const &opts) { auto milli = to_date_time(begin, end, opts.dayfirst); if (milli == -1) { return timestamp_s{cudf::duration_s{to_non_negative_integer<int64_t>(begin, end)}}; } else { return timestamp_s{cudf::duration_s{milli / 1000}}; } } template <> __inline__ __device__ cudf::timestamp_ms decode_value(char const *begin, char const *end, parse_options_view const &opts) { auto milli = to_date_time(begin, end, opts.dayfirst); if (milli == -1) { 
return timestamp_ms{cudf::duration_ms{to_non_negative_integer<int64_t>(begin, end)}}; } else { return timestamp_ms{cudf::duration_ms{milli}}; } } template <> __inline__ __device__ cudf::timestamp_us decode_value(char const *begin, char const *end, parse_options_view const &opts) { auto milli = to_date_time(begin, end, opts.dayfirst); if (milli == -1) { return timestamp_us{cudf::duration_us{to_non_negative_integer<int64_t>(begin, end)}}; } else { return timestamp_us{cudf::duration_us{milli * 1000}}; } } template <> __inline__ __device__ cudf::timestamp_ns decode_value(char const *begin, char const *end, parse_options_view const &opts) { auto milli = to_date_time(begin, end, opts.dayfirst); if (milli == -1) { return timestamp_ns{cudf::duration_ns{to_non_negative_integer<int64_t>(begin, end)}}; } else { return timestamp_ns{cudf::duration_ns{milli * 1000000}}; } } #ifndef DURATION_DECODE_VALUE #define DURATION_DECODE_VALUE(Type) \ template <> \ __inline__ __device__ Type decode_value( \ const char *begin, const char *end, parse_options_view const &opts) \ { \ return Type{to_time_delta<Type>(begin, end)}; \ } #endif DURATION_DECODE_VALUE(duration_D) DURATION_DECODE_VALUE(duration_s) DURATION_DECODE_VALUE(duration_ms) DURATION_DECODE_VALUE(duration_us) DURATION_DECODE_VALUE(duration_ns) // The purpose of this is merely to allow compilation ONLY // TODO : make this work for csv template <> __inline__ __device__ cudf::string_view decode_value(char const *begin, char const *end, parse_options_view const &opts) { return cudf::string_view{}; } // The purpose of this is merely to allow compilation ONLY template <> __inline__ __device__ cudf::dictionary32 decode_value(char const *begin, char const *end, parse_options_view const &opts) { return cudf::dictionary32{}; } // The purpose of this is merely to allow compilation ONLY // TODO : make this work for csv template <> __inline__ __device__ cudf::list_view decode_value(char const *begin, char const *end, parse_options_view const &opts) { return cudf::list_view{}; } // The purpose of this is merely to allow compilation ONLY // TODO : make this work for csv template <> __inline__ __device__ numeric::decimal32 decode_value(char const *begin, char const *end, parse_options_view const &opts) { return numeric::decimal32{}; } // The purpose of this is merely to allow compilation ONLY // TODO : make this work for csv template <> __inline__ __device__ numeric::decimal64 decode_value(char const *begin, char const *end, parse_options_view const &opts) { return numeric::decimal64{}; } // The purpose of this is merely to allow compilation ONLY // TODO : make this work for csv template <> __inline__ __device__ cudf::struct_view decode_value(char const *begin, char const *end, parse_options_view const &opts) { return cudf::struct_view{}; } /** * @brief Functor for converting CSV raw data to typed value. */ struct decode_op { /** * @brief Dispatch for numeric types whose values can be convertible to * 0 or 1 to represent boolean false/true, based upon checking against a * true/false values list. * * @return bool Whether the parsed value is valid. 
*/ template <typename T, typename std::enable_if_t<std::is_integral<T>::value and !std::is_same<T, bool>::value> * = nullptr> __host__ __device__ __forceinline__ bool operator()(void *out_buffer, size_t row, char const *begin, char const *end, parse_options_view const &opts, column_parse::flags flags) { static_cast<T *>(out_buffer)[row] = [&flags, &opts, begin, end]() -> T { // Check for user-specified true/false values auto const field_len = static_cast<size_t>(end - begin); if (serialized_trie_contains(opts.trie_true, {begin, field_len})) { return 1; } if (serialized_trie_contains(opts.trie_false, {begin, field_len})) { return 0; } return flags & column_parse::as_hexadecimal ? decode_value<T, 16>(begin, end, opts) : decode_value<T>(begin, end, opts); }(); return true; } /** * @brief Dispatch for boolean type types. */ template <typename T, typename std::enable_if_t<std::is_same<T, bool>::value> * = nullptr> __host__ __device__ __forceinline__ bool operator()(void *out_buffer, size_t row, char const *begin, char const *end, parse_options_view const &opts, column_parse::flags flags) { static_cast<T *>(out_buffer)[row] = [&opts, begin, end]() { // Check for user-specified true/false values auto const field_len = static_cast<size_t>(end - begin); if (serialized_trie_contains(opts.trie_true, {begin, field_len})) { return true; } if (serialized_trie_contains(opts.trie_false, {begin, field_len})) { return false; } return decode_value<T>(begin, end, opts); }(); return true; } /** * @brief Dispatch for floating points, which are set to NaN if the input * is not valid. In such case, the validity mask is set to zero too. */ template <typename T, typename std::enable_if_t<std::is_floating_point<T>::value> * = nullptr> __host__ __device__ __forceinline__ bool operator()(void *out_buffer, size_t row, char const *begin, char const *end, parse_options_view const &opts, column_parse::flags flags) { T const value = decode_value<T>(begin, end, opts); static_cast<T *>(out_buffer)[row] = value; return !std::isnan(value); } /** * @brief Dispatch for all other types. */ template <typename T, typename std::enable_if_t<!std::is_integral<T>::value and !std::is_floating_point<T>::value> * = nullptr> __host__ __device__ __forceinline__ bool operator()(void *out_buffer, size_t row, char const *begin, char const *end, parse_options_view const &opts, column_parse::flags flags) { static_cast<T *>(out_buffer)[row] = decode_value<T>(begin, end, opts); return true; } }; /** * @brief CUDA kernel that parses and converts CSV data into cuDF column data. * * Data is processed one record at a time * * @param[in] options A set of parsing options * @param[in] data The entire CSV data to read * @param[in] column_flags Per-column parsing behavior flags * @param[in] row_offsets The start the CSV data of interest * @param[in] dtypes The data type of the column * @param[out] columns The output column data * @param[out] valids The bitmaps indicating whether column fields are valid */ __global__ void __launch_bounds__(csvparse_block_dim) convert_csv_to_cudf(cudf::io::parse_options_view options, device_span<char const> data, device_span<column_parse::flags const> column_flags, device_span<uint64_t const> row_offsets, device_span<cudf::data_type const> dtypes, device_span<void *const> columns, device_span<cudf::bitmask_type *const> valids) { auto const raw_csv = data.data(); // thread IDs range per block, so also need the block id. 
// this is entry into the field array - tid is an elements within the num_entries array long const rec_id = threadIdx.x + (blockDim.x * blockIdx.x); long const rec_id_next = rec_id + 1; // we can have more threads than data, make sure we are not past the end of // the data if (rec_id_next >= row_offsets.size()) return; auto field_start = raw_csv + row_offsets[rec_id]; auto const row_end = raw_csv + row_offsets[rec_id_next]; auto next_field = field_start; int col = 0; int actual_col = 0; while (col < column_flags.size() && field_start <= row_end) { auto next_delimiter = cudf::io::gpu::seek_field_end(next_field, row_end, options); if (column_flags[col] & column_parse::enabled) { // check if the entire field is a NaN string - consistent with pandas auto const is_valid = !serialized_trie_contains( options.trie_na, {field_start, static_cast<size_t>(next_delimiter - field_start)}); // Modify field_start & end to ignore whitespace and quotechars auto field_end = next_delimiter; if (is_valid && dtypes[actual_col].id() != cudf::type_id::STRING) { auto const trimmed_field = trim_whitespaces_quotes(field_start, field_end, options.quotechar); field_start = trimmed_field.first; field_end = trimmed_field.second; } if (is_valid) { // Type dispatcher does not handle STRING if (dtypes[actual_col].id() == cudf::type_id::STRING) { auto end = next_delimiter; if (options.keepquotes == false) { if ((*field_start == options.quotechar) && (*(end - 1) == options.quotechar)) { ++field_start; --end; } } auto str_list = static_cast<std::pair<const char *, size_t> *>(columns[actual_col]); str_list[rec_id].first = field_start; str_list[rec_id].second = end - field_start; } else { if (cudf::type_dispatcher(dtypes[actual_col], decode_op{}, columns[actual_col], rec_id, field_start, field_end, options, column_flags[col])) { // set the valid bitmap - all bits were set to 0 to start set_bit(valids[actual_col], rec_id); } } } else if (dtypes[actual_col].id() == cudf::type_id::STRING) { auto str_list = static_cast<std::pair<const char *, size_t> *>(columns[actual_col]); str_list[rec_id].first = nullptr; str_list[rec_id].second = 0; } ++actual_col; } next_field = next_delimiter + 1; field_start = next_field; ++col; } } /* * @brief Merge two packed row contexts (each corresponding to a block of characters) * and return the packed row context corresponding to the merged character block */ inline __device__ packed_rowctx_t merge_row_contexts(packed_rowctx_t first_ctx, packed_rowctx_t second_ctx) { uint32_t id0 = get_row_context(first_ctx, ROW_CTX_NONE) & 3; uint32_t id1 = get_row_context(first_ctx, ROW_CTX_QUOTE) & 3; uint32_t id2 = get_row_context(first_ctx, ROW_CTX_COMMENT) & 3; return (first_ctx & ~pack_row_contexts(3, 3, 3)) + pack_row_contexts(get_row_context(second_ctx, id0), get_row_context(second_ctx, id1), get_row_context(second_ctx, id2)); } /* * @brief Per-character context: * 1-bit count (0 or 1) per context in the lower 4 bits * 2-bit output context id per input context in bits 8..15 */ constexpr __device__ uint32_t make_char_context(uint32_t id0, uint32_t id1, uint32_t id2 = ROW_CTX_COMMENT, uint32_t c0 = 0, uint32_t c1 = 0, uint32_t c2 = 0) { return (id0 << 8) | (id1 << 10) | (id2 << 12) | (ROW_CTX_EOF << 14) | (c0) | (c1 << 1) | (c2 << 2); } /* * @brief Merge a 1-character context to keep track of bitmasks where new rows occur * Merges a single-character "block" row context at position pos with the current * block's row context (the current block contains 32-pos characters) * * @param ctx Current block context and 
new rows bitmaps * @param char_ctx state transitions associated with new character * @param pos Position within the current 32-character block * * NOTE: This is probably the most performance-critical piece of the row gathering kernel. * The char_ctx value should be created via make_char_context, and its value should * have been evaluated at compile-time. */ inline __device__ void merge_char_context(uint4 &ctx, uint32_t char_ctx, uint32_t pos) { uint32_t id0 = (ctx.w >> 0) & 3; uint32_t id1 = (ctx.w >> 2) & 3; uint32_t id2 = (ctx.w >> 4) & 3; // Set the newrow bit in the bitmap at the corresponding position ctx.x |= ((char_ctx >> id0) & 1) << pos; ctx.y |= ((char_ctx >> id1) & 1) << pos; ctx.z |= ((char_ctx >> id2) & 1) << pos; // Update the output context ids ctx.w = ((char_ctx >> (8 + id0 * 2)) & 0x03) | ((char_ctx >> (6 + id1 * 2)) & 0x0c) | ((char_ctx >> (4 + id2 * 2)) & 0x30) | (ROW_CTX_EOF << 6); } /* * Convert the context-with-row-bitmaps version to a packed row context */ inline __device__ packed_rowctx_t pack_rowmaps(uint4 ctx_map) { return pack_row_contexts(make_row_context(__popc(ctx_map.x), (ctx_map.w >> 0) & 3), make_row_context(__popc(ctx_map.y), (ctx_map.w >> 2) & 3), make_row_context(__popc(ctx_map.z), (ctx_map.w >> 4) & 3)); } /* * Selects the row bitmap corresponding to the given parser state */ inline __device__ uint32_t select_rowmap(uint4 ctx_map, uint32_t ctxid) { return (ctxid == ROW_CTX_NONE) ? ctx_map.x : (ctxid == ROW_CTX_QUOTE) ? ctx_map.y : (ctxid == ROW_CTX_COMMENT) ? ctx_map.z : 0; } /** * @brief Single pair-wise 512-wide row context merge transform * * Merge row context blocks and record the merge operation in a context * tree so that the transform is reversible. * The tree is organized such that the left and right children of node n * are located at indices n*2 and n*2+1, the root node starting at index 1 * * @tparam lanemask mask to specify source of packed row context * @tparam tmask mask to specify principle thread for merging row context * @tparam base start location for writing into packed row context tree * @tparam level_scale level of the node in the tree * @param ctxtree[out] packed row context tree * @param ctxb[in] packed row context for the current character block * @param t thread id (leaf node id) */ template <uint32_t lanemask, uint32_t tmask, uint32_t base, uint32_t level_scale> inline __device__ void ctx_merge(uint64_t *ctxtree, packed_rowctx_t *ctxb, uint32_t t) { uint64_t tmp = shuffle_xor(*ctxb, lanemask); if (!(t & tmask)) { *ctxb = merge_row_contexts(*ctxb, tmp); ctxtree[base + (t >> level_scale)] = *ctxb; } } /** * @brief Single 512-wide row context inverse merge transform * * Walks the context tree starting from a root node * * @tparam rmask Mask to specify which threads write input row context * @param[in] base Start read location of the merge transform tree * @param[in] ctxtree Merge transform tree * @param[in] ctx Input context * @param[in] brow4 output row in block *4 * @param[in] t thread id (leaf node id) */ template <uint32_t rmask> inline __device__ void ctx_unmerge( uint32_t base, uint64_t *ctxtree, uint32_t *ctx, uint32_t *brow4, uint32_t t) { rowctx32_t ctxb_left, ctxb_right, ctxb_sum; ctxb_sum = get_row_context(ctxtree[base], *ctx); ctxb_left = get_row_context(ctxtree[(base)*2 + 0], *ctx); ctxb_right = get_row_context(ctxtree[(base)*2 + 1], ctxb_left & 3); if (t & (rmask)) { *brow4 += (ctxb_sum & ~3) - (ctxb_right & ~3); *ctx = ctxb_left & 3; } } /* * @brief 512-wide row context merge transform * * Repeatedly merge row 
context blocks, keeping track of each merge operation * in a context tree so that the transform is reversible * The tree is organized such that the left and right children of node n * are located at indices n*2 and n*2+1, the root node starting at index 1 * * Each node contains the counts and output contexts corresponding to the * possible input contexts. * Each parent node's count is obtained by adding the corresponding counts * from the left child node with the right child node's count selected from * the left child node's output context: * parent.count[k] = left.count[k] + right.count[left.outctx[k]] * parent.outctx[k] = right.outctx[left.outctx[k]] * * @param ctxtree[out] packed row context tree * @param ctxb[in] packed row context for the current character block * @param t thread id (leaf node id) */ static inline __device__ void rowctx_merge_transform(uint64_t ctxtree[1024], packed_rowctx_t ctxb, uint32_t t) { ctxtree[512 + t] = ctxb; ctx_merge<1, 0x1, 256, 1>(ctxtree, &ctxb, t); ctx_merge<2, 0x3, 128, 2>(ctxtree, &ctxb, t); ctx_merge<4, 0x7, 64, 3>(ctxtree, &ctxb, t); ctx_merge<8, 0xf, 32, 4>(ctxtree, &ctxb, t); __syncthreads(); if (t < 32) { ctxb = ctxtree[32 + t]; ctx_merge<1, 0x1, 16, 1>(ctxtree, &ctxb, t); ctx_merge<2, 0x3, 8, 2>(ctxtree, &ctxb, t); ctx_merge<4, 0x7, 4, 3>(ctxtree, &ctxb, t); ctx_merge<8, 0xf, 2, 4>(ctxtree, &ctxb, t); // Final stage uint64_t tmp = shuffle_xor(ctxb, 16); if (t == 0) { ctxtree[1] = merge_row_contexts(ctxb, tmp); } } } /* * @brief 512-wide row context inverse merge transform * * Walks the context tree starting from the root node (index 1) using * the starting context in node index 0. * The return value is the starting row and input context for the given leaf node * * @param[in] ctxtree Merge transform tree * @param[in] t thread id (leaf node id) * * @return Final row context and count (row_position*4 + context_id format) */ static inline __device__ rowctx32_t rowctx_inverse_merge_transform(uint64_t ctxtree[1024], uint32_t t) { uint32_t ctx = ctxtree[0] & 3; // Starting input context rowctx32_t brow4 = 0; // output row in block *4 ctx_unmerge<256>(1, ctxtree, &ctx, &brow4, t); ctx_unmerge<128>(2 + (t >> 8), ctxtree, &ctx, &brow4, t); ctx_unmerge<64>(4 + (t >> 7), ctxtree, &ctx, &brow4, t); ctx_unmerge<32>(8 + (t >> 6), ctxtree, &ctx, &brow4, t); ctx_unmerge<16>(16 + (t >> 5), ctxtree, &ctx, &brow4, t); ctx_unmerge<8>(32 + (t >> 4), ctxtree, &ctx, &brow4, t); ctx_unmerge<4>(64 + (t >> 3), ctxtree, &ctx, &brow4, t); ctx_unmerge<2>(128 + (t >> 2), ctxtree, &ctx, &brow4, t); ctx_unmerge<1>(256 + (t >> 1), ctxtree, &ctx, &brow4, t); return brow4 + ctx; } /** * @brief Gather row offsets from CSV character data split into 16KB chunks * * This is done in two phases: the first phase returns the possible row counts * per 16K character block for each possible parsing context at the start of the block, * along with the resulting parsing context at the end of the block. * The caller can then compute the actual parsing context at the beginning of each * individual block and total row count. * The second phase outputs the location of each row in the block, using the parsing * context and initial row counter accumulated from the results of the previous phase. * Row parsing context will be updated after phase 2 such that the value contains * the number of rows starting at byte_range_end or beyond. 
* * @param row_ctx Row parsing context (output of phase 1 or input to phase 2) * @param offsets_out Row offsets (nullptr for phase1, non-null indicates phase 2) * @param data Base pointer of character data (all row offsets are relative to this) * @param chunk_size Total number of characters to parse * @param parse_pos Current parsing position in the file * @param start_offset Position of the start of the character buffer in the file * @param data_size CSV file size * @param byte_range_start Ignore rows starting before this position in the file * @param byte_range_end In phase 2, store the number of rows beyond range in row_ctx * @param skip_rows Number of rows to skip (ignored in phase 1) * @param terminator Line terminator character * @param delimiter Column delimiter character * @param quotechar Quote character * @param escapechar Delimiter escape character * @param commentchar Comment line character (skip rows starting with this character) */ __global__ void __launch_bounds__(rowofs_block_dim) gather_row_offsets_gpu(uint64_t *row_ctx, device_span<uint64_t> offsets_out, device_span<char const> const data, size_t chunk_size, size_t parse_pos, size_t start_offset, size_t data_size, size_t byte_range_start, size_t byte_range_end, size_t skip_rows, int terminator, int delimiter, int quotechar, int escapechar, int commentchar) { auto start = data.begin(); using block_reduce = typename cub::BlockReduce<uint32_t, rowofs_block_dim>; __shared__ union { typename block_reduce::TempStorage bk_storage; __align__(8) uint64_t ctxtree[rowofs_block_dim * 2]; } temp_storage; const char *end = start + (min(parse_pos + chunk_size, data_size) - start_offset); uint32_t t = threadIdx.x; size_t block_pos = (parse_pos - start_offset) + blockIdx.x * static_cast<size_t>(rowofs_block_bytes) + t * 32; const char *cur = start + block_pos; // Initial state is neutral context (no state transitions), zero rows uint4 ctx_map = { .x = 0, .y = 0, .z = 0, .w = (ROW_CTX_NONE << 0) | (ROW_CTX_QUOTE << 2) | (ROW_CTX_COMMENT << 4) | (ROW_CTX_EOF << 6)}; int c, c_prev = (cur > start && cur <= end) ? 
cur[-1] : terminator; // Loop through all 32 bytes and keep a bitmask of row starts for each possible input context for (uint32_t pos = 0; pos < 32; pos++, cur++, c_prev = c) { uint32_t ctx; if (cur < end) { c = cur[0]; if (c_prev == terminator) { if (c == commentchar) { // Start of a new comment row ctx = make_char_context(ROW_CTX_COMMENT, ROW_CTX_QUOTE, ROW_CTX_COMMENT, 1, 0, 1); } else if (c == quotechar) { // Quoted string on newrow, or quoted string ending in terminator ctx = make_char_context(ROW_CTX_QUOTE, ROW_CTX_NONE, ROW_CTX_QUOTE, 1, 0, 1); } else { // Start of a new row unless within a quote ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_QUOTE, ROW_CTX_NONE, 1, 0, 1); } } else if (c == quotechar) { if (c_prev == delimiter || c_prev == quotechar) { // Quoted string after delimiter, quoted string ending in delimiter, or double-quote ctx = make_char_context(ROW_CTX_QUOTE, ROW_CTX_NONE); } else { // Closing or ignored quote ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_NONE); } } else { // Neutral character ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_QUOTE); } } else { const char *data_end = start + data_size - start_offset; if (cur <= end && cur == data_end) { // Add a newline at data end (need the extra row offset to infer length of previous row) ctx = make_char_context(ROW_CTX_EOF, ROW_CTX_EOF, ROW_CTX_EOF, 1, 1, 1); } else { // Pass-through context (beyond chunk_size or data_end) ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_QUOTE, ROW_CTX_COMMENT); } } // Merge with current context, keeping track of where new rows occur merge_char_context(ctx_map, ctx, pos); } // Eliminate rows that start before byte_range_start if (start_offset + block_pos < byte_range_start) { uint32_t dist_minus1 = min(byte_range_start - (start_offset + block_pos) - 1, UINT64_C(31)); uint32_t mask = 0xfffffffe << dist_minus1; ctx_map.x &= mask; ctx_map.y &= mask; ctx_map.z &= mask; } // Convert the long-form {rowmap,outctx}[inctx] version into packed version // {rowcount,ouctx}[inctx], then merge the row contexts of the 32-character blocks into // a single 16K-character block context rowctx_merge_transform(temp_storage.ctxtree, pack_rowmaps(ctx_map), t); // If this is the second phase, get the block's initial parser state and row counter if (offsets_out.data()) { if (t == 0) { temp_storage.ctxtree[0] = row_ctx[blockIdx.x]; } __syncthreads(); // Walk back the transform tree with the known initial parser state rowctx32_t ctx = rowctx_inverse_merge_transform(temp_storage.ctxtree, t); uint64_t row = (temp_storage.ctxtree[0] >> 2) + (ctx >> 2); uint32_t rows_out_of_range = 0; uint32_t rowmap = select_rowmap(ctx_map, ctx & 3); // Output row positions while (rowmap != 0) { uint32_t pos = __ffs(rowmap); block_pos += pos; if (row >= skip_rows && row - skip_rows < offsets_out.size()) { // Output byte offsets are relative to the base of the input buffer offsets_out[row - skip_rows] = block_pos - 1; rows_out_of_range += (start_offset + block_pos - 1 >= byte_range_end); } row++; rowmap >>= pos; } __syncthreads(); // Return the number of rows out of range rows_out_of_range = block_reduce(temp_storage.bk_storage).Sum(rows_out_of_range); if (t == 0) { row_ctx[blockIdx.x] = rows_out_of_range; } } else { // Just store the row counts and output contexts if (t == 0) { row_ctx[blockIdx.x] = temp_storage.ctxtree[1]; } } } size_t __host__ count_blank_rows(const cudf::io::parse_options_view &opts, device_span<char const> data, device_span<uint64_t const> row_offsets, rmm::cuda_stream_view stream) { const auto newline = 
opts.skipblanklines ? opts.terminator : opts.comment; const auto comment = opts.comment != '\0' ? opts.comment : newline; const auto carriage = (opts.skipblanklines && opts.terminator == '\n') ? '\r' : comment; return thrust::count_if( rmm::exec_policy(stream), row_offsets.begin(), row_offsets.end(), [data = data, newline, comment, carriage] __device__(const uint64_t pos) { return ((pos != data.size()) && (data[pos] == newline || data[pos] == comment || data[pos] == carriage)); }); } device_span<uint64_t> __host__ remove_blank_rows(cudf::io::parse_options_view const &options, device_span<char const> data, device_span<uint64_t> row_offsets, rmm::cuda_stream_view stream) { size_t d_size = data.size(); const auto newline = options.skipblanklines ? options.terminator : options.comment; const auto comment = options.comment != '\0' ? options.comment : newline; const auto carriage = (options.skipblanklines && options.terminator == '\n') ? '\r' : comment; auto new_end = thrust::remove_if( rmm::exec_policy(stream), row_offsets.begin(), row_offsets.end(), [data = data, d_size, newline, comment, carriage] __device__(const uint64_t pos) { return ((pos != d_size) && (data[pos] == newline || data[pos] == comment || data[pos] == carriage)); }); return row_offsets.subspan(0, new_end - row_offsets.begin()); } std::vector<column_type_histogram> detect_column_types( cudf::io::parse_options_view const &options, device_span<char const> const data, device_span<column_parse::flags const> const column_flags, device_span<uint64_t const> const row_starts, size_t const num_active_columns, rmm::cuda_stream_view stream) { // Calculate actual block count to use based on records count const int block_size = csvparse_block_dim; const int grid_size = (row_starts.size() + block_size - 1) / block_size; auto d_stats = detail::make_zeroed_device_uvector_async<column_type_histogram>(num_active_columns, stream); data_type_detection<<<grid_size, block_size, 0, stream.value()>>>( options, data, column_flags, row_starts, d_stats); return detail::make_std_vector_sync(d_stats, stream); } void __host__ decode_row_column_data(cudf::io::parse_options_view const &options, device_span<char const> data, device_span<column_parse::flags const> column_flags, device_span<uint64_t const> row_offsets, device_span<cudf::data_type const> dtypes, device_span<void *const> columns, device_span<cudf::bitmask_type *const> valids, rmm::cuda_stream_view stream) { // Calculate actual block count to use based on records count auto const block_size = csvparse_block_dim; auto const num_rows = row_offsets.size() - 1; auto const grid_size = (num_rows + block_size - 1) / block_size; convert_csv_to_cudf<<<grid_size, block_size, 0, stream.value()>>>( options, data, column_flags, row_offsets, dtypes, columns, valids); } uint32_t __host__ gather_row_offsets(const parse_options_view &options, uint64_t *row_ctx, device_span<uint64_t> const offsets_out, device_span<char const> const data, size_t chunk_size, size_t parse_pos, size_t start_offset, size_t data_size, size_t byte_range_start, size_t byte_range_end, size_t skip_rows, rmm::cuda_stream_view stream) { uint32_t dim_grid = 1 + (chunk_size / rowofs_block_bytes); gather_row_offsets_gpu<<<dim_grid, rowofs_block_dim, 0, stream.value()>>>( row_ctx, offsets_out, data, chunk_size, parse_pos, start_offset, data_size, byte_range_start, byte_range_end, skip_rows, options.terminator, options.delimiter, (options.quotechar) ? options.quotechar : 0x100, /*(options.escapechar) ? 
options.escapechar :*/ 0x100, (options.comment) ? options.comment : 0x100); return dim_grid; } } // namespace gpu } // namespace csv } // namespace io } // namespace cudf
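// ---------------------------------------------------------------------------
// Illustrative sketch (not part of cuDF): the merge rule used by
// merge_row_contexts()/rowctx_merge_transform() above, restated on a tiny
// host-side model.  The real code packs the per-context row counts and output
// contexts into 64-bit words; here the same composition is written with a
// plain struct so it is easy to see why adjacent character blocks can be
// combined pairwise in a tree.  Everything below (toy::, ToyCtx, toy_merge)
// is local to this sketch; the rule itself is the one documented in the
// rowctx_merge_transform comment:
//   parent.count[k]  = left.count[k] + right.count[left.outctx[k]]
//   parent.outctx[k] = right.outctx[left.outctx[k]]
#include <cstdio>

namespace toy {

constexpr int NCTX = 3;  // NONE, QUOTE, COMMENT (EOF omitted for brevity)

// Summary of one character block: for each possible parser context at the
// block start, how many rows begin inside the block and which context the
// block ends in.
struct ToyCtx {
  int rows[NCTX];
  int outctx[NCTX];
};

// Compose two adjacent blocks; the right block is entered in whatever context
// the left block ends in.
inline ToyCtx toy_merge(const ToyCtx& left, const ToyCtx& right) {
  ToyCtx out{};
  for (int k = 0; k < NCTX; ++k) {
    out.rows[k]   = left.rows[k] + right.rows[left.outctx[k]];
    out.outctx[k] = right.outctx[left.outctx[k]];
  }
  return out;
}

}  // namespace toy

// Usage example with made-up per-block summaries.
int main() {
  toy::ToyCtx a = {{0, 1, 0}, {1, 0, 2}};  // hypothetical block summary
  toy::ToyCtx b = {{2, 1, 0}, {0, 0, 2}};  // hypothetical block summary
  toy::ToyCtx ab = toy::toy_merge(a, b);
  std::printf("rows if the pair is entered in context NONE: %d\n",
              ab.rows[0]);  // a.rows[0] + b.rows[a.outctx[0]] = 0 + b.rows[1] = 1
  return 0;
}
// ---------------------------------------------------------------------------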
3f35e61bd4a684c83ffce39d69b4444e49fe4ac1.hip
// !!! This is a file automatically generated by hipify!!! #include <algorithm> #include <vector> #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" #include "hip/hip_runtime.h" #include "caffe/layers/region_target_layer.hpp" namespace caffe { template <typename Dtype> __global__ void ExtractBoundingBox(int total, int num_anchor, int height, int width, Dtype* bbs_data, const Dtype* blob_xy_data, const Dtype* blob_wh_data, const Dtype* biases) { CUDA_KERNEL_LOOP(index, total) { int b = index / (num_anchor * height * width); int left = index % (num_anchor * height * width); int n = left / (height * width); left = left % (height * width); int j = left / width; int i = left % width; Dtype* curr_bbs_data = bbs_data + index * 4; int offset_double_bnji = b * (2 * num_anchor) * height * width + n * height * width + j * width + i; int offset_double_bnji_next = offset_double_bnji + num_anchor * height * width; *(curr_bbs_data + 0) = (*(blob_xy_data + offset_double_bnji) + i) / width; *(curr_bbs_data + 1) = (*(blob_xy_data + offset_double_bnji_next) + j) / height; double w = *(blob_wh_data + offset_double_bnji); double h = *(blob_wh_data + offset_double_bnji_next); *(curr_bbs_data + 2) = exp(w) * biases[2 * n] / width; *(curr_bbs_data + 3) = exp(h) * biases[2 * n + 1] / height; } } template <typename Dtype> __global__ void CalculateIOU(int total, Dtype* iou_data, const Dtype* bbs_data, const Dtype* truth_data, int num_anchor, int height, int width, int max_gt, Dtype positive_thresh, const Dtype* blob_obj_data, Dtype* target_obj_noobj_data) { CUDA_KERNEL_LOOP(index, total) { int b = index / (num_anchor * height * width * max_gt); int left = index % (num_anchor * height * width * max_gt); int n = left / (height * width * max_gt); left = left % (height * width * max_gt); int j = left / (width * max_gt); left = left % (width * max_gt); int i = left / max_gt; int t = left % max_gt; Dtype tx = *(truth_data + b * 5 * max_gt + t * 5 + 0); Dtype ty = *(truth_data + b * 5 * max_gt + t * 5 + 1); Dtype tw = *(truth_data + b * 5 * max_gt + t * 5 + 2); Dtype th = *(truth_data + b * 5 * max_gt + t * 5 + 3); Dtype curr_iou = 0; if (tx) { int curr_index = (b * num_anchor * height * width + n * height * width + j * width + i) * 4; Dtype px = *(bbs_data + curr_index + 0); Dtype py = *(bbs_data + curr_index + 1); Dtype pw = *(bbs_data + curr_index + 2); Dtype ph = *(bbs_data + curr_index + 3); curr_iou = TBoxIou(px, py, pw, ph, tx, ty, tw, th); // if the iou is large enough, let's not penalize the objectiveness if (curr_iou > positive_thresh) { // multiple threads might write this address at the same time, but // at least one will succeeds. It is safe to do this. 
*(target_obj_noobj_data + index / max_gt) = *(blob_obj_data + index / max_gt); } } *(iou_data + index) = curr_iou; } } template <typename Dtype> __global__ void GroundTruthTarget(int total, int max_gt, const Dtype* truth_data, int num_anchor, int height, int width, const Dtype* biases, int* gt_target_data) { CUDA_KERNEL_LOOP(index, total) { int b = index / max_gt; int t = index % max_gt; Dtype tx = *(truth_data + b * max_gt * 5 + 5 * t + 0); Dtype ty = *(truth_data + b * max_gt * 5 + 5 * t + 1); int target_i = -1; int target_j = -1; int target_n = -1; if (tx > 0 && ty > 0 && tx < 1 && ty < 1) { target_i = tx * width; target_j = ty * height; Dtype tw = *(truth_data + b * max_gt * 5 + 5 * t + 2); Dtype th = *(truth_data + b * max_gt * 5 + 5 * t + 3); Dtype max_iou = -1; target_n = -1; for (int n = 0; n < num_anchor; n++) { Dtype curr_iou = TBoxIou<Dtype>(0, 0, tw, th, 0, 0, biases[2 * n] / width, biases[2 * n + 1] / height); if (curr_iou > max_iou) { max_iou = curr_iou; target_n = n; } } } *(gt_target_data + b * max_gt * 3 + t * 3 + 0) = target_i; *(gt_target_data + b * max_gt * 3 + t * 3 + 1) = target_j; *(gt_target_data + b * max_gt * 3 + t * 3 + 2) = target_n; } } template <typename Dtype> __global__ void RemoveDuplicateTarget(int total, int* gt_target_data, int max_gt) { CUDA_KERNEL_LOOP(index, total) { int b = index / (max_gt * max_gt); int left_index = index % (max_gt * max_gt); int left_t = left_index / max_gt; int right_t = left_index % max_gt; if (left_t == right_t) { continue; } int left_target_i = *(gt_target_data + b * max_gt * 3 + left_t * 3 + 0); int left_target_j = *(gt_target_data + b * max_gt * 3 + left_t * 3 + 1); int left_target_n = *(gt_target_data + b * max_gt * 3 + left_t * 3 + 2); if (left_target_i < 0) { continue; } int right_target_i = *(gt_target_data + b * max_gt * 3 + right_t * 3 + 0); int right_target_j = *(gt_target_data + b * max_gt * 3 + right_t * 3 + 1); int right_target_n = *(gt_target_data + b * max_gt * 3 + right_t * 3 + 2); if (right_target_i < 0) { continue; } if (left_target_i == right_target_i && left_target_j == right_target_j && left_target_n == right_target_n) { if (left_t < right_t) { *(gt_target_data + b * max_gt * 3 + left_t * 3 + 0) = -1; *(gt_target_data + b * max_gt * 3 + left_t * 3 + 1) = -1; *(gt_target_data + b * max_gt * 3 + left_t * 3 + 2) = -1; } else { *(gt_target_data + b * max_gt * 3 + right_t * 3 + 0) = -1; *(gt_target_data + b * max_gt * 3 + right_t * 3 + 1) = -1; *(gt_target_data + b * max_gt * 3 + right_t * 3 + 2) = -1; } } } } template <typename Dtype> __global__ void AlignGroudTruth(int total, const int* gt_target_data, int max_gt, const Dtype* truth_data, Dtype* target_xy_data, Dtype* target_wh_data, Dtype* target_xywh_weight_data, Dtype coord_scale, int num_anchor, int height, int width, bool rescore, Dtype* target_obj_obj_data, const Dtype* iou_data, Dtype* target_obj_noobj_data, Dtype* target_class_data, const Dtype* biases, const Dtype* blob_obj_data) { CUDA_KERNEL_LOOP(index, total) { int b = index / max_gt; int t = index % max_gt; int target_i = *(gt_target_data + b * max_gt * 3 + t * 3 + 0); int target_j = *(gt_target_data + b * max_gt * 3 + t * 3 + 1); int target_n = *(gt_target_data + b * max_gt * 3 + t * 3 + 2); if (target_i < 0) { continue; } int offset_bt = b * max_gt * 5 + 5 * t; Dtype tx = *(truth_data + offset_bt + 0); Dtype ty = *(truth_data + offset_bt + 1); Dtype tw = *(truth_data + offset_bt + 2); Dtype th = *(truth_data + offset_bt + 3); if (tw <= 0.00001 || th <= 0.00001) { // we explicitly ignore this 
zero-length bounding boxes // note: this layer is not designed to support image-level labels continue; } int offset_bnji = b * num_anchor * height * width + target_n * height * width + target_j * width + target_i; int offset_double_bnji = offset_bnji + b * num_anchor * height * width; int offset_double_bnji_next = offset_double_bnji + num_anchor * width * height; *(target_xy_data + offset_double_bnji) = tx * width - target_i; *(target_xy_data + offset_double_bnji_next) = ty * height - target_j; *(target_wh_data + offset_double_bnji) = log(tw * width / biases[2 * target_n]); *(target_wh_data + offset_double_bnji_next) = log(th * height / biases[2 * target_n + 1]); *(target_xywh_weight_data + offset_double_bnji) = coord_scale * (2 - tw * th); *(target_xywh_weight_data + offset_double_bnji_next) = coord_scale * (2 - tw * th); if (!rescore) { *(target_obj_obj_data + offset_bnji) = 1; } else { *(target_obj_obj_data + offset_bnji) = *(iou_data + offset_bnji * max_gt + t); } *(target_obj_noobj_data + offset_bnji) = *(blob_obj_data + offset_bnji); int cls = *(truth_data + offset_bt + 4); *(target_class_data + offset_bnji) = cls; } } template <typename Dtype> void RegionTargetLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { seen_images_ = this->blobs_[0]->mutable_cpu_data(); int blob_idx = 0; auto blob_xy = bottom[blob_idx++]; auto blob_wh = bottom[blob_idx++]; auto blob_obj = bottom[blob_idx++]; auto blob_truth = bottom[blob_idx++]; blob_idx = 0; auto target_xy = top[blob_idx++]; auto target_wh = top[blob_idx++]; auto target_xywh_weight = top[blob_idx++]; auto target_obj_obj = top[blob_idx++]; auto target_obj_noobj = top[blob_idx++]; auto target_class = top[blob_idx++]; auto blob_xy_data = blob_xy->gpu_data(); auto blob_wh_data = blob_wh->gpu_data(); auto blob_obj_data = blob_obj->gpu_data(); auto truth_data = blob_truth->gpu_data(); auto target_xy_data = target_xy->mutable_gpu_data(); auto target_wh_data = target_wh->mutable_gpu_data(); auto target_xywh_weight_data = target_xywh_weight->mutable_gpu_data(); auto target_obj_noobj_data = target_obj_noobj->mutable_gpu_data(); auto target_obj_obj_data = target_obj_obj->mutable_gpu_data(); auto target_class_data = target_class->mutable_gpu_data(); auto biases = this->biases_.gpu_data(); auto iou_data = this->ious_.mutable_gpu_data(); auto gt_target_data = this->gt_target_.mutable_gpu_data(); // if the iou is large enough, let's not penalize the objectiveness if ((*seen_images_) * Caffe::solver_count() < this->anchor_aligned_images_) { // if it is at the very begiining, let's align the output caffe_gpu_set(target_xy->count(), Dtype(0.5), target_xy_data); caffe_gpu_set(target_wh->count(), Dtype(0), target_wh_data); caffe_gpu_set(target_xywh_weight->count(), Dtype(0.01), target_xywh_weight_data); } else { // by default, we set the target of xywh as itself, that mean 0 penalty caffe_copy(target_xy->count(), blob_xy->gpu_data(), target_xy_data); caffe_copy(target_wh->count(), blob_wh->gpu_data(), target_wh_data); caffe_gpu_set(target_xywh_weight->count(), (Dtype)0., target_xywh_weight_data); } // for no-objectiveness, by default all of them be 0. 
we will zero-out the // position if it is 1) gt or 2) the predicted result is good enought caffe_gpu_set(target_obj_noobj->count(), (Dtype)0, target_obj_noobj_data); // For this one, we will only pernalize the position which should be // responsible for the gt caffe_copy(target_obj_obj->count(), blob_obj->gpu_data(), target_obj_obj_data); // by default, dont penalize the results caffe_gpu_set(target_class->count(), (Dtype)-1, target_class_data); caffe_gpu_set(this->ious_.count(), (Dtype)0, iou_data); int batches = blob_xy->num(); int height = blob_xy->height(); int width = blob_xy->width(); int num_anchor = blob_xy->channels() / 2; const int max_gt = blob_truth->channels() / 5; CHECK_EQ(blob_truth->height(), 1); CHECK_EQ(blob_truth->width(), 1); const Dtype* bbs_data = NULL; if (bottom.size() >= 5) { bbs_data = bottom[4]->gpu_data(); } else { // Calculate the bbs if it is not a bottom int total = batches * num_anchor * height * width; ExtractBoundingBox<Dtype> << <CAFFE_GET_BLOCKS(total), CAFFE_CUDA_NUM_THREADS >> >( total, num_anchor, height, width, this->bbs_.mutable_gpu_data(), blob_xy_data, blob_wh_data, biases); CUDA_POST_KERNEL_CHECK; bbs_data = this->bbs_.gpu_data(); } int total = batches * num_anchor * height * width * max_gt; hipLaunchKernelGGL(( CalculateIOU<Dtype>), dim3(CAFFE_GET_BLOCKS(total)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, total, iou_data, bbs_data, truth_data, num_anchor, height, width, max_gt, this->positive_thresh_, blob_obj->gpu_data(), target_obj_noobj_data); CUDA_POST_KERNEL_CHECK; total = batches * max_gt; hipLaunchKernelGGL(( GroundTruthTarget<Dtype>), dim3(CAFFE_GET_BLOCKS(total)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, total, max_gt, blob_truth->gpu_data(), num_anchor, height, width, biases, gt_target_data); CUDA_POST_KERNEL_CHECK; total = max_gt * max_gt * batches; hipLaunchKernelGGL(( RemoveDuplicateTarget<Dtype>), dim3(CAFFE_GET_BLOCKS(total)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, total, gt_target_data, max_gt); total = batches * max_gt; hipLaunchKernelGGL(( AlignGroudTruth<Dtype>), dim3(CAFFE_GET_BLOCKS(total)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, total, gt_target_data, max_gt, blob_truth->gpu_data(), target_xy_data, target_wh_data, target_xywh_weight_data, coord_scale_, num_anchor, height, width, this->rescore_, target_obj_obj_data, iou_data, target_obj_noobj_data, target_class_data, biases, blob_obj_data); CUDA_POST_KERNEL_CHECK; (*seen_images_) += batches; } template <typename Dtype> void RegionTargetLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { } INSTANTIATE_LAYER_GPU_FUNCS(RegionTargetLayer); } // namespace caffe
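// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this layer): TBoxIou() is called by the
// kernels above but defined elsewhere in the repository.  The helper below
// shows the usual IoU for center-format boxes (x, y = center, w, h = size),
// which is what the call sites appear to assume; GroundTruthTarget(), for
// example, matches anchors by comparing (0, 0, tw, th) against
// (0, 0, bias_w / width, bias_h / height), so only the shapes matter there.
// Treat the exact semantics of the real TBoxIou as an assumption; all names
// below are local to this sketch.
template <typename Dtype>
__host__ __device__ inline Dtype sketch_interval_overlap(Dtype ca, Dtype sa,
                                                         Dtype cb, Dtype sb) {
  // Overlap of the intervals [ca - sa/2, ca + sa/2] and [cb - sb/2, cb + sb/2].
  Dtype lo = (ca - sa / 2 > cb - sb / 2) ? (ca - sa / 2) : (cb - sb / 2);
  Dtype hi = (ca + sa / 2 < cb + sb / 2) ? (ca + sa / 2) : (cb + sb / 2);
  return hi > lo ? hi - lo : Dtype(0);
}

template <typename Dtype>
__host__ __device__ inline Dtype sketch_box_iou(Dtype ax, Dtype ay, Dtype aw, Dtype ah,
                                                Dtype bx, Dtype by, Dtype bw, Dtype bh) {
  Dtype iw = sketch_interval_overlap(ax, aw, bx, bw);
  Dtype ih = sketch_interval_overlap(ay, ah, by, bh);
  Dtype inter = iw * ih;
  Dtype uni = aw * ah + bw * bh - inter;  // union = areaA + areaB - intersection
  return uni > Dtype(0) ? inter / uni : Dtype(0);
}
// ---------------------------------------------------------------------------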
3f35e61bd4a684c83ffce39d69b4444e49fe4ac1.cu
#include <algorithm> #include <vector> #include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" #include "cuda.h" #include "caffe/layers/region_target_layer.hpp" namespace caffe { template <typename Dtype> __global__ void ExtractBoundingBox(int total, int num_anchor, int height, int width, Dtype* bbs_data, const Dtype* blob_xy_data, const Dtype* blob_wh_data, const Dtype* biases) { CUDA_KERNEL_LOOP(index, total) { int b = index / (num_anchor * height * width); int left = index % (num_anchor * height * width); int n = left / (height * width); left = left % (height * width); int j = left / width; int i = left % width; Dtype* curr_bbs_data = bbs_data + index * 4; int offset_double_bnji = b * (2 * num_anchor) * height * width + n * height * width + j * width + i; int offset_double_bnji_next = offset_double_bnji + num_anchor * height * width; *(curr_bbs_data + 0) = (*(blob_xy_data + offset_double_bnji) + i) / width; *(curr_bbs_data + 1) = (*(blob_xy_data + offset_double_bnji_next) + j) / height; double w = *(blob_wh_data + offset_double_bnji); double h = *(blob_wh_data + offset_double_bnji_next); *(curr_bbs_data + 2) = exp(w) * biases[2 * n] / width; *(curr_bbs_data + 3) = exp(h) * biases[2 * n + 1] / height; } } template <typename Dtype> __global__ void CalculateIOU(int total, Dtype* iou_data, const Dtype* bbs_data, const Dtype* truth_data, int num_anchor, int height, int width, int max_gt, Dtype positive_thresh, const Dtype* blob_obj_data, Dtype* target_obj_noobj_data) { CUDA_KERNEL_LOOP(index, total) { int b = index / (num_anchor * height * width * max_gt); int left = index % (num_anchor * height * width * max_gt); int n = left / (height * width * max_gt); left = left % (height * width * max_gt); int j = left / (width * max_gt); left = left % (width * max_gt); int i = left / max_gt; int t = left % max_gt; Dtype tx = *(truth_data + b * 5 * max_gt + t * 5 + 0); Dtype ty = *(truth_data + b * 5 * max_gt + t * 5 + 1); Dtype tw = *(truth_data + b * 5 * max_gt + t * 5 + 2); Dtype th = *(truth_data + b * 5 * max_gt + t * 5 + 3); Dtype curr_iou = 0; if (tx) { int curr_index = (b * num_anchor * height * width + n * height * width + j * width + i) * 4; Dtype px = *(bbs_data + curr_index + 0); Dtype py = *(bbs_data + curr_index + 1); Dtype pw = *(bbs_data + curr_index + 2); Dtype ph = *(bbs_data + curr_index + 3); curr_iou = TBoxIou(px, py, pw, ph, tx, ty, tw, th); // if the iou is large enough, let's not penalize the objectiveness if (curr_iou > positive_thresh) { // multiple threads might write this address at the same time, but // at least one will succeeds. It is safe to do this. 
*(target_obj_noobj_data + index / max_gt) = *(blob_obj_data + index / max_gt); } } *(iou_data + index) = curr_iou; } } template <typename Dtype> __global__ void GroundTruthTarget(int total, int max_gt, const Dtype* truth_data, int num_anchor, int height, int width, const Dtype* biases, int* gt_target_data) { CUDA_KERNEL_LOOP(index, total) { int b = index / max_gt; int t = index % max_gt; Dtype tx = *(truth_data + b * max_gt * 5 + 5 * t + 0); Dtype ty = *(truth_data + b * max_gt * 5 + 5 * t + 1); int target_i = -1; int target_j = -1; int target_n = -1; if (tx > 0 && ty > 0 && tx < 1 && ty < 1) { target_i = tx * width; target_j = ty * height; Dtype tw = *(truth_data + b * max_gt * 5 + 5 * t + 2); Dtype th = *(truth_data + b * max_gt * 5 + 5 * t + 3); Dtype max_iou = -1; target_n = -1; for (int n = 0; n < num_anchor; n++) { Dtype curr_iou = TBoxIou<Dtype>(0, 0, tw, th, 0, 0, biases[2 * n] / width, biases[2 * n + 1] / height); if (curr_iou > max_iou) { max_iou = curr_iou; target_n = n; } } } *(gt_target_data + b * max_gt * 3 + t * 3 + 0) = target_i; *(gt_target_data + b * max_gt * 3 + t * 3 + 1) = target_j; *(gt_target_data + b * max_gt * 3 + t * 3 + 2) = target_n; } } template <typename Dtype> __global__ void RemoveDuplicateTarget(int total, int* gt_target_data, int max_gt) { CUDA_KERNEL_LOOP(index, total) { int b = index / (max_gt * max_gt); int left_index = index % (max_gt * max_gt); int left_t = left_index / max_gt; int right_t = left_index % max_gt; if (left_t == right_t) { continue; } int left_target_i = *(gt_target_data + b * max_gt * 3 + left_t * 3 + 0); int left_target_j = *(gt_target_data + b * max_gt * 3 + left_t * 3 + 1); int left_target_n = *(gt_target_data + b * max_gt * 3 + left_t * 3 + 2); if (left_target_i < 0) { continue; } int right_target_i = *(gt_target_data + b * max_gt * 3 + right_t * 3 + 0); int right_target_j = *(gt_target_data + b * max_gt * 3 + right_t * 3 + 1); int right_target_n = *(gt_target_data + b * max_gt * 3 + right_t * 3 + 2); if (right_target_i < 0) { continue; } if (left_target_i == right_target_i && left_target_j == right_target_j && left_target_n == right_target_n) { if (left_t < right_t) { *(gt_target_data + b * max_gt * 3 + left_t * 3 + 0) = -1; *(gt_target_data + b * max_gt * 3 + left_t * 3 + 1) = -1; *(gt_target_data + b * max_gt * 3 + left_t * 3 + 2) = -1; } else { *(gt_target_data + b * max_gt * 3 + right_t * 3 + 0) = -1; *(gt_target_data + b * max_gt * 3 + right_t * 3 + 1) = -1; *(gt_target_data + b * max_gt * 3 + right_t * 3 + 2) = -1; } } } } template <typename Dtype> __global__ void AlignGroudTruth(int total, const int* gt_target_data, int max_gt, const Dtype* truth_data, Dtype* target_xy_data, Dtype* target_wh_data, Dtype* target_xywh_weight_data, Dtype coord_scale, int num_anchor, int height, int width, bool rescore, Dtype* target_obj_obj_data, const Dtype* iou_data, Dtype* target_obj_noobj_data, Dtype* target_class_data, const Dtype* biases, const Dtype* blob_obj_data) { CUDA_KERNEL_LOOP(index, total) { int b = index / max_gt; int t = index % max_gt; int target_i = *(gt_target_data + b * max_gt * 3 + t * 3 + 0); int target_j = *(gt_target_data + b * max_gt * 3 + t * 3 + 1); int target_n = *(gt_target_data + b * max_gt * 3 + t * 3 + 2); if (target_i < 0) { continue; } int offset_bt = b * max_gt * 5 + 5 * t; Dtype tx = *(truth_data + offset_bt + 0); Dtype ty = *(truth_data + offset_bt + 1); Dtype tw = *(truth_data + offset_bt + 2); Dtype th = *(truth_data + offset_bt + 3); if (tw <= 0.00001 || th <= 0.00001) { // we explicitly ignore this 
zero-length bounding boxes // note: this layer is not designed to support image-level labels continue; } int offset_bnji = b * num_anchor * height * width + target_n * height * width + target_j * width + target_i; int offset_double_bnji = offset_bnji + b * num_anchor * height * width; int offset_double_bnji_next = offset_double_bnji + num_anchor * width * height; *(target_xy_data + offset_double_bnji) = tx * width - target_i; *(target_xy_data + offset_double_bnji_next) = ty * height - target_j; *(target_wh_data + offset_double_bnji) = log(tw * width / biases[2 * target_n]); *(target_wh_data + offset_double_bnji_next) = log(th * height / biases[2 * target_n + 1]); *(target_xywh_weight_data + offset_double_bnji) = coord_scale * (2 - tw * th); *(target_xywh_weight_data + offset_double_bnji_next) = coord_scale * (2 - tw * th); if (!rescore) { *(target_obj_obj_data + offset_bnji) = 1; } else { *(target_obj_obj_data + offset_bnji) = *(iou_data + offset_bnji * max_gt + t); } *(target_obj_noobj_data + offset_bnji) = *(blob_obj_data + offset_bnji); int cls = *(truth_data + offset_bt + 4); *(target_class_data + offset_bnji) = cls; } } template <typename Dtype> void RegionTargetLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { seen_images_ = this->blobs_[0]->mutable_cpu_data(); int blob_idx = 0; auto blob_xy = bottom[blob_idx++]; auto blob_wh = bottom[blob_idx++]; auto blob_obj = bottom[blob_idx++]; auto blob_truth = bottom[blob_idx++]; blob_idx = 0; auto target_xy = top[blob_idx++]; auto target_wh = top[blob_idx++]; auto target_xywh_weight = top[blob_idx++]; auto target_obj_obj = top[blob_idx++]; auto target_obj_noobj = top[blob_idx++]; auto target_class = top[blob_idx++]; auto blob_xy_data = blob_xy->gpu_data(); auto blob_wh_data = blob_wh->gpu_data(); auto blob_obj_data = blob_obj->gpu_data(); auto truth_data = blob_truth->gpu_data(); auto target_xy_data = target_xy->mutable_gpu_data(); auto target_wh_data = target_wh->mutable_gpu_data(); auto target_xywh_weight_data = target_xywh_weight->mutable_gpu_data(); auto target_obj_noobj_data = target_obj_noobj->mutable_gpu_data(); auto target_obj_obj_data = target_obj_obj->mutable_gpu_data(); auto target_class_data = target_class->mutable_gpu_data(); auto biases = this->biases_.gpu_data(); auto iou_data = this->ious_.mutable_gpu_data(); auto gt_target_data = this->gt_target_.mutable_gpu_data(); // if the iou is large enough, let's not penalize the objectiveness if ((*seen_images_) * Caffe::solver_count() < this->anchor_aligned_images_) { // if it is at the very begiining, let's align the output caffe_gpu_set(target_xy->count(), Dtype(0.5), target_xy_data); caffe_gpu_set(target_wh->count(), Dtype(0), target_wh_data); caffe_gpu_set(target_xywh_weight->count(), Dtype(0.01), target_xywh_weight_data); } else { // by default, we set the target of xywh as itself, that mean 0 penalty caffe_copy(target_xy->count(), blob_xy->gpu_data(), target_xy_data); caffe_copy(target_wh->count(), blob_wh->gpu_data(), target_wh_data); caffe_gpu_set(target_xywh_weight->count(), (Dtype)0., target_xywh_weight_data); } // for no-objectiveness, by default all of them be 0. 
we will zero-out the // position if it is 1) gt or 2) the predicted result is good enought caffe_gpu_set(target_obj_noobj->count(), (Dtype)0, target_obj_noobj_data); // For this one, we will only pernalize the position which should be // responsible for the gt caffe_copy(target_obj_obj->count(), blob_obj->gpu_data(), target_obj_obj_data); // by default, dont penalize the results caffe_gpu_set(target_class->count(), (Dtype)-1, target_class_data); caffe_gpu_set(this->ious_.count(), (Dtype)0, iou_data); int batches = blob_xy->num(); int height = blob_xy->height(); int width = blob_xy->width(); int num_anchor = blob_xy->channels() / 2; const int max_gt = blob_truth->channels() / 5; CHECK_EQ(blob_truth->height(), 1); CHECK_EQ(blob_truth->width(), 1); const Dtype* bbs_data = NULL; if (bottom.size() >= 5) { bbs_data = bottom[4]->gpu_data(); } else { // Calculate the bbs if it is not a bottom int total = batches * num_anchor * height * width; ExtractBoundingBox<Dtype> << <CAFFE_GET_BLOCKS(total), CAFFE_CUDA_NUM_THREADS >> >( total, num_anchor, height, width, this->bbs_.mutable_gpu_data(), blob_xy_data, blob_wh_data, biases); CUDA_POST_KERNEL_CHECK; bbs_data = this->bbs_.gpu_data(); } int total = batches * num_anchor * height * width * max_gt; CalculateIOU<Dtype><<<CAFFE_GET_BLOCKS(total), CAFFE_CUDA_NUM_THREADS>>>(total, iou_data, bbs_data, truth_data, num_anchor, height, width, max_gt, this->positive_thresh_, blob_obj->gpu_data(), target_obj_noobj_data); CUDA_POST_KERNEL_CHECK; total = batches * max_gt; GroundTruthTarget<Dtype><<<CAFFE_GET_BLOCKS(total), CAFFE_CUDA_NUM_THREADS>>>(total, max_gt, blob_truth->gpu_data(), num_anchor, height, width, biases, gt_target_data); CUDA_POST_KERNEL_CHECK; total = max_gt * max_gt * batches; RemoveDuplicateTarget<Dtype><<<CAFFE_GET_BLOCKS(total), CAFFE_CUDA_NUM_THREADS>>>(total, gt_target_data, max_gt); total = batches * max_gt; AlignGroudTruth<Dtype><<<CAFFE_GET_BLOCKS(total), CAFFE_CUDA_NUM_THREADS>>>(total, gt_target_data, max_gt, blob_truth->gpu_data(), target_xy_data, target_wh_data, target_xywh_weight_data, coord_scale_, num_anchor, height, width, this->rescore_, target_obj_obj_data, iou_data, target_obj_noobj_data, target_class_data, biases, blob_obj_data); CUDA_POST_KERNEL_CHECK; (*seen_images_) += batches; } template <typename Dtype> void RegionTargetLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { } INSTANTIATE_LAYER_GPU_FUNCS(RegionTargetLayer); } // namespace caffe
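// ---------------------------------------------------------------------------
// Side note (sketch, not part of the layer): every kernel in this file walks
// its index space with Caffe's CUDA_KERNEL_LOOP macro and is launched over
// CAFFE_GET_BLOCKS(total) blocks of CAFFE_CUDA_NUM_THREADS threads.  The
// plain-CUDA grid-stride equivalent of that pattern looks like the following;
// the kernel, the launcher, and the thread count of 256 are hypothetical and
// only stand in for the Caffe helpers.
__global__ void sketch_grid_stride_kernel(int total, float* data) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < total;
       i += blockDim.x * gridDim.x) {
    data[i] *= 2.0f;  // placeholder per-element work
  }
}

static void sketch_launch(int total, float* d_data) {
  const int threads = 256;  // stands in for CAFFE_CUDA_NUM_THREADS
  const int blocks = (total + threads - 1) / threads;  // covers all elements
  sketch_grid_stride_kernel<<<blocks, threads>>>(total, d_data);
}
// ---------------------------------------------------------------------------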
131f6f89fd836c5f9927b4ca648c3f44d2793a01.hip
// !!! This is a file automatically generated by hipify!!! #include "net_spiking.cuh" #include "opencv2/opencv.hpp" #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include "common/util.h" #include <time.h> #include "dataAugmentation/cuTransformation.cuh" #include "common/Config.h" #include <helper_functions.h> #include <helper_cuda.h> #include "common/MemoryMonitor.h" #include "common/cuBase.h" #include "layers/LayerBase.h" #include "layers/DataLayerSpiking.h" #include "layers/ConvSpiking.h" #include "layers/PoolingSpiking.h" #include "layers/Spiking.h" #include "layers/SoftMaxSpiking.h" #include <queue> #include <set> int cuSCurCorrect; cuMatrix<int>* cuSCorrect = NULL; cuMatrix<int>* cuSVote = NULL; cuMatrix<bool>* cuSPredictions = NULL; cuMatrix<int>* cuSTrCorrect = NULL; cuMatrix<int>* cuSTrVote = NULL; cuMatrix<bool>* cuSTrPredictions = NULL; cuMatrix<float>* cuSampleWeight = NULL; std::vector<ConfigBase*> spiking_que; void cuSaveSpikingNet() { FILE *pOut = fopen("Result/checkPoint.txt", "w"); for(int i = 0; i < (int)spiking_que.size(); i++){ LayerBase* layer = Layers::instance()->get(spiking_que[i]->m_name); layer->save(pOut); } fclose(pOut); }; void cuFreeSpikingNet() { } void cuReadSpikingNet(const char* path) { FILE *pIn = fopen(path, "r"); for(int i = 0; i < (int)spiking_que.size(); i++){ LayerBase* layer = Layers::instance()->get(spiking_que[i]->m_name); layer->initFromCheckpoint(pIn); } fclose(pIn); }; void buildSpikingNetwork(int trainLen, int testLen) { /*BFS*/ std::queue<ConfigBase*>qqq; std::set<ConfigBase*> inque; for(int i = 0; i < (int)Config::instance()->getFirstLayers().size(); i++){ qqq.push(Config::instance()->getFirstLayers()[i]); inque.insert(Config::instance()->getFirstLayers()[i]); } char logStr[1024]; sprintf(logStr, "\n\n******************layer nexts start********************\n"); LOG(logStr, "Result/log.txt"); std::set<ConfigBase*>finish; while(!qqq.empty()){ ConfigBase* top = qqq.front(); qqq.pop(); finish.insert(top); spiking_que.push_back(top); if(top->m_type == std::string("DATASPIKING")){ new DataLayerSpiking(top->m_name); } else if(top->m_type == std::string("CONVSPIKING")){ new ConvSpiking(top->m_name); } else if(top->m_type == std::string("POOLINGSPIKING")){ new PoolingSpiking(top->m_name); } else if(top->m_type == std::string("SPIKING")){ new Spiking(top->m_name); } else if(top->m_type == std::string("SOFTMAXSPIKING")){ new SoftMaxSpiking(top->m_name); } sprintf(logStr, "layer %15s:", top->m_name.c_str()); LOG(logStr, "Result/log.txt"); for(int n = 0; n < (int)top->m_next.size(); n++){ if(inque.find(top->m_next[n]) == inque.end()){ qqq.push(top->m_next[n]); inque.insert(top->m_next[n]); } sprintf(logStr, "%s ", top->m_next[n]->m_name.c_str()); LOG(logStr, "Result/log.txt"); }sprintf(logStr, "\n"); LOG(logStr, "Result/log.txt"); } sprintf(logStr, "\n\n******************layer nexts end********************\n"); LOG(logStr, "Result/log.txt"); //* correct and cuSVote for tracking the test results if(cuSCorrect == NULL) { cuSCorrect = new cuMatrix<int>(1,1,1); cuSVote = new cuMatrix<int>(testLen, Config::instance()->getClasses(), 1); cuSPredictions = new cuMatrix<bool>(testLen, 1, 1); } //* cuSTrCorrect and cuSTrVote for tracking the training results if(cuSTrCorrect == NULL) { cuSTrCorrect = new cuMatrix<int>(1,1,1); cuSTrVote = new cuMatrix<int>(trainLen, Config::instance()->getClasses(), 1); cuSTrPredictions = new cuMatrix<bool>(trainLen, 1, 1); } // boost weighted training if(cuSampleWeight == NULL) { cuSampleWeight = new 
cuMatrix<float>(trainLen, 1, 1); for(int i = 0; i < cuSampleWeight->getLen(); i++){ cuSampleWeight->getHost()[i] = 1.0f; } cuSampleWeight->toGpu(); } } void cuFreeSNNMemory( int batch, cuMatrixVector<bool>&trainX, cuMatrixVector<bool>&testX) { } /* * Get the network prediction result * block = dim3(1) * thread = dim3(batch) */ __global__ void g_getPredict(int* batchfireCount, int cols, int start, int* vote) { int batchid = threadIdx.x; if(batchid < start) return; int* p = batchfireCount + batchid * cols; int* votep = vote + batchid * cols; int r = 0; int maxCount = 0; for(int i = 0; i < cols; i++) { int cnt = p[i]; if(maxCount < cnt) { maxCount = cnt; r = i; } } votep[r]++; } /* * Get the predict based on softmax * dim3(1),dim3(batch) */ __global__ void g_getPredict_softmax(float* softMaxP, int cols, int start, int* vote) { int id = threadIdx.x; if(id < start) return; float* p = softMaxP + id * cols; int* votep= vote + id * cols; int r = 0; float maxele = log(p[0]); for(int i = 1; i < cols; i++) { float val = log(p[i]); if(maxele < val) { maxele = val; r = i; } } votep[r]++; } //* get the prediction from the spiking output layer void outputPredict(int* vote, int start) { for(int i = 0; i < (int)spiking_que.size(); i++){ if(spiking_que[i]->m_name == std::string("output")){ hipLaunchKernelGGL(( g_getPredict), dim3(dim3(1)), dim3(Config::instance()->getBatchSize()), 0, 0, Layers::instance()->get(spiking_que[i]->m_name)->getFireCount()->getDev(), Layers::instance()->get(spiking_que[i]->m_name)->getFireCount()->cols, start, vote); hipStreamSynchronize(0); getLastCudaError("g_getPredict"); } if(spiking_que[i]->m_type == std::string("SOFTMAXSPIKING")){ hipLaunchKernelGGL(( g_getPredict_softmax), dim3(dim3(1)), dim3(Config::instance()->getBatchSize()), 0, 0, Layers::instance()->get(spiking_que[i]->m_name)->getOutputs()->getDev(), Layers::instance()->get(spiking_que[i]->m_name)->getOutputs()->cols, start, vote); hipStreamSynchronize(0); getLastCudaError("g_getPredict_softmax"); } } } void getSpikingNetworkCost(int* y, float* weights, int* vote, int start) { /*feedforward*/ for(int i = 0; i < (int)spiking_que.size(); i++){ if(spiking_que[i]->m_name == std::string("output") || spiking_que[i]->m_type == std::string("SOFTMAXSPIKING")){ SpikingLayerBase* output = (SpikingLayerBase*)Layers::instance()->get(spiking_que[i]->m_name); output->setPredict(y); output->setSampleWeight(weights); } } for(int i = 0; i < (int)spiking_que.size(); i++){ LayerBase* layer = Layers::instance()->get(spiking_que[i]->m_name); layer->feedforward(); } /*record the prediction*/ outputPredict(vote, start); /*backpropagation*/ for(int i = (int)spiking_que.size() - 1; i >=0; i--){ ConfigBase* top = spiking_que[i]; if(top->m_name == std::string("reservoir")) continue; SpikingLayerBase* layer = (SpikingLayerBase*)Layers::instance()->get(top->m_name); layer->backpropagation(); layer->getGrad(); layer->updateWeight(); } hipStreamSynchronize(Layers::instance()->get_stream()); getLastCudaError("updateWB"); } void resultPredict(int* y, int* vote, int start) { /*feedforward*/ for(int i = 0; i < (int)spiking_que.size(); i++){ if(spiking_que[i]->m_name == std::string("output") || spiking_que[i]->m_type == std::string("SOFTMAXSPIKING")){ SpikingLayerBase* output = (SpikingLayerBase*)Layers::instance()->get(spiking_que[i]->m_name); output->setPredict(y); } } for(int i = 0; i < (int)spiking_que.size(); i++){ LayerBase* layer = Layers::instance()->get(spiking_que[i]->m_name); layer->feedforward(); } /*obtain the prediction predict*/ 
outputPredict(vote, start); } void gradientChecking(bool**x, int*y, int batch, int nclasses, hipblasHandle_t handle) { } /* * block = (testX.size() + batch - 1) / batch * thread = batch */ void __global__ g_getSpikeVotingResult(int* voting, int* y, int* correct, bool* predictions, int len, int nclasses) { for(int i = 0; i < len; i += blockDim.x * gridDim.x) { int idx = i + blockDim.x * blockIdx.x + threadIdx.x; if(idx < len) { int* pvoting = voting + idx * nclasses; int _max = pvoting[0]; int rid = 0; for(int j = 1; j < nclasses; j++) { if(pvoting[j] > _max) { _max = pvoting[j]; rid = j; } } if(rid == y[idx]) { atomicAdd(correct, 1); predictions[idx] = true; } } } } /* * block = 1 * thread = nclasses */ void __global__ g_boostWeightUpdate(float* weights, bool* predictions, int* y, int len, int nclasses) { extern __shared__ float sums[]; float * sum_weights = (float*)sums; float * error_weighted = (float*)&sums[nclasses]; int tid = threadIdx.x; sum_weights[tid] = 0; error_weighted[tid] = 0; __syncthreads(); // 1. compute the sum of the boosting weight for each class for(int i = 0; i < len; i += blockDim.x) { int idx = i + tid; if(idx < len) { int cls = y[idx]; float w = weights[idx]; atomicAdd(&sum_weights[cls], w); } } __syncthreads(); // 2. compute the weighted error for each class for(int i = 0; i < len; i += blockDim.x) { int idx = i + tid; if(idx < len) { int cls = y[idx]; bool prediction = predictions[idx]; float w = weights[idx]; atomicAdd(&error_weighted[cls], w*(!prediction)/sum_weights[cls]); } } __syncthreads(); // 3. update the boost weight for each training sample for(int i = 0; i < len; i += blockDim.x) { int idx = i + tid; if(idx < len) { bool prediction = predictions[idx]; int cls = y[idx]; float w = weights[idx]; float stage = error_weighted[cls]/20.0f; float new_w = w * __expf(stage * (!prediction)); weights[idx] = new_w; /* if(prediction) printf("Sample: %d predicts correctly old sample weight: %f new sample weight %f\n", cls, w, new_w); else printf("Sample: %d predicts incorrectly old sample weight: %f new sample weight %f\n", cls, w, new_w); */ } } } //* verify that the GPU sim result aligns with CPU sim void verifyResult(std::string phrase) { for(int i = 0; i < (int)spiking_que.size(); i++){ SpikingLayerBase* layer = (SpikingLayerBase*) Layers::instance()->get(spiking_que[i]->m_name); layer->verify(phrase); } } void predictTestRate(cuMatrixVector<bool>&x, cuMatrix<int>*y , cuMatrixVector<bool>&testX, cuMatrix<int>* testY, int batch, int nclasses, hipblasHandle_t handle) { Config::instance()->setTraining(false); DataLayerSpiking *dl = static_cast<DataLayerSpiking*>(Layers::instance()->get("data")); dl->getBatchSpikes(testX, 0); cuSVote->gpuClear(); for (int k = 0; k < ((int)testX.size() + batch - 1) / batch; k ++) { dl->synchronize(); int start = k * batch; printf("test %2d%%", 100 * start / (((int)testX.size() + batch - 1))); if(start + batch <= (int)testX.size() - batch) dl->getBatchSpikes(testX, start + batch); else{ int tstart = testX.size() - batch; dl->getBatchSpikes(testX, tstart); } if(start + batch > (int)testX.size()){ start = (int)testX.size() - batch; } dl->testData(); resultPredict(testY->getDev() + start, cuSVote->getDev() + start * nclasses, k * batch - start); printf("\b\b\b\b\b\b\b\b\b"); } cuSCorrect->gpuClear(); cuSPredictions->gpuClear(); hipLaunchKernelGGL(( g_getSpikeVotingResult), dim3(dim3((testX.size() + batch - 1) / batch)), dim3(dim3(batch)), 0, 0, cuSVote->getDev(), testY->getDev(), cuSCorrect->getDev(), cuSPredictions->getDev(), 
testX.size(), nclasses); hipStreamSynchronize(0); getLastCudaError("g_getSpikeVotingResult"); cuSCorrect->toCpu(); if (cuSCorrect->get(0, 0, 0) > cuSCurCorrect) { cuSCurCorrect = cuSCorrect->get(0, 0, 0); cuSaveSpikingNet(); } } float getSpikingCost(){ float cost = 0.0; for(int i = 0; i < (int)spiking_que.size(); i++){ if(spiking_que[i]->m_name == "output" || spiking_que[i]->m_type == std::string("SOFTMAXSPIKING")){ LayerBase* layer = (LayerBase*)Layers::instance()->get(spiking_que[i]->m_name); layer->calCost(); cost += layer->getCost(); } } return cost; } void cuTrainSpikingNetwork(cuMatrixVector<bool>&x, cuMatrix<int>*y, cuMatrixVector<bool>&testX, cuMatrix<int>* testY, int batch, int nclasses, std::vector<float>&nlrate, std::vector<float>&nMomentum, std::vector<int>&epoCount, hipblasHandle_t handle) { char logStr[1024]; if(nlrate.size() != nMomentum.size() || nMomentum.size() != epoCount.size() || nlrate.size() != epoCount.size()) { printf("nlrate, nMomentum, epoCount size not equal\n"); exit(0); } if(Config::instance()->getIsGradientChecking()) gradientChecking(x.m_devPoint, y->getDev(), batch, nclasses, handle); float my_start = (float)clock(); predictTestRate(x, y, testX, testY, batch, nclasses, handle); float my_end = (float)clock(); sprintf(logStr, "===================output fire counts================\n"); LOG(logStr, "Result/log.txt"); testY->toCpu(); sprintf(logStr, "The last sample label: %d\n", testY->get(testY->getLen() - batch, 0, 0)); LOG(logStr, "Result/log.txt"); for(int i = 0; i < (int)spiking_que.size(); i++){ SpikingLayerBase* layer = (SpikingLayerBase*) Layers::instance()->get(spiking_que[i]->m_name); layer->printFireCount(); } sprintf(logStr, "time spent on test : time=%.03lfs\n", (float) (my_end - my_start) / CLOCKS_PER_SEC); LOG(logStr, "Result/log.txt"); if(Config::instance()->getIsGradientChecking()) verifyResult(std::string("train")); sprintf(logStr, "correct is %d\n", cuSCorrect->get(0,0,0)); LOG(logStr, "Result/log.txt"); int epochs = Config::instance()->getTestEpoch(); float lrate = 0.05f; float Momentum = 0.9f; int id = 0; //hipProfilerStart(); for (int epo = 0; epo < epochs; epo++) { if (id >= (int)nlrate.size()) break; lrate = nlrate[id]; Momentum = nMomentum[id]; Config::instance()->setLrate(lrate); Config::instance()->setMomentum(Momentum); float start, end; start = (float)clock(); if(Config::instance()->applyPreproc()){ int ImgSize = 28; cuApplyRandom(batch, clock() + epo, ImgSize); } Config::instance()->setTraining(true); x.shuffle(5000, y, cuSampleWeight); DataLayerSpiking *dl = static_cast<DataLayerSpiking*>(Layers::instance()->get("data")); dl->loadBatchSpikes(x, 0); cuSTrVote->gpuClear(); float cost = 0.0f; for (int k = 0; k < ((int)x.size() + batch - 1) / batch; k ++) { dl->synchronize(); int start = k * batch; printf("train %2d%%", 100 * start / (((int)x.size() + batch - 1))); if(start + batch <= (int)x.size() - batch){ dl->loadBatchSpikes(x, start + batch); }else{ int tstart = x.size() - batch; dl->loadBatchSpikes(x, tstart); } if(start + batch > (int)x.size()){ start = (int)x.size() - batch; } getSpikingNetworkCost( y->getDev() + start, cuSampleWeight->getDev() + start, cuSTrVote->getDev() + start * nclasses, k * batch - start); cost += getSpikingCost(); printf("\b\b\b\b\b\b\b\b\b"); } cost /= (float)x.size(); end = (float)clock(); sprintf(logStr, "epoch=%d time=%.03lfs cost=%f Momentum=%.06lf lrate=%.08lf\n", epo, (float) (end - start) / CLOCKS_PER_SEC, cost, Config::instance()->getMomentum(), Config::instance()->getLrate()); LOG(logStr, 
"Result/log.txt"); cuSTrCorrect->gpuClear(); cuSTrPredictions->gpuClear(); hipLaunchKernelGGL(( g_getSpikeVotingResult), dim3(dim3((x.size() + batch - 1) / batch)), dim3(dim3(batch)), 0, 0, cuSTrVote->getDev(), y->getDev(), cuSTrCorrect->getDev(), cuSTrPredictions->getDev(), x.size(), nclasses); hipStreamSynchronize(0); getLastCudaError("g_getSpikeVotingResult"); cuSTrCorrect->toCpu(); sprintf(logStr, "train performance: %.2lf%%\n", 100.0 * cuSTrCorrect->get(0, 0, 0) / x.size()); LOG(logStr, "Result/log.txt"); if (Config::instance()->hasBoostWeightTrain()) { hipLaunchKernelGGL(( g_boostWeightUpdate), dim3(dim3(1)), dim3(dim3(nclasses)), sizeof(float) * 2 * nclasses, 0, cuSampleWeight->getDev(), cuSTrPredictions->getDev(), y->getDev(), x.size(), nclasses); hipStreamSynchronize(0); getLastCudaError("g_getSpikeVotingResult"); cuSampleWeight->toCpu(); } if (epo && epo % epoCount[id] == 0) { id++; } sprintf(logStr, "===================weight value================\n"); LOG(logStr, "Result/log.txt"); for(int i = 0; i < (int)spiking_que.size(); i++){ LayerBase* layer = Layers::instance()->get(spiking_que[i]->m_name); layer->printParameter(); } sprintf(logStr, "===================test Result================\n"); LOG(logStr, "Result/log.txt"); predictTestRate(x, y, testX, testY, batch, nclasses, handle); if(Config::instance()->getIsGradientChecking()) verifyResult(std::string("test")); sprintf(logStr, "test %.2lf%%/%.2lf%%\n", 100.0 * cuSCorrect->get(0, 0, 0) / testX.size(), 100.0 * cuSCurCorrect / testX.size()); LOG(logStr, "Result/log.txt"); sprintf(logStr, "===================output fire counts================\n"); LOG(logStr, "Result/log.txt"); testY->toCpu(); sprintf(logStr, "The last sample label: %d\n", testY->get(testY->getLen() - batch, 0, 0)); LOG(logStr, "Result/log.txt"); for(int i = 0; i < (int)spiking_que.size(); i++){ SpikingLayerBase* layer = (SpikingLayerBase*) Layers::instance()->get(spiking_que[i]->m_name); layer->printFireCount(); } if(epo == 0){ MemoryMonitor::instance()->printCpuMemory(); MemoryMonitor::instance()->printGpuMemory(); } } //hipProfilerStop(); }
131f6f89fd836c5f9927b4ca648c3f44d2793a01.cu
#include "net_spiking.cuh" #include "opencv2/opencv.hpp" #include <cuda_runtime.h> #include <cuda_profiler_api.h> #include "common/util.h" #include <time.h> #include "dataAugmentation/cuTransformation.cuh" #include "common/Config.h" #include <helper_functions.h> #include <helper_cuda.h> #include "common/MemoryMonitor.h" #include "common/cuBase.h" #include "layers/LayerBase.h" #include "layers/DataLayerSpiking.h" #include "layers/ConvSpiking.h" #include "layers/PoolingSpiking.h" #include "layers/Spiking.h" #include "layers/SoftMaxSpiking.h" #include <queue> #include <set> int cuSCurCorrect; cuMatrix<int>* cuSCorrect = NULL; cuMatrix<int>* cuSVote = NULL; cuMatrix<bool>* cuSPredictions = NULL; cuMatrix<int>* cuSTrCorrect = NULL; cuMatrix<int>* cuSTrVote = NULL; cuMatrix<bool>* cuSTrPredictions = NULL; cuMatrix<float>* cuSampleWeight = NULL; std::vector<ConfigBase*> spiking_que; void cuSaveSpikingNet() { FILE *pOut = fopen("Result/checkPoint.txt", "w"); for(int i = 0; i < (int)spiking_que.size(); i++){ LayerBase* layer = Layers::instance()->get(spiking_que[i]->m_name); layer->save(pOut); } fclose(pOut); }; void cuFreeSpikingNet() { } void cuReadSpikingNet(const char* path) { FILE *pIn = fopen(path, "r"); for(int i = 0; i < (int)spiking_que.size(); i++){ LayerBase* layer = Layers::instance()->get(spiking_que[i]->m_name); layer->initFromCheckpoint(pIn); } fclose(pIn); }; void buildSpikingNetwork(int trainLen, int testLen) { /*BFS*/ std::queue<ConfigBase*>qqq; std::set<ConfigBase*> inque; for(int i = 0; i < (int)Config::instance()->getFirstLayers().size(); i++){ qqq.push(Config::instance()->getFirstLayers()[i]); inque.insert(Config::instance()->getFirstLayers()[i]); } char logStr[1024]; sprintf(logStr, "\n\n******************layer nexts start********************\n"); LOG(logStr, "Result/log.txt"); std::set<ConfigBase*>finish; while(!qqq.empty()){ ConfigBase* top = qqq.front(); qqq.pop(); finish.insert(top); spiking_que.push_back(top); if(top->m_type == std::string("DATASPIKING")){ new DataLayerSpiking(top->m_name); } else if(top->m_type == std::string("CONVSPIKING")){ new ConvSpiking(top->m_name); } else if(top->m_type == std::string("POOLINGSPIKING")){ new PoolingSpiking(top->m_name); } else if(top->m_type == std::string("SPIKING")){ new Spiking(top->m_name); } else if(top->m_type == std::string("SOFTMAXSPIKING")){ new SoftMaxSpiking(top->m_name); } sprintf(logStr, "layer %15s:", top->m_name.c_str()); LOG(logStr, "Result/log.txt"); for(int n = 0; n < (int)top->m_next.size(); n++){ if(inque.find(top->m_next[n]) == inque.end()){ qqq.push(top->m_next[n]); inque.insert(top->m_next[n]); } sprintf(logStr, "%s ", top->m_next[n]->m_name.c_str()); LOG(logStr, "Result/log.txt"); }sprintf(logStr, "\n"); LOG(logStr, "Result/log.txt"); } sprintf(logStr, "\n\n******************layer nexts end********************\n"); LOG(logStr, "Result/log.txt"); //* correct and cuSVote for tracking the test results if(cuSCorrect == NULL) { cuSCorrect = new cuMatrix<int>(1,1,1); cuSVote = new cuMatrix<int>(testLen, Config::instance()->getClasses(), 1); cuSPredictions = new cuMatrix<bool>(testLen, 1, 1); } //* cuSTrCorrect and cuSTrVote for tracking the training results if(cuSTrCorrect == NULL) { cuSTrCorrect = new cuMatrix<int>(1,1,1); cuSTrVote = new cuMatrix<int>(trainLen, Config::instance()->getClasses(), 1); cuSTrPredictions = new cuMatrix<bool>(trainLen, 1, 1); } // boost weighted training if(cuSampleWeight == NULL) { cuSampleWeight = new cuMatrix<float>(trainLen, 1, 1); for(int i = 0; i < cuSampleWeight->getLen(); 
i++){ cuSampleWeight->getHost()[i] = 1.0f; } cuSampleWeight->toGpu(); } } void cuFreeSNNMemory( int batch, cuMatrixVector<bool>&trainX, cuMatrixVector<bool>&testX) { } /* * Get the network prediction result * block = dim3(1) * thread = dim3(batch) */ __global__ void g_getPredict(int* batchfireCount, int cols, int start, int* vote) { int batchid = threadIdx.x; if(batchid < start) return; int* p = batchfireCount + batchid * cols; int* votep = vote + batchid * cols; int r = 0; int maxCount = 0; for(int i = 0; i < cols; i++) { int cnt = p[i]; if(maxCount < cnt) { maxCount = cnt; r = i; } } votep[r]++; } /* * Get the predict based on softmax * dim3(1),dim3(batch) */ __global__ void g_getPredict_softmax(float* softMaxP, int cols, int start, int* vote) { int id = threadIdx.x; if(id < start) return; float* p = softMaxP + id * cols; int* votep= vote + id * cols; int r = 0; float maxele = log(p[0]); for(int i = 1; i < cols; i++) { float val = log(p[i]); if(maxele < val) { maxele = val; r = i; } } votep[r]++; } //* get the prediction from the spiking output layer void outputPredict(int* vote, int start) { for(int i = 0; i < (int)spiking_que.size(); i++){ if(spiking_que[i]->m_name == std::string("output")){ g_getPredict<<<dim3(1), Config::instance()->getBatchSize()>>>( Layers::instance()->get(spiking_que[i]->m_name)->getFireCount()->getDev(), Layers::instance()->get(spiking_que[i]->m_name)->getFireCount()->cols, start, vote); cudaStreamSynchronize(0); getLastCudaError("g_getPredict"); } if(spiking_que[i]->m_type == std::string("SOFTMAXSPIKING")){ g_getPredict_softmax<<<dim3(1), Config::instance()->getBatchSize()>>>( Layers::instance()->get(spiking_que[i]->m_name)->getOutputs()->getDev(), Layers::instance()->get(spiking_que[i]->m_name)->getOutputs()->cols, start, vote); cudaStreamSynchronize(0); getLastCudaError("g_getPredict_softmax"); } } } void getSpikingNetworkCost(int* y, float* weights, int* vote, int start) { /*feedforward*/ for(int i = 0; i < (int)spiking_que.size(); i++){ if(spiking_que[i]->m_name == std::string("output") || spiking_que[i]->m_type == std::string("SOFTMAXSPIKING")){ SpikingLayerBase* output = (SpikingLayerBase*)Layers::instance()->get(spiking_que[i]->m_name); output->setPredict(y); output->setSampleWeight(weights); } } for(int i = 0; i < (int)spiking_que.size(); i++){ LayerBase* layer = Layers::instance()->get(spiking_que[i]->m_name); layer->feedforward(); } /*record the prediction*/ outputPredict(vote, start); /*backpropagation*/ for(int i = (int)spiking_que.size() - 1; i >=0; i--){ ConfigBase* top = spiking_que[i]; if(top->m_name == std::string("reservoir")) continue; SpikingLayerBase* layer = (SpikingLayerBase*)Layers::instance()->get(top->m_name); layer->backpropagation(); layer->getGrad(); layer->updateWeight(); } cudaStreamSynchronize(Layers::instance()->get_stream()); getLastCudaError("updateWB"); } void resultPredict(int* y, int* vote, int start) { /*feedforward*/ for(int i = 0; i < (int)spiking_que.size(); i++){ if(spiking_que[i]->m_name == std::string("output") || spiking_que[i]->m_type == std::string("SOFTMAXSPIKING")){ SpikingLayerBase* output = (SpikingLayerBase*)Layers::instance()->get(spiking_que[i]->m_name); output->setPredict(y); } } for(int i = 0; i < (int)spiking_que.size(); i++){ LayerBase* layer = Layers::instance()->get(spiking_que[i]->m_name); layer->feedforward(); } /*obtain the prediction predict*/ outputPredict(vote, start); } void gradientChecking(bool**x, int*y, int batch, int nclasses, cublasHandle_t handle) { } /* * block = (testX.size() + batch - 
1) / batch * thread = batch */ void __global__ g_getSpikeVotingResult(int* voting, int* y, int* correct, bool* predictions, int len, int nclasses) { for(int i = 0; i < len; i += blockDim.x * gridDim.x) { int idx = i + blockDim.x * blockIdx.x + threadIdx.x; if(idx < len) { int* pvoting = voting + idx * nclasses; int _max = pvoting[0]; int rid = 0; for(int j = 1; j < nclasses; j++) { if(pvoting[j] > _max) { _max = pvoting[j]; rid = j; } } if(rid == y[idx]) { atomicAdd(correct, 1); predictions[idx] = true; } } } } /* * block = 1 * thread = nclasses */ void __global__ g_boostWeightUpdate(float* weights, bool* predictions, int* y, int len, int nclasses) { extern __shared__ float sums[]; float * sum_weights = (float*)sums; float * error_weighted = (float*)&sums[nclasses]; int tid = threadIdx.x; sum_weights[tid] = 0; error_weighted[tid] = 0; __syncthreads(); // 1. compute the sum of the boosting weight for each class for(int i = 0; i < len; i += blockDim.x) { int idx = i + tid; if(idx < len) { int cls = y[idx]; float w = weights[idx]; atomicAdd(&sum_weights[cls], w); } } __syncthreads(); // 2. compute the weighted error for each class for(int i = 0; i < len; i += blockDim.x) { int idx = i + tid; if(idx < len) { int cls = y[idx]; bool prediction = predictions[idx]; float w = weights[idx]; atomicAdd(&error_weighted[cls], w*(!prediction)/sum_weights[cls]); } } __syncthreads(); // 3. update the boost weight for each training sample for(int i = 0; i < len; i += blockDim.x) { int idx = i + tid; if(idx < len) { bool prediction = predictions[idx]; int cls = y[idx]; float w = weights[idx]; float stage = error_weighted[cls]/20.0f; float new_w = w * __expf(stage * (!prediction)); weights[idx] = new_w; /* if(prediction) printf("Sample: %d predicts correctly old sample weight: %f new sample weight %f\n", cls, w, new_w); else printf("Sample: %d predicts incorrectly old sample weight: %f new sample weight %f\n", cls, w, new_w); */ } } } //* verify that the GPU sim result aligns with CPU sim void verifyResult(std::string phrase) { for(int i = 0; i < (int)spiking_que.size(); i++){ SpikingLayerBase* layer = (SpikingLayerBase*) Layers::instance()->get(spiking_que[i]->m_name); layer->verify(phrase); } } void predictTestRate(cuMatrixVector<bool>&x, cuMatrix<int>*y , cuMatrixVector<bool>&testX, cuMatrix<int>* testY, int batch, int nclasses, cublasHandle_t handle) { Config::instance()->setTraining(false); DataLayerSpiking *dl = static_cast<DataLayerSpiking*>(Layers::instance()->get("data")); dl->getBatchSpikes(testX, 0); cuSVote->gpuClear(); for (int k = 0; k < ((int)testX.size() + batch - 1) / batch; k ++) { dl->synchronize(); int start = k * batch; printf("test %2d%%", 100 * start / (((int)testX.size() + batch - 1))); if(start + batch <= (int)testX.size() - batch) dl->getBatchSpikes(testX, start + batch); else{ int tstart = testX.size() - batch; dl->getBatchSpikes(testX, tstart); } if(start + batch > (int)testX.size()){ start = (int)testX.size() - batch; } dl->testData(); resultPredict(testY->getDev() + start, cuSVote->getDev() + start * nclasses, k * batch - start); printf("\b\b\b\b\b\b\b\b\b"); } cuSCorrect->gpuClear(); cuSPredictions->gpuClear(); g_getSpikeVotingResult<<<dim3((testX.size() + batch - 1) / batch), dim3(batch)>>>( cuSVote->getDev(), testY->getDev(), cuSCorrect->getDev(), cuSPredictions->getDev(), testX.size(), nclasses); cudaStreamSynchronize(0); getLastCudaError("g_getSpikeVotingResult"); cuSCorrect->toCpu(); if (cuSCorrect->get(0, 0, 0) > cuSCurCorrect) { cuSCurCorrect = cuSCorrect->get(0, 0, 0); 
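        // Test accuracy improved over the best seen so far: record it and save the network.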
cuSaveSpikingNet(); } } float getSpikingCost(){ float cost = 0.0; for(int i = 0; i < (int)spiking_que.size(); i++){ if(spiking_que[i]->m_name == "output" || spiking_que[i]->m_type == std::string("SOFTMAXSPIKING")){ LayerBase* layer = (LayerBase*)Layers::instance()->get(spiking_que[i]->m_name); layer->calCost(); cost += layer->getCost(); } } return cost; } void cuTrainSpikingNetwork(cuMatrixVector<bool>&x, cuMatrix<int>*y, cuMatrixVector<bool>&testX, cuMatrix<int>* testY, int batch, int nclasses, std::vector<float>&nlrate, std::vector<float>&nMomentum, std::vector<int>&epoCount, cublasHandle_t handle) { char logStr[1024]; if(nlrate.size() != nMomentum.size() || nMomentum.size() != epoCount.size() || nlrate.size() != epoCount.size()) { printf("nlrate, nMomentum, epoCount size not equal\n"); exit(0); } if(Config::instance()->getIsGradientChecking()) gradientChecking(x.m_devPoint, y->getDev(), batch, nclasses, handle); float my_start = (float)clock(); predictTestRate(x, y, testX, testY, batch, nclasses, handle); float my_end = (float)clock(); sprintf(logStr, "===================output fire counts================\n"); LOG(logStr, "Result/log.txt"); testY->toCpu(); sprintf(logStr, "The last sample label: %d\n", testY->get(testY->getLen() - batch, 0, 0)); LOG(logStr, "Result/log.txt"); for(int i = 0; i < (int)spiking_que.size(); i++){ SpikingLayerBase* layer = (SpikingLayerBase*) Layers::instance()->get(spiking_que[i]->m_name); layer->printFireCount(); } sprintf(logStr, "time spent on test : time=%.03lfs\n", (float) (my_end - my_start) / CLOCKS_PER_SEC); LOG(logStr, "Result/log.txt"); if(Config::instance()->getIsGradientChecking()) verifyResult(std::string("train")); sprintf(logStr, "correct is %d\n", cuSCorrect->get(0,0,0)); LOG(logStr, "Result/log.txt"); int epochs = Config::instance()->getTestEpoch(); float lrate = 0.05f; float Momentum = 0.9f; int id = 0; //cudaProfilerStart(); for (int epo = 0; epo < epochs; epo++) { if (id >= (int)nlrate.size()) break; lrate = nlrate[id]; Momentum = nMomentum[id]; Config::instance()->setLrate(lrate); Config::instance()->setMomentum(Momentum); float start, end; start = (float)clock(); if(Config::instance()->applyPreproc()){ int ImgSize = 28; cuApplyRandom(batch, clock() + epo, ImgSize); } Config::instance()->setTraining(true); x.shuffle(5000, y, cuSampleWeight); DataLayerSpiking *dl = static_cast<DataLayerSpiking*>(Layers::instance()->get("data")); dl->loadBatchSpikes(x, 0); cuSTrVote->gpuClear(); float cost = 0.0f; for (int k = 0; k < ((int)x.size() + batch - 1) / batch; k ++) { dl->synchronize(); int start = k * batch; printf("train %2d%%", 100 * start / (((int)x.size() + batch - 1))); if(start + batch <= (int)x.size() - batch){ dl->loadBatchSpikes(x, start + batch); }else{ int tstart = x.size() - batch; dl->loadBatchSpikes(x, tstart); } if(start + batch > (int)x.size()){ start = (int)x.size() - batch; } getSpikingNetworkCost( y->getDev() + start, cuSampleWeight->getDev() + start, cuSTrVote->getDev() + start * nclasses, k * batch - start); cost += getSpikingCost(); printf("\b\b\b\b\b\b\b\b\b"); } cost /= (float)x.size(); end = (float)clock(); sprintf(logStr, "epoch=%d time=%.03lfs cost=%f Momentum=%.06lf lrate=%.08lf\n", epo, (float) (end - start) / CLOCKS_PER_SEC, cost, Config::instance()->getMomentum(), Config::instance()->getLrate()); LOG(logStr, "Result/log.txt"); cuSTrCorrect->gpuClear(); cuSTrPredictions->gpuClear(); g_getSpikeVotingResult<<<dim3((x.size() + batch - 1) / batch), dim3(batch)>>>( cuSTrVote->getDev(), y->getDev(), 
cuSTrCorrect->getDev(), cuSTrPredictions->getDev(), x.size(), nclasses); cudaStreamSynchronize(0); getLastCudaError("g_getSpikeVotingResult"); cuSTrCorrect->toCpu(); sprintf(logStr, "train performance: %.2lf%%\n", 100.0 * cuSTrCorrect->get(0, 0, 0) / x.size()); LOG(logStr, "Result/log.txt"); if (Config::instance()->hasBoostWeightTrain()) { g_boostWeightUpdate<<<dim3(1), dim3(nclasses), sizeof(float) * 2 * nclasses>>>( cuSampleWeight->getDev(), cuSTrPredictions->getDev(), y->getDev(), x.size(), nclasses); cudaStreamSynchronize(0); getLastCudaError("g_getSpikeVotingResult"); cuSampleWeight->toCpu(); } if (epo && epo % epoCount[id] == 0) { id++; } sprintf(logStr, "===================weight value================\n"); LOG(logStr, "Result/log.txt"); for(int i = 0; i < (int)spiking_que.size(); i++){ LayerBase* layer = Layers::instance()->get(spiking_que[i]->m_name); layer->printParameter(); } sprintf(logStr, "===================test Result================\n"); LOG(logStr, "Result/log.txt"); predictTestRate(x, y, testX, testY, batch, nclasses, handle); if(Config::instance()->getIsGradientChecking()) verifyResult(std::string("test")); sprintf(logStr, "test %.2lf%%/%.2lf%%\n", 100.0 * cuSCorrect->get(0, 0, 0) / testX.size(), 100.0 * cuSCurCorrect / testX.size()); LOG(logStr, "Result/log.txt"); sprintf(logStr, "===================output fire counts================\n"); LOG(logStr, "Result/log.txt"); testY->toCpu(); sprintf(logStr, "The last sample label: %d\n", testY->get(testY->getLen() - batch, 0, 0)); LOG(logStr, "Result/log.txt"); for(int i = 0; i < (int)spiking_que.size(); i++){ SpikingLayerBase* layer = (SpikingLayerBase*) Layers::instance()->get(spiking_que[i]->m_name); layer->printFireCount(); } if(epo == 0){ MemoryMonitor::instance()->printCpuMemory(); MemoryMonitor::instance()->printGpuMemory(); } } //cudaProfilerStop(); }
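// A minimal host-side sketch of the boosting-style reweighting that
// g_boostWeightUpdate performs on the device, shown here only to make the
// three passes explicit. It assumes plain std::vector inputs that mirror the
// kernel's arrays (per-sample weight, per-sample correctness flag, label) and
// reuses the same 20.0f damping factor; it is a reference sketch, not part of
// the training path above.
#include <cmath>
#include <vector>

static void boostWeightUpdateHost(std::vector<float>& weights,
                                  const std::vector<bool>& predictions,
                                  const std::vector<int>& y,
                                  int nclasses)
{
    std::vector<float> sum_weights(nclasses, 0.0f);
    std::vector<float> error_weighted(nclasses, 0.0f);

    // 1. total boosting weight per class
    for (size_t i = 0; i < weights.size(); ++i)
        sum_weights[y[i]] += weights[i];

    // 2. weighted error per class, counting only misclassified samples
    for (size_t i = 0; i < weights.size(); ++i)
        if (!predictions[i])
            error_weighted[y[i]] += weights[i] / sum_weights[y[i]];

    // 3. scale up the weight of every misclassified sample
    for (size_t i = 0; i < weights.size(); ++i)
        if (!predictions[i])
            weights[i] *= std::exp(error_weighted[y[i]] / 20.0f);
}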
a1d2d2faf7404444a4e469cfb4b0d10723dfc799.hip
// !!! This is a file automatically generated by hipify!!! #define GRB_USE_APSPIE #define private public #include <iostream> #include <algorithm> #include <string> #include <cstdio> #include <cstdlib> #include <boost/program_options.hpp> #include "graphblas/graphblas.hpp" #include "test/test.hpp" int main( int argc, char** argv ) { std::vector<graphblas::Index> row_indices; std::vector<graphblas::Index> col_indices; std::vector<float> values; graphblas::Index nrows, ncols, nvals; // Parse arguments bool DEBUG = true; // Read in sparse matrix if (argc < 2) { fprintf(stderr, "Usage: %s [matrix-market-filename]\n", argv[0]); exit(1); } else { readMtx( argv[argc-1], row_indices, col_indices, values, nrows, ncols, nvals, 0, DEBUG ); } // Matrix A graphblas::Matrix<float> a(nrows, ncols); a.build( &row_indices, &col_indices, &values, nvals, GrB_NULL ); a.nrows( &nrows ); a.ncols( &ncols ); a.nvals( &nvals ); if( DEBUG ) a.print(); // Vector x graphblas::Vector<float> x(nrows); CHECK( x.fill( 0.f ) ); CHECK( x.setElement(1.f, 0) ); CHECK( x.size( &nrows ) ); if( DEBUG ) x.print(); // Vector y graphblas::Vector<float> y(nrows); if( DEBUG ) y.print(); // Mask graphblas::Vector<float> m(nrows); CHECK( m.fill(-1.f) ); CHECK( m.setElement(0.f, 0) ); CHECK( m.size(&nrows) ); if( DEBUG ) CHECK( m.print() ); // Descriptor graphblas::Descriptor desc; CHECK( desc.set(graphblas::GrB_MASK, graphblas::GrB_SCMP) ); // Warmup CpuTimer warmup; warmup.Start(); graphblas::vxm<float, float, float>( &y, &m, GrB_NULL, graphblas::PlusMultipliesSemiring<float>(), &x, &a, &desc ); //graphblas::vxm<float, float, float>( &y, GrB_NULL, GrB_NULL, &GrB_FP32AddMul, // &x, &a, &desc ); warmup.Stop(); CpuTimer cpu_vxm; //hipProfilerStart(); cpu_vxm.Start(); int NUM_ITER = 10; for( int i=0; i<NUM_ITER; i++ ) { graphblas::vxm<float, float, float>( &y, &m, GrB_NULL, graphblas::PlusMultipliesSemiring<float>(), &x, &a, &desc ); //graphblas::vxm<float, float, float>( &y, GrB_NULL, GrB_NULL, // &GrB_FP32AddMul, &x, &a, &desc ); } //hipProfilerStop(); cpu_vxm.Stop(); float flop = 0; if( DEBUG ) std::cout << "warmup, " << warmup.ElapsedMillis() << ", " << flop/warmup.ElapsedMillis()/1000000.0 << "\n"; float elapsed_vxm = cpu_vxm.ElapsedMillis(); std::cout << "vxm, " << elapsed_vxm/NUM_ITER << "\n"; if( DEBUG ) y.print(); /*c.extractTuples( out_denseVal ); for( int i=0; i<nvals; i++ ) { graphblas::Index row = row_indices[i]; graphblas::Index col = col_indices[i]; float val = values[i]; if( col<max_ncols ) { // Row major order if( ROW_MAJOR ) //std::cout << row << " " << col << " " << val << " " << out_denseVal[row*max_ncols+col] << std::endl; BOOST_ASSERT( val==out_denseVal[row*max_ncols+col] ); else // Column major order //std::cout << row << " " << col << " " << val << " " << out_denseVal[col*nrows+row] << std::endl; BOOST_ASSERT( val==out_denseVal[col*nrows+row] ); } }*/ return 0; }
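// A rough CPU reference for the product y = x^T * A over the plus-times
// semiring, using the same COO triplets read from the .mtx file. It ignores
// the mask and descriptor entirely, so it is only a sanity-check sketch of
// the unmasked result, not the library's masked algorithm.
#include <vector>

static std::vector<float> vxmReference(const std::vector<graphblas::Index>& rows,
                                       const std::vector<graphblas::Index>& cols,
                                       const std::vector<float>& vals,
                                       const std::vector<float>& x,
                                       graphblas::Index ncols)
{
    std::vector<float> y(ncols, 0.f);
    for (size_t k = 0; k < vals.size(); ++k)
        y[cols[k]] += x[rows[k]] * vals[k];   // (x^T A)_j = sum_i x_i * A_ij
    return y;
}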
a1d2d2faf7404444a4e469cfb4b0d10723dfc799.cu
#define GRB_USE_APSPIE #define private public #include <iostream> #include <algorithm> #include <string> #include <cstdio> #include <cstdlib> #include <boost/program_options.hpp> #include "graphblas/graphblas.hpp" #include "test/test.hpp" int main( int argc, char** argv ) { std::vector<graphblas::Index> row_indices; std::vector<graphblas::Index> col_indices; std::vector<float> values; graphblas::Index nrows, ncols, nvals; // Parse arguments bool DEBUG = true; // Read in sparse matrix if (argc < 2) { fprintf(stderr, "Usage: %s [matrix-market-filename]\n", argv[0]); exit(1); } else { readMtx( argv[argc-1], row_indices, col_indices, values, nrows, ncols, nvals, 0, DEBUG ); } // Matrix A graphblas::Matrix<float> a(nrows, ncols); a.build( &row_indices, &col_indices, &values, nvals, GrB_NULL ); a.nrows( &nrows ); a.ncols( &ncols ); a.nvals( &nvals ); if( DEBUG ) a.print(); // Vector x graphblas::Vector<float> x(nrows); CHECK( x.fill( 0.f ) ); CHECK( x.setElement(1.f, 0) ); CHECK( x.size( &nrows ) ); if( DEBUG ) x.print(); // Vector y graphblas::Vector<float> y(nrows); if( DEBUG ) y.print(); // Mask graphblas::Vector<float> m(nrows); CHECK( m.fill(-1.f) ); CHECK( m.setElement(0.f, 0) ); CHECK( m.size(&nrows) ); if( DEBUG ) CHECK( m.print() ); // Descriptor graphblas::Descriptor desc; CHECK( desc.set(graphblas::GrB_MASK, graphblas::GrB_SCMP) ); // Warmup CpuTimer warmup; warmup.Start(); graphblas::vxm<float, float, float>( &y, &m, GrB_NULL, graphblas::PlusMultipliesSemiring<float>(), &x, &a, &desc ); //graphblas::vxm<float, float, float>( &y, GrB_NULL, GrB_NULL, &GrB_FP32AddMul, // &x, &a, &desc ); warmup.Stop(); CpuTimer cpu_vxm; //cudaProfilerStart(); cpu_vxm.Start(); int NUM_ITER = 10; for( int i=0; i<NUM_ITER; i++ ) { graphblas::vxm<float, float, float>( &y, &m, GrB_NULL, graphblas::PlusMultipliesSemiring<float>(), &x, &a, &desc ); //graphblas::vxm<float, float, float>( &y, GrB_NULL, GrB_NULL, // &GrB_FP32AddMul, &x, &a, &desc ); } //cudaProfilerStop(); cpu_vxm.Stop(); float flop = 0; if( DEBUG ) std::cout << "warmup, " << warmup.ElapsedMillis() << ", " << flop/warmup.ElapsedMillis()/1000000.0 << "\n"; float elapsed_vxm = cpu_vxm.ElapsedMillis(); std::cout << "vxm, " << elapsed_vxm/NUM_ITER << "\n"; if( DEBUG ) y.print(); /*c.extractTuples( out_denseVal ); for( int i=0; i<nvals; i++ ) { graphblas::Index row = row_indices[i]; graphblas::Index col = col_indices[i]; float val = values[i]; if( col<max_ncols ) { // Row major order if( ROW_MAJOR ) //std::cout << row << " " << col << " " << val << " " << out_denseVal[row*max_ncols+col] << std::endl; BOOST_ASSERT( val==out_denseVal[row*max_ncols+col] ); else // Column major order //std::cout << row << " " << col << " " << val << " " << out_denseVal[col*nrows+row] << std::endl; BOOST_ASSERT( val==out_denseVal[col*nrows+row] ); } }*/ return 0; }
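// Note: `flop` is left at 0 above, so the warmup throughput printed as
// flop/warmup.ElapsedMillis()/1000000.0 always comes out as zero. If a real
// throughput figure is wanted, a common rough estimate for vxm over the
// plus-times semiring is two flops per stored nonzero, e.g.
//     float flop = 2.0f * nvals;   // one multiply + one add per entry (estimate)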
24b6a6f08ab06289ed612f0b34e547fcb242165e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @precisions normal z -> c d s @author Peng Du @author Tingxing Dong @author Mark Gates This file implements upper case, and is called by ztrtri_kernel.cu. It's convenient to have separate files for lower & upper, to diff the sources. */ #include "common_magma.h" #include "ztrtri.h" /* This inverts the diagonal IB by IB inner blocks of A, and stores the results in d_dinvA. Each thread block with IB threads does one inner block. Each thread deals with one row of the inner block. */ __global__ void ztrtri_diag_kernel_upper( magma_diag_t diag, int n, const magmaDoubleComplex *A, int lda, magmaDoubleComplex *d_dinvA) { int tx = threadIdx.x; int bx = blockIdx.x; int blk_ind = bx*IB; //int ind = blk_ind + tx; A += blk_ind + blk_ind*lda; // A(blk_ind, blk_ind) // TODO sB should be [IB][IB+1] to avoid bank conflicts, right? __shared__ magmaDoubleComplex sB[IB*IB]; magmaDoubleComplex y_tx; // load upper triangle of inner block of A; zero lower triangle & outside matrix #pragma unroll for( int j=0; j < IB; j++ ) { if (tx <= j && blk_ind + j < n) { sB[tx + j*IB] = A[tx + j*lda]; } else { sB[tx + j*IB] = MAGMA_Z_ZERO; } } __syncthreads(); // invert the diagonal if (diag == MagmaUnit) { sB[tx + tx*IB] = MAGMA_Z_ONE; } else { if ( sB[tx + tx*IB] == MAGMA_Z_ZERO ) { // singular or outside matrix sB[tx + tx*IB] = MAGMA_Z_ONE; } else { sB[tx + tx*IB] = MAGMA_Z_ONE / sB[tx + tx*IB]; } } // compute elements 0:j-1 of j-th column. for( int j=1; j < IB; j++ ) { if ( tx < j ) { // trmv: y = sB(0:j-1, 0:j-1) * sB(0:j-1, j) // each thread sums one element, y[tx] y_tx = MAGMA_Z_ZERO; #pragma unroll for( int k=0; k < j; k++ ) y_tx += sB[tx + k*IB] * sB[k + j*IB]; // scal: sB(0:j-1, j) = -sB(j,j) * y sB[tx + j*IB] = -sB[j + j*IB] * y_tx; } __syncthreads(); } // go to the (bx / ib_per_NB) outer NB*NB block, // then the (bx % ib_per_NB) inner IB*IB block inside that. int ib_per_NB = NB/IB; d_dinvA += (bx / ib_per_NB)*NB*NB + (bx % ib_per_NB)*(NB*IB + IB); // write result #pragma unroll for( int j=0; j < IB; j++ ) { d_dinvA[tx + j*NB] = sB[tx + j*IB]; } } /* Let A be an NB*NB upper triangular matrix, and B its inverse. Then the block decomposition [ A11 A12 ] * [ B11 B12 ] = [ I 0 ] [ 0 A22 ] [ 0 B22 ] [ 0 I ] yields A11*B11 = I ==> B11 = A11^{-1}, A22*B22 = I ==> B22 = A22^{-1}, A11*B12 + A12*B22 = 0 ==> B12 = -A11^{-1}*A12*B22 = -B11*A12*B22. ztrtri_diag_kernel inverts A11 and A22. triple_zgemm16 routines multiply: part 1: B12 = A12 * B22, part 2: B12 = -B11 * B12. At this level, inner block is jb=16, with one 4x4 thread block per inner block. Each submatrix Aij and Bij is jb x jb. The submatrix dimension is multiplied by 2 at each level, so the next level is jb*2 = 32. A "page" is the next bigger block, here jb*2=32, [ B11 B12 ] which contains [ 0 B22 ]. Outer blocks are NB x NB. A12 may have < jb cols, but is guaranteed to have jb rows since A22 is on the bottom. Unfortunately, this means checking every single reference. We could easily verify that A12 is full, and select between a fast version without checks and a slow version with checks. B is stored in workspace that is a full multiple of NB x NB; no checks needed. 
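    A tiny scalar example of the same recurrence (jb = 1 blocks):
    for A = [ 2 1 ; 0 4 ], B11 = 1/2, B22 = 1/4, and
    B12 = -B11 * A12 * B22 = -(1/2)(1)(1/4) = -1/8, which indeed gives A*B = I.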
*/ /* * part 1: B12 = A12 * B22 * part 2: B12 = -B11 * B12 */ __global__ void triple_zgemm16_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = by * 16; const int id = tx + ty*blockDim.x; int col = page*jb*2 + jb; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B12 = A12 * B22 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12 B = d_dinvA + jb*NB + jb; // B22 C = d_dinvA + jb*NB; // B12 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4]; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 4 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. if ( col++ < n ) { rA[0] = A[0*lda]; } if ( col++ < n ) { rA[1] = A[1*lda]; } if ( col++ < n ) { rA[2] = A[2*lda]; } if ( col++ < n ) { rA[3] = A[3*lda]; } // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; } daxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; } daxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; } daxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; } daxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; } daxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; } daxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; } daxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; } daxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; } daxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; } daxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; } daxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; } daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } __syncthreads(); //--------------------------part two---------------------------// { // B12 = -B11 * B12 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int lda = NB; // shadows lda argument int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA; // B11 C = d_dinvA + jb*NB; // B12 B = C; // B12, okay to overwrite A += ibx + id; B += tx + (iby + ty)*ldb; C += 
ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4]; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 4 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * B12 = A12 * B22 */ __global__ void triple_zgemm32_part1_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = by * 16; const int id = tx + ty*blockDim.x; int col = page*jb*2 + jb; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B12 = A12 * B22 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12 B = d_dinvA + jb*NB + jb; // B22 C = d_dinvA + jb*NB; // B12 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4]; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 8 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
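            // The `col++ < n` guards below only protect the loads: A12 can be
            // a partial block at the right edge of the matrix (see the header
            // comment), so columns at or past n must not be read from A.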
if ( col++ < n ) { rA[0] = A[0*lda]; } if ( col++ < n ) { rA[1] = A[1*lda]; } if ( col++ < n ) { rA[2] = A[2*lda]; } if ( col++ < n ) { rA[3] = A[3*lda]; } // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; } daxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; } daxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; } daxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; } daxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; } daxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; } daxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; } daxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; } daxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; } daxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; } daxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; } daxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; } daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } } /* * B12 = -B11 * B12 */ __global__ void triple_zgemm32_part2_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = by * 16; const int id = tx + ty*blockDim.x; //int col = page*jb*2 + jb; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part two---------------------------// { // B12 = -B11 * B12 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int lda = NB; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA; // B11 C = d_dinvA + jb*NB; // B12 B = C; // B12, okay to overwrite A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4]; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 8 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * B12 = A12 * B22 */ __global__ void triple_zgemm64_part1_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; int col = page*jb*2 + jb; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B12 = A12 * B22 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12 B = d_dinvA + jb*NB + jb; // B22 C = d_dinvA + jb*NB; // B12 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4]; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
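            // Loads of the next four rA values are interleaved with the
            // daxpy16 calls on the current four, so that global-memory latency
            // can overlap with the multiply-add work already in flight.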
if ( col++ < n ) { rA[0] = A[0*lda]; } if ( col++ < n ) { rA[1] = A[1*lda]; } if ( col++ < n ) { rA[2] = A[2*lda]; } if ( col++ < n ) { rA[3] = A[3*lda]; } // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; } daxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; } daxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; } daxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; } daxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; } daxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; } daxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; } daxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; } daxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; } daxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; } daxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; } daxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; } daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } } /* * B12 = -B11 * B12 */ __global__ void triple_zgemm64_part2_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; //int col = page*jb*2 + jb; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part two---------------------------// { // B12 = -B11 * B12 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int lda = NB; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA; // B11 C = d_dinvA + jb*NB; // B12 B = C; // B12, okay to overwrite A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4]; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * B12 = A12 * B22 */ __global__ void triple_zgemm_above64_part1_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; int col = page*jb*2 + jb; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B12 = A12 * B22 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int ldb = NB; int ldc = NB; // For jb > 64, we process B12 as gridDim.x sections of 64 rows each, with gridDim.x > 1. // Each section needs all of the B matrix, so C cannot overwrite B. // Therefore, store B21 temporarily in the previously unused B12 matrix // (i.e., above diagonal), then in part 3, zero out B12. // // Kernels with jb <= 64 don't have this problem, because only the // NT x 16 section of C that overwrites the same section of B depends // on that section of B. // // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12 B = d_dinvA + jb*NB + jb; // B22 C = d_dinvA + jb; // B12; write to B21 temp location A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4]; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
if ( col++ < n ) { rA[0] = A[0*lda]; } if ( col++ < n ) { rA[1] = A[1*lda]; } if ( col++ < n ) { rA[2] = A[2*lda]; } if ( col++ < n ) { rA[3] = A[3*lda]; } // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; } daxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; } daxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; } daxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; } daxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; } daxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; } daxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; } daxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; } daxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; } daxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; } daxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; } daxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; } daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } } /* * B12 = -B11 * B12 */ __global__ void triple_zgemm_above64_part2_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; //int col = page*jb*2 + jb; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part two---------------------------// { // B12 = -B11 * B12 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int lda = NB; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA; // B11 B = d_dinvA + jb; // B12, read from B21 temp location C = d_dinvA + jb*NB; // B12 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4]; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * zero out B21 temp location */ __global__ void triple_zgemm_above64_part3_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part three---------------------------// { // zero out B21 temp location magmaDoubleComplex *B21; int ldb = NB; B21 = d_dinvA + jb; B21 += ibx + id + iby*ldb; #pragma unroll for( int i = 0; i < 16; i++ ) { B21[i*ldb] = MAGMA_Z_ZERO; } } }
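// A small CPU reference for the same block recurrence, useful when checking
// the triple_zgemm kernels above: it inverts an upper-triangular matrix by
// inverting the two diagonal blocks recursively and then forming
// B12 = -B11 * A12 * B22. Plain double precision, column-major storage, and a
// non-unit diagonal are assumed for brevity; this is a validation sketch, not
// the MAGMA algorithm.
#include <vector>

// Invert the n-by-n upper-triangular matrix A (column-major, leading dim lda)
// into B (column-major, leading dim n). B must be sized n*n by the caller.
static void trtriUpperRef(const std::vector<double>& A, int lda,
                          std::vector<double>& B, int n)
{
    if (n == 1) { B[0] = 1.0 / A[0]; return; }

    int n1 = n / 2, n2 = n - n1;
    std::vector<double> A11(n1 * n1), A22(n2 * n2);
    std::vector<double> B11(n1 * n1), B22(n2 * n2);

    // copy out the diagonal blocks and invert them recursively
    for (int j = 0; j < n1; ++j)
        for (int i = 0; i < n1; ++i) A11[i + j*n1] = A[i + j*lda];
    for (int j = 0; j < n2; ++j)
        for (int i = 0; i < n2; ++i) A22[i + j*n2] = A[(n1+i) + (n1+j)*lda];
    trtriUpperRef(A11, n1, B11, n1);   // B11 = A11^{-1}
    trtriUpperRef(A22, n2, B22, n2);   // B22 = A22^{-1}

    // B12 = -B11 * A12 * B22, computed as two small gemms
    std::vector<double> T(n1 * n2, 0.0), B12(n1 * n2, 0.0);
    for (int j = 0; j < n2; ++j)            // T = A12 * B22
        for (int k = 0; k < n2; ++k)
            for (int i = 0; i < n1; ++i)
                T[i + j*n1] += A[i + (n1+k)*lda] * B22[k + j*n2];
    for (int j = 0; j < n2; ++j)            // B12 = -B11 * T
        for (int k = 0; k < n1; ++k)
            for (int i = 0; i < n1; ++i)
                B12[i + j*n1] -= B11[i + k*n1] * T[k + j*n1];

    // assemble B = [ B11 B12 ; 0 B22 ]
    for (int j = 0; j < n; ++j)
        for (int i = 0; i < n; ++i) B[i + j*n] = 0.0;
    for (int j = 0; j < n1; ++j)
        for (int i = 0; i < n1; ++i) B[i + j*n] = B11[i + j*n1];
    for (int j = 0; j < n2; ++j)
        for (int i = 0; i < n1; ++i) B[i + (n1+j)*n] = B12[i + j*n1];
    for (int j = 0; j < n2; ++j)
        for (int i = 0; i < n2; ++i) B[(n1+i) + (n1+j)*n] = B22[i + j*n2];
}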
24b6a6f08ab06289ed612f0b34e547fcb242165e.cu
/* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @precisions normal z -> c d s @author Peng Du @author Tingxing Dong @author Mark Gates This file implements upper case, and is called by ztrtri_kernel.cu. It's convenient to have separate files for lower & upper, to diff the sources. */ #include "common_magma.h" #include "ztrtri.h" /* This inverts the diagonal IB by IB inner blocks of A, and stores the results in d_dinvA. Each thread block with IB threads does one inner block. Each thread deals with one row of the inner block. */ __global__ void ztrtri_diag_kernel_upper( magma_diag_t diag, int n, const magmaDoubleComplex *A, int lda, magmaDoubleComplex *d_dinvA) { int tx = threadIdx.x; int bx = blockIdx.x; int blk_ind = bx*IB; //int ind = blk_ind + tx; A += blk_ind + blk_ind*lda; // A(blk_ind, blk_ind) // TODO sB should be [IB][IB+1] to avoid bank conflicts, right? __shared__ magmaDoubleComplex sB[IB*IB]; magmaDoubleComplex y_tx; // load upper triangle of inner block of A; zero lower triangle & outside matrix #pragma unroll for( int j=0; j < IB; j++ ) { if (tx <= j && blk_ind + j < n) { sB[tx + j*IB] = A[tx + j*lda]; } else { sB[tx + j*IB] = MAGMA_Z_ZERO; } } __syncthreads(); // invert the diagonal if (diag == MagmaUnit) { sB[tx + tx*IB] = MAGMA_Z_ONE; } else { if ( sB[tx + tx*IB] == MAGMA_Z_ZERO ) { // singular or outside matrix sB[tx + tx*IB] = MAGMA_Z_ONE; } else { sB[tx + tx*IB] = MAGMA_Z_ONE / sB[tx + tx*IB]; } } // compute elements 0:j-1 of j-th column. for( int j=1; j < IB; j++ ) { if ( tx < j ) { // trmv: y = sB(0:j-1, 0:j-1) * sB(0:j-1, j) // each thread sums one element, y[tx] y_tx = MAGMA_Z_ZERO; #pragma unroll for( int k=0; k < j; k++ ) y_tx += sB[tx + k*IB] * sB[k + j*IB]; // scal: sB(0:j-1, j) = -sB(j,j) * y sB[tx + j*IB] = -sB[j + j*IB] * y_tx; } __syncthreads(); } // go to the (bx / ib_per_NB) outer NB*NB block, // then the (bx % ib_per_NB) inner IB*IB block inside that. int ib_per_NB = NB/IB; d_dinvA += (bx / ib_per_NB)*NB*NB + (bx % ib_per_NB)*(NB*IB + IB); // write result #pragma unroll for( int j=0; j < IB; j++ ) { d_dinvA[tx + j*NB] = sB[tx + j*IB]; } } /* Let A be an NB*NB upper triangular matrix, and B its inverse. Then the block decomposition [ A11 A12 ] * [ B11 B12 ] = [ I 0 ] [ 0 A22 ] [ 0 B22 ] [ 0 I ] yields A11*B11 = I ==> B11 = A11^{-1}, A22*B22 = I ==> B22 = A22^{-1}, A11*B12 + A12*B22 = 0 ==> B12 = -A11^{-1}*A12*B22 = -B11*A12*B22. ztrtri_diag_kernel inverts A11 and A22. triple_zgemm16 routines multiply: part 1: B12 = A12 * B22, part 2: B12 = -B11 * B12. At this level, inner block is jb=16, with one 4x4 thread block per inner block. Each submatrix Aij and Bij is jb x jb. The submatrix dimension is multiplied by 2 at each level, so the next level is jb*2 = 32. A "page" is the next bigger block, here jb*2=32, [ B11 B12 ] which contains [ 0 B22 ]. Outer blocks are NB x NB. A12 may have < jb cols, but is guaranteed to have jb rows since A22 is on the bottom. Unfortunately, this means checking every single reference. We could easily verify that A12 is full, and select between a fast version without checks and a slow version with checks. B is stored in workspace that is a full multiple of NB x NB; no checks needed. 
*/ /* * part 1: B12 = A12 * B22 * part 2: B12 = -B11 * B12 */ __global__ void triple_zgemm16_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = by * 16; const int id = tx + ty*blockDim.x; int col = page*jb*2 + jb; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B12 = A12 * B22 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12 B = d_dinvA + jb*NB + jb; // B22 C = d_dinvA + jb*NB; // B12 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4]; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 4 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. if ( col++ < n ) { rA[0] = A[0*lda]; } if ( col++ < n ) { rA[1] = A[1*lda]; } if ( col++ < n ) { rA[2] = A[2*lda]; } if ( col++ < n ) { rA[3] = A[3*lda]; } // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; } daxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; } daxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; } daxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; } daxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; } daxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; } daxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; } daxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; } daxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; } daxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; } daxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; } daxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; } daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } __syncthreads(); //--------------------------part two---------------------------// { // B12 = -B11 * B12 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int lda = NB; // shadows lda argument int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA; // B11 C = d_dinvA + jb*NB; // B12 B = C; // B12, okay to overwrite A += ibx + id; B += tx + (iby + ty)*ldb; C += 
ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4]; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 4 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * B12 = A12 * B22 */ __global__ void triple_zgemm32_part1_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = by * 16; const int id = tx + ty*blockDim.x; int col = page*jb*2 + jb; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B12 = A12 * B22 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12 B = d_dinvA + jb*NB + jb; // B22 C = d_dinvA + jb*NB; // B12 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4]; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 8 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
if ( col++ < n ) { rA[0] = A[0*lda]; } if ( col++ < n ) { rA[1] = A[1*lda]; } if ( col++ < n ) { rA[2] = A[2*lda]; } if ( col++ < n ) { rA[3] = A[3*lda]; } // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; } daxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; } daxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; } daxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; } daxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; } daxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; } daxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; } daxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; } daxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; } daxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; } daxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; } daxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; } daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } } /* * B12 = -B11 * B12 */ __global__ void triple_zgemm32_part2_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = by * 16; const int id = tx + ty*blockDim.x; //int col = page*jb*2 + jb; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part two---------------------------// { // B12 = -B11 * B12 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int lda = NB; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA; // B11 C = d_dinvA + jb*NB; // B12 B = C; // B12, okay to overwrite A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4]; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 8 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * B12 = A12 * B22 */ __global__ void triple_zgemm64_part1_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; int col = page*jb*2 + jb; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B12 = A12 * B22 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12 B = d_dinvA + jb*NB + jb; // B22 C = d_dinvA + jb*NB; // B12 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4]; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
if ( col++ < n ) { rA[0] = A[0*lda]; } if ( col++ < n ) { rA[1] = A[1*lda]; } if ( col++ < n ) { rA[2] = A[2*lda]; } if ( col++ < n ) { rA[3] = A[3*lda]; } // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; } daxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; } daxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; } daxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; } daxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; } daxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; } daxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; } daxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; } daxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; } daxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; } daxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; } daxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; } daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } } /* * B12 = -B11 * B12 */ __global__ void triple_zgemm64_part2_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; //int col = page*jb*2 + jb; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part two---------------------------// { // B12 = -B11 * B12 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int lda = NB; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA; // B11 C = d_dinvA + jb*NB; // B12 B = C; // B12, okay to overwrite A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4]; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * B12 = A12 * B22 */ __global__ void triple_zgemm_above64_part1_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; int col = page*jb*2 + jb; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part one---------------------------// { // B12 = A12 * B22 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int ldb = NB; int ldc = NB; // For jb > 64, we process B12 as gridDim.x sections of 64 rows each, with gridDim.x > 1. // Each section needs all of the B matrix, so C cannot overwrite B. // Therefore, store B21 temporarily in the previously unused B12 matrix // (i.e., above diagonal), then in part 3, zero out B12. // // Kernels with jb <= 64 don't have this problem, because only the // NT x 16 section of C that overwrites the same section of B depends // on that section of B. // // in gemm notation: C = A*B A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12 B = d_dinvA + jb*NB + jb; // B22 C = d_dinvA + jb; // B12; write to B21 temp location A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4]; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
if ( col++ < n ) { rA[0] = A[0*lda]; } if ( col++ < n ) { rA[1] = A[1*lda]; } if ( col++ < n ) { rA[2] = A[2*lda]; } if ( col++ < n ) { rA[3] = A[3*lda]; } // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; } daxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; } daxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; } daxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; } daxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; } daxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; } daxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; } daxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; } daxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; } daxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; } daxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; } daxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; } daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = rC[i]; C += ldc; } } } /* * B12 = -B11 * B12 */ __global__ void triple_zgemm_above64_part2_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; //int col = page*jb*2 + jb; __shared__ magmaDoubleComplex sB[16][17]; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part two---------------------------// { // B12 = -B11 * B12 const magmaDoubleComplex *A, *B; magmaDoubleComplex *C; int lda = NB; int ldb = NB; int ldc = NB; // in gemm notation: C = A*B A = d_dinvA; // B11 B = d_dinvA + jb; // B12, read from B21 temp location C = d_dinvA + jb*NB; // B12 A += ibx + id; B += tx + (iby + ty)*ldb; C += ibx + id + iby*ldc; const magmaDoubleComplex *Blast = B + jb; // compute NT x 16 block of C // each thread computes one 1x16 row, C(id,0:15) magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; magmaDoubleComplex rA[4]; do { // load 16 x 16 block of B using NX x 4 threads #pragma unroll for( int i=0; i < 16; i += 16 ) { // += blockDim.x #pragma unroll for( int j=0; j < 16; j += 4 ) { // += blockDim.y sB[tx + i][ty + j] = B[i + j*ldb]; } } __syncthreads(); // load NT x 16 block of A; each thread initially loads 1x4 row, // then continues loading more elements as axpys are done. 
rA[0] = A[0*lda]; rA[1] = A[1*lda]; rA[2] = A[2*lda]; rA[3] = A[3*lda]; // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15 daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda]; daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda]; daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda]; daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda]; daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda]; daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda]; daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda]; daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda]; daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda]; daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda]; daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda]; daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda]; daxpy16( rA[0], &sB[12][0], rC ); daxpy16( rA[1], &sB[13][0], rC ); daxpy16( rA[2], &sB[14][0], rC ); daxpy16( rA[3], &sB[15][0], rC ); // move to next block of A and B A += 16*lda; B += 16; __syncthreads(); } while( B < Blast ); // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15) for( int i = 0; i < 16; i++ ) { C[0] = -rC[i]; C += ldc; } } } /* * zero out B21 temp location */ __global__ void triple_zgemm_above64_part3_upper( int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages) { const int by = blockIdx.y / npages; const int page = blockIdx.y % npages; const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = by*16; const int id = tx + ty*16; // go to the (page / pages_per_NB) outer NB*NB block, // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that. int pages_per_NB = NB/(jb*2); d_dinvA += (page / pages_per_NB)*NB*NB + (page % pages_per_NB)*(jb*2*NB + jb*2); //--------------------------part three---------------------------// { // zero out B21 temp location magmaDoubleComplex *B21; int ldb = NB; B21 = d_dinvA + jb; B21 += ibx + id + iby*ldb; #pragma unroll for( int i = 0; i < 16; i++ ) { B21[i*ldb] = MAGMA_Z_ZERO; } } }
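Every triple_zgemm kernel above funnels its accumulation through daxpy16, which is defined outside this excerpt (in MAGMA it lives in a shared ztrtri header rather than in the kernels themselves). As a reading aid, here is a minimal sketch of what such a helper has to do; the exact qualifiers and the reliance on MAGMA's C++ operator overloads for magmaDoubleComplex (magma_operators.h) are assumptions, not text copied from this file.

// Sketch only: rC[0..15] += alpha * sB_row[0..15]; the real implementation is fully unrolled.
// Assumes MAGMA's complex operator overloads are in scope.
static __device__ void
daxpy16_sketch(
    magmaDoubleComplex alpha,
    const magmaDoubleComplex * __restrict__ b,
    magmaDoubleComplex       * __restrict__ c )
{
    #pragma unroll
    for (int i = 0; i < 16; ++i) {
        c[i] += alpha * b[i];   // one column of the 16-wide register tile per call
    }
}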
c2cc4e84fc4ce8789186f4fd96dabc670fa26ca8.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. */ /* This example demonstrates how to use the CUBLAS library * by scaling an array of floating-point values on the device * and comparing the result to the same operation performed * on the host. 
*/ /* Includes, system */ #include <iostream> #include <unistd.h> #include <sys/mman.h> #include<sys/types.h> #include <stdio.h> #include <stdlib.h> #include <string.h> /* Includes, cudC */ #include <rocblas.h> #include <hipsparse.h> #include <cublasXt.h> #include <hip/hip_runtime.h> #include <helper_cuda.h> #include <string> #include<sys/types.h> #include<fcntl.h> #include<string.h> #include<stdio.h> #include<unistd.h> #include <string> #include <iostream> #include <sstream> #define IDX2C(i,j,ld) (((j)*(ld))+(i)) /* MCtrix size */ //#define N (275) #define N (1024) // Restricting the mCx used GPUs Cs input mCtrix is not so lCrge #define MCX_NUM_OF_GPUS 2 namespace RL { using namespace std; // // __global__ void find_abx_point_d(int * csr_row,int *csr_col,int* abs_point,int row_total){ int idx = gridDim.x*(blockDim.x*blockDim.y)*blockIdx.y+(blockDim.x*blockDim.y)*blockIdx.x+blockDim.x*threadIdx.y+threadIdx.x; int block=blockDim.x*blockDim.y*gridDim.x*gridDim.y; for (int i=idx;i<row_total;i+=block) { // printf("csr_row[i]:%d,csr_row[i+1]:%d,i:%d,col[i]:%d\n",csr_row[i],csr_row[i+1],i,csr_col[i]); if(csr_row[i]==csr_row[i+1]-1 and i==csr_col[csr_row[i]]){//only one abs_point[i]=1; } } }; // __global__ void sum_rate_d(int * csr_row,float *csr_p2p,float* csr_p2p_sum,int row_total){ int idx = gridDim.x*(blockDim.x*blockDim.y)*blockIdx.y+(blockDim.x*blockDim.y)*blockIdx.x+blockDim.x*threadIdx.y+threadIdx.x; int block=blockDim.x*blockDim.y*gridDim.x*gridDim.y; // if (idx==0) // printf("row:%d\n",row_total); for (int i=idx;i<row_total;i=i+block) { int start=csr_row[i]; int end=csr_row[i+1]; // printf("%d,%d \n",start,end); for (int j=start;j<end;++j) { // printf("j:%d\n",j); if(j==start){ csr_p2p_sum[j]=csr_p2p[j]; } else { csr_p2p_sum[j]=csr_p2p[j]+ csr_p2p_sum[j-1]; } } } }; int getFileSize(const string &filename) { int size = 0; FILE *fp = NULL; fp=fopen(filename.c_str(),"r"); if( NULL == fp) { return size; } fseek(fp,0L,SEEK_END); size = ftell(fp); fclose(fp); return size; } // void mmapSaveDataIntoFiles(const string &filename,char *rezult) { int fileLength = 0; int dataLength = 0; int offset = 0; /* */ fileLength = getFileSize(filename); int fd = open(filename.c_str(),O_CREAT |O_RDWR|O_APPEND,00777); if(fd < 0) { cout<<"OPEN FILE ERROR!"<<endl; } char *buffer = (char*)mmap(NULL,fileLength,PROT_READ,MAP_SHARED,fd,0); close(fd); memcpy(rezult,buffer,fileLength); rezult[fileLength]='\0'; munmap(buffer,fileLength); } //RL template<class T> class RL_gpu{ private: public: struct coo_mat_h{ T* cooValA_h; int* cooRowIndA; int* cooColIndA; long nnz; int mb; int nb; hipsparseMatDescr_t matdes; }; struct csr_mat_h{ long nnz; T* csrValA; int* csrRowPtrA; int* csrColIndA; int mb; int nb; hipDataType csrValAtype; hipsparseMatDescr_t matdes; }; csr_mat_h* csr_mat_p2p;// csr_mat_h* csr_mat_vlaue;// int* abs_point;// coo_mat_h* mat_s2coo(const char* file_path,int rownum,int colomnnum){ coo_mat_h* coo_matrix=(coo_mat_h*)malloc(sizeof(coo_mat_h)); coo_matrix->cooRowIndA=(int *)malloc(rownum*colomnnum*sizeof(int)); coo_matrix->cooColIndA=(int *)malloc(rownum*colomnnum*sizeof(int)); coo_matrix->cooValA_h=(T* )malloc(sizeof(T)*(rownum*colomnnum)); char *data_txt=(char *)malloc(sizeof(char)*(rownum*colomnnum*10));// mmapSaveDataIntoFiles(file_path,data_txt); // //checkCudaErrors(hipMalloc((void **)&(coo_matrix->cooValA_h),row_num*col_num*sizeof(*(rezult->matrix_data_T)))); stringstream ss(data_txt); string line; int row=0; int col=0; int index=0; T value_T; string value; while (getline(ss, line, '\n')) { // col=0; 
stringstream ss_in(line); while(getline(ss_in,value,',')){ stringstream ss_inn(value); ss_inn>>value_T; if(value_T!=(T)0.0f) {coo_matrix->cooValA_h[index]=value_T; coo_matrix->cooRowIndA[index]=row; coo_matrix->cooColIndA[index]=col; index++; } col++; } row++; } coo_matrix->nnz=index; // cout<<"nnz:="<<coo_matrix->nnz<<endl; // // for(int i=0;i<coo_matrix->nnz;i++) // {cout<<"coo_value:="<<coo_matrix->cooValA_h[i]<<",row:="<<coo_matrix->cooRowIndA[i]<<"col:="<<coo_matrix->cooColIndA[i]<<endl; // } delete data_txt; //output rezult coo_mat_h* coo_matrix_d=(coo_mat_h*)malloc(sizeof(coo_mat_h)); coo_matrix_d->mb=rownum; coo_matrix_d->nb=colomnnum; coo_matrix_d->nnz=coo_matrix->nnz; checkCudaErrors(hipMalloc(&coo_matrix_d->cooColIndA,coo_matrix->nnz*sizeof(int))); checkCudaErrors(hipMemcpy(coo_matrix_d->cooColIndA,coo_matrix->cooColIndA,coo_matrix->nnz*sizeof(coo_matrix_d->cooColIndA[0]), hipMemcpyHostToDevice)); checkCudaErrors(hipMalloc(&coo_matrix_d->cooRowIndA,coo_matrix->nnz*sizeof(int))); checkCudaErrors(hipMemcpy(coo_matrix_d->cooRowIndA, coo_matrix->cooRowIndA,coo_matrix->nnz*sizeof(coo_matrix_d->cooRowIndA[0]), hipMemcpyHostToDevice)); checkCudaErrors(hipMalloc(&coo_matrix_d->cooValA_h,coo_matrix->nnz*sizeof(coo_matrix_d->cooValA_h[0]))); checkCudaErrors(hipMemcpy(coo_matrix_d->cooValA_h, coo_matrix->cooValA_h,coo_matrix->nnz*sizeof(coo_matrix->cooValA_h[0]), hipMemcpyHostToDevice)); return coo_matrix_d; } //csr csr_mat_h* create_csr(string filepath="/lf_tool/matrix_cuda/coo",int row=5,int col=5){ hipsparseHandle_t handle=0; hipsparseCreate(&handle); csr_mat_h* csr_mat_d=(csr_mat_h*)malloc(sizeof(csr_mat_h)); coo_mat_h* coo_mat_d=mat_s2coo(filepath.c_str(),row,col); checkCudaErrors(hipMalloc(&csr_mat_d->csrRowPtrA,(row+1)*sizeof(int))); csr_mat_d->mb=row; csr_mat_d->nb=col; csr_mat_d->nnz=coo_mat_d->nnz; hipsparseStatus_t status=hipsparseXcoo2csr(handle, coo_mat_d->cooRowIndA, coo_mat_d->nnz, coo_mat_d->mb, csr_mat_d->csrRowPtrA, HIPSPARSE_INDEX_BASE_ZERO); if(status!=0) {cout<<"hipsparseXcoo2csr erres"<<endl; exit(status); } // int *hostPointer=(int *)malloc((row+1)*sizeof(int)); // checkCudaErrors(hipMemcpy(hostPointer,csr_mat_d->csrRowPtrA,(row+1)*sizeof(int),hipMemcpyDeviceToHost)); // for (int i=0;i<(row+1);++i){ // cout<<"csrRow:="<<hostPointer[i]<<endl; // } csr_mat_d->csrColIndA=coo_mat_d->cooColIndA; csr_mat_d->csrValA=coo_mat_d->cooValA_h; hipsparseCreateMatDescr(&csr_mat_d->matdes); // int *hostPointer_2=(int *)malloc((coo_mat_d->nnz)*sizeof(int)); // checkCudaErrors(hipMemcpy(hostPointer_2,csr_mat_d->csrColIndA,(coo_mat_d->nnz)*sizeof(int),hipMemcpyDeviceToHost)); // for (int i=0;i<coo_mat_d->nnz;++i) { // cout<<"csrcol:="<<hostPointer_2[i]<<endl; // } // hipsparseSetMatDiagType(csr_mat_d->matdes,HIPSPARSE_DIAG_TYPE_UNIT); cout<<"create_csr finished!"<<endl; return csr_mat_d; } void mat_nnz(int* nnz){ nnz[0]=csr_mat_p2p->nnz; } RL_gpu(int* coocol,int* rowcol,float* coop2p,float* coovlaue,const char* file_path_p2p,const char* file_path_vlaue,int rownum,int colomnnum){ // csr_mat_p2p=create_csr(file_path_p2p,rownum,colomnnum); checkCudaErrors(hipMemcpy(coocol,csr_mat_p2p->csrColIndA,(csr_mat_p2p->nnz)*sizeof(int),hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(rowcol,csr_mat_p2p->csrRowPtrA,((csr_mat_p2p->mb)+1)*sizeof(int),hipMemcpyDeviceToHost)); // csr_mat_vlaue=create_csr(file_path_vlaue,rownum,colomnnum); checkCudaErrors(hipMemcpy(coovlaue,csr_mat_vlaue->csrValA,(csr_mat_vlaue->nnz)*sizeof(T),hipMemcpyDeviceToHost)); 
checkCudaErrors(hipMalloc(&this->abs_point,((this->csr_mat_p2p->mb)+1)*sizeof(int))); dim3 gridsize(3,3); dim3 blocksize(32,32); T *devicePointer; checkCudaErrors(hipMalloc(&devicePointer,(csr_mat_p2p->nnz)*sizeof(T))); hipLaunchKernelGGL(( sum_rate_d), dim3(gridsize),dim3(blocksize), 0, 0, csr_mat_p2p->csrRowPtrA,csr_mat_p2p->csrValA,devicePointer,(this->csr_mat_p2p->mb)); hipDeviceSynchronize(); checkCudaErrors(hipMemcpy(coop2p,devicePointer,(csr_mat_p2p->nnz)*sizeof(T),hipMemcpyDeviceToHost)); } // void find_abx_point(int* abs_point_p){ dim3 gridsize(3,3); dim3 blocksize(32,32); int row=(this->csr_mat_p2p->mb); hipLaunchKernelGGL(( find_abx_point_d), dim3(gridsize),dim3(blocksize), 0, 0, this->csr_mat_p2p->csrRowPtrA,this->csr_mat_p2p->csrColIndA,this->abs_point,row); hipDeviceSynchronize(); checkCudaErrors(hipMemcpy(abs_point_p,this->abs_point,(this->csr_mat_p2p->mb)*sizeof(this->abs_point[0]),hipMemcpyDeviceToHost)); } }; //batchSize=1 template<class T> void least_square(int batchSize,T* Aarray[],T*Carray[],int m,int n,int nrhs){ hipblasHandle_t handle_cublas; hipblasCreate(&handle_cublas); int* info=(int *)malloc(batchSize*sizeof(info[0])); int* devInfoArray; checkCudaErrors(hipMalloc(&devInfoArray, batchSize*sizeof(devInfoArray[0]))); cout<<"if 0 all right ,eles wrong!"<<hipblasSgelsBatched(handle_cublas, HIPBLAS_OP_N, m, n, nrhs, Aarray, m, Carray, m, info, devInfoArray, batchSize)<<endl; hipDeviceSynchronize(); // hipFree(info); // hipFree(devInfoArray); } template<class T> //convert dd2hh T** dp2printf(T** devcie_m,int bithsize,int row_num,int col_num){ // cout<<"bithsizedevice"<<endl; // printf("bithsize:%d,row_num:%d,col_num:%d\n",bithsize,row_num,col_num); int array_length=row_num*col_num; T** host_m=(T**)malloc(bithsize*sizeof(*host_m)); T** host_print=(T**)malloc(bithsize*sizeof(*host_m)); for(int i=0;i<bithsize;i++){ host_print[i]=(T* )malloc(array_length*sizeof(host_print[0][0])); } checkCudaErrors(hipMemcpy(host_m,devcie_m,bithsize*sizeof(host_m[0]),hipMemcpyDeviceToHost)); for(int i=0;i<bithsize;i++){ checkCudaErrors(hipMemcpy(host_print[i],host_m[i],array_length*sizeof(host_m[0][0]),hipMemcpyDeviceToHost)); cout<<"the ith_matrix:="<<i<<"******************************"<<endl; string output="["; for(int row_N=0;row_N<row_num;row_N++){ for (int col_N=0;col_N<col_num;col_N++) { // cout<<"row:="<<i<<"|col:="<<j<<"|value:"<<rezult[IDX2C(i,j,m)]<<endl; stringstream ss; ss<<host_print[i][IDX2C(row_N,col_N,row_num)]; string temp; ss>>temp; output+=temp; if(col_N!=(col_num-1)) output+=","; ss.clear(); } if (row_N!=row_num-1) output+="\n"; } output+=("]\n"); cout<<output<<endl; } return host_print; } //so extern "C" { void create_csr_mat(int* coocol,int* rowcol,float* coovp2p,float* coovalue,const char* file_path,const char* file_path_vlaue,int rownum,int colomnnum,int* abs_point_p){ RL_gpu<float> obj=RL_gpu<float>(coocol,rowcol,coovp2p,coovalue,file_path,file_path_vlaue,rownum,colomnnum); obj.find_abx_point(abs_point_p); } void mat_nnz_gpu(const char* file_path,int rownum,int* len){ char *data_txt=(char *)malloc(sizeof(char)*(rownum*rownum*10));// mmapSaveDataIntoFiles(file_path,data_txt); // //checkCudaErrors(hipMalloc((void **)&(coo_matrix->cooValA_h),row_num*col_num*sizeof(*(rezult->matrix_data_T)))); stringstream ss(data_txt); string line; int row=0; int col=0; int index=0; float value_T; string value; while (getline(ss, line, '\n')) { // col=0; stringstream ss_in(line); while(getline(ss_in,value,',')){ stringstream ss_inn(value); ss_inn>>value_T; if(value_T!=(float)0.0f) 
{index++; } col++; } row++; } len[0]=index; } void least_square_cublas(float* Aarray,float* Carray,int m,int n,int nrhs){ //-----------------todd_start----------------------------Aarrau //allocate T** hostpoint_hh_N on host an assign value int size_N=1; int pitch_N=m*n; float **hostPointer_hd=(float **)malloc(size_N*sizeof(hostPointer_hd[0])); checkCudaErrors(hipMalloc((void **)(&hostPointer_hd[0]),pitch_N*sizeof(hostPointer_hd[0][0]))); checkCudaErrors(hipMemcpy(hostPointer_hd[0],Aarray,pitch_N*sizeof(hostPointer_hd[0][0]),hipMemcpyHostToDevice)); float **devicePointer_dd; checkCudaErrors(hipMalloc((void **)(&devicePointer_dd),size_N*sizeof(devicePointer_dd[0]))); checkCudaErrors(hipMemcpy(devicePointer_dd,hostPointer_hd,size_N*sizeof(devicePointer_dd[0]), hipMemcpyHostToDevice)); //-----------------todd_end---------------------------------- //-----------------todd_start----------------------------Carray //allocate T** hostpoint_hh_N on host an assign value size_N=1; pitch_N=m*nrhs; float **hostPointer_hd_carray=(float **)malloc(size_N*sizeof(hostPointer_hd_carray[0])); checkCudaErrors(hipMalloc((void **)(&hostPointer_hd_carray[0]),pitch_N*sizeof(hostPointer_hd_carray[0][0]))); checkCudaErrors(hipMemcpy(hostPointer_hd_carray[0],Carray,pitch_N*sizeof(hostPointer_hd_carray[0][0]), hipMemcpyHostToDevice)); float **devicePointer_dd_carray; checkCudaErrors(hipMalloc((void **)(&devicePointer_dd_carray),size_N*sizeof(devicePointer_dd_carray[0]))); checkCudaErrors(hipMemcpy(devicePointer_dd_carray,hostPointer_hd_carray,size_N*sizeof(devicePointer_dd_carray[0]), hipMemcpyHostToDevice)); //-----------------todd_end---------------------------------- least_square<float>(1,devicePointer_dd,devicePointer_dd_carray,m,n,nrhs); // dp2printf<float>(devicePointer_dd_carray,1,4,1); // float **r=(float **)malloc(size_N*sizeof(r[0])); checkCudaErrors(hipMemcpy(r,devicePointer_dd_carray,size_N*sizeof(r[0]), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(Carray,r[0],pitch_N*sizeof(r[0][0]), hipMemcpyDeviceToHost)); } } } // ////RL //} ///* MCin */
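The sum_rate_d kernel above turns each CSR row of what appears to be the RL state-transition matrix into a running (cumulative) sum, which is exactly what inverse-transform sampling of a successor state needs. This file never shows that consumer, so the host-side sketch below is only a plausible use: the function name, the uniform draw u, and the empty-row handling are all invented for illustration and are not part of the library.

// Hypothetical consumer of csr_row / csr_col / csr_p2p_sum after copying them to the host:
// pick the next state from row i, given a uniform random number u in [0, 1).
int sample_next_state(const int* csr_row, const int* csr_col,
                      const float* csr_p2p_sum, int i, float u)
{
    int start = csr_row[i];
    int end   = csr_row[i + 1];
    if (start == end)
        return -1;                                // empty row; caller decides what that means
    float target = u * csr_p2p_sum[end - 1];      // row total (1.0f if the row is normalized)
    for (int j = start; j < end; ++j) {           // linear scan; a binary search also works
        if (target <= csr_p2p_sum[j])
            return csr_col[j];
    }
    return csr_col[end - 1];                      // guard against floating-point round-off
}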
c2cc4e84fc4ce8789186f4fd96dabc670fa26ca8.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. */ /* This example demonstrates how to use the CUBLAS library * by scaling an array of floating-point values on the device * and comparing the result to the same operation performed * on the host. 
*/ /* Includes, system */ #include <iostream> #include <unistd.h> #include <sys/mman.h> #include<sys/types.h> #include <stdio.h> #include <stdlib.h> #include <string.h> /* Includes, cudC */ #include <cublas.h> #include <cusparse.h> #include <cublasXt.h> #include <cuda_runtime.h> #include <helper_cuda.h> #include <string> #include<sys/types.h> #include<fcntl.h> #include<string.h> #include<stdio.h> #include<unistd.h> #include <string> #include <iostream> #include <sstream> #define IDX2C(i,j,ld) (((j)*(ld))+(i)) /* MCtrix size */ //#define N (275) #define N (1024) // Restricting the mCx used GPUs Cs input mCtrix is not so lCrge #define MCX_NUM_OF_GPUS 2 namespace RL { using namespace std; //一、测试函数 //找到吸收点 __global__ void find_abx_point_d(int * csr_row,int *csr_col,int* abs_point,int row_total){ int idx = gridDim.x*(blockDim.x*blockDim.y)*blockIdx.y+(blockDim.x*blockDim.y)*blockIdx.x+blockDim.x*threadIdx.y+threadIdx.x; int block=blockDim.x*blockDim.y*gridDim.x*gridDim.y; for (int i=idx;i<row_total;i+=block) { // printf("csr_row[i]:%d,csr_row[i+1]:%d,i:%d,col[i]:%d\n",csr_row[i],csr_row[i+1],i,csr_col[i]); if(csr_row[i]==csr_row[i+1]-1 and i==csr_col[csr_row[i]]){//only one abs_point[i]=1; } } }; //累加转移矩阵概率 __global__ void sum_rate_d(int * csr_row,float *csr_p2p,float* csr_p2p_sum,int row_total){ int idx = gridDim.x*(blockDim.x*blockDim.y)*blockIdx.y+(blockDim.x*blockDim.y)*blockIdx.x+blockDim.x*threadIdx.y+threadIdx.x; int block=blockDim.x*blockDim.y*gridDim.x*gridDim.y; // if (idx==0) // printf("row:%d\n",row_total); for (int i=idx;i<row_total;i=i+block) { int start=csr_row[i]; int end=csr_row[i+1]; // printf("%d,%d \n",start,end); for (int j=start;j<end;++j) { // printf("j:%d\n",j); if(j==start){ csr_p2p_sum[j]=csr_p2p[j]; } else { csr_p2p_sum[j]=csr_p2p[j]+ csr_p2p_sum[j-1]; } } } }; int getFileSize(const string &filename) { int size = 0; FILE *fp = NULL; fp=fopen(filename.c_str(),"r"); if( NULL == fp) { return size; } fseek(fp,0L,SEEK_END); size = ftell(fp); fclose(fp); return size; } //把矩阵数据文件读取到内存中去,等到转化到cublas用的标准矩阵数据格式 void mmapSaveDataIntoFiles(const string &filename,char *rezult) { int fileLength = 0; int dataLength = 0; int offset = 0; /* 获取文件大小和数据长度 */ fileLength = getFileSize(filename); int fd = open(filename.c_str(),O_CREAT |O_RDWR|O_APPEND,00777); if(fd < 0) { cout<<"OPEN FILE ERROR!"<<endl; } char *buffer = (char*)mmap(NULL,fileLength,PROT_READ,MAP_SHARED,fd,0); close(fd); memcpy(rezult,buffer,fileLength); rezult[fileLength]='\0'; munmap(buffer,fileLength); } //二、RL函数 template<class T> class RL_gpu{ private: public: struct coo_mat_h{ T* cooValA_h; int* cooRowIndA; int* cooColIndA; long nnz; int mb; int nb; cusparseMatDescr_t matdes; }; struct csr_mat_h{ long nnz; T* csrValA; int* csrRowPtrA; int* csrColIndA; int mb; int nb; cudaDataType csrValAtype; cusparseMatDescr_t matdes; }; csr_mat_h* csr_mat_p2p;//转移概率稀疏矩阵 csr_mat_h* csr_mat_vlaue;//回报函数矩阵 int* abs_point;//吸收点 coo_mat_h* mat_s2coo(const char* file_path,int rownum,int colomnnum){ coo_mat_h* coo_matrix=(coo_mat_h*)malloc(sizeof(coo_mat_h)); coo_matrix->cooRowIndA=(int *)malloc(rownum*colomnnum*sizeof(int)); coo_matrix->cooColIndA=(int *)malloc(rownum*colomnnum*sizeof(int)); coo_matrix->cooValA_h=(T* )malloc(sizeof(T)*(rownum*colomnnum)); char *data_txt=(char *)malloc(sizeof(char)*(rownum*colomnnum*10));//整体长度设计 mmapSaveDataIntoFiles(file_path,data_txt); //逐行扫描获取矩阵内容 //checkCudaErrors(cudaMalloc((void **)&(coo_matrix->cooValA_h),row_num*col_num*sizeof(*(rezult->matrix_data_T)))); stringstream ss(data_txt); string line; int 
row=0; int col=0; int index=0; T value_T; string value; while (getline(ss, line, '\n')) { //开始一行的数据导入 col=0; stringstream ss_in(line); while(getline(ss_in,value,',')){ stringstream ss_inn(value); ss_inn>>value_T; if(value_T!=(T)0.0f) {coo_matrix->cooValA_h[index]=value_T; coo_matrix->cooRowIndA[index]=row; coo_matrix->cooColIndA[index]=col; index++; } col++; } row++; } coo_matrix->nnz=index; // cout<<"nnz:="<<coo_matrix->nnz<<endl; //打印结果 // for(int i=0;i<coo_matrix->nnz;i++) // {cout<<"coo_value:="<<coo_matrix->cooValA_h[i]<<",row:="<<coo_matrix->cooRowIndA[i]<<"col:="<<coo_matrix->cooColIndA[i]<<endl; // } delete data_txt; //output rezult coo_mat_h* coo_matrix_d=(coo_mat_h*)malloc(sizeof(coo_mat_h)); coo_matrix_d->mb=rownum; coo_matrix_d->nb=colomnnum; coo_matrix_d->nnz=coo_matrix->nnz; checkCudaErrors(cudaMalloc(&coo_matrix_d->cooColIndA,coo_matrix->nnz*sizeof(int))); checkCudaErrors(cudaMemcpy(coo_matrix_d->cooColIndA,coo_matrix->cooColIndA,coo_matrix->nnz*sizeof(coo_matrix_d->cooColIndA[0]), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMalloc(&coo_matrix_d->cooRowIndA,coo_matrix->nnz*sizeof(int))); checkCudaErrors(cudaMemcpy(coo_matrix_d->cooRowIndA, coo_matrix->cooRowIndA,coo_matrix->nnz*sizeof(coo_matrix_d->cooRowIndA[0]), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMalloc(&coo_matrix_d->cooValA_h,coo_matrix->nnz*sizeof(coo_matrix_d->cooValA_h[0]))); checkCudaErrors(cudaMemcpy(coo_matrix_d->cooValA_h, coo_matrix->cooValA_h,coo_matrix->nnz*sizeof(coo_matrix->cooValA_h[0]), cudaMemcpyHostToDevice)); return coo_matrix_d; } //常规矩阵转csr稀疏矩阵 csr_mat_h* create_csr(string filepath="/lf_tool/matrix_cuda/coo",int row=5,int col=5){ cusparseHandle_t handle=0; cusparseCreate(&handle); csr_mat_h* csr_mat_d=(csr_mat_h*)malloc(sizeof(csr_mat_h)); coo_mat_h* coo_mat_d=mat_s2coo(filepath.c_str(),row,col); checkCudaErrors(cudaMalloc(&csr_mat_d->csrRowPtrA,(row+1)*sizeof(int))); csr_mat_d->mb=row; csr_mat_d->nb=col; csr_mat_d->nnz=coo_mat_d->nnz; cusparseStatus_t status=cusparseXcoo2csr(handle, coo_mat_d->cooRowIndA, coo_mat_d->nnz, coo_mat_d->mb, csr_mat_d->csrRowPtrA, CUSPARSE_INDEX_BASE_ZERO); if(status!=0) {cout<<"cusparseXcoo2csr erres"<<endl; exit(status); } // int *hostPointer=(int *)malloc((row+1)*sizeof(int)); // checkCudaErrors(cudaMemcpy(hostPointer,csr_mat_d->csrRowPtrA,(row+1)*sizeof(int),cudaMemcpyDeviceToHost)); // for (int i=0;i<(row+1);++i){ // cout<<"csrRow:="<<hostPointer[i]<<endl; // } csr_mat_d->csrColIndA=coo_mat_d->cooColIndA; csr_mat_d->csrValA=coo_mat_d->cooValA_h; cusparseCreateMatDescr(&csr_mat_d->matdes); // int *hostPointer_2=(int *)malloc((coo_mat_d->nnz)*sizeof(int)); // checkCudaErrors(cudaMemcpy(hostPointer_2,csr_mat_d->csrColIndA,(coo_mat_d->nnz)*sizeof(int),cudaMemcpyDeviceToHost)); // for (int i=0;i<coo_mat_d->nnz;++i) { // cout<<"csrcol:="<<hostPointer_2[i]<<endl; // } // cusparseSetMatDiagType(csr_mat_d->matdes,CUSPARSE_DIAG_TYPE_UNIT); cout<<"create_csr finished!"<<endl; return csr_mat_d; } void mat_nnz(int* nnz){ nnz[0]=csr_mat_p2p->nnz; } RL_gpu(int* coocol,int* rowcol,float* coop2p,float* coovlaue,const char* file_path_p2p,const char* file_path_vlaue,int rownum,int colomnnum){ //生成转移概率矩阵 csr_mat_p2p=create_csr(file_path_p2p,rownum,colomnnum); checkCudaErrors(cudaMemcpy(coocol,csr_mat_p2p->csrColIndA,(csr_mat_p2p->nnz)*sizeof(int),cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(rowcol,csr_mat_p2p->csrRowPtrA,((csr_mat_p2p->mb)+1)*sizeof(int),cudaMemcpyDeviceToHost)); //生成回报函数矩阵 csr_mat_vlaue=create_csr(file_path_vlaue,rownum,colomnnum); 
checkCudaErrors(cudaMemcpy(coovlaue,csr_mat_vlaue->csrValA,(csr_mat_vlaue->nnz)*sizeof(T),cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMalloc(&this->abs_point,((this->csr_mat_p2p->mb)+1)*sizeof(int))); dim3 gridsize(3,3); dim3 blocksize(32,32); T *devicePointer; checkCudaErrors(cudaMalloc(&devicePointer,(csr_mat_p2p->nnz)*sizeof(T))); sum_rate_d<<<gridsize,blocksize>>>(csr_mat_p2p->csrRowPtrA,csr_mat_p2p->csrValA,devicePointer,(this->csr_mat_p2p->mb)); cudaDeviceSynchronize(); checkCudaErrors(cudaMemcpy(coop2p,devicePointer,(csr_mat_p2p->nnz)*sizeof(T),cudaMemcpyDeviceToHost)); } //寻找转移概率中的吸收点 void find_abx_point(int* abs_point_p){ dim3 gridsize(3,3); dim3 blocksize(32,32); int row=(this->csr_mat_p2p->mb); find_abx_point_d<<<gridsize,blocksize>>>(this->csr_mat_p2p->csrRowPtrA,this->csr_mat_p2p->csrColIndA,this->abs_point,row); cudaDeviceSynchronize(); checkCudaErrors(cudaMemcpy(abs_point_p,this->abs_point,(this->csr_mat_p2p->mb)*sizeof(this->abs_point[0]),cudaMemcpyDeviceToHost)); } }; //默认只有一组数据batchSize=1 template<class T> void least_square(int batchSize,T* Aarray[],T*Carray[],int m,int n,int nrhs){ cublasHandle_t handle_cublas; cublasCreate(&handle_cublas); int* info=(int *)malloc(batchSize*sizeof(info[0])); int* devInfoArray; checkCudaErrors(cudaMalloc(&devInfoArray, batchSize*sizeof(devInfoArray[0]))); cout<<"if 0 all right ,eles wrong!"<<cublasSgelsBatched(handle_cublas, CUBLAS_OP_N, m, n, nrhs, Aarray, m, Carray, m, info, devInfoArray, batchSize)<<endl; cudaDeviceSynchronize(); // cudaFree(info); // cudaFree(devInfoArray); } template<class T> //convert dd2hh T** dp2printf(T** devcie_m,int bithsize,int row_num,int col_num){ // cout<<"多bithsize矩阵,输出在device上的矩阵"<<endl; // printf("bithsize:%d,row_num:%d,col_num:%d\n",bithsize,row_num,col_num); int array_length=row_num*col_num; T** host_m=(T**)malloc(bithsize*sizeof(*host_m)); T** host_print=(T**)malloc(bithsize*sizeof(*host_m)); for(int i=0;i<bithsize;i++){ host_print[i]=(T* )malloc(array_length*sizeof(host_print[0][0])); } checkCudaErrors(cudaMemcpy(host_m,devcie_m,bithsize*sizeof(host_m[0]),cudaMemcpyDeviceToHost)); for(int i=0;i<bithsize;i++){ checkCudaErrors(cudaMemcpy(host_print[i],host_m[i],array_length*sizeof(host_m[0][0]),cudaMemcpyDeviceToHost)); cout<<"the ith_matrix:="<<i<<"******************************"<<endl; string output="["; for(int row_N=0;row_N<row_num;row_N++){ for (int col_N=0;col_N<col_num;col_N++) { // cout<<"row:="<<i<<"|col:="<<j<<"|value:"<<rezult[IDX2C(i,j,m)]<<endl; stringstream ss; ss<<host_print[i][IDX2C(row_N,col_N,row_num)]; string temp; ss>>temp; output+=temp; if(col_N!=(col_num-1)) output+=","; ss.clear(); } if (row_N!=row_num-1) output+="\n"; } output+=("]\n"); cout<<output<<endl; } return host_print; } //三、so文件函数 extern "C" { void create_csr_mat(int* coocol,int* rowcol,float* coovp2p,float* coovalue,const char* file_path,const char* file_path_vlaue,int rownum,int colomnnum,int* abs_point_p){ RL_gpu<float> obj=RL_gpu<float>(coocol,rowcol,coovp2p,coovalue,file_path,file_path_vlaue,rownum,colomnnum); obj.find_abx_point(abs_point_p); } void mat_nnz_gpu(const char* file_path,int rownum,int* len){ char *data_txt=(char *)malloc(sizeof(char)*(rownum*rownum*10));//整体长度设计 mmapSaveDataIntoFiles(file_path,data_txt); //逐行扫描获取矩阵内容 //checkCudaErrors(cudaMalloc((void **)&(coo_matrix->cooValA_h),row_num*col_num*sizeof(*(rezult->matrix_data_T)))); stringstream ss(data_txt); string line; int row=0; int col=0; int index=0; float value_T; string value; while (getline(ss, line, '\n')) { //开始一行的数据导入 col=0; stringstream 
ss_in(line); while(getline(ss_in,value,',')){ stringstream ss_inn(value); ss_inn>>value_T; if(value_T!=(float)0.0f) {index++; } col++; } row++; } len[0]=index; } void least_square_cublas(float* Aarray,float* Carray,int m,int n,int nrhs){ //-----------------todd_start----------------------------Aarrau //allocate T** hostpoint_hh_N on host an assign value int size_N=1; int pitch_N=m*n; float **hostPointer_hd=(float **)malloc(size_N*sizeof(hostPointer_hd[0])); checkCudaErrors(cudaMalloc((void **)(&hostPointer_hd[0]),pitch_N*sizeof(hostPointer_hd[0][0]))); checkCudaErrors(cudaMemcpy(hostPointer_hd[0],Aarray,pitch_N*sizeof(hostPointer_hd[0][0]),cudaMemcpyHostToDevice)); float **devicePointer_dd; checkCudaErrors(cudaMalloc((void **)(&devicePointer_dd),size_N*sizeof(devicePointer_dd[0]))); checkCudaErrors(cudaMemcpy(devicePointer_dd,hostPointer_hd,size_N*sizeof(devicePointer_dd[0]), cudaMemcpyHostToDevice)); //-----------------todd_end---------------------------------- //-----------------todd_start----------------------------Carray //allocate T** hostpoint_hh_N on host an assign value size_N=1; pitch_N=m*nrhs; float **hostPointer_hd_carray=(float **)malloc(size_N*sizeof(hostPointer_hd_carray[0])); checkCudaErrors(cudaMalloc((void **)(&hostPointer_hd_carray[0]),pitch_N*sizeof(hostPointer_hd_carray[0][0]))); checkCudaErrors(cudaMemcpy(hostPointer_hd_carray[0],Carray,pitch_N*sizeof(hostPointer_hd_carray[0][0]), cudaMemcpyHostToDevice)); float **devicePointer_dd_carray; checkCudaErrors(cudaMalloc((void **)(&devicePointer_dd_carray),size_N*sizeof(devicePointer_dd_carray[0]))); checkCudaErrors(cudaMemcpy(devicePointer_dd_carray,hostPointer_hd_carray,size_N*sizeof(devicePointer_dd_carray[0]), cudaMemcpyHostToDevice)); //-----------------todd_end---------------------------------- least_square<float>(1,devicePointer_dd,devicePointer_dd_carray,m,n,nrhs); // dp2printf<float>(devicePointer_dd_carray,1,4,1); //返回 float **r=(float **)malloc(size_N*sizeof(r[0])); checkCudaErrors(cudaMemcpy(r,devicePointer_dd_carray,size_N*sizeof(r[0]), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(Carray,r[0],pitch_N*sizeof(r[0][0]), cudaMemcpyDeviceToHost)); } } } // ////RL相关函数 //} ///* MCin */
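Because least_square_cublas is exported with C linkage (presumably so it can be loaded from a scripting language), a tiny driver makes its calling convention concrete. Everything in it other than the exported signature is illustrative: the 4x2 system is made up, the column-major layout matches what cublasSgelsBatched expects, and the claim that the solution comes back in the first n rows of Carray follows cuBLAS's documented gels behaviour rather than anything stated in this file.

#include <cstdio>

// Exported by the file above; everything else in this driver is illustrative.
extern "C" void least_square_cublas(float* Aarray, float* Carray, int m, int n, int nrhs);

int main()
{
    const int m = 4, n = 2, nrhs = 1;
    float A[m * n] = { 1, 1, 1, 1,        // column 0 (intercept)
                       1, 2, 3, 4 };      // column 1
    float c[m * nrhs] = { 6, 5, 7, 10 };
    least_square_cublas(A, c, m, n, nrhs);
    // The least-squares solution is returned in the first n rows of C,
    // so c[0], c[1] now hold x (approximately 3.5 and 1.4 for this data).
    std::printf("x = (%f, %f)\n", c[0], c[1]);
    return 0;
}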
40796ddda2078c701b1e05edb600dd41dd632e97.hip
// !!! This is a file automatically generated by hipify!!! /* ************************************************************************** */ /* */ /* ::: :::::::: */ /* cuda_malloc_camera.cu :+: :+: :+: */ /* +:+ +:+ +:+ */ /* By: jwalsh <[email protected]> +#+ +:+ +#+ */ /* +#+#+#+#+#+ +#+ */ /* Created: 2017/05/18 16:13:20 by jwalsh #+# #+# */ /* Updated: 2017/06/08 14:14:09 by jwalsh ### ########.fr */ /* */ /* ************************************************************************** */ #include "../../inc/rt.cuh" #include "../inc/cuda_call.cuh" /* ** Allocated memory on the device for the first camera. */ bool cuda_malloc_camera(t_raytracing_tools *r) { if (r->update.cameras >= 1) { if (r->update.cameras == 2) { if(test_cuda_malloc((void **)(&r->h_d_scene->cameras), sizeof(t_camera)) == false) return(false); } if (r->scene->is_3d) r->scene->cameras->filter = F_LEFT_RED; gpu_errchk((hipMemcpy(r->h_d_scene->cameras, r->scene->cameras, sizeof(t_camera), hipMemcpyHostToDevice))); } return(true); }
40796ddda2078c701b1e05edb600dd41dd632e97.cu
/* ************************************************************************** */ /* */ /* ::: :::::::: */ /* cuda_malloc_camera.cu :+: :+: :+: */ /* +:+ +:+ +:+ */ /* By: jwalsh <[email protected]> +#+ +:+ +#+ */ /* +#+#+#+#+#+ +#+ */ /* Created: 2017/05/18 16:13:20 by jwalsh #+# #+# */ /* Updated: 2017/06/08 14:14:09 by jwalsh ### ########.fr */ /* */ /* ************************************************************************** */ #include "../../inc/rt.cuh" #include "../inc/cuda_call.cuh" /* ** Allocated memory on the device for the first camera. */ bool cuda_malloc_camera(t_raytracing_tools *r) { if (r->update.cameras >= 1) { if (r->update.cameras == 2) { if(test_cuda_malloc((void **)(&r->h_d_scene->cameras), sizeof(t_camera)) == false) return(false); } if (r->scene->is_3d) r->scene->cameras->filter = F_LEFT_RED; gpu_errchk((cudaMemcpy(r->h_d_scene->cameras, r->scene->cameras, sizeof(t_camera), cudaMemcpyHostToDevice))); } return(true); }
eb9bace26b12a313ee326a09c3f52f737a6addf9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) 2016-present, Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Modifications Copyright (c) Microsoft. */ #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/nn/dropout_impl.h" #include <hiprand/hiprand_kernel.h> #include <algorithm> namespace onnxruntime { namespace cuda { constexpr int UNROLL = 4; template <typename T> __global__ void DropoutKernel( const int64_t N, const float ratio, const std::pair<uint64_t, uint64_t> seeds, const T* X_data, T* Y_data, bool* mask_data) { const float p = 1.0f - ratio; const T scale = T(1.0f / p); CUDA_LONG idx = blockDim.x * blockIdx.x + threadIdx.x; CUDA_LONG step_size = gridDim.x * blockDim.x * UNROLL; CUDA_LONG rounded_size = ((N - 1) / step_size + 1) * step_size; hiprandStatePhilox4_32_10_t state; hiprand_init(seeds.first, idx, seeds.second, &state); // We ensure every thread generates the same number of random numbers (by rounding // up the size) and at the same timestep (by syncing threads). // From CUDA hiprand documentation: // The Philox_4x32_10 algorithm is closely tied to the thread and block count. // Each thread computes 4 random numbers in the same time thus the most efficient // use of Philox_4x32_10 is to generate a multiple of 4 times number of threads. for (CUDA_LONG id = idx; id < rounded_size; id += step_size) { float4 rand = hiprand_uniform4(&state); for (CUDA_LONG i = 0; i < UNROLL; i++) { CUDA_LONG li = id + gridDim.x * blockDim.x * i; if (li < N) { mask_data[li] = (&rand.x)[i] < p; Y_data[li] = X_data[li] * T(mask_data[li]) * scale; } } __syncthreads(); } } template <typename T> void DropoutKernelImpl( const hipDeviceProp_t& prop, const int64_t N, const float ratio, PhiloxGenerator& generator, const T* X_data, T* Y_data, bool* mask_data) { const int block_size = 256; const int blocks_per_sm = prop.maxThreadsPerMultiProcessor / block_size; const int grid_size = ::min(prop.multiProcessorCount * blocks_per_sm, static_cast<int>(CeilDiv(N, block_size))); // Compute the number of random numbers generated by each thread, and increment philox generator offset by that amount. const uint64_t counter_offset = static_cast<uint64_t>(((N - 1) / (block_size * grid_size * UNROLL) + 1) * UNROLL); auto seeds = generator.NextPhiloxSeeds(counter_offset); hipLaunchKernelGGL(( DropoutKernel<T>), dim3(grid_size), dim3(block_size), 0, 0, N, ratio, seeds, X_data, Y_data, mask_data); } #define SPECIALIZED_DROPOUT_IMPL(T) \ template void DropoutKernelImpl( \ const hipDeviceProp_t& prop, \ const int64_t N, \ const float ratio, \ PhiloxGenerator& generator, \ const T* X_data, \ T* Y_data, \ bool* mask_data); SPECIALIZED_DROPOUT_IMPL(float) SPECIALIZED_DROPOUT_IMPL(double) SPECIALIZED_DROPOUT_IMPL(half) } // namespace cuda } // namespace onnxruntime
eb9bace26b12a313ee326a09c3f52f737a6addf9.cu
/** * Copyright (c) 2016-present, Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Modifications Copyright (c) Microsoft. */ #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/nn/dropout_impl.h" #include <curand_kernel.h> #include <algorithm> namespace onnxruntime { namespace cuda { constexpr int UNROLL = 4; template <typename T> __global__ void DropoutKernel( const int64_t N, const float ratio, const std::pair<uint64_t, uint64_t> seeds, const T* X_data, T* Y_data, bool* mask_data) { const float p = 1.0f - ratio; const T scale = T(1.0f / p); CUDA_LONG idx = blockDim.x * blockIdx.x + threadIdx.x; CUDA_LONG step_size = gridDim.x * blockDim.x * UNROLL; CUDA_LONG rounded_size = ((N - 1) / step_size + 1) * step_size; curandStatePhilox4_32_10_t state; curand_init(seeds.first, idx, seeds.second, &state); // We ensure every thread generates the same number of random numbers (by rounding // up the size) and at the same timestep (by syncing threads). // From CUDA curand documentation: // The Philox_4x32_10 algorithm is closely tied to the thread and block count. // Each thread computes 4 random numbers in the same time thus the most efficient // use of Philox_4x32_10 is to generate a multiple of 4 times number of threads. for (CUDA_LONG id = idx; id < rounded_size; id += step_size) { float4 rand = curand_uniform4(&state); for (CUDA_LONG i = 0; i < UNROLL; i++) { CUDA_LONG li = id + gridDim.x * blockDim.x * i; if (li < N) { mask_data[li] = (&rand.x)[i] < p; Y_data[li] = X_data[li] * T(mask_data[li]) * scale; } } __syncthreads(); } } template <typename T> void DropoutKernelImpl( const cudaDeviceProp& prop, const int64_t N, const float ratio, PhiloxGenerator& generator, const T* X_data, T* Y_data, bool* mask_data) { const int block_size = 256; const int blocks_per_sm = prop.maxThreadsPerMultiProcessor / block_size; const int grid_size = std::min(prop.multiProcessorCount * blocks_per_sm, static_cast<int>(CeilDiv(N, block_size))); // Compute the number of random numbers generated by each thread, and increment philox generator offset by that amount. const uint64_t counter_offset = static_cast<uint64_t>(((N - 1) / (block_size * grid_size * UNROLL) + 1) * UNROLL); auto seeds = generator.NextPhiloxSeeds(counter_offset); DropoutKernel<T><<<grid_size, block_size, 0>>>(N, ratio, seeds, X_data, Y_data, mask_data); } #define SPECIALIZED_DROPOUT_IMPL(T) \ template void DropoutKernelImpl( \ const cudaDeviceProp& prop, \ const int64_t N, \ const float ratio, \ PhiloxGenerator& generator, \ const T* X_data, \ T* Y_data, \ bool* mask_data); SPECIALIZED_DROPOUT_IMPL(float) SPECIALIZED_DROPOUT_IMPL(double) SPECIALIZED_DROPOUT_IMPL(half) } // namespace cuda } // namespace onnxruntime
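The counter_offset logic above (every thread consumes a multiple of UNROLL Philox outputs, regardless of N) is easiest to check with concrete numbers. The device figures in the snippet, 80 SMs with 2048 resident threads each, are assumed for illustration only; they are not stated anywhere in this file.

// Worked example of the launch-shape / Philox-offset arithmetic in DropoutKernelImpl,
// using assumed device limits and N = 1000000.
#include <algorithm>
#include <cstdint>
#include <cstdio>

int main()
{
    const int64_t N = 1000000;                            // number of elements
    const int UNROLL = 4, block_size = 256;               // as in the kernel above
    const int sm_count = 80, max_threads_per_sm = 2048;   // assumed device properties

    const int blocks_per_sm = max_threads_per_sm / block_size;                      // 8
    const int64_t grid_size = std::min<int64_t>(int64_t(sm_count) * blocks_per_sm,
                                                (N + block_size - 1) / block_size); // min(640, 3907) = 640
    const uint64_t counter_offset =
        ((N - 1) / (block_size * grid_size * UNROLL) + 1) * UNROLL;                 // (1 + 1) * 4 = 8

    std::printf("grid_size=%lld counter_offset=%llu\n",
                (long long)grid_size, (unsigned long long)counter_offset);
    return 0;
}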
304a81c035f28ac83e20982b8eb804043edcc97d.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - Neither the name(s) of the copyright holder(s) nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #include <cudalibmg.h> #define HANDLE_CUDALIBMG_ERROR(x) \ { \ const auto err = x; \ if (err != CUDALIBMG_STATUS_SUCCESS) \ { \ printf("Error: in %s Line: %d\n", __FILE__, __LINE__); \ exit(-1); \ } \ } /** * \brief This example illustrates how to create and destroy a matrix descriptor * using a 3-by-2 device grid. */ int main(int argc, char * argv[]) { /* Create a 3-by-2 device grid. */ const int32_t numRowDevices = 3; const int32_t numColDevices = 2; const int32_t numDevices = numRowDevices * numColDevices; /* * cudaLibMg allow duplicated deviceIds. this example only uses GPU 0. As a result, the matrix created based * on this 3-by-2 grid will be distributed as 6 piceses but all on the same device. If you have more devices * available, change the device list below. */ int32_t deviceId[numDevices] = {0}; /* * Assign the 1D deviceId to this 3-by-2 grid using row-major: * * grid = [ deviceId[0], deviceId[1]; * [ deviceId[2], deviceId[3]; * [ deviceId[4], deviceId[5]; ]; */ cudaLibMgGridMapping_t mapping = CUDALIBMG_GRID_MAPPING_ROW_MAJOR; cudaLibMgGrid_t grid; HANDLE_CUDALIBMG_ERROR(cudaLibMgCreateDeviceGrid(&grid, numRowDevices, numColDevices, deviceId, mapping)); /* * We use the rank to illustrate how this 7-by-9 matrix is distributed on a 3-by-2 grid using 2D block-cyclic * matrix distribution. This distribution is designed for load-balancing and preserve the data locality in mind. * We can observe a 3-by-2 matrix block is owned entirely by a rank in a cyclic fashion * in both row and column directions. 
* * float A[7 * 9] = {0, 0, 0, 2, 2, 2, 4, * 0, 0, 0, 2, 2, 2, 4, * * 1, 1, 1, 3, 3, 3, 5, * 1, 1, 1, 3, 3, 3, 5, * * 0, 0, 0, 2, 2, 2, 4, * 0, 0, 0, 2, 2, 2, 4, * * 1, 1, 1, 3, 3, 3, 5, * 1, 1, 1, 3, 3, 3, 5, * * 0, 0, 0, 2, 2, 2, 4}; */ const int64_t numRows = 7; const int64_t numCols = 9; const int64_t rowBlockSize = 3; const int64_t colBlockSize = 2; hipDataType dataType = HIP_R_32F; cudaLibMgMatrixDesc_t descA; HANDLE_CUDALIBMG_ERROR(cudaLibMgCreateMatrixDesc(&descA, numRows, numCols, rowBlockSize, colBlockSize, dataType, grid)); /* Clean up. */ HANDLE_CUDALIBMG_ERROR(cudaLibMgDestroyMatrixDesc(descA)); HANDLE_CUDALIBMG_ERROR(cudaLibMgDestroyGrid(grid)); return 0; };
304a81c035f28ac83e20982b8eb804043edcc97d.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - Neither the name(s) of the copyright holder(s) nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #include <cudalibmg.h> #define HANDLE_CUDALIBMG_ERROR(x) \ { \ const auto err = x; \ if (err != CUDALIBMG_STATUS_SUCCESS) \ { \ printf("Error: in %s Line: %d\n", __FILE__, __LINE__); \ exit(-1); \ } \ } /** * \brief This example illustrates how to create and destroy a matrix descriptor * using a 3-by-2 device grid. */ int main(int argc, char * argv[]) { /* Create a 3-by-2 device grid. */ const int32_t numRowDevices = 3; const int32_t numColDevices = 2; const int32_t numDevices = numRowDevices * numColDevices; /* * cudaLibMg allow duplicated deviceIds. this example only uses GPU 0. As a result, the matrix created based * on this 3-by-2 grid will be distributed as 6 piceses but all on the same device. If you have more devices * available, change the device list below. */ int32_t deviceId[numDevices] = {0}; /* * Assign the 1D deviceId to this 3-by-2 grid using row-major: * * grid = [ deviceId[0], deviceId[1]; * [ deviceId[2], deviceId[3]; * [ deviceId[4], deviceId[5]; ]; */ cudaLibMgGridMapping_t mapping = CUDALIBMG_GRID_MAPPING_ROW_MAJOR; cudaLibMgGrid_t grid; HANDLE_CUDALIBMG_ERROR(cudaLibMgCreateDeviceGrid(&grid, numRowDevices, numColDevices, deviceId, mapping)); /* * We use the rank to illustrate how this 7-by-9 matrix is distributed on a 3-by-2 grid using 2D block-cyclic * matrix distribution. This distribution is designed for load-balancing and preserve the data locality in mind. * We can observe a 3-by-2 matrix block is owned entirely by a rank in a cyclic fashion * in both row and column directions. 
* * float A[7 * 9] = {0, 0, 0, 2, 2, 2, 4, * 0, 0, 0, 2, 2, 2, 4, * * 1, 1, 1, 3, 3, 3, 5, * 1, 1, 1, 3, 3, 3, 5, * * 0, 0, 0, 2, 2, 2, 4, * 0, 0, 0, 2, 2, 2, 4, * * 1, 1, 1, 3, 3, 3, 5, * 1, 1, 1, 3, 3, 3, 5, * * 0, 0, 0, 2, 2, 2, 4}; */ const int64_t numRows = 7; const int64_t numCols = 9; const int64_t rowBlockSize = 3; const int64_t colBlockSize = 2; cudaDataType_t dataType = CUDA_R_32F; cudaLibMgMatrixDesc_t descA; HANDLE_CUDALIBMG_ERROR(cudaLibMgCreateMatrixDesc(&descA, numRows, numCols, rowBlockSize, colBlockSize, dataType, grid)); /* Clean up. */ HANDLE_CUDALIBMG_ERROR(cudaLibMgDestroyMatrixDesc(descA)); HANDLE_CUDALIBMG_ERROR(cudaLibMgDestroyGrid(grid)); return 0; };
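/*
 * Illustrative sketch (not part of either file above): reproduces the rank-ownership
 * pattern shown in the example's comment for the 7-by-9 matrix on the 3-by-2 row-major
 * device grid, using the standard 2D block-cyclic rule. The helper name owningRank is
 * hypothetical; the printed values agree with the column-major initializer of A in the
 * comment (one owning rank per matrix element).
 */
#include <cstdint>
#include <cstdio>

static int32_t owningRank(int64_t row, int64_t col,
                          int64_t rowBlockSize, int64_t colBlockSize,
                          int32_t numRowDevices, int32_t numColDevices)
{
    /* Row-major grid mapping: rank = gridRow * numColDevices + gridCol. */
    const int32_t gridRow = static_cast<int32_t>((row / rowBlockSize) % numRowDevices);
    const int32_t gridCol = static_cast<int32_t>((col / colBlockSize) % numColDevices);
    return gridRow * numColDevices + gridCol;
}

int main()
{
    /* Same shape and blocking as the example: 7-by-9 matrix, 3-by-2 blocks, 3-by-2 grid. */
    for (int64_t row = 0; row < 7; row++)
    {
        for (int64_t col = 0; col < 9; col++)
        {
            printf("%d ", owningRank(row, col, 3, 2, 3, 2));
        }
        printf("\n");
    }
    return 0;
}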
f01a6be7138de655646e439c0f0e1937f81eb163.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void LSTMDeltaKernelBPTT( float* deltas, float* cellStates, float* previousCellStates, float* cellStateErrors, float* nextCellStateErrors, float* outputGateDeltas, float* forgetGateDeltas, float* nextForgetGateDeltas, float* inputGateDeltas, float* nextInputGateDeltas, float* cellInputDeltas, float* cellInputActivations, float* cellStateActivations, float* outputGateActivations, float* nextForgetGateActivations, float* inputGateActivations, float* cellInputActivationDerivatives, float* cellStateActivationDerivatives, float* outputGateActivationDerivatives, float* forgetGateActivationDerivatives, float* inputGateActivationDerivatives, float* cellInputWeights, float* outputGateWeights, float* forgetGateWeights, float* inputGateWeights, int inputCount, int cellCount, int cellsPerBlock ) { int memoryBlockId = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; if (memoryBlockId < cellCount / cellsPerBlock) { outputGateDeltas[memoryBlockId] = 0; for (int cellId = memoryBlockId * cellsPerBlock; cellId < (memoryBlockId + 1) * cellsPerBlock; cellId++) { outputGateDeltas[memoryBlockId] += cellStateActivations[cellId] * deltas[cellId]; } outputGateDeltas[memoryBlockId] *= outputGateActivationDerivatives[memoryBlockId]; for (int cellId = memoryBlockId * cellsPerBlock; cellId < (memoryBlockId + 1) * cellsPerBlock; cellId++) { int relativeCellId = cellId - (memoryBlockId * cellsPerBlock); int peepHoleWeightId = (memoryBlockId * (inputCount + cellCount + cellsPerBlock + 1)) + inputCount + cellCount + relativeCellId; cellStateErrors[cellId] = deltas[cellId] * outputGateActivations[memoryBlockId] * cellStateActivationDerivatives[cellId] + nextCellStateErrors[cellId] * nextForgetGateActivations[memoryBlockId] + nextInputGateDeltas[memoryBlockId] * inputGateWeights[peepHoleWeightId] + nextForgetGateDeltas[memoryBlockId] * forgetGateWeights[peepHoleWeightId] + outputGateDeltas[memoryBlockId] * outputGateWeights[peepHoleWeightId]; cellInputDeltas[cellId] = inputGateActivations[memoryBlockId] * cellInputActivationDerivatives[cellId] * cellStateErrors[cellId]; } inputGateDeltas[memoryBlockId] = 0; forgetGateDeltas[memoryBlockId] = 0; for (int cellId = memoryBlockId * cellsPerBlock; cellId < (memoryBlockId + 1) * cellsPerBlock; cellId++) { inputGateDeltas[memoryBlockId] += cellStateErrors[cellId] * cellInputActivations[cellId]; forgetGateDeltas[memoryBlockId] += cellStateErrors[cellId] * previousCellStates[cellId]; } inputGateDeltas[memoryBlockId] *= inputGateActivationDerivatives[memoryBlockId]; forgetGateDeltas[memoryBlockId] *= forgetGateActivationDerivatives[memoryBlockId]; } }
f01a6be7138de655646e439c0f0e1937f81eb163.cu
#include "includes.h" __global__ void LSTMDeltaKernelBPTT( float* deltas, float* cellStates, float* previousCellStates, float* cellStateErrors, float* nextCellStateErrors, float* outputGateDeltas, float* forgetGateDeltas, float* nextForgetGateDeltas, float* inputGateDeltas, float* nextInputGateDeltas, float* cellInputDeltas, float* cellInputActivations, float* cellStateActivations, float* outputGateActivations, float* nextForgetGateActivations, float* inputGateActivations, float* cellInputActivationDerivatives, float* cellStateActivationDerivatives, float* outputGateActivationDerivatives, float* forgetGateActivationDerivatives, float* inputGateActivationDerivatives, float* cellInputWeights, float* outputGateWeights, float* forgetGateWeights, float* inputGateWeights, int inputCount, int cellCount, int cellsPerBlock ) { int memoryBlockId = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; if (memoryBlockId < cellCount / cellsPerBlock) { outputGateDeltas[memoryBlockId] = 0; for (int cellId = memoryBlockId * cellsPerBlock; cellId < (memoryBlockId + 1) * cellsPerBlock; cellId++) { outputGateDeltas[memoryBlockId] += cellStateActivations[cellId] * deltas[cellId]; } outputGateDeltas[memoryBlockId] *= outputGateActivationDerivatives[memoryBlockId]; for (int cellId = memoryBlockId * cellsPerBlock; cellId < (memoryBlockId + 1) * cellsPerBlock; cellId++) { int relativeCellId = cellId - (memoryBlockId * cellsPerBlock); int peepHoleWeightId = (memoryBlockId * (inputCount + cellCount + cellsPerBlock + 1)) + inputCount + cellCount + relativeCellId; cellStateErrors[cellId] = deltas[cellId] * outputGateActivations[memoryBlockId] * cellStateActivationDerivatives[cellId] + nextCellStateErrors[cellId] * nextForgetGateActivations[memoryBlockId] + nextInputGateDeltas[memoryBlockId] * inputGateWeights[peepHoleWeightId] + nextForgetGateDeltas[memoryBlockId] * forgetGateWeights[peepHoleWeightId] + outputGateDeltas[memoryBlockId] * outputGateWeights[peepHoleWeightId]; cellInputDeltas[cellId] = inputGateActivations[memoryBlockId] * cellInputActivationDerivatives[cellId] * cellStateErrors[cellId]; } inputGateDeltas[memoryBlockId] = 0; forgetGateDeltas[memoryBlockId] = 0; for (int cellId = memoryBlockId * cellsPerBlock; cellId < (memoryBlockId + 1) * cellsPerBlock; cellId++) { inputGateDeltas[memoryBlockId] += cellStateErrors[cellId] * cellInputActivations[cellId]; forgetGateDeltas[memoryBlockId] += cellStateErrors[cellId] * previousCellStates[cellId]; } inputGateDeltas[memoryBlockId] *= inputGateActivationDerivatives[memoryBlockId]; forgetGateDeltas[memoryBlockId] *= forgetGateActivationDerivatives[memoryBlockId]; } }
aa58eba9198580e3738706e62772a2bcbad1072d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright 2014 BVLC and contributors. #include <vector> #include "caffe/layer.hpp" #include "caffe/layers/resample_layer.hpp" #include "caffe/util/math_functions.hpp" #include <opencv2/opencv.hpp> //#include <opencv2/gpu/gpu.hpp> #include "thirdparty/gpu/gpu.hpp" namespace caffe { static __device__ __forceinline__ float bicubicCoeff(float x_) { float x = fabsf(x_); if (x <= 1.0f) return x * x * (1.5f * x - 2.5f) + 1.0f; else if (x < 2.0f) return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f; else return 0.0f; } static __device__ __forceinline__ float boxCoeff(float x) { if (-0.5 <= x && x<0.5) return 1.0; return 0; } static __device__ __forceinline__ float triangleCoeff(float x) { if (-1<=x && x<0) return x+1; if (0<=x && x<=1) return 1-x; return 0; } #define FILTER_BICUBIC 0 #define FILTER_BOX 1 #define FILTER_TRIANGLE 2 template <typename Dtype> __global__ void InterpolationKernel( const int nthreads, const int in_channelsize, const int out_channelsize, const Dtype* in_ptr, const int in_width, const int in_height, const float fx, const float fy, Dtype* out_ptr, const int out_width, const int out_height, int filter_type, int kernel_width, const bool antialias) { CUDA_KERNEL_LOOP(index, nthreads) { int c = index / out_channelsize; int x_out = (index % out_channelsize) % out_width; int y_out = (index % out_channelsize) / out_width; float x_in = x_out * fx + fy / 2.0f - 0.5f; float y_in = y_out * fy + fx / 2.0f - 0.5f; int x_in_round = round(x_in); int y_in_round = round(y_in); Dtype sum=0; Dtype wsum=0; float ax = 1.0f / (antialias ? fx : 1.0f); float ay = 1.0f / (antialias ? fy : 1.0f); int rx = (fx < 1.0f) ? 2 : ceil(float(kernel_width)/ax); int ry = (fy < 1.0f) ? 2 : ceil(float(kernel_width)/ay); for(int y=y_in_round-ry; y<=y_in_round+ry; y++) for(int x=x_in_round-rx; x<=x_in_round+rx; x++) { if(y<0 || x<0) continue; if(y>=in_height || x>=in_width) continue; float dx = x_in - x; float dy = y_in - y; float w; if(filter_type == FILTER_BICUBIC) w = ax*bicubicCoeff(ax*dx) * ay*bicubicCoeff(ay*dy); else if(filter_type == FILTER_BOX) w = ax*boxCoeff(ax*dx) * ay*boxCoeff(ay*dy); else w = ax*triangleCoeff(ax*dx) * ay*triangleCoeff(ay*dy); sum += w * in_ptr[c*in_channelsize + y*in_width+x]; wsum += w; } out_ptr[index] = (!wsum) ? 
0 : (sum / wsum); } } template <typename Dtype> __global__ void NearestNeighborKernel( const int nthreads, const int in_channelsize, const int out_channelsize, const Dtype* in_ptr, const int in_width, const int in_height, const float fx, const float fy, Dtype* out_ptr, const int out_width, const int out_height) { CUDA_KERNEL_LOOP(index, nthreads) { int c = index / out_channelsize; int x_out = (index % out_channelsize) % out_width; int y_out = (index % out_channelsize) / out_width; float x_in = x_out * fx + fy / 2.0f - 0.5f; float y_in = y_out * fy + fx / 2.0f - 0.5f; int x_in_round = round(x_in); int y_in_round = round(y_in); out_ptr[index] = in_ptr[c*in_channelsize + y_in_round*in_width+x_in_round]; } } template <typename Dtype> void ResampleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Dtype* top_data = top[0]->mutable_gpu_data(); // dest int topwidth = top[0]->width(); int topheight = top[0]->height(); int topchannels = top[0]->channels(); int topcount = top[0]->count(); Dtype* bottom_data = bottom[0]->mutable_gpu_data(); // source int bottomnum = (bottom)[0]->num(); int bottomchannels = (bottom)[0]->channels(); int bottomwidth = (bottom)[0]->width(); int bottomheight = (bottom)[0]->height(); int bottomcount = (bottom)[0]->count(); CHECK_EQ(topchannels, bottomchannels) << "ResampleLayer top channel count must match bottom channel count"; float fx = float(bottomwidth)/float(topwidth); float fy = float(bottomheight)/float(topheight); //int botsize = bottomwidth*bottomheight*bottomchannels*bottomnum; int topsize = topwidth*topheight*topchannels*bottomnum; int topchannelsize = topwidth*topheight; int botchannelsize = bottomwidth*bottomheight; if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_NEAREST) { hipLaunchKernelGGL(( NearestNeighborKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(topsize)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, topsize, botchannelsize, topchannelsize, (Dtype*)bottom_data, bottomwidth, bottomheight, fx, fy, (Dtype*)top_data, topwidth, topheight ); CUDA_POST_KERNEL_CHECK; } else if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_CUBIC || this->layer_param().resample_param().type() == ResampleParameter_ResampleType_LINEAR) { int filter_type; if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_CUBIC) filter_type = FILTER_BICUBIC; else if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_LINEAR) filter_type = FILTER_TRIANGLE; bool isDownsample = (fx > 1) || (fy > 1); bool antialias = isDownsample && this->layer_param_.resample_param().antialias(); int kernel_width; if(filter_type == FILTER_BICUBIC) kernel_width = 4; else if(filter_type == FILTER_BOX) kernel_width = 1; else kernel_width = 2; hipLaunchKernelGGL(( InterpolationKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(topsize)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, topsize, botchannelsize, topchannelsize, (Dtype*)bottom_data, bottomwidth, bottomheight, fx, fy, (Dtype*)top_data, topwidth, topheight, filter_type, kernel_width, antialias); CUDA_POST_KERNEL_CHECK; } else LOG(FATAL) << "unsupported downsampling type"; } template <typename Dtype> void ResampleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { for(int i=0; i<propagate_down.size(); i++) if(propagate_down[i]) LOG(FATAL) << "ResampleLayer cannot do backward."; } INSTANTIATE_LAYER_GPU_FUNCS(ResampleLayer); } // namespace caffe // 
cv::gpu::GpuMat input(bottomheight, bottomwidth, CV_32FC3); // float* input_ptr=(float*)input.data; // int input_stride=input.step/4; // BlobToOpenCV<Dtype><<<CAFFE_GET_BLOCKS(bottomwidth*bottomheight), CAFFE_CUDA_NUM_THREADS>>>( // bottomwidth*bottomheight, // (Dtype*)bottom_data, // bottomwidth, // bottomheight, // input_stride, // (Dtype*)input_ptr); // cv::gpu::GpuMat output; // cv::Size output_size; // output_size.width = topwidth; // output_size.height = topheight; // cv::gpu::resize(input,output,output_size,0,0,interpolation,cv::gpu::Stream::Null(),false); // float* output_ptr=(float*)output.data; // int output_stride=output.step/4; // OpenCVToBlob<Dtype><<<CAFFE_GET_BLOCKS(topwidth*topheight), CAFFE_CUDA_NUM_THREADS>>>( // topwidth*topheight, // (Dtype*)output_ptr, // topwidth, // topheight, // output_stride, // (Dtype*)top_data); // top_data += topsize; // bottom_data += botsize; //template <typename Dtype> //__global__ void BlobToOpenCV( // const int nthreads, // const Dtype* blob_ptr, // const int width, // const int height, // const int stride, // Dtype* mat_ptr) //{ // CUDA_KERNEL_LOOP(index, nthreads) // { // int x=index % width; // int y=index / width; // for(int c=0; c<3; c++) // mat_ptr[y*stride+x*3+c]=blob_ptr[((c*height)+y)*width+x]; // } //} //template <typename Dtype> //__global__ void OpenCVToBlob( // const int nthreads, // const Dtype* mat_ptr, // const int width, // const int height, // const int stride, // Dtype* blob_ptr) //{ // CUDA_KERNEL_LOOP(index, nthreads) // { // int x=index % width; // int y=index / width; // for(int c=0; c<3; c++) // blob_ptr[((c*height)+y)*width+x]=mat_ptr[y*stride+x*3+c]; // } //}
aa58eba9198580e3738706e62772a2bcbad1072d.cu
// Copyright 2014 BVLC and contributors. #include <vector> #include "caffe/layer.hpp" #include "caffe/layers/resample_layer.hpp" #include "caffe/util/math_functions.hpp" #include <opencv2/opencv.hpp> //#include <opencv2/gpu/gpu.hpp> #include "thirdparty/gpu/gpu.hpp" namespace caffe { static __device__ __forceinline__ float bicubicCoeff(float x_) { float x = fabsf(x_); if (x <= 1.0f) return x * x * (1.5f * x - 2.5f) + 1.0f; else if (x < 2.0f) return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f; else return 0.0f; } static __device__ __forceinline__ float boxCoeff(float x) { if (-0.5 <= x && x<0.5) return 1.0; return 0; } static __device__ __forceinline__ float triangleCoeff(float x) { if (-1<=x && x<0) return x+1; if (0<=x && x<=1) return 1-x; return 0; } #define FILTER_BICUBIC 0 #define FILTER_BOX 1 #define FILTER_TRIANGLE 2 template <typename Dtype> __global__ void InterpolationKernel( const int nthreads, const int in_channelsize, const int out_channelsize, const Dtype* in_ptr, const int in_width, const int in_height, const float fx, const float fy, Dtype* out_ptr, const int out_width, const int out_height, int filter_type, int kernel_width, const bool antialias) { CUDA_KERNEL_LOOP(index, nthreads) { int c = index / out_channelsize; int x_out = (index % out_channelsize) % out_width; int y_out = (index % out_channelsize) / out_width; float x_in = x_out * fx + fy / 2.0f - 0.5f; float y_in = y_out * fy + fx / 2.0f - 0.5f; int x_in_round = round(x_in); int y_in_round = round(y_in); Dtype sum=0; Dtype wsum=0; float ax = 1.0f / (antialias ? fx : 1.0f); float ay = 1.0f / (antialias ? fy : 1.0f); int rx = (fx < 1.0f) ? 2 : ceil(float(kernel_width)/ax); int ry = (fy < 1.0f) ? 2 : ceil(float(kernel_width)/ay); for(int y=y_in_round-ry; y<=y_in_round+ry; y++) for(int x=x_in_round-rx; x<=x_in_round+rx; x++) { if(y<0 || x<0) continue; if(y>=in_height || x>=in_width) continue; float dx = x_in - x; float dy = y_in - y; float w; if(filter_type == FILTER_BICUBIC) w = ax*bicubicCoeff(ax*dx) * ay*bicubicCoeff(ay*dy); else if(filter_type == FILTER_BOX) w = ax*boxCoeff(ax*dx) * ay*boxCoeff(ay*dy); else w = ax*triangleCoeff(ax*dx) * ay*triangleCoeff(ay*dy); sum += w * in_ptr[c*in_channelsize + y*in_width+x]; wsum += w; } out_ptr[index] = (!wsum) ? 
0 : (sum / wsum); } } template <typename Dtype> __global__ void NearestNeighborKernel( const int nthreads, const int in_channelsize, const int out_channelsize, const Dtype* in_ptr, const int in_width, const int in_height, const float fx, const float fy, Dtype* out_ptr, const int out_width, const int out_height) { CUDA_KERNEL_LOOP(index, nthreads) { int c = index / out_channelsize; int x_out = (index % out_channelsize) % out_width; int y_out = (index % out_channelsize) / out_width; float x_in = x_out * fx + fy / 2.0f - 0.5f; float y_in = y_out * fy + fx / 2.0f - 0.5f; int x_in_round = round(x_in); int y_in_round = round(y_in); out_ptr[index] = in_ptr[c*in_channelsize + y_in_round*in_width+x_in_round]; } } template <typename Dtype> void ResampleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Dtype* top_data = top[0]->mutable_gpu_data(); // dest int topwidth = top[0]->width(); int topheight = top[0]->height(); int topchannels = top[0]->channels(); int topcount = top[0]->count(); Dtype* bottom_data = bottom[0]->mutable_gpu_data(); // source int bottomnum = (bottom)[0]->num(); int bottomchannels = (bottom)[0]->channels(); int bottomwidth = (bottom)[0]->width(); int bottomheight = (bottom)[0]->height(); int bottomcount = (bottom)[0]->count(); CHECK_EQ(topchannels, bottomchannels) << "ResampleLayer top channel count must match bottom channel count"; float fx = float(bottomwidth)/float(topwidth); float fy = float(bottomheight)/float(topheight); //int botsize = bottomwidth*bottomheight*bottomchannels*bottomnum; int topsize = topwidth*topheight*topchannels*bottomnum; int topchannelsize = topwidth*topheight; int botchannelsize = bottomwidth*bottomheight; if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_NEAREST) { NearestNeighborKernel<Dtype><<<CAFFE_GET_BLOCKS(topsize), CAFFE_CUDA_NUM_THREADS>>>( topsize, botchannelsize, topchannelsize, (Dtype*)bottom_data, bottomwidth, bottomheight, fx, fy, (Dtype*)top_data, topwidth, topheight ); CUDA_POST_KERNEL_CHECK; } else if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_CUBIC || this->layer_param().resample_param().type() == ResampleParameter_ResampleType_LINEAR) { int filter_type; if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_CUBIC) filter_type = FILTER_BICUBIC; else if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_LINEAR) filter_type = FILTER_TRIANGLE; bool isDownsample = (fx > 1) || (fy > 1); bool antialias = isDownsample && this->layer_param_.resample_param().antialias(); int kernel_width; if(filter_type == FILTER_BICUBIC) kernel_width = 4; else if(filter_type == FILTER_BOX) kernel_width = 1; else kernel_width = 2; InterpolationKernel<Dtype><<<CAFFE_GET_BLOCKS(topsize), CAFFE_CUDA_NUM_THREADS>>>( topsize, botchannelsize, topchannelsize, (Dtype*)bottom_data, bottomwidth, bottomheight, fx, fy, (Dtype*)top_data, topwidth, topheight, filter_type, kernel_width, antialias); CUDA_POST_KERNEL_CHECK; } else LOG(FATAL) << "unsupported downsampling type"; } template <typename Dtype> void ResampleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { for(int i=0; i<propagate_down.size(); i++) if(propagate_down[i]) LOG(FATAL) << "ResampleLayer cannot do backward."; } INSTANTIATE_LAYER_GPU_FUNCS(ResampleLayer); } // namespace caffe // cv::gpu::GpuMat input(bottomheight, bottomwidth, CV_32FC3); // float* 
input_ptr=(float*)input.data; // int input_stride=input.step/4; // BlobToOpenCV<Dtype><<<CAFFE_GET_BLOCKS(bottomwidth*bottomheight), CAFFE_CUDA_NUM_THREADS>>>( // bottomwidth*bottomheight, // (Dtype*)bottom_data, // bottomwidth, // bottomheight, // input_stride, // (Dtype*)input_ptr); // cv::gpu::GpuMat output; // cv::Size output_size; // output_size.width = topwidth; // output_size.height = topheight; // cv::gpu::resize(input,output,output_size,0,0,interpolation,cv::gpu::Stream::Null(),false); // float* output_ptr=(float*)output.data; // int output_stride=output.step/4; // OpenCVToBlob<Dtype><<<CAFFE_GET_BLOCKS(topwidth*topheight), CAFFE_CUDA_NUM_THREADS>>>( // topwidth*topheight, // (Dtype*)output_ptr, // topwidth, // topheight, // output_stride, // (Dtype*)top_data); // top_data += topsize; // bottom_data += botsize; //template <typename Dtype> //__global__ void BlobToOpenCV( // const int nthreads, // const Dtype* blob_ptr, // const int width, // const int height, // const int stride, // Dtype* mat_ptr) //{ // CUDA_KERNEL_LOOP(index, nthreads) // { // int x=index % width; // int y=index / width; // for(int c=0; c<3; c++) // mat_ptr[y*stride+x*3+c]=blob_ptr[((c*height)+y)*width+x]; // } //} //template <typename Dtype> //__global__ void OpenCVToBlob( // const int nthreads, // const Dtype* mat_ptr, // const int width, // const int height, // const int stride, // Dtype* blob_ptr) //{ // CUDA_KERNEL_LOOP(index, nthreads) // { // int x=index % width; // int y=index / width; // for(int c=0; c<3; c++) // blob_ptr[((c*height)+y)*width+x]=mat_ptr[y*stride+x*3+c]; // } //}
e6fd6d8a8560047f2ad5c090b2c743f6b20819a7.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 2020-2023, XGBoost contributors */ #include <algorithm> #include <memory> #include <type_traits> #include "../common/hist_util.cuh" #include "batch_utils.h" // for RegenGHist #include "device_adapter_hip.cuh" #include "ellpack_page.cuh" #include "gradient_index.h" #include "iterative_dmatrix.h" #include "proxy_dmatrix.cuh" #include "proxy_dmatrix.h" #include "simple_batch_iterator.h" #include "sparse_page_source.h" namespace xgboost::data { void IterativeDMatrix::InitFromCUDA(Context const* ctx, BatchParam const& p, DataIterHandle iter_handle, float missing, std::shared_ptr<DMatrix> ref) { // A handle passed to external iterator. DMatrixProxy* proxy = MakeProxy(proxy_); CHECK(proxy); // The external iterator auto iter = DataIterProxy<DataIterResetCallback, XGDMatrixCallbackNext>{iter_handle, reset_, next_}; dh::XGBCachingDeviceAllocator<char> alloc; auto num_rows = [&]() { return cuda_impl::Dispatch(proxy, [](auto const& value) { return value.NumRows(); }); }; auto num_cols = [&]() { return cuda_impl::Dispatch(proxy, [](auto const& value) { return value.NumCols(); }); }; size_t row_stride = 0; size_t nnz = 0; // Sketch for all batches. std::vector<common::SketchContainer> sketch_containers; size_t batches = 0; size_t accumulated_rows = 0; bst_feature_t cols = 0; int32_t current_device; dh::safe_cuda(hipGetDevice(&current_device)); auto get_device = [&]() -> int32_t { std::int32_t d = (ctx->gpu_id == Context::kCpuId) ? current_device : ctx->gpu_id; CHECK_NE(d, Context::kCpuId); return d; }; /** * Generate quantiles */ common::HistogramCuts cuts; do { // We use do while here as the first batch is fetched in ctor // ctx_.gpu_id = proxy->DeviceIdx(); CHECK_LT(ctx->gpu_id, common::AllVisibleGPUs()); dh::safe_cuda(hipSetDevice(get_device())); if (cols == 0) { cols = num_cols(); collective::Allreduce<collective::Operation::kMax>(&cols, 1); this->info_.num_col_ = cols; } else { CHECK_EQ(cols, num_cols()) << "Inconsistent number of columns."; } if (!ref) { sketch_containers.emplace_back(proxy->Info().feature_types, p.max_bin, cols, num_rows(), get_device()); auto* p_sketch = &sketch_containers.back(); proxy->Info().weights_.SetDevice(get_device()); cuda_impl::Dispatch(proxy, [&](auto const& value) { common::AdapterDeviceSketch(value, p.max_bin, proxy->Info(), missing, p_sketch); }); } auto batch_rows = num_rows(); accumulated_rows += batch_rows; dh::device_vector<size_t> row_counts(batch_rows + 1, 0); common::Span<size_t> row_counts_span(row_counts.data().get(), row_counts.size()); row_stride = ::max(row_stride, cuda_impl::Dispatch(proxy, [=](auto const& value) { return GetRowCounts(value, row_counts_span, get_device(), missing); })); nnz += thrust::reduce(thrust::hip::par(alloc), row_counts.begin(), row_counts.end()); batches++; } while (iter.Next()); iter.Reset(); auto n_features = cols; CHECK_GE(n_features, 1) << "Data must has at least 1 column."; dh::safe_cuda(hipSetDevice(get_device())); if (!ref) { HostDeviceVector<FeatureType> ft; common::SketchContainer final_sketch( sketch_containers.empty() ? 
ft : sketch_containers.front().FeatureTypes(), p.max_bin, cols, accumulated_rows, get_device()); for (auto const& sketch : sketch_containers) { final_sketch.Merge(sketch.ColumnsPtr(), sketch.Data()); final_sketch.FixError(); } sketch_containers.clear(); sketch_containers.shrink_to_fit(); final_sketch.MakeCuts(&cuts, this->info_.IsColumnSplit()); } else { GetCutsFromRef(ctx, ref, Info().num_col_, p, &cuts); } this->info_.num_row_ = accumulated_rows; this->info_.num_nonzero_ = nnz; auto init_page = [this, &cuts, row_stride, accumulated_rows, get_device]() { if (!ellpack_) { // Should be put inside the while loop to protect against empty batch. In // that case device id is invalid. ellpack_.reset(new EllpackPage); *(ellpack_->Impl()) = EllpackPageImpl(get_device(), cuts, this->IsDense(), row_stride, accumulated_rows); } }; /** * Generate gradient index. */ size_t offset = 0; iter.Reset(); size_t n_batches_for_verification = 0; while (iter.Next()) { init_page(); dh::safe_cuda(hipSetDevice(get_device())); auto rows = num_rows(); dh::device_vector<size_t> row_counts(rows + 1, 0); common::Span<size_t> row_counts_span(row_counts.data().get(), row_counts.size()); cuda_impl::Dispatch(proxy, [=](auto const& value) { return GetRowCounts(value, row_counts_span, get_device(), missing); }); auto is_dense = this->IsDense(); proxy->Info().feature_types.SetDevice(get_device()); auto d_feature_types = proxy->Info().feature_types.ConstDeviceSpan(); auto new_impl = cuda_impl::Dispatch(proxy, [&](auto const& value) { return EllpackPageImpl(value, missing, get_device(), is_dense, row_counts_span, d_feature_types, row_stride, rows, cuts); }); size_t num_elements = ellpack_->Impl()->Copy(get_device(), &new_impl, offset); offset += num_elements; proxy->Info().num_row_ = num_rows(); proxy->Info().num_col_ = cols; if (batches != 1) { this->info_.Extend(std::move(proxy->Info()), false, true); } n_batches_for_verification++; } CHECK_EQ(batches, n_batches_for_verification) << "Different number of batches returned between 2 iterations"; if (batches == 1) { this->info_ = std::move(proxy->Info()); this->info_.num_nonzero_ = nnz; CHECK_EQ(proxy->Info().labels.Size(), 0); } iter.Reset(); // Synchronise worker columns info_.SynchronizeNumberOfColumns(); } BatchSet<EllpackPage> IterativeDMatrix::GetEllpackBatches(Context const* ctx, BatchParam const& param) { if (param.Initialized()) { CheckParam(param); CHECK(!detail::RegenGHist(param, batch_)) << error::InconsistentMaxBin(); } if (!ellpack_ && !ghist_) { LOG(FATAL) << "`QuantileDMatrix` not initialized."; } if (!ellpack_) { ellpack_.reset(new EllpackPage()); if (ctx->IsCUDA()) { this->Info().feature_types.SetDevice(ctx->gpu_id); *ellpack_->Impl() = EllpackPageImpl(ctx, *this->ghist_, this->Info().feature_types.ConstDeviceSpan()); } else if (fmat_ctx_.IsCUDA()) { this->Info().feature_types.SetDevice(fmat_ctx_.gpu_id); *ellpack_->Impl() = EllpackPageImpl(&fmat_ctx_, *this->ghist_, this->Info().feature_types.ConstDeviceSpan()); } else { // Can happen when QDM is initialized on CPU, but a GPU version is queried by a different QDM // for cut reference. 
auto cuda_ctx = ctx->MakeCUDA(); this->Info().feature_types.SetDevice(cuda_ctx.gpu_id); *ellpack_->Impl() = EllpackPageImpl(&cuda_ctx, *this->ghist_, this->Info().feature_types.ConstDeviceSpan()); } } CHECK(ellpack_); auto begin_iter = BatchIterator<EllpackPage>(new SimpleBatchIteratorImpl<EllpackPage>(ellpack_)); return BatchSet<EllpackPage>(begin_iter); } void GetCutsFromEllpack(EllpackPage const& page, common::HistogramCuts* cuts) { *cuts = page.Impl()->Cuts(); } } // namespace xgboost::data
e6fd6d8a8560047f2ad5c090b2c743f6b20819a7.cu
/** * Copyright 2020-2023, XGBoost contributors */ #include <algorithm> #include <memory> #include <type_traits> #include "../common/hist_util.cuh" #include "batch_utils.h" // for RegenGHist #include "device_adapter.cuh" #include "ellpack_page.cuh" #include "gradient_index.h" #include "iterative_dmatrix.h" #include "proxy_dmatrix.cuh" #include "proxy_dmatrix.h" #include "simple_batch_iterator.h" #include "sparse_page_source.h" namespace xgboost::data { void IterativeDMatrix::InitFromCUDA(Context const* ctx, BatchParam const& p, DataIterHandle iter_handle, float missing, std::shared_ptr<DMatrix> ref) { // A handle passed to external iterator. DMatrixProxy* proxy = MakeProxy(proxy_); CHECK(proxy); // The external iterator auto iter = DataIterProxy<DataIterResetCallback, XGDMatrixCallbackNext>{iter_handle, reset_, next_}; dh::XGBCachingDeviceAllocator<char> alloc; auto num_rows = [&]() { return cuda_impl::Dispatch(proxy, [](auto const& value) { return value.NumRows(); }); }; auto num_cols = [&]() { return cuda_impl::Dispatch(proxy, [](auto const& value) { return value.NumCols(); }); }; size_t row_stride = 0; size_t nnz = 0; // Sketch for all batches. std::vector<common::SketchContainer> sketch_containers; size_t batches = 0; size_t accumulated_rows = 0; bst_feature_t cols = 0; int32_t current_device; dh::safe_cuda(cudaGetDevice(&current_device)); auto get_device = [&]() -> int32_t { std::int32_t d = (ctx->gpu_id == Context::kCpuId) ? current_device : ctx->gpu_id; CHECK_NE(d, Context::kCpuId); return d; }; /** * Generate quantiles */ common::HistogramCuts cuts; do { // We use do while here as the first batch is fetched in ctor // ctx_.gpu_id = proxy->DeviceIdx(); CHECK_LT(ctx->gpu_id, common::AllVisibleGPUs()); dh::safe_cuda(cudaSetDevice(get_device())); if (cols == 0) { cols = num_cols(); collective::Allreduce<collective::Operation::kMax>(&cols, 1); this->info_.num_col_ = cols; } else { CHECK_EQ(cols, num_cols()) << "Inconsistent number of columns."; } if (!ref) { sketch_containers.emplace_back(proxy->Info().feature_types, p.max_bin, cols, num_rows(), get_device()); auto* p_sketch = &sketch_containers.back(); proxy->Info().weights_.SetDevice(get_device()); cuda_impl::Dispatch(proxy, [&](auto const& value) { common::AdapterDeviceSketch(value, p.max_bin, proxy->Info(), missing, p_sketch); }); } auto batch_rows = num_rows(); accumulated_rows += batch_rows; dh::device_vector<size_t> row_counts(batch_rows + 1, 0); common::Span<size_t> row_counts_span(row_counts.data().get(), row_counts.size()); row_stride = std::max(row_stride, cuda_impl::Dispatch(proxy, [=](auto const& value) { return GetRowCounts(value, row_counts_span, get_device(), missing); })); nnz += thrust::reduce(thrust::cuda::par(alloc), row_counts.begin(), row_counts.end()); batches++; } while (iter.Next()); iter.Reset(); auto n_features = cols; CHECK_GE(n_features, 1) << "Data must has at least 1 column."; dh::safe_cuda(cudaSetDevice(get_device())); if (!ref) { HostDeviceVector<FeatureType> ft; common::SketchContainer final_sketch( sketch_containers.empty() ? 
ft : sketch_containers.front().FeatureTypes(), p.max_bin, cols, accumulated_rows, get_device()); for (auto const& sketch : sketch_containers) { final_sketch.Merge(sketch.ColumnsPtr(), sketch.Data()); final_sketch.FixError(); } sketch_containers.clear(); sketch_containers.shrink_to_fit(); final_sketch.MakeCuts(&cuts, this->info_.IsColumnSplit()); } else { GetCutsFromRef(ctx, ref, Info().num_col_, p, &cuts); } this->info_.num_row_ = accumulated_rows; this->info_.num_nonzero_ = nnz; auto init_page = [this, &cuts, row_stride, accumulated_rows, get_device]() { if (!ellpack_) { // Should be put inside the while loop to protect against empty batch. In // that case device id is invalid. ellpack_.reset(new EllpackPage); *(ellpack_->Impl()) = EllpackPageImpl(get_device(), cuts, this->IsDense(), row_stride, accumulated_rows); } }; /** * Generate gradient index. */ size_t offset = 0; iter.Reset(); size_t n_batches_for_verification = 0; while (iter.Next()) { init_page(); dh::safe_cuda(cudaSetDevice(get_device())); auto rows = num_rows(); dh::device_vector<size_t> row_counts(rows + 1, 0); common::Span<size_t> row_counts_span(row_counts.data().get(), row_counts.size()); cuda_impl::Dispatch(proxy, [=](auto const& value) { return GetRowCounts(value, row_counts_span, get_device(), missing); }); auto is_dense = this->IsDense(); proxy->Info().feature_types.SetDevice(get_device()); auto d_feature_types = proxy->Info().feature_types.ConstDeviceSpan(); auto new_impl = cuda_impl::Dispatch(proxy, [&](auto const& value) { return EllpackPageImpl(value, missing, get_device(), is_dense, row_counts_span, d_feature_types, row_stride, rows, cuts); }); size_t num_elements = ellpack_->Impl()->Copy(get_device(), &new_impl, offset); offset += num_elements; proxy->Info().num_row_ = num_rows(); proxy->Info().num_col_ = cols; if (batches != 1) { this->info_.Extend(std::move(proxy->Info()), false, true); } n_batches_for_verification++; } CHECK_EQ(batches, n_batches_for_verification) << "Different number of batches returned between 2 iterations"; if (batches == 1) { this->info_ = std::move(proxy->Info()); this->info_.num_nonzero_ = nnz; CHECK_EQ(proxy->Info().labels.Size(), 0); } iter.Reset(); // Synchronise worker columns info_.SynchronizeNumberOfColumns(); } BatchSet<EllpackPage> IterativeDMatrix::GetEllpackBatches(Context const* ctx, BatchParam const& param) { if (param.Initialized()) { CheckParam(param); CHECK(!detail::RegenGHist(param, batch_)) << error::InconsistentMaxBin(); } if (!ellpack_ && !ghist_) { LOG(FATAL) << "`QuantileDMatrix` not initialized."; } if (!ellpack_) { ellpack_.reset(new EllpackPage()); if (ctx->IsCUDA()) { this->Info().feature_types.SetDevice(ctx->gpu_id); *ellpack_->Impl() = EllpackPageImpl(ctx, *this->ghist_, this->Info().feature_types.ConstDeviceSpan()); } else if (fmat_ctx_.IsCUDA()) { this->Info().feature_types.SetDevice(fmat_ctx_.gpu_id); *ellpack_->Impl() = EllpackPageImpl(&fmat_ctx_, *this->ghist_, this->Info().feature_types.ConstDeviceSpan()); } else { // Can happen when QDM is initialized on CPU, but a GPU version is queried by a different QDM // for cut reference. 
auto cuda_ctx = ctx->MakeCUDA(); this->Info().feature_types.SetDevice(cuda_ctx.gpu_id); *ellpack_->Impl() = EllpackPageImpl(&cuda_ctx, *this->ghist_, this->Info().feature_types.ConstDeviceSpan()); } } CHECK(ellpack_); auto begin_iter = BatchIterator<EllpackPage>(new SimpleBatchIteratorImpl<EllpackPage>(ellpack_)); return BatchSet<EllpackPage>(begin_iter); } void GetCutsFromEllpack(EllpackPage const& page, common::HistogramCuts* cuts) { *cuts = page.Impl()->Cuts(); } } // namespace xgboost::data
73884d52e86d3e5b81c272df73e7e8142b86481a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void cuda_hello(){ printf("Hello World from GPU!\n"); } int main() { hipLaunchKernelGGL(( cuda_hello), dim3(1),dim3(1), 0, 0, ); return 0; }
73884d52e86d3e5b81c272df73e7e8142b86481a.cu
#include <stdio.h> __global__ void cuda_hello(){ printf("Hello World from GPU!\n"); } int main() { cuda_hello<<<1,1>>>(); return 0; }
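/*
 * Illustrative variant (not part of either file above): the example returns immediately
 * after the asynchronous kernel launch, so the device-side printf output may never be
 * flushed before the process exits. Adding cudaDeviceSynchronize() after the launch makes
 * the output reliable. The kernel is renamed here to avoid clashing with cuda_hello above.
 */
#include <stdio.h>
#include <cuda_runtime.h>

__global__ void cuda_hello_synced()
{
    printf("Hello World from GPU!\n");
}

int main()
{
    cuda_hello_synced<<<1, 1>>>();
    cudaDeviceSynchronize();  // wait for the kernel so the device printf buffer is flushed
    return 0;
}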
115dad2a48d2046648be2cfee74b10ad173055e5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Indice2D.h" #include "Indice1D.h" #include "cudaTools.h" #include <stdio.h> /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ static __device__ void secondaire(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n); /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /** * output : void required !! */ __global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n) { secondaire(ptrDevV1, ptrDevV2, ptrDevW, n); // pas necessaire, just for fun } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __device__ void secondaire(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n) { const int NB_THREAD = Indice2D::nbThread(); const int TID = Indice2D::tid(); // Debug, facultatif // if (TID == 0) // { // printf("Coucou from device tid = %d", TID); //required Device::synchronize(); after the call of kernel // } int s = TID; while (s < n) { ptrDevW[s] = ptrDevV1[s] + ptrDevV2[s]; s = s + NB_THREAD; } } /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
115dad2a48d2046648be2cfee74b10ad173055e5.cu
#include "Indice2D.h" #include "Indice1D.h" #include "cudaTools.h" #include <stdio.h> /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ static __device__ void secondaire(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n); /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /** * output : void required !! */ __global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n) { secondaire(ptrDevV1, ptrDevV2, ptrDevW, n); // pas necessaire, just for fun } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __device__ void secondaire(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n) { const int NB_THREAD = Indice2D::nbThread(); const int TID = Indice2D::tid(); // Debug, facultatif // if (TID == 0) // { // printf("Coucou from device tid = %d", TID); //required Device::synchronize(); after the call of kernel // } int s = TID; while (s < n) { ptrDevW[s] = ptrDevV1[s] + ptrDevV2[s]; s = s + NB_THREAD; } } /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
138951e0373cb5be0b5db0b1547716536a88a8c7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <cstdlib> #include <math.h> #include "SW_GPU.h" #define GAP_PEN 3 #define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } void InitNucArray (const char* RefSeq, SeqData OutSeq) { int i = 1; *(OutSeq.Seq + 0) = 0; char CurNuc = 'B'; // Input checked by sequence input function while (CurNuc != '\0') { CurNuc = *(RefSeq + i-1); *(OutSeq.Seq + i) = (int8_t) CurNuc; //printf("%d ", *(OutSeq.Seq + i)); i++; } printf("\n"); } void InitSubGPMat (SeqData RefSeq, SeqData ReadSeq, int* DataMat) { uint32_t ReadLength = ReadSeq.SeqLength; uint32_t RefLength = RefSeq.SeqLength; int32_t i,j; for (i = 1; i < ReadLength; i++) { for (j = 1; j < RefLength; j++) { *(DataMat + i*RefLength + j) = (*(RefSeq.Seq + j) == *(ReadSeq.Seq + i)) ? MATCH_HIT : MATCH_MISS; } } } __global__ void kernal_scoring(SeqData *d_read, SeqData *d_ref, int * d_sub_matrix, int *d_score_matrix, int step, int ReadLength, int RefLength) { int i = threadIdx.x +1; //+ blockIdx.x * blockDim.x; int j = step - i; int16_t Score1 = 0; int16_t Score2 = 0; int16_t Score3 = 0; //printf("%d %d \n", i, j); Score1 = *(d_score_matrix + ((i-1)*RefLength + (j-1))) + *(d_sub_matrix + (i*RefLength + j)); Score2 = *(d_score_matrix + ((i-1)*RefLength + j)) - GAP_PEN; Score3 = *(d_score_matrix + (i*RefLength + (j-1))) - GAP_PEN; //printf(" %d %d %d \n ", Score1, Score2, Score3 ); int16_t MaxScore = 0; MaxScore = (Score1 > MaxScore) ? Score1 : MaxScore; MaxScore = (Score2 > MaxScore) ? Score2 : MaxScore; MaxScore = (Score3 > MaxScore) ? Score3 : MaxScore; //printf("%d \n ", MaxScore); *(d_score_matrix + (i*RefLength + j)) = MaxScore; } int iDivUp(int a, int b){ return (a%b != 0) ? 
(a/b + 1) : (a/b); } int main(){ //// GPU Timing variables/////// hipEvent_t gpu_start, gpu_stop, cpu_start, cpu_stop; float elapsed_gpu, elapsed_cpu; ///// initializing /////////// uint32_t RefLength = 40; // Includes additional row needed for SW uint32_t ReadLength = 16; // Includes additional row needed for SW SeqData ReadSeq, RefSeq; ReadSeq.SeqLength = ReadLength; RefSeq.SeqLength = RefLength; /* Default Sequences for testing purposes */ const char ref_seq_def[40] = {'C', 'A', 'G', 'C', 'C', 'T', 'T', 'T', 'C', 'T', 'G', 'A','C', 'C', 'C', 'G', 'G', 'A', 'A', 'A', 'T','C', 'A', 'A', 'A', 'A', 'T', 'A', 'G', 'G', 'C', 'A', 'C', 'A', 'A', 'C', 'A', 'A', 'A', '\0'}; const char read_seq_def[16] = {'C', 'T', 'G', 'A', 'G', 'C', 'C', 'G', 'G', 'T', 'A', 'A', 'A', 'T', 'C', '\0'}; /* Initialize input sequences as int8_t arrays */ RefSeq.Seq = (uint8_t*) malloc(RefSeq.SeqLength); // Change to dynamic malloc depending on input ReadSeq.Seq = (uint8_t*) malloc(ReadSeq.SeqLength); // Change to dynamic malloc depending on input InitNucArray (ref_seq_def, RefSeq); InitNucArray (read_seq_def, ReadSeq); //substution matrix int* DataMat = (int*) calloc((RefLength)*(ReadLength),sizeof(int)); InitSubGPMat (RefSeq, ReadSeq, DataMat); //scoring matrix int* score_matrix = (int*) calloc(((RefSeq.SeqLength)+1)*((ReadSeq.SeqLength)+1),sizeof(uint32_t)); ////// cuda initializing /////// // Create the cuda events hipEventCreate(&gpu_start); hipEventCreate(&gpu_stop); // Record event on the default stream hipEventRecord(gpu_start, 0); CUDA_SAFE_CALL(hipSetDevice(0)); SeqData * d_read, * d_ref; int* d_score_matrix, * d_sub_matrix; //setting the arrays CUDA_SAFE_CALL(hipMalloc((void **)&d_read, ((ReadSeq.SeqLength)*sizeof(SeqData)))); CUDA_SAFE_CALL(hipMalloc((void **)&d_ref, ((RefSeq.SeqLength)*sizeof(SeqData)))); CUDA_SAFE_CALL(hipMalloc((void **)&d_score_matrix, (((RefSeq.SeqLength)+1)*((ReadSeq.SeqLength)+1)*sizeof(uint32_t)))); CUDA_SAFE_CALL(hipMalloc((void **)&d_sub_matrix,((RefSeq.SeqLength)*(ReadSeq.SeqLength))*sizeof(int))); //transfering the arrays CUDA_SAFE_CALL(hipMemcpy(d_read, ReadSeq.Seq, ((ReadSeq.SeqLength)*sizeof(SeqData)), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_ref, RefSeq.Seq, ((RefSeq.SeqLength)*sizeof(SeqData)), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_sub_matrix, DataMat, ((RefSeq.SeqLength)*(ReadSeq.SeqLength))*sizeof(int), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_score_matrix, score_matrix, (((RefSeq.SeqLength)+1)*((ReadSeq.SeqLength)+1)*sizeof(uint32_t)), hipMemcpyHostToDevice )); //kernal function call int step; int num_iters = RefSeq.SeqLength+ReadSeq.SeqLength-1; int max_thread = RefSeq.SeqLength-ReadSeq.SeqLength+1; int k = 0; //for(k = 0; k < 1600; k++){ for(step = 1; step< num_iters; step++) { dim3 dimGrid(iDivUp(step,max_thread)); hipLaunchKernelGGL(( kernal_scoring), dim3(dimGrid), dim3(1), 0, 0, d_read, d_ref, d_sub_matrix, d_score_matrix, step, ReadSeq.SeqLength, RefSeq.SeqLength); } //} //CUDA return CUDA_SAFE_CALL(hipPeekAtLastError()); CUDA_SAFE_CALL(hipMemcpy(ReadSeq.Seq, d_read, ((ReadSeq.SeqLength)*sizeof(uint32_t)), hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipMemcpy(RefSeq.Seq, d_ref, ((RefSeq.SeqLength)*sizeof(uint32_t)), hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipMemcpy(score_matrix, d_score_matrix, (((RefSeq.SeqLength)+1)*((ReadSeq.SeqLength)+1)*sizeof(uint32_t)), hipMemcpyDeviceToHost )); // Stop and destroy the timer hipEventRecord(gpu_stop,0); hipEventSynchronize(gpu_stop); hipEventElapsedTime(&elapsed_gpu, gpu_start, gpu_stop); 
printf("\nGPU time: %f (msec)\n", elapsed_gpu); int i,j; // for (i = 0; i < RefLength; i++) {printf(" %3c ",*(RefSeq.Seq + i));} /* //printf("\n"); for (i = 1; i < ReadLength; i++) { //printf("%c ",*(ReadSeq.Seq + i)); for (j = 1; j < RefLength; j++) { printf(" %3d ", *(score_matrix + (i*RefLength + j))); } printf("\n"); } */ return 0; }
138951e0373cb5be0b5db0b1547716536a88a8c7.cu
#include <cstdio> #include <cstdlib> #include <math.h> #include "SW_GPU.h" #define GAP_PEN 3 #define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } void InitNucArray (const char* RefSeq, SeqData OutSeq) { int i = 1; *(OutSeq.Seq + 0) = 0; char CurNuc = 'B'; // Input checked by sequence input function while (CurNuc != '\0') { CurNuc = *(RefSeq + i-1); *(OutSeq.Seq + i) = (int8_t) CurNuc; //printf("%d ", *(OutSeq.Seq + i)); i++; } printf("\n"); } void InitSubGPMat (SeqData RefSeq, SeqData ReadSeq, int* DataMat) { uint32_t ReadLength = ReadSeq.SeqLength; uint32_t RefLength = RefSeq.SeqLength; int32_t i,j; for (i = 1; i < ReadLength; i++) { for (j = 1; j < RefLength; j++) { *(DataMat + i*RefLength + j) = (*(RefSeq.Seq + j) == *(ReadSeq.Seq + i)) ? MATCH_HIT : MATCH_MISS; } } } __global__ void kernal_scoring(SeqData *d_read, SeqData *d_ref, int * d_sub_matrix, int *d_score_matrix, int step, int ReadLength, int RefLength) { int i = threadIdx.x +1; //+ blockIdx.x * blockDim.x; int j = step - i; int16_t Score1 = 0; int16_t Score2 = 0; int16_t Score3 = 0; //printf("%d %d \n", i, j); Score1 = *(d_score_matrix + ((i-1)*RefLength + (j-1))) + *(d_sub_matrix + (i*RefLength + j)); Score2 = *(d_score_matrix + ((i-1)*RefLength + j)) - GAP_PEN; Score3 = *(d_score_matrix + (i*RefLength + (j-1))) - GAP_PEN; //printf(" %d %d %d \n ", Score1, Score2, Score3 ); int16_t MaxScore = 0; MaxScore = (Score1 > MaxScore) ? Score1 : MaxScore; MaxScore = (Score2 > MaxScore) ? Score2 : MaxScore; MaxScore = (Score3 > MaxScore) ? Score3 : MaxScore; //printf("%d \n ", MaxScore); *(d_score_matrix + (i*RefLength + j)) = MaxScore; } int iDivUp(int a, int b){ return (a%b != 0) ? 
(a/b + 1) : (a/b); } int main(){ //// GPU Timing variables/////// cudaEvent_t gpu_start, gpu_stop, cpu_start, cpu_stop; float elapsed_gpu, elapsed_cpu; ///// initializing /////////// uint32_t RefLength = 40; // Includes additional row needed for SW uint32_t ReadLength = 16; // Includes additional row needed for SW SeqData ReadSeq, RefSeq; ReadSeq.SeqLength = ReadLength; RefSeq.SeqLength = RefLength; /* Default Sequences for testing purposes */ const char ref_seq_def[40] = {'C', 'A', 'G', 'C', 'C', 'T', 'T', 'T', 'C', 'T', 'G', 'A','C', 'C', 'C', 'G', 'G', 'A', 'A', 'A', 'T','C', 'A', 'A', 'A', 'A', 'T', 'A', 'G', 'G', 'C', 'A', 'C', 'A', 'A', 'C', 'A', 'A', 'A', '\0'}; const char read_seq_def[16] = {'C', 'T', 'G', 'A', 'G', 'C', 'C', 'G', 'G', 'T', 'A', 'A', 'A', 'T', 'C', '\0'}; /* Initialize input sequences as int8_t arrays */ RefSeq.Seq = (uint8_t*) malloc(RefSeq.SeqLength); // Change to dynamic malloc depending on input ReadSeq.Seq = (uint8_t*) malloc(ReadSeq.SeqLength); // Change to dynamic malloc depending on input InitNucArray (ref_seq_def, RefSeq); InitNucArray (read_seq_def, ReadSeq); //substution matrix int* DataMat = (int*) calloc((RefLength)*(ReadLength),sizeof(int)); InitSubGPMat (RefSeq, ReadSeq, DataMat); //scoring matrix int* score_matrix = (int*) calloc(((RefSeq.SeqLength)+1)*((ReadSeq.SeqLength)+1),sizeof(uint32_t)); ////// cuda initializing /////// // Create the cuda events cudaEventCreate(&gpu_start); cudaEventCreate(&gpu_stop); // Record event on the default stream cudaEventRecord(gpu_start, 0); CUDA_SAFE_CALL(cudaSetDevice(0)); SeqData * d_read, * d_ref; int* d_score_matrix, * d_sub_matrix; //setting the arrays CUDA_SAFE_CALL(cudaMalloc((void **)&d_read, ((ReadSeq.SeqLength)*sizeof(SeqData)))); CUDA_SAFE_CALL(cudaMalloc((void **)&d_ref, ((RefSeq.SeqLength)*sizeof(SeqData)))); CUDA_SAFE_CALL(cudaMalloc((void **)&d_score_matrix, (((RefSeq.SeqLength)+1)*((ReadSeq.SeqLength)+1)*sizeof(uint32_t)))); CUDA_SAFE_CALL(cudaMalloc((void **)&d_sub_matrix,((RefSeq.SeqLength)*(ReadSeq.SeqLength))*sizeof(int))); //transfering the arrays CUDA_SAFE_CALL(cudaMemcpy(d_read, ReadSeq.Seq, ((ReadSeq.SeqLength)*sizeof(SeqData)), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_ref, RefSeq.Seq, ((RefSeq.SeqLength)*sizeof(SeqData)), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_sub_matrix, DataMat, ((RefSeq.SeqLength)*(ReadSeq.SeqLength))*sizeof(int), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_score_matrix, score_matrix, (((RefSeq.SeqLength)+1)*((ReadSeq.SeqLength)+1)*sizeof(uint32_t)), cudaMemcpyHostToDevice )); //kernal function call int step; int num_iters = RefSeq.SeqLength+ReadSeq.SeqLength-1; int max_thread = RefSeq.SeqLength-ReadSeq.SeqLength+1; int k = 0; //for(k = 0; k < 1600; k++){ for(step = 1; step< num_iters; step++) { dim3 dimGrid(iDivUp(step,max_thread)); kernal_scoring<<< dimGrid, 1>>>(d_read, d_ref, d_sub_matrix, d_score_matrix, step, ReadSeq.SeqLength, RefSeq.SeqLength); } //} //CUDA return CUDA_SAFE_CALL(cudaPeekAtLastError()); CUDA_SAFE_CALL(cudaMemcpy(ReadSeq.Seq, d_read, ((ReadSeq.SeqLength)*sizeof(uint32_t)), cudaMemcpyDeviceToHost)); CUDA_SAFE_CALL(cudaMemcpy(RefSeq.Seq, d_ref, ((RefSeq.SeqLength)*sizeof(uint32_t)), cudaMemcpyDeviceToHost)); CUDA_SAFE_CALL(cudaMemcpy(score_matrix, d_score_matrix, (((RefSeq.SeqLength)+1)*((ReadSeq.SeqLength)+1)*sizeof(uint32_t)), cudaMemcpyDeviceToHost )); // Stop and destroy the timer cudaEventRecord(gpu_stop,0); cudaEventSynchronize(gpu_stop); cudaEventElapsedTime(&elapsed_gpu, gpu_start, gpu_stop); 
printf("\nGPU time: %f (msec)\n", elapsed_gpu); int i,j; // for (i = 0; i < RefLength; i++) {printf(" %3c ",*(RefSeq.Seq + i));} /* //printf("\n"); for (i = 1; i < ReadLength; i++) { //printf("%c ",*(ReadSeq.Seq + i)); for (j = 1; j < RefLength; j++) { printf(" %3d ", *(score_matrix + (i*RefLength + j))); } printf("\n"); } */ return 0; }
92492791c14e11dc316ff06445b73fd5f2cbccf9.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <hip/hip_runtime.h> #include <stdio.h> #include <cassert> #include <hipcub/hipcub.hpp> // NOLINT #include <vector> #include "glog/logging.h" #include "paddle/fluid/inference/tensorrt/plugin/slice_op_plugin.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { template <typename T> __global__ void SliceKernel(int num, int dims, const T *input, const int *offsets_info, T *output) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; extern __shared__ int shared_data[]; for (int i = threadIdx.x; i < dims * 3; i += blockDim.x) { shared_data[i] = offsets_info[i]; } __syncthreads(); if (idx < num) { int t_idx = idx; int in_idx = 0; for (int i = dims - 1; i >= 0; i--) { // output_shape auto t = t_idx % shared_data[i * 3 + 1]; // out offset auto s = t + shared_data[i * 3]; // input_seg_offset in_idx = in_idx + shared_data[i * 3 + 2] * s; t_idx = t_idx / shared_data[i * 3 + 1]; } output[idx] = input[in_idx]; } } SlicePlugin::SlicePlugin(std::vector<int> starts, std::vector<int> ends, std::vector<int> axes, bool with_fp16) : starts_(starts), ends_(ends), axes_(axes) { with_fp16_ = with_fp16; } SlicePlugin::SlicePlugin(void const *serial_data, size_t serial_length) { deserializeBase(serial_data, serial_length); DeserializeValue(&serial_data, &serial_length, &starts_); DeserializeValue(&serial_data, &serial_length, &ends_); DeserializeValue(&serial_data, &serial_length, &axes_); DeserializeValue(&serial_data, &serial_length, &with_fp16_); DeserializeValue(&serial_data, &serial_length, &offset_info_); } SlicePlugin::~SlicePlugin() { hipFree(offset_temp_data_); } SlicePlugin *SlicePlugin::clone() const TRT_NOEXCEPT { return new SlicePlugin(starts_, ends_, axes_, with_fp16_); } bool SlicePlugin::supportsFormat( nvinfer1::DataType type, nvinfer1::PluginFormat format) const TRT_NOEXCEPT { if (with_fp16_) { return ((type == nvinfer1::DataType::kFLOAT || type == nvinfer1::DataType::kHALF) && (format == nvinfer1::PluginFormat::kLINEAR)); } else { return ((type == nvinfer1::DataType::kFLOAT) && (format == nvinfer1::PluginFormat::kLINEAR)); } } nvinfer1::Dims SlicePlugin::getOutputDimensions( int index, const nvinfer1::Dims *inputs, int nb_input_dims) TRT_NOEXCEPT { auto in_dims = inputs[0]; nvinfer1::Dims out_dims = in_dims; for (size_t i = 0; i < axes_.size(); i++) { int start = starts_[i]; int end = ends_[i]; out_dims.d[axes_[i] - 1] = end - start; } return out_dims; } int SlicePlugin::enqueue(int batch_size, const void *const *inputs, #if IS_TRT_VERSION_LT(8000) void **outputs, void *workspace, hipStream_t stream) { #else void *const *outputs, void *workspace, hipStream_t stream) TRT_NOEXCEPT { #endif auto input_dims = getInputDims(0); // notice input dims is [C, H, W], add input batch dim here auto out_dims = getOutputDimensions(0, &input_dims, 1); input_dims.nbDims += 1; out_dims.nbDims += 1; for 
(auto i = input_dims.nbDims; i > 0; --i) { input_dims.d[i] = input_dims.d[i - 1]; out_dims.d[i] = out_dims.d[i - 1]; } input_dims.d[0] = batch_size; out_dims.d[0] = batch_size; auto num_dims = input_dims.nbDims; size_t out_num = ProductDim(out_dims); std::vector<int> seg_offsets; std::vector<int> offsets; std::vector<int> extends; offsets.resize(num_dims); extends.resize(num_dims); seg_offsets.resize(num_dims); seg_offsets[num_dims - 1] = 1; for (int i = num_dims - 2; i >= 0; i--) { seg_offsets[i] = input_dims.d[i + 1] * seg_offsets[i + 1]; } for (size_t i = 0; i < num_dims; ++i) { offsets[i] = 0; extends[i] = out_dims.d[i]; } for (size_t i = 0; i < axes_.size(); ++i) { offsets[axes_[i]] = starts_[i]; } std::vector<int> offset_info; for (size_t i = 0; i < num_dims; ++i) { offset_info.push_back(offsets[i]); offset_info.push_back(extends[i]); offset_info.push_back(seg_offsets[i]); } if (offset_temp_data_ == nullptr) { hipMalloc(&offset_temp_data_, 3 * num_dims * sizeof(int)); } hipMemcpyAsync(offset_temp_data_, offset_info.data(), sizeof(int) * 3 * num_dims, hipMemcpyHostToDevice, stream); int threads = 256; int blocks = (out_num + threads - 1) / threads; auto input_type = getDataType(); if (input_type == nvinfer1::DataType::kFLOAT) { VLOG(1) << "TRT Plugin DataType selected. Slice-->fp32"; const float *input1 = static_cast<const float *>(inputs[0]); float *output = static_cast<float *>(outputs[0]); hipLaunchKernelGGL(( SliceKernel<float>), dim3(blocks), dim3(threads), 3 * num_dims * sizeof(int), stream, out_num, num_dims, input1, offset_temp_data_, output); } else if (input_type == nvinfer1::DataType::kHALF) { VLOG(1) << "TRT Plugin DataType selected. Slice-->fp16"; const half *input1 = static_cast<const half *>(inputs[0]); half *output = static_cast<half *>(outputs[0]); hipLaunchKernelGGL(( SliceKernel<half>), dim3(blocks), dim3(threads), 3 * num_dims * sizeof(int), stream, out_num, num_dims, input1, offset_temp_data_, output); } else { PADDLE_THROW(platform::errors::Fatal( "The Slice TRT Plugin's input type should be float or half.")); } return hipGetLastError() != hipSuccess; } size_t SlicePlugin::getSerializationSize() const TRT_NOEXCEPT { return getBaseSerializationSize() + SerializedSize(starts_) + SerializedSize(ends_) + SerializedSize(axes_) + SerializedSize(with_fp16_) + SerializedSize(offset_info_); } void SlicePlugin::serialize(void *buffer) const TRT_NOEXCEPT { serializeBase(buffer); SerializeValue(&buffer, starts_); SerializeValue(&buffer, ends_); SerializeValue(&buffer, axes_); SerializeValue(&buffer, with_fp16_); SerializeValue(&buffer, offset_info_); } // Dynamic Plugin below. 
#if IS_TRT_VERSION_GE(6000) SlicePluginDynamic::SlicePluginDynamic(std::vector<int> starts, std::vector<int> ends, std::vector<int> axes, int decrease_axis, bool with_fp16) : starts_(starts), ends_(ends), axes_(axes), decrease_axis_(decrease_axis) { with_fp16_ = with_fp16; } SlicePluginDynamic::SlicePluginDynamic(void const *serialData, size_t serialLength) { DeserializeValue(&serialData, &serialLength, &starts_); DeserializeValue(&serialData, &serialLength, &ends_); DeserializeValue(&serialData, &serialLength, &axes_); DeserializeValue(&serialData, &serialLength, &decrease_axis_); DeserializeValue(&serialData, &serialLength, &with_fp16_); DeserializeValue(&serialData, &serialLength, &offset_info_); } void SlicePluginDynamic::destroy() TRT_NOEXCEPT { hipFree(offset_temp_data_); delete this; } int SlicePluginDynamic::initialize() TRT_NOEXCEPT { return 0; } size_t SlicePluginDynamic::getSerializationSize() const TRT_NOEXCEPT { size_t size = SerializedSize(starts_) + SerializedSize(ends_) + SerializedSize(axes_) + SerializedSize(decrease_axis_) + SerializedSize(with_fp16_) + SerializedSize(offset_info_); return size; } void SlicePluginDynamic::serialize(void *buffer) const TRT_NOEXCEPT { SerializeValue(&buffer, starts_); SerializeValue(&buffer, ends_); SerializeValue(&buffer, axes_); SerializeValue(&buffer, decrease_axis_); SerializeValue(&buffer, with_fp16_); SerializeValue(&buffer, offset_info_); } nvinfer1::DimsExprs SlicePluginDynamic::getOutputDimensions( int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs, nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT { auto in_dims = inputs[0]; nvinfer1::DimsExprs ret = in_dims; // start, ends should greater 0 for (size_t i = 0; i < axes_.size(); i++) { int start = starts_[i]; int end = ends_[i]; #if IS_TRT_VERSION_GE(7200) ret.d[axes_[i]] = expr_builder.operation( nvinfer1::DimensionOperation::kSUB, *expr_builder.operation(nvinfer1::DimensionOperation::kMIN, *expr_builder.constant(ends_[i]), *in_dims.d[axes_[i]]), *expr_builder.constant(start)); #else ret.d[axes_[i]] = expr_builder.constant(end - start); #endif } if (decrease_axis_ != -1) { nvinfer1::DimsExprs res; res.nbDims = ret.nbDims - 1; int j = 0; for (size_t i = 0; i < in_dims.nbDims; i++) { if (decrease_axis_ == i) continue; res.d[j++] = expr_builder.operation(nvinfer1::DimensionOperation::kMAX, *expr_builder.constant(0), *ret.d[i]); } return res; } return ret; } bool SlicePluginDynamic::supportsFormatCombination( int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs, int nb_outputs) TRT_NOEXCEPT { PADDLE_ENFORCE_NOT_NULL( in_out, platform::errors::InvalidArgument( "The input of swish plugin shoule not be nullptr.")); PADDLE_ENFORCE_LT( pos, nb_inputs + nb_outputs, platform::errors::InvalidArgument("The pos(%d) should be less than the " "num(%d) of the input and the output.", pos, nb_inputs + nb_outputs)); const nvinfer1::PluginTensorDesc &in = in_out[pos]; if (pos == 0) { if (with_fp16_) { return (in.type == nvinfer1::DataType::kFLOAT || in.type == nvinfer1::DataType::kHALF) && (in.format == nvinfer1::TensorFormat::kLINEAR); } else { return (in.type == nvinfer1::DataType::kFLOAT) && (in.format == nvinfer1::TensorFormat::kLINEAR); } } const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1]; // output return in.type == prev.type && in.format == prev.format; } nvinfer1::DataType SlicePluginDynamic::getOutputDataType( int index, const nvinfer1::DataType *input_types, int nb_inputs) const TRT_NOEXCEPT { PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument( 
"The Slice Plugin only has one input, so the " "index value should be 0, but get %d.", index)); PADDLE_ENFORCE_EQ((input_types[0] == nvinfer1::DataType::kFLOAT || input_types[0] == nvinfer1::DataType::kHALF), true, platform::errors::InvalidArgument( "The input type should be half or float")); return input_types[0]; } int SlicePluginDynamic::enqueue(const nvinfer1::PluginTensorDesc *input_desc, const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs, void *const *outputs, void *workspace, hipStream_t stream) TRT_NOEXCEPT { auto input_dims = input_desc[0].dims; auto out_dims = output_desc[0].dims; if (decrease_axis_ != -1) { out_dims = input_dims; out_dims.d[decrease_axis_] = 1; } auto num_dims = input_dims.nbDims; size_t out_num = ProductDim(out_dims); std::vector<int> seg_offsets; std::vector<int> offsets; std::vector<int> extends; offsets.resize(num_dims); extends.resize(num_dims); seg_offsets.resize(num_dims); seg_offsets[num_dims - 1] = 1; for (int i = num_dims - 2; i >= 0; i--) { seg_offsets[i] = input_dims.d[i + 1] * seg_offsets[i + 1]; } for (size_t i = 0; i < num_dims; ++i) { offsets[i] = 0; extends[i] = out_dims.d[i]; } for (size_t i = 0; i < axes_.size(); ++i) { offsets[axes_[i]] = starts_[i]; } offset_info_.resize(num_dims * 3); for (size_t i = 0; i < num_dims; ++i) { offset_info_[i * 3 + 0] = offsets[i]; offset_info_[i * 3 + 1] = extends[i]; offset_info_[i * 3 + 2] = seg_offsets[i]; } if (offset_temp_data_ == nullptr) { hipMalloc(&offset_temp_data_, 3 * num_dims * sizeof(int)); } hipMemcpyAsync(offset_temp_data_, offset_info_.data(), sizeof(int) * 3 * num_dims, hipMemcpyHostToDevice, stream); int threads = 256; int blocks = (out_num + threads - 1) / threads; auto input_type = input_desc[0].type; if (input_type == nvinfer1::DataType::kFLOAT) { VLOG(1) << "TRT Plugin DataType selected. Slice-->fp32"; const float *input1 = static_cast<const float *>(inputs[0]); float *output = static_cast<float *>(outputs[0]); hipLaunchKernelGGL(( SliceKernel<float>), dim3(blocks), dim3(threads), 3 * num_dims * sizeof(int), stream, out_num, num_dims, input1, offset_temp_data_, output); } else if (input_type == nvinfer1::DataType::kHALF) { VLOG(1) << "TRT Plugin DataType selected. Slice-->fp16"; const half *input1 = static_cast<const half *>(inputs[0]); half *output = static_cast<half *>(outputs[0]); hipLaunchKernelGGL(( SliceKernel<half>), dim3(blocks), dim3(threads), 3 * num_dims * sizeof(int), stream, out_num, num_dims, input1, offset_temp_data_, output); } else { PADDLE_THROW(platform::errors::Fatal( "The Slice TRT Plugin's input type should be float or half.")); } return hipGetLastError() != hipSuccess; } #endif } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
92492791c14e11dc316ff06445b73fd5f2cbccf9.cu
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <cuda_runtime.h> #include <stdio.h> #include <cassert> #include <cub/cub.cuh> // NOLINT #include <vector> #include "glog/logging.h" #include "paddle/fluid/inference/tensorrt/plugin/slice_op_plugin.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { template <typename T> __global__ void SliceKernel(int num, int dims, const T *input, const int *offsets_info, T *output) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; extern __shared__ int shared_data[]; for (int i = threadIdx.x; i < dims * 3; i += blockDim.x) { shared_data[i] = offsets_info[i]; } __syncthreads(); if (idx < num) { int t_idx = idx; int in_idx = 0; for (int i = dims - 1; i >= 0; i--) { // output_shape auto t = t_idx % shared_data[i * 3 + 1]; // out offset auto s = t + shared_data[i * 3]; // input_seg_offset in_idx = in_idx + shared_data[i * 3 + 2] * s; t_idx = t_idx / shared_data[i * 3 + 1]; } output[idx] = input[in_idx]; } } SlicePlugin::SlicePlugin(std::vector<int> starts, std::vector<int> ends, std::vector<int> axes, bool with_fp16) : starts_(starts), ends_(ends), axes_(axes) { with_fp16_ = with_fp16; } SlicePlugin::SlicePlugin(void const *serial_data, size_t serial_length) { deserializeBase(serial_data, serial_length); DeserializeValue(&serial_data, &serial_length, &starts_); DeserializeValue(&serial_data, &serial_length, &ends_); DeserializeValue(&serial_data, &serial_length, &axes_); DeserializeValue(&serial_data, &serial_length, &with_fp16_); DeserializeValue(&serial_data, &serial_length, &offset_info_); } SlicePlugin::~SlicePlugin() { cudaFree(offset_temp_data_); } SlicePlugin *SlicePlugin::clone() const TRT_NOEXCEPT { return new SlicePlugin(starts_, ends_, axes_, with_fp16_); } bool SlicePlugin::supportsFormat( nvinfer1::DataType type, nvinfer1::PluginFormat format) const TRT_NOEXCEPT { if (with_fp16_) { return ((type == nvinfer1::DataType::kFLOAT || type == nvinfer1::DataType::kHALF) && (format == nvinfer1::PluginFormat::kLINEAR)); } else { return ((type == nvinfer1::DataType::kFLOAT) && (format == nvinfer1::PluginFormat::kLINEAR)); } } nvinfer1::Dims SlicePlugin::getOutputDimensions( int index, const nvinfer1::Dims *inputs, int nb_input_dims) TRT_NOEXCEPT { auto in_dims = inputs[0]; nvinfer1::Dims out_dims = in_dims; for (size_t i = 0; i < axes_.size(); i++) { int start = starts_[i]; int end = ends_[i]; out_dims.d[axes_[i] - 1] = end - start; } return out_dims; } int SlicePlugin::enqueue(int batch_size, const void *const *inputs, #if IS_TRT_VERSION_LT(8000) void **outputs, void *workspace, cudaStream_t stream) { #else void *const *outputs, void *workspace, cudaStream_t stream) TRT_NOEXCEPT { #endif auto input_dims = getInputDims(0); // notice input dims is [C, H, W], add input batch dim here auto out_dims = getOutputDimensions(0, &input_dims, 1); input_dims.nbDims += 1; out_dims.nbDims += 1; for (auto i = input_dims.nbDims; i > 0; --i) { input_dims.d[i] = 
input_dims.d[i - 1]; out_dims.d[i] = out_dims.d[i - 1]; } input_dims.d[0] = batch_size; out_dims.d[0] = batch_size; auto num_dims = input_dims.nbDims; size_t out_num = ProductDim(out_dims); std::vector<int> seg_offsets; std::vector<int> offsets; std::vector<int> extends; offsets.resize(num_dims); extends.resize(num_dims); seg_offsets.resize(num_dims); seg_offsets[num_dims - 1] = 1; for (int i = num_dims - 2; i >= 0; i--) { seg_offsets[i] = input_dims.d[i + 1] * seg_offsets[i + 1]; } for (size_t i = 0; i < num_dims; ++i) { offsets[i] = 0; extends[i] = out_dims.d[i]; } for (size_t i = 0; i < axes_.size(); ++i) { offsets[axes_[i]] = starts_[i]; } std::vector<int> offset_info; for (size_t i = 0; i < num_dims; ++i) { offset_info.push_back(offsets[i]); offset_info.push_back(extends[i]); offset_info.push_back(seg_offsets[i]); } if (offset_temp_data_ == nullptr) { cudaMalloc(&offset_temp_data_, 3 * num_dims * sizeof(int)); } cudaMemcpyAsync(offset_temp_data_, offset_info.data(), sizeof(int) * 3 * num_dims, cudaMemcpyHostToDevice, stream); int threads = 256; int blocks = (out_num + threads - 1) / threads; auto input_type = getDataType(); if (input_type == nvinfer1::DataType::kFLOAT) { VLOG(1) << "TRT Plugin DataType selected. Slice-->fp32"; const float *input1 = static_cast<const float *>(inputs[0]); float *output = static_cast<float *>(outputs[0]); SliceKernel<float><<<blocks, threads, 3 * num_dims * sizeof(int), stream>>>( out_num, num_dims, input1, offset_temp_data_, output); } else if (input_type == nvinfer1::DataType::kHALF) { VLOG(1) << "TRT Plugin DataType selected. Slice-->fp16"; const half *input1 = static_cast<const half *>(inputs[0]); half *output = static_cast<half *>(outputs[0]); SliceKernel<half><<<blocks, threads, 3 * num_dims * sizeof(int), stream>>>( out_num, num_dims, input1, offset_temp_data_, output); } else { PADDLE_THROW(platform::errors::Fatal( "The Slice TRT Plugin's input type should be float or half.")); } return cudaGetLastError() != cudaSuccess; } size_t SlicePlugin::getSerializationSize() const TRT_NOEXCEPT { return getBaseSerializationSize() + SerializedSize(starts_) + SerializedSize(ends_) + SerializedSize(axes_) + SerializedSize(with_fp16_) + SerializedSize(offset_info_); } void SlicePlugin::serialize(void *buffer) const TRT_NOEXCEPT { serializeBase(buffer); SerializeValue(&buffer, starts_); SerializeValue(&buffer, ends_); SerializeValue(&buffer, axes_); SerializeValue(&buffer, with_fp16_); SerializeValue(&buffer, offset_info_); } // Dynamic Plugin below. 
#if IS_TRT_VERSION_GE(6000) SlicePluginDynamic::SlicePluginDynamic(std::vector<int> starts, std::vector<int> ends, std::vector<int> axes, int decrease_axis, bool with_fp16) : starts_(starts), ends_(ends), axes_(axes), decrease_axis_(decrease_axis) { with_fp16_ = with_fp16; } SlicePluginDynamic::SlicePluginDynamic(void const *serialData, size_t serialLength) { DeserializeValue(&serialData, &serialLength, &starts_); DeserializeValue(&serialData, &serialLength, &ends_); DeserializeValue(&serialData, &serialLength, &axes_); DeserializeValue(&serialData, &serialLength, &decrease_axis_); DeserializeValue(&serialData, &serialLength, &with_fp16_); DeserializeValue(&serialData, &serialLength, &offset_info_); } void SlicePluginDynamic::destroy() TRT_NOEXCEPT { cudaFree(offset_temp_data_); delete this; } int SlicePluginDynamic::initialize() TRT_NOEXCEPT { return 0; } size_t SlicePluginDynamic::getSerializationSize() const TRT_NOEXCEPT { size_t size = SerializedSize(starts_) + SerializedSize(ends_) + SerializedSize(axes_) + SerializedSize(decrease_axis_) + SerializedSize(with_fp16_) + SerializedSize(offset_info_); return size; } void SlicePluginDynamic::serialize(void *buffer) const TRT_NOEXCEPT { SerializeValue(&buffer, starts_); SerializeValue(&buffer, ends_); SerializeValue(&buffer, axes_); SerializeValue(&buffer, decrease_axis_); SerializeValue(&buffer, with_fp16_); SerializeValue(&buffer, offset_info_); } nvinfer1::DimsExprs SlicePluginDynamic::getOutputDimensions( int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs, nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT { auto in_dims = inputs[0]; nvinfer1::DimsExprs ret = in_dims; // start, ends should greater 0 for (size_t i = 0; i < axes_.size(); i++) { int start = starts_[i]; int end = ends_[i]; #if IS_TRT_VERSION_GE(7200) ret.d[axes_[i]] = expr_builder.operation( nvinfer1::DimensionOperation::kSUB, *expr_builder.operation(nvinfer1::DimensionOperation::kMIN, *expr_builder.constant(ends_[i]), *in_dims.d[axes_[i]]), *expr_builder.constant(start)); #else ret.d[axes_[i]] = expr_builder.constant(end - start); #endif } if (decrease_axis_ != -1) { nvinfer1::DimsExprs res; res.nbDims = ret.nbDims - 1; int j = 0; for (size_t i = 0; i < in_dims.nbDims; i++) { if (decrease_axis_ == i) continue; res.d[j++] = expr_builder.operation(nvinfer1::DimensionOperation::kMAX, *expr_builder.constant(0), *ret.d[i]); } return res; } return ret; } bool SlicePluginDynamic::supportsFormatCombination( int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs, int nb_outputs) TRT_NOEXCEPT { PADDLE_ENFORCE_NOT_NULL( in_out, platform::errors::InvalidArgument( "The input of swish plugin shoule not be nullptr.")); PADDLE_ENFORCE_LT( pos, nb_inputs + nb_outputs, platform::errors::InvalidArgument("The pos(%d) should be less than the " "num(%d) of the input and the output.", pos, nb_inputs + nb_outputs)); const nvinfer1::PluginTensorDesc &in = in_out[pos]; if (pos == 0) { if (with_fp16_) { return (in.type == nvinfer1::DataType::kFLOAT || in.type == nvinfer1::DataType::kHALF) && (in.format == nvinfer1::TensorFormat::kLINEAR); } else { return (in.type == nvinfer1::DataType::kFLOAT) && (in.format == nvinfer1::TensorFormat::kLINEAR); } } const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1]; // output return in.type == prev.type && in.format == prev.format; } nvinfer1::DataType SlicePluginDynamic::getOutputDataType( int index, const nvinfer1::DataType *input_types, int nb_inputs) const TRT_NOEXCEPT { PADDLE_ENFORCE_EQ(index, 0, 
platform::errors::InvalidArgument( "The Slice Plugin only has one input, so the " "index value should be 0, but get %d.", index)); PADDLE_ENFORCE_EQ((input_types[0] == nvinfer1::DataType::kFLOAT || input_types[0] == nvinfer1::DataType::kHALF), true, platform::errors::InvalidArgument( "The input type should be half or float")); return input_types[0]; } int SlicePluginDynamic::enqueue(const nvinfer1::PluginTensorDesc *input_desc, const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs, void *const *outputs, void *workspace, cudaStream_t stream) TRT_NOEXCEPT { auto input_dims = input_desc[0].dims; auto out_dims = output_desc[0].dims; if (decrease_axis_ != -1) { out_dims = input_dims; out_dims.d[decrease_axis_] = 1; } auto num_dims = input_dims.nbDims; size_t out_num = ProductDim(out_dims); std::vector<int> seg_offsets; std::vector<int> offsets; std::vector<int> extends; offsets.resize(num_dims); extends.resize(num_dims); seg_offsets.resize(num_dims); seg_offsets[num_dims - 1] = 1; for (int i = num_dims - 2; i >= 0; i--) { seg_offsets[i] = input_dims.d[i + 1] * seg_offsets[i + 1]; } for (size_t i = 0; i < num_dims; ++i) { offsets[i] = 0; extends[i] = out_dims.d[i]; } for (size_t i = 0; i < axes_.size(); ++i) { offsets[axes_[i]] = starts_[i]; } offset_info_.resize(num_dims * 3); for (size_t i = 0; i < num_dims; ++i) { offset_info_[i * 3 + 0] = offsets[i]; offset_info_[i * 3 + 1] = extends[i]; offset_info_[i * 3 + 2] = seg_offsets[i]; } if (offset_temp_data_ == nullptr) { cudaMalloc(&offset_temp_data_, 3 * num_dims * sizeof(int)); } cudaMemcpyAsync(offset_temp_data_, offset_info_.data(), sizeof(int) * 3 * num_dims, cudaMemcpyHostToDevice, stream); int threads = 256; int blocks = (out_num + threads - 1) / threads; auto input_type = input_desc[0].type; if (input_type == nvinfer1::DataType::kFLOAT) { VLOG(1) << "TRT Plugin DataType selected. Slice-->fp32"; const float *input1 = static_cast<const float *>(inputs[0]); float *output = static_cast<float *>(outputs[0]); SliceKernel<float><<<blocks, threads, 3 * num_dims * sizeof(int), stream>>>( out_num, num_dims, input1, offset_temp_data_, output); } else if (input_type == nvinfer1::DataType::kHALF) { VLOG(1) << "TRT Plugin DataType selected. Slice-->fp16"; const half *input1 = static_cast<const half *>(inputs[0]); half *output = static_cast<half *>(outputs[0]); SliceKernel<half><<<blocks, threads, 3 * num_dims * sizeof(int), stream>>>( out_num, num_dims, input1, offset_temp_data_, output); } else { PADDLE_THROW(platform::errors::Fatal( "The Slice TRT Plugin's input type should be float or half.")); } return cudaGetLastError() != cudaSuccess; } #endif } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
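// Self-contained illustration (not plugin API) of the host-side bookkeeping that
// both enqueue() overloads above perform: given the input shape, the output shape,
// and the (axis, start) pairs, build the flat {offset, extent, stride} triples
// that SliceKernel reads. Function and variable names here are illustrative only.
#include <vector>

static std::vector<int> BuildOffsetInfo(const std::vector<int> &in_shape,
                                        const std::vector<int> &out_shape,
                                        const std::vector<int> &axes,
                                        const std::vector<int> &starts) {
  const int num_dims = static_cast<int>(in_shape.size());
  std::vector<int> seg_offsets(num_dims), offsets(num_dims, 0);
  // Row-major strides of the input tensor; the innermost dimension has stride 1.
  seg_offsets[num_dims - 1] = 1;
  for (int i = num_dims - 2; i >= 0; --i) {
    seg_offsets[i] = in_shape[i + 1] * seg_offsets[i + 1];
  }
  // Only sliced axes start at a non-zero offset.
  for (size_t i = 0; i < axes.size(); ++i) {
    offsets[axes[i]] = starts[i];
  }
  std::vector<int> info;
  for (int i = 0; i < num_dims; ++i) {
    info.push_back(offsets[i]);      // where the slice begins in dimension i
    info.push_back(out_shape[i]);    // how many elements survive in dimension i
    info.push_back(seg_offsets[i]);  // input stride of dimension i
  }
  return info;
}
// Example: BuildOffsetInfo({2, 4}, {2, 2}, {1}, {1}) yields {0, 2, 4, 1, 2, 1},
// i.e. keep both rows and columns 1..2 of a 2x4 matrix.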
abf05649829ed391d343ed4108e5e59d4148a8b7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2015, Julian Straub <[email protected]> Licensed * under the MIT license. See the license file LICENSE. */ #include <stdint.h> #include <stdio.h> #include <nvidia/helper_cuda.h> #define PI 3.141592653589793f #define BLOCK_SIZE 256 // int tidk = 0*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 1*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 2*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 3*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 4*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 5*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 6*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 7*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 8*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 9*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 10*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 11*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 12*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 13*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 14*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 15*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 16*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 17*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 18*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 19*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 20*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 21*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 22*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 23*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; template<typename T> __device__ inline T atomicAdd_(T* address, T val) {}; template<> __device__ inline double atomicAdd_<double>(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); }; template<> __device__ inline float atomicAdd_<float>(float* address, float val) { return atomicAdd(address,val); }; template <uint16_t blockSize, class T> __global__ void reduction_oldOptimizedMemLayout(T *mu_karch,T *dbg) //, T *N) { __shared__ T mu[BLOCK_SIZE*4*6]; const int tid = threadIdx.x + blockDim.x * threadIdx.y; int tidk = tid*6*4; #pragma unroll for(int s=0; s<6*4; ++s) { // this is almost certainly bad ordering mu[tid+BLOCK_SIZE*s] = 1.0f; } // old reduction..... __syncthreads(); //sync the threads #pragma unroll for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) { int ss = s*6*4; if(tid < s) { #pragma unroll for( int k=0; k<6*4; ++k) { mu[tidk+k] += mu[tidk+k + ss]; } } __syncthreads(); } //dbg[tid] = mu[tid+BLOCK_SIZE]; if(tid<6*4) {// && Ni[(k/3)*BLOCK_SIZE]>0 ) { atomicAdd_<T>(&mu_karch[tid],mu[tid]); } } template <uint16_t blockSize, class T> __global__ void reduction_oldOptimized(T *mu_karch,T *dbg) //, T *N) { __shared__ T mu[BLOCK_SIZE*4*6]; const int tid = threadIdx.x + blockDim.x * threadIdx.y; #pragma unroll for(int s=0; s<6*4; ++s) { // this is almost certainly bad ordering mu[tid+BLOCK_SIZE*s] = 1.0f; } // old reduction..... 
__syncthreads(); //sync the threads if(blockSize >= 512) if(tid<256){ #pragma unroll for( int k=0; k<6*4; ++k) { int tidk = k*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + 256]; } __syncthreads(); } if(blockSize >= 256) if(tid<128){ #pragma unroll for( int k=0; k<6*4; ++k) { int tidk = k*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + 128]; } __syncthreads(); } if(blockSize >= 128) if(tid<64){ #pragma unroll for( int k=0; k<6*4; ++k) { int tidk = k*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + 64]; } __syncthreads(); } if(blockSize >= 64) if(tid<32){ #pragma unroll for( int k=0; k<6*4; ++k) { int tidk = k*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + 32]; } } __syncthreads(); if(blockSize >= 32) if(tid<16){ #pragma unroll for( int k=0; k<6*4; ++k) { int tidk = k*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + 16]; } __syncthreads(); } if(blockSize >= 16) { if(tid<8){ #pragma unroll for( int k=0; k<6*4; ++k) { int tidk = k*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + 8]; } } __syncthreads(); } if(blockSize >= 8) { if(tid<4){ #pragma unroll for( int k=0; k<6*4; ++k) { int tidk = k*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + 4]; } } __syncthreads(); } if(blockSize >= 4) { if(tid<2*6*4){ int tidk = (tid/2)*BLOCK_SIZE + tid%2; mu[tidk] += mu[tidk+2]; dbg[tid] = tidk; // if(tid<2){ //#pragma unroll // for( int k=0; k<6*4; ++k) { // int tidk = k*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + 2]; // dbg[k*2+tid] = tidk; // dbg[tid+k*2] = tidk; // } } } __syncthreads(); // if(blockSize >= 2) // { // if(tid<6*4) // { // int tidk = tid*BLOCK_SIZE; // mu[tidk] += mu[tidk+1]; // } // __syncthreads(); // } //dbg[tid] = mu[tid+BLOCK_SIZE*19]; if(tid<6*4) {// && Ni[(k/3)*BLOCK_SIZE]>0 ) { int tidk = tid*BLOCK_SIZE; //mu[tidk] += mu[tidk+1]; atomicAdd_<T>(&mu_karch[tid],mu[tidk]+mu[tidk+1]); //atomicAdd_<T>(&mu_karch[tid],mu[tid*BLOCK_SIZE]); } } template<class T> __global__ void reduction_old(T *mu_karch, T *dbg) //, T *N) { __shared__ T mu[BLOCK_SIZE*4*6]; const int tid = threadIdx.x + blockDim.x * threadIdx.y; #pragma unroll for(int s=0; s<6*4; ++s) { // this is almost certainly bad ordering mu[tid+BLOCK_SIZE*s] = 1.0f; } // old reduction..... __syncthreads(); //sync the threads #pragma unroll for(int s=(BLOCK_SIZE)/2; s>1; s>>=1) { if(tid < s) { #pragma unroll for( int k=0; k<6*4; ++k) { int tidk = k*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; } } __syncthreads(); } if(tid<6*4) {// && Ni[(k/3)*BLOCK_SIZE]>0 ) { atomicAdd_<T>(&mu_karch[tid],mu[tid*BLOCK_SIZE]+mu[tid*BLOCK_SIZE+1]); } } template<class T> __global__ void reduction_newNew(T *mu_karch,T *dbg) //, T *N) { __shared__ T mu[BLOCK_SIZE*4*6]; const int tid = threadIdx.x + blockDim.x * threadIdx.y; #pragma unroll for(int s=0; s<6*4; ++s) { // this is almost certainly bad ordering mu[tid+BLOCK_SIZE*s] = 1.0f; } // old reduction..... 
__syncthreads(); //sync the threads int s = (BLOCK_SIZE)/2; //128 #pragma unroll for (uint32_t k=0; k<6*4; ++k) { int tidk = k*BLOCK_SIZE+tid; if(tid<s) mu[tidk] += mu[tidk + s]; } s = (BLOCK_SIZE)/4;//64 #pragma unroll for (uint32_t k=0; k<6*2; ++k) { #pragma unroll for (uint32_t j=0; j<2; ++j) { int ss = j*s; int tidk = (2*k+j)*BLOCK_SIZE+tid-ss; if(ss<=tid && tid<ss+s) mu[tidk] += mu[tidk + s]; } // if(tid<s) // { // int tidk = 2*k*BLOCK_SIZE+tid; // mu[tidk] += mu[tidk + s]; // }else{ // int tidk = (2*k+1)*BLOCK_SIZE+tid-s; // mu[tidk] += mu[tidk + s]; // } } __syncthreads(); //sync the threads s = (BLOCK_SIZE)/8; //32 #pragma unroll for (uint32_t k=0; k<6; ++k) { #pragma unroll for (uint32_t j=0; j<4; ++j) { int ss = j*s; int tidk = (4*k+j)*BLOCK_SIZE+tid-ss; if(ss<=tid && tid<ss+s) mu[tidk] += mu[tidk + s]; } } __syncthreads(); //sync the threads s = (BLOCK_SIZE)/16; //16 #pragma unroll for (uint32_t k=0; k<3; ++k) { #pragma unroll for (uint32_t j=0; j<8; ++j) { int ss = j*s; int tidk = (8*k+j)*BLOCK_SIZE+tid-ss; if(ss<=tid && tid<ss+s) mu[tidk] += mu[tidk + s]; } } __syncthreads(); //sync the threads s = (BLOCK_SIZE)/32; //8 uint32_t k = 0; #pragma unroll for (uint32_t j=0; j<16; ++j) { int ss = j*s; int tidk = (16*k+j)*BLOCK_SIZE+tid-ss; if(ss<=tid && tid<ss+s) mu[tidk] += mu[tidk + s]; } k = 1; #pragma unroll for (uint32_t j=0; j<8; ++j) { int ss = j*s; int tidk = (16*k+j)*BLOCK_SIZE+tid-ss; if(ss<=tid && tid<ss+s) mu[tidk] += mu[tidk + s]; } __syncthreads(); //sync the threads s = (BLOCK_SIZE)/64; //4 k = 0; #pragma unroll for (uint32_t j=0; j<24; ++j) { int ss = j*s; int tidk = (24*k+j)*BLOCK_SIZE+tid-ss; if(ss<=tid && tid<ss+s) mu[tidk] += mu[tidk + s]; } //__syncthreads(); //sync the threads s = (BLOCK_SIZE)/128; //2 k = 0; #pragma unroll for (uint32_t j=0; j<24; ++j) { int ss = j*s; int tidk = (24*k+j)*BLOCK_SIZE+tid-ss; if(ss<=tid && tid<ss+s) mu[tidk] += mu[tidk + s]; } //__syncthreads(); //sync the threads s = (BLOCK_SIZE)/256; //1 k = 0; #pragma unroll for (uint32_t j=0; j<24; ++j) { int tidk = (24*k+j)*BLOCK_SIZE+tid-j; if(tid==j) mu[tidk] += mu[tidk + s]; } if(tid<6*4) {// && Ni[(k/3)*BLOCK_SIZE]>0 ) { atomicAdd_<T>(&mu_karch[tid],mu[tid*BLOCK_SIZE]); } } template<class T> __global__ void reduction_new(T *mu_karch,T *dbg) //, T *N) { __shared__ T mu[BLOCK_SIZE*4*6]; const int tid = threadIdx.x + blockDim.x * threadIdx.y; #pragma unroll for(int s=0; s<6*4; ++s) { // this is almost certainly bad ordering mu[tid+BLOCK_SIZE*s] = 1.0f; } bool exit=false; int tpr = BLOCK_SIZE/(4*6); // threads per row //reduction..... 
__syncthreads(); //sync the threads #pragma unroll for(int r=0; r<4*6; ++r) { if (r*tpr <= tid && tid < (r+1)*tpr) { int tidr = tid - r*tpr; // id in row int offset = r*BLOCK_SIZE+tidr; //dbg[id] = offset; #pragma unroll for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) { int expr = s/tpr; // executions per row //dbg[id] = expr; for (int ex=0; ex<expr; ++ex) mu[offset+ex*tpr] += mu[offset+ex*tpr+s]; int exprem = s%tpr; // remaining executions if (tidr <exprem) mu[offset+expr*tpr] += mu[offset+expr*tpr+s]; __syncthreads(); if(s==BLOCK_SIZE/4) { exit=true; break; } } } if(exit) break; } //dbg[id] = mu[id+BLOCK_SIZE*3]; //dbg[id] =tid; if(tid<6*4) {// && Ni[(k/3)*BLOCK_SIZE]>0 ) { atomicAdd_<T>(&mu_karch[tid],mu[tid*BLOCK_SIZE]); } } extern void reduction(float *h_mu, float *d_mu, float *h_dbg, float *d_dbg, int selection) { for(uint32_t i=0; i<4*6; ++i) h_mu[i] =float(0.0f); for(uint32_t i=0; i<256; ++i) h_dbg[i] =float(0.0f); checkCudaErrors(hipMemcpy(d_mu, h_mu, 6*4* sizeof(float), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_dbg, h_dbg, 256* sizeof(float), hipMemcpyHostToDevice)); dim3 threads(16,16,1); //dim3 blocks(w/16+(w%16>0?1:0), h/16+(h%16>0?1:0),1); dim3 blocks(1,1,1); if(selection == 0){ hipLaunchKernelGGL(( reduction_old<float>), dim3(blocks),dim3(threads), 0, 0, d_mu,d_dbg); }else if(selection == 1) { hipLaunchKernelGGL(( reduction_new<float>), dim3(blocks),dim3(threads), 0, 0, d_mu,d_dbg); }else if (selection ==2) hipLaunchKernelGGL(( reduction_newNew<float>), dim3(blocks),dim3(threads), 0, 0, d_mu,d_dbg); else if (selection ==3) hipLaunchKernelGGL(( reduction_oldOptimized<256,float>), dim3(blocks),dim3(threads), 0, 0, d_mu,d_dbg); else if (selection ==4) hipLaunchKernelGGL(( reduction_oldOptimizedMemLayout<256,float>), dim3(blocks),dim3(threads), 0, 0, d_mu,d_dbg); checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipMemcpy(h_mu, d_mu, 6*4*sizeof(float), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_dbg, d_dbg, 256*sizeof(float), hipMemcpyDeviceToHost)); } extern void reduction(double *h_mu, double *d_mu, double *h_dbg, double *d_dbg, int selection) { for(uint32_t i=0; i<4*6; ++i) h_mu[i] =double(0.0f); for(uint32_t i=0; i<256; ++i) h_dbg[i] =double(0.0f); checkCudaErrors(hipMemcpy(d_mu, h_mu, 6*4* sizeof(double), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_dbg, h_dbg, 256* sizeof(double), hipMemcpyHostToDevice)); dim3 threads(16,16,1); //dim3 blocks(w/16+(w%16>0?1:0), h/16+(h%16>0?1:0),1); dim3 blocks(1,1,1); if(selection == 0){ hipLaunchKernelGGL(( reduction_old<double>), dim3(blocks),dim3(threads), 0, 0, d_mu,d_dbg); }else if(selection == 1) { hipLaunchKernelGGL(( reduction_new<double>), dim3(blocks),dim3(threads), 0, 0, d_mu,d_dbg); }else if (selection ==2) hipLaunchKernelGGL(( reduction_newNew<double>), dim3(blocks),dim3(threads), 0, 0, d_mu,d_dbg); else if (selection ==3) hipLaunchKernelGGL(( reduction_oldOptimized<256,double>), dim3(blocks),dim3(threads), 0, 0, d_mu,d_dbg); else if (selection ==4) hipLaunchKernelGGL(( reduction_oldOptimizedMemLayout<256,double>), dim3(blocks),dim3(threads), 0, 0, d_mu,d_dbg); checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipMemcpy(h_mu, d_mu, 6*4*sizeof(double), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_dbg, d_dbg, 256*sizeof(double), hipMemcpyDeviceToHost)); }
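// Hypothetical test driver (not in the original sources) for the extern
// reduction(...) entry points above: the caller owns the 6*4-element result
// buffers and the 256-element debug buffers on both host and device. With a
// single block of 16x16 threads and selection 0 (reduction_old), every one of
// the 24 accumulators should come back as 256.0f (one 1.0f per thread).
#include <hip/hip_runtime.h>
#include <cstdio>

extern void reduction(float *h_mu, float *d_mu, float *h_dbg, float *d_dbg,
                      int selection);

int main() {
  float h_mu[6 * 4], h_dbg[256];
  float *d_mu = nullptr, *d_dbg = nullptr;
  hipMalloc(&d_mu, 6 * 4 * sizeof(float));
  hipMalloc(&d_dbg, 256 * sizeof(float));
  reduction(h_mu, d_mu, h_dbg, d_dbg, /*selection=*/0);
  printf("mu[0] = %f\n", h_mu[0]);
  hipFree(d_mu);
  hipFree(d_dbg);
  return 0;
}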
abf05649829ed391d343ed4108e5e59d4148a8b7.cu
/* Copyright (c) 2015, Julian Straub <[email protected]> Licensed * under the MIT license. See the license file LICENSE. */ #include <stdint.h> #include <stdio.h> #include <nvidia/helper_cuda.h> #define PI 3.141592653589793f #define BLOCK_SIZE 256 // int tidk = 0*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 1*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 2*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 3*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 4*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 5*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 6*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 7*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 8*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 9*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 10*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 11*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 12*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 13*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 14*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 15*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 16*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 17*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 18*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 19*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 20*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 21*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 22*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 23*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; template<typename T> __device__ inline T atomicAdd_(T* address, T val) {}; template<> __device__ inline double atomicAdd_<double>(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); }; template<> __device__ inline float atomicAdd_<float>(float* address, float val) { return atomicAdd(address,val); }; template <uint16_t blockSize, class T> __global__ void reduction_oldOptimizedMemLayout(T *mu_karch,T *dbg) //, T *N) { __shared__ T mu[BLOCK_SIZE*4*6]; const int tid = threadIdx.x + blockDim.x * threadIdx.y; int tidk = tid*6*4; #pragma unroll for(int s=0; s<6*4; ++s) { // this is almost certainly bad ordering mu[tid+BLOCK_SIZE*s] = 1.0f; } // old reduction..... __syncthreads(); //sync the threads #pragma unroll for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) { int ss = s*6*4; if(tid < s) { #pragma unroll for( int k=0; k<6*4; ++k) { mu[tidk+k] += mu[tidk+k + ss]; } } __syncthreads(); } //dbg[tid] = mu[tid+BLOCK_SIZE]; if(tid<6*4) {// && Ni[(k/3)*BLOCK_SIZE]>0 ) { atomicAdd_<T>(&mu_karch[tid],mu[tid]); } } template <uint16_t blockSize, class T> __global__ void reduction_oldOptimized(T *mu_karch,T *dbg) //, T *N) { __shared__ T mu[BLOCK_SIZE*4*6]; const int tid = threadIdx.x + blockDim.x * threadIdx.y; #pragma unroll for(int s=0; s<6*4; ++s) { // this is almost certainly bad ordering mu[tid+BLOCK_SIZE*s] = 1.0f; } // old reduction..... 
__syncthreads(); //sync the threads if(blockSize >= 512) if(tid<256){ #pragma unroll for( int k=0; k<6*4; ++k) { int tidk = k*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + 256]; } __syncthreads(); } if(blockSize >= 256) if(tid<128){ #pragma unroll for( int k=0; k<6*4; ++k) { int tidk = k*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + 128]; } __syncthreads(); } if(blockSize >= 128) if(tid<64){ #pragma unroll for( int k=0; k<6*4; ++k) { int tidk = k*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + 64]; } __syncthreads(); } if(blockSize >= 64) if(tid<32){ #pragma unroll for( int k=0; k<6*4; ++k) { int tidk = k*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + 32]; } } __syncthreads(); if(blockSize >= 32) if(tid<16){ #pragma unroll for( int k=0; k<6*4; ++k) { int tidk = k*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + 16]; } __syncthreads(); } if(blockSize >= 16) { if(tid<8){ #pragma unroll for( int k=0; k<6*4; ++k) { int tidk = k*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + 8]; } } __syncthreads(); } if(blockSize >= 8) { if(tid<4){ #pragma unroll for( int k=0; k<6*4; ++k) { int tidk = k*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + 4]; } } __syncthreads(); } if(blockSize >= 4) { if(tid<2*6*4){ int tidk = (tid/2)*BLOCK_SIZE + tid%2; mu[tidk] += mu[tidk+2]; dbg[tid] = tidk; // if(tid<2){ //#pragma unroll // for( int k=0; k<6*4; ++k) { // int tidk = k*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + 2]; // dbg[k*2+tid] = tidk; // dbg[tid+k*2] = tidk; // } } } __syncthreads(); // if(blockSize >= 2) // { // if(tid<6*4) // { // int tidk = tid*BLOCK_SIZE; // mu[tidk] += mu[tidk+1]; // } // __syncthreads(); // } //dbg[tid] = mu[tid+BLOCK_SIZE*19]; if(tid<6*4) {// && Ni[(k/3)*BLOCK_SIZE]>0 ) { int tidk = tid*BLOCK_SIZE; //mu[tidk] += mu[tidk+1]; atomicAdd_<T>(&mu_karch[tid],mu[tidk]+mu[tidk+1]); //atomicAdd_<T>(&mu_karch[tid],mu[tid*BLOCK_SIZE]); } } template<class T> __global__ void reduction_old(T *mu_karch, T *dbg) //, T *N) { __shared__ T mu[BLOCK_SIZE*4*6]; const int tid = threadIdx.x + blockDim.x * threadIdx.y; #pragma unroll for(int s=0; s<6*4; ++s) { // this is almost certainly bad ordering mu[tid+BLOCK_SIZE*s] = 1.0f; } // old reduction..... __syncthreads(); //sync the threads #pragma unroll for(int s=(BLOCK_SIZE)/2; s>1; s>>=1) { if(tid < s) { #pragma unroll for( int k=0; k<6*4; ++k) { int tidk = k*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; } } __syncthreads(); } if(tid<6*4) {// && Ni[(k/3)*BLOCK_SIZE]>0 ) { atomicAdd_<T>(&mu_karch[tid],mu[tid*BLOCK_SIZE]+mu[tid*BLOCK_SIZE+1]); } } template<class T> __global__ void reduction_newNew(T *mu_karch,T *dbg) //, T *N) { __shared__ T mu[BLOCK_SIZE*4*6]; const int tid = threadIdx.x + blockDim.x * threadIdx.y; #pragma unroll for(int s=0; s<6*4; ++s) { // this is almost certainly bad ordering mu[tid+BLOCK_SIZE*s] = 1.0f; } // old reduction..... 
__syncthreads(); //sync the threads int s = (BLOCK_SIZE)/2; //128 #pragma unroll for (uint32_t k=0; k<6*4; ++k) { int tidk = k*BLOCK_SIZE+tid; if(tid<s) mu[tidk] += mu[tidk + s]; } s = (BLOCK_SIZE)/4;//64 #pragma unroll for (uint32_t k=0; k<6*2; ++k) { #pragma unroll for (uint32_t j=0; j<2; ++j) { int ss = j*s; int tidk = (2*k+j)*BLOCK_SIZE+tid-ss; if(ss<=tid && tid<ss+s) mu[tidk] += mu[tidk + s]; } // if(tid<s) // { // int tidk = 2*k*BLOCK_SIZE+tid; // mu[tidk] += mu[tidk + s]; // }else{ // int tidk = (2*k+1)*BLOCK_SIZE+tid-s; // mu[tidk] += mu[tidk + s]; // } } __syncthreads(); //sync the threads s = (BLOCK_SIZE)/8; //32 #pragma unroll for (uint32_t k=0; k<6; ++k) { #pragma unroll for (uint32_t j=0; j<4; ++j) { int ss = j*s; int tidk = (4*k+j)*BLOCK_SIZE+tid-ss; if(ss<=tid && tid<ss+s) mu[tidk] += mu[tidk + s]; } } __syncthreads(); //sync the threads s = (BLOCK_SIZE)/16; //16 #pragma unroll for (uint32_t k=0; k<3; ++k) { #pragma unroll for (uint32_t j=0; j<8; ++j) { int ss = j*s; int tidk = (8*k+j)*BLOCK_SIZE+tid-ss; if(ss<=tid && tid<ss+s) mu[tidk] += mu[tidk + s]; } } __syncthreads(); //sync the threads s = (BLOCK_SIZE)/32; //8 uint32_t k = 0; #pragma unroll for (uint32_t j=0; j<16; ++j) { int ss = j*s; int tidk = (16*k+j)*BLOCK_SIZE+tid-ss; if(ss<=tid && tid<ss+s) mu[tidk] += mu[tidk + s]; } k = 1; #pragma unroll for (uint32_t j=0; j<8; ++j) { int ss = j*s; int tidk = (16*k+j)*BLOCK_SIZE+tid-ss; if(ss<=tid && tid<ss+s) mu[tidk] += mu[tidk + s]; } __syncthreads(); //sync the threads s = (BLOCK_SIZE)/64; //4 k = 0; #pragma unroll for (uint32_t j=0; j<24; ++j) { int ss = j*s; int tidk = (24*k+j)*BLOCK_SIZE+tid-ss; if(ss<=tid && tid<ss+s) mu[tidk] += mu[tidk + s]; } //__syncthreads(); //sync the threads s = (BLOCK_SIZE)/128; //2 k = 0; #pragma unroll for (uint32_t j=0; j<24; ++j) { int ss = j*s; int tidk = (24*k+j)*BLOCK_SIZE+tid-ss; if(ss<=tid && tid<ss+s) mu[tidk] += mu[tidk + s]; } //__syncthreads(); //sync the threads s = (BLOCK_SIZE)/256; //1 k = 0; #pragma unroll for (uint32_t j=0; j<24; ++j) { int tidk = (24*k+j)*BLOCK_SIZE+tid-j; if(tid==j) mu[tidk] += mu[tidk + s]; } if(tid<6*4) {// && Ni[(k/3)*BLOCK_SIZE]>0 ) { atomicAdd_<T>(&mu_karch[tid],mu[tid*BLOCK_SIZE]); } } template<class T> __global__ void reduction_new(T *mu_karch,T *dbg) //, T *N) { __shared__ T mu[BLOCK_SIZE*4*6]; const int tid = threadIdx.x + blockDim.x * threadIdx.y; #pragma unroll for(int s=0; s<6*4; ++s) { // this is almost certainly bad ordering mu[tid+BLOCK_SIZE*s] = 1.0f; } bool exit=false; int tpr = BLOCK_SIZE/(4*6); // threads per row //reduction..... 
__syncthreads(); //sync the threads #pragma unroll for(int r=0; r<4*6; ++r) { if (r*tpr <= tid && tid < (r+1)*tpr) { int tidr = tid - r*tpr; // id in row int offset = r*BLOCK_SIZE+tidr; //dbg[id] = offset; #pragma unroll for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) { int expr = s/tpr; // executions per row //dbg[id] = expr; for (int ex=0; ex<expr; ++ex) mu[offset+ex*tpr] += mu[offset+ex*tpr+s]; int exprem = s%tpr; // remaining executions if (tidr <exprem) mu[offset+expr*tpr] += mu[offset+expr*tpr+s]; __syncthreads(); if(s==BLOCK_SIZE/4) { exit=true; break; } } } if(exit) break; } //dbg[id] = mu[id+BLOCK_SIZE*3]; //dbg[id] =tid; if(tid<6*4) {// && Ni[(k/3)*BLOCK_SIZE]>0 ) { atomicAdd_<T>(&mu_karch[tid],mu[tid*BLOCK_SIZE]); } } extern void reduction(float *h_mu, float *d_mu, float *h_dbg, float *d_dbg, int selection) { for(uint32_t i=0; i<4*6; ++i) h_mu[i] =float(0.0f); for(uint32_t i=0; i<256; ++i) h_dbg[i] =float(0.0f); checkCudaErrors(cudaMemcpy(d_mu, h_mu, 6*4* sizeof(float), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_dbg, h_dbg, 256* sizeof(float), cudaMemcpyHostToDevice)); dim3 threads(16,16,1); //dim3 blocks(w/16+(w%16>0?1:0), h/16+(h%16>0?1:0),1); dim3 blocks(1,1,1); if(selection == 0){ reduction_old<float><<<blocks,threads>>>(d_mu,d_dbg); }else if(selection == 1) { reduction_new<float><<<blocks,threads>>>(d_mu,d_dbg); }else if (selection ==2) reduction_newNew<float><<<blocks,threads>>>(d_mu,d_dbg); else if (selection ==3) reduction_oldOptimized<256,float><<<blocks,threads>>>(d_mu,d_dbg); else if (selection ==4) reduction_oldOptimizedMemLayout<256,float><<<blocks,threads>>>(d_mu,d_dbg); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaMemcpy(h_mu, d_mu, 6*4*sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_dbg, d_dbg, 256*sizeof(float), cudaMemcpyDeviceToHost)); } extern void reduction(double *h_mu, double *d_mu, double *h_dbg, double *d_dbg, int selection) { for(uint32_t i=0; i<4*6; ++i) h_mu[i] =double(0.0f); for(uint32_t i=0; i<256; ++i) h_dbg[i] =double(0.0f); checkCudaErrors(cudaMemcpy(d_mu, h_mu, 6*4* sizeof(double), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_dbg, h_dbg, 256* sizeof(double), cudaMemcpyHostToDevice)); dim3 threads(16,16,1); //dim3 blocks(w/16+(w%16>0?1:0), h/16+(h%16>0?1:0),1); dim3 blocks(1,1,1); if(selection == 0){ reduction_old<double><<<blocks,threads>>>(d_mu,d_dbg); }else if(selection == 1) { reduction_new<double><<<blocks,threads>>>(d_mu,d_dbg); }else if (selection ==2) reduction_newNew<double><<<blocks,threads>>>(d_mu,d_dbg); else if (selection ==3) reduction_oldOptimized<256,double><<<blocks,threads>>>(d_mu,d_dbg); else if (selection ==4) reduction_oldOptimizedMemLayout<256,double><<<blocks,threads>>>(d_mu,d_dbg); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaMemcpy(h_mu, d_mu, 6*4*sizeof(double), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_dbg, d_dbg, 256*sizeof(double), cudaMemcpyDeviceToHost)); }
2c28654cd6019ac30b50aac1d2dadb5e40be5207.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

// Accumulates X[i] + y * X[i] over the first n elements of X into *result.
// The accumulation target is an explicit global-memory pointer so that
// atomicAdd has a valid destination, and n bounds the array.
__global__ void combineIntArr(int* X, int y, int* result, int n) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) {
        int update = X[i] + y * X[i];
        atomicAdd(result, update);
    }
}

// Element-wise multiplication step of a dot product: C[i] = A[i] * B[i].
__global__ void dotProductMultiplicationPart(float* A, float* B, float* C, int n) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) {
        C[i] = A[i] * B[i];
    }
}

#define BLOCK_SIZE 512

//size = number of elements
__global__ void reduction(float *out, float *in, unsigned size) {
    /********************************************************************
    Load a segment of the input vector into shared memory
    Traverse the reduction tree
    Write the computed sum to the output vector at the correct index
    ********************************************************************/
    __shared__ float in_s[BLOCK_SIZE];

    // Each block reduces a 2*BLOCK_SIZE-wide segment of the input.
    int idx = 2 * blockIdx.x * blockDim.x + threadIdx.x;
    in_s[threadIdx.x] = ((idx < size) ? in[idx] : 0.0f) +
                        ((idx + BLOCK_SIZE < size) ? in[idx + BLOCK_SIZE] : 0.0f);

    for (int stride = BLOCK_SIZE / 2; stride >= 1; stride = stride / 2) {
        __syncthreads();
        if (threadIdx.x < stride)
            in_s[threadIdx.x] += in_s[threadIdx.x + stride];
    }

    if (threadIdx.x == 0)
        out[blockIdx.x] = in_s[0];
}
2c28654cd6019ac30b50aac1d2dadb5e40be5207.cu
// Accumulates X[i] + y * X[i] over the first n elements of X into *result.
// The accumulation target is an explicit global-memory pointer so that
// atomicAdd has a valid destination, and n bounds the array.
__global__ void combineIntArr(int* X, int y, int* result, int n) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) {
        int update = X[i] + y * X[i];
        atomicAdd(result, update);
    }
}

// Element-wise multiplication step of a dot product: C[i] = A[i] * B[i].
__global__ void dotProductMultiplicationPart(float* A, float* B, float* C, int n) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) {
        C[i] = A[i] * B[i];
    }
}

#define BLOCK_SIZE 512

//size = number of elements
__global__ void reduction(float *out, float *in, unsigned size) {
    /********************************************************************
    Load a segment of the input vector into shared memory
    Traverse the reduction tree
    Write the computed sum to the output vector at the correct index
    ********************************************************************/
    __shared__ float in_s[BLOCK_SIZE];

    // Each block reduces a 2*BLOCK_SIZE-wide segment of the input.
    int idx = 2 * blockIdx.x * blockDim.x + threadIdx.x;
    in_s[threadIdx.x] = ((idx < size) ? in[idx] : 0.0f) +
                        ((idx + BLOCK_SIZE < size) ? in[idx + BLOCK_SIZE] : 0.0f);

    for (int stride = BLOCK_SIZE / 2; stride >= 1; stride = stride / 2) {
        __syncthreads();
        if (threadIdx.x < stride)
            in_s[threadIdx.x] += in_s[threadIdx.x + stride];
    }

    if (threadIdx.x == 0)
        out[blockIdx.x] = in_s[0];
}
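// Hypothetical host-side driver (not part of the original file) for the segmented
// reduction kernel above. Each launch collapses up to 2*BLOCK_SIZE input elements
// per block into one partial sum, so the host keeps relaunching on the partial
// sums until a single value remains. It assumes it is compiled in the same
// translation unit as the kernel and BLOCK_SIZE macro above; buffer ownership
// and error handling are left out for brevity.
static float reduceOnDevice(float *d_in, float *d_scratch, unsigned size) {
    while (size > 1) {
        unsigned blocks = (size + 2 * BLOCK_SIZE - 1) / (2 * BLOCK_SIZE);
        reduction<<<blocks, BLOCK_SIZE>>>(d_scratch, d_in, size);
        cudaDeviceSynchronize();
        // The per-block partial sums become the next pass's input.
        float *tmp = d_in;
        d_in = d_scratch;
        d_scratch = tmp;
        size = blocks;
    }
    float result = 0.0f;
    cudaMemcpy(&result, d_in, sizeof(float), cudaMemcpyDeviceToHost);
    return result;
}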