Dataset columns (string lengths as reported by the viewer):

Column          Type      Length (min - max)
hip_filename    string    5 - 84
hip_content     string    79 - 9.69M
cuda_filename   string    4 - 83
cuda_content    string    19 - 9.69M

Each row pairs a hipify-generated HIP source file with the original CUDA source file it was translated from.
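As a quick way to work with the table described above, a minimal loading/inspection sketch follows. It assumes the rows have been exported to a Parquet file; the filename and the use of pandas are illustrative assumptions, not part of the dataset description.

# Minimal sketch, assuming the rows below are stored in a Parquet file;
# the path "hip_cuda_pairs.parquet" is hypothetical.
import pandas as pd

df = pd.read_parquet("hip_cuda_pairs.parquet")

# Expect the four string columns described above.
print(df.columns.tolist())  # ['hip_filename', 'hip_content', 'cuda_filename', 'cuda_content']

# Inspect one HIP/CUDA pair and the size of each source file.
row = df.iloc[0]
print(row["hip_filename"], len(row["hip_content"]))
print(row["cuda_filename"], len(row["cuda_content"]))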
711e79a86f6651f728d7285bcf8a30740b6ec287.hip
// !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <stdio.h>
#include <stdlib.h>
#include <cusparse_v2.h>
#include <hip/hip_runtime.h>

/*
 * This is an example demonstrating usage of the cuSPARSE library to perform a
 * sparse matrix-vector multiplication on randomly generated data.
 */

/*
 * M = # of rows
 * N = # of columns
 */
int M = 1024;
int N = 1024;

/*
 * Generate a vector of length N with random single-precision floating-point
 * values between 0 and 100.
 */
void generate_random_vector(int N, float **outX)
{
    int i;
    double rMax = (double)RAND_MAX;
    float *X = (float *)malloc(sizeof(float) * N);

    for (i = 0; i < N; i++)
    {
        int r = rand();
        double dr = (double)r;
        X[i] = (dr / rMax) * 100.0;
    }

    *outX = X;
}

/*
 * Generate random dense matrix A in column-major order, while rounding some
 * elements down to zero to ensure it is sparse.
 */
int generate_random_dense_matrix(int M, int N, float **outA)
{
    int i, j;
    double rMax = (double)RAND_MAX;
    float *A = (float *)malloc(sizeof(float) * M * N);
    int totalNnz = 0;

    for (j = 0; j < N; j++)
    {
        for (i = 0; i < M; i++)
        {
            int r = rand();
            float *curr = A + (j * M + i);

            if (r % 3 > 0)
            {
                *curr = 0.0f;
            }
            else
            {
                double dr = (double)r;
                *curr = (dr / rMax) * 100.0;
            }

            if (*curr != 0.0f)
            {
                totalNnz++;
            }
        }
    }

    *outA = A;
    return totalNnz;
}

int main(int argc, char **argv)
{
    int row;
    float *A, *dA;
    int *dNnzPerRow;
    float *dCsrValA;
    int *dCsrRowPtrA;
    int *dCsrColIndA;
    int totalNnz;
    float alpha = 3.0f;
    float beta = 4.0f;
    float *dX, *X;
    float *dY, *Y;
    hipsparseHandle_t handle = 0;
    hipsparseMatDescr_t descr = 0;

    // Generate input
    srand(9384);
    int trueNnz = generate_random_dense_matrix(M, N, &A);
    generate_random_vector(N, &X);
    generate_random_vector(M, &Y);

    // Create the cuSPARSE handle
    CHECK_CUSPARSE(hipsparseCreate(&handle));

    // Allocate device memory for vectors and the dense form of the matrix A
    CHECK(hipMalloc((void **)&dX, sizeof(float) * N));
    CHECK(hipMalloc((void **)&dY, sizeof(float) * M));
    CHECK(hipMalloc((void **)&dA, sizeof(float) * M * N));
    CHECK(hipMalloc((void **)&dNnzPerRow, sizeof(int) * M));

    // Construct a descriptor of the matrix A
    CHECK_CUSPARSE(hipsparseCreateMatDescr(&descr));
    CHECK_CUSPARSE(hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL));
    CHECK_CUSPARSE(hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO));

    // Transfer the input vectors and dense matrix A to the device
    CHECK(hipMemcpy(dX, X, sizeof(float) * N, hipMemcpyHostToDevice));
    CHECK(hipMemcpy(dY, Y, sizeof(float) * M, hipMemcpyHostToDevice));
    CHECK(hipMemcpy(dA, A, sizeof(float) * M * N, hipMemcpyHostToDevice));

    // Compute the number of non-zero elements in A
    CHECK_CUSPARSE(hipsparseSnnz(handle, HIPSPARSE_DIRECTION_ROW, M, N, descr, dA, M,
                                 dNnzPerRow, &totalNnz));

    if (totalNnz != trueNnz)
    {
        fprintf(stderr, "Difference detected between cuSPARSE NNZ and true "
                "value: expected %d but got %d\n", trueNnz, totalNnz);
        return 1;
    }

    // Allocate device memory to store the sparse CSR representation of A
    CHECK(hipMalloc((void **)&dCsrValA, sizeof(float) * totalNnz));
    CHECK(hipMalloc((void **)&dCsrRowPtrA, sizeof(int) * (M + 1)));
    CHECK(hipMalloc((void **)&dCsrColIndA, sizeof(int) * totalNnz));

    // Convert A from a dense formatting to a CSR formatting, using the GPU
    CHECK_CUSPARSE(hipsparseSdense2csr(handle, M, N, descr, dA, M, dNnzPerRow,
                                       dCsrValA, dCsrRowPtrA, dCsrColIndA));

    // Perform matrix-vector multiplication with the CSR-formatted matrix A
    // CHECK_CUSPARSE(hipsparseScsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE,
    //                                M, N, totalNnz, &alpha, descr, dCsrValA,
    //                                dCsrRowPtrA, dCsrColIndA, dX, &beta, dY));

    // Copy the result vector back to the host
    CHECK(hipMemcpy(Y, dY, sizeof(float) * M, hipMemcpyDeviceToHost));

    for (row = 0; row < 10; row++)
    {
        printf("%2.2f\n", Y[row]);
    }

    printf("...\n");

    free(A);
    free(X);
    free(Y);

    CHECK(hipFree(dX));
    CHECK(hipFree(dY));
    CHECK(hipFree(dA));
    CHECK(hipFree(dNnzPerRow));
    CHECK(hipFree(dCsrValA));
    CHECK(hipFree(dCsrRowPtrA));
    CHECK(hipFree(dCsrColIndA));

    CHECK_CUSPARSE(hipsparseDestroyMatDescr(descr));
    CHECK_CUSPARSE(hipsparseDestroy(handle));

    return 0;
}
711e79a86f6651f728d7285bcf8a30740b6ec287.cu
#include "../common/common.h" #include <stdio.h> #include <stdlib.h> #include <cusparse_v2.h> #include <cuda.h> /* * This is an example demonstrating usage of the cuSPARSE library to perform a * sparse matrix-vector multiplication on randomly generated data. */ /* * M = # of rows * N = # of columns */ int M = 1024; int N = 1024; /* * Generate a vector of length N with random single-precision floating-point * values between 0 and 100. */ void generate_random_vector(int N, float **outX) { int i; double rMax = (double)RAND_MAX; float *X = (float *)malloc(sizeof(float) * N); for (i = 0; i < N; i++) { int r = rand(); double dr = (double)r; X[i] = (dr / rMax) * 100.0; } *outX = X; } /* * Generate random dense matrix A in column-major order, while rounding some * elements down to zero to ensure it is sparse. */ int generate_random_dense_matrix(int M, int N, float **outA) { int i, j; double rMax = (double)RAND_MAX; float *A = (float *)malloc(sizeof(float) * M * N); int totalNnz = 0; for (j = 0; j < N; j++) { for (i = 0; i < M; i++) { int r = rand(); float *curr = A + (j * M + i); if (r % 3 > 0) { *curr = 0.0f; } else { double dr = (double)r; *curr = (dr / rMax) * 100.0; } if (*curr != 0.0f) { totalNnz++; } } } *outA = A; return totalNnz; } int main(int argc, char **argv) { int row; float *A, *dA; int *dNnzPerRow; float *dCsrValA; int *dCsrRowPtrA; int *dCsrColIndA; int totalNnz; float alpha = 3.0f; float beta = 4.0f; float *dX, *X; float *dY, *Y; cusparseHandle_t handle = 0; cusparseMatDescr_t descr = 0; // Generate input srand(9384); int trueNnz = generate_random_dense_matrix(M, N, &A); generate_random_vector(N, &X); generate_random_vector(M, &Y); // Create the cuSPARSE handle CHECK_CUSPARSE(cusparseCreate(&handle)); // Allocate device memory for vectors and the dense form of the matrix A CHECK(cudaMalloc((void **)&dX, sizeof(float) * N)); CHECK(cudaMalloc((void **)&dY, sizeof(float) * M)); CHECK(cudaMalloc((void **)&dA, sizeof(float) * M * N)); CHECK(cudaMalloc((void **)&dNnzPerRow, sizeof(int) * M)); // Construct a descriptor of the matrix A CHECK_CUSPARSE(cusparseCreateMatDescr(&descr)); CHECK_CUSPARSE(cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL)); CHECK_CUSPARSE(cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO)); // Transfer the input vectors and dense matrix A to the device CHECK(cudaMemcpy(dX, X, sizeof(float) * N, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(dY, Y, sizeof(float) * M, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(dA, A, sizeof(float) * M * N, cudaMemcpyHostToDevice)); // Compute the number of non-zero elements in A CHECK_CUSPARSE(cusparseSnnz(handle, CUSPARSE_DIRECTION_ROW, M, N, descr, dA, M, dNnzPerRow, &totalNnz)); if (totalNnz != trueNnz) { fprintf(stderr, "Difference detected between cuSPARSE NNZ and true " "value: expected %d but got %d\n", trueNnz, totalNnz); return 1; } // Allocate device memory to store the sparse CSR representation of A CHECK(cudaMalloc((void **)&dCsrValA, sizeof(float) * totalNnz)); CHECK(cudaMalloc((void **)&dCsrRowPtrA, sizeof(int) * (M + 1))); CHECK(cudaMalloc((void **)&dCsrColIndA, sizeof(int) * totalNnz)); // Convert A from a dense formatting to a CSR formatting, using the GPU CHECK_CUSPARSE(cusparseSdense2csr(handle, M, N, descr, dA, M, dNnzPerRow, dCsrValA, dCsrRowPtrA, dCsrColIndA)); // Perform matrix-vector multiplication with the CSR-formatted matrix A // CHECK_CUSPARSE(cusparseScsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, // M, N, totalNnz, &alpha, descr, dCsrValA, // dCsrRowPtrA, dCsrColIndA, dX, &beta, dY)); // Copy 
the result vector back to the host CHECK(cudaMemcpy(Y, dY, sizeof(float) * M, cudaMemcpyDeviceToHost)); for (row = 0; row < 10; row++) { printf("%2.2f\n", Y[row]); } printf("...\n"); free(A); free(X); free(Y); CHECK(cudaFree(dX)); CHECK(cudaFree(dY)); CHECK(cudaFree(dA)); CHECK(cudaFree(dNnzPerRow)); CHECK(cudaFree(dCsrValA)); CHECK(cudaFree(dCsrRowPtrA)); CHECK(cudaFree(dCsrColIndA)); CHECK_CUSPARSE(cusparseDestroyMatDescr(descr)); CHECK_CUSPARSE(cusparseDestroy(handle)); return 0; }
aa5dfd10047cec2d41b95a0bbc50efb71525410e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../shared/timer.hpp"
#include "../shared/tigr_utilities.hpp"
#include "../shared/graph.hpp"
#include "../shared/virtual_graph.hpp"
#include "../shared/globals.hpp"
#include "../shared/argument_parsing.hpp"
#include "../shared/gpu_error_check.cuh"

__global__ void kernel(unsigned int numParts,
                       unsigned int *nodePointer,
                       PartPointer *partNodePointer,
                       unsigned int *edgeList,
                       unsigned int *dist,
                       bool *finished,
                       int level)
{
    unsigned int partId = blockDim.x * blockIdx.x + threadIdx.x;

    if (partId < numParts)
    {
        unsigned int id = partNodePointer[partId].node;
        unsigned int part = partNodePointer[partId].part;

        if (dist[id] != level)
            return;

        unsigned int thisPointer = nodePointer[id];
        unsigned int degree = edgeList[thisPointer];

        unsigned int numParts;
        if (degree % Part_Size == 0)
            numParts = degree / Part_Size;
        else
            numParts = degree / Part_Size + 1;

        unsigned int end;
        unsigned int ofs = thisPointer + part + 1;

        for (int i = 0; i < Part_Size; i++)
        {
            if (part + i * numParts >= degree)
                break;
            end = ofs + i * numParts;
            if (dist[edgeList[end]] > level + 1)
            {
                dist[edgeList[end]] = level + 1;
                *finished = false;
            }
        }
    }
}

__global__ void clearLabel(bool *label, unsigned int size)
{
    unsigned int id = blockDim.x * blockIdx.x + threadIdx.x;
    if (id < size)
        label[id] = false;
}

int main(int argc, char **argv)
{
    ArgumentParser arguments(argc, argv, true, false);

    Graph graph(arguments.input, false);
    graph.ReadGraph();

    VirtualGraph vGraph(graph);

    Timer t22;
    t22.Start();
    vGraph.MakeUGraph();
    cout << "Pre-processing time in " << t22.Finish() << " (ms).\n";

    uint num_nodes = graph.num_nodes;
    uint num_edges = graph.num_edges;

    if (arguments.hasDeviceID)
        hipSetDevice(arguments.deviceID);

    hipFree(0);

    unsigned int *dist;
    dist = new unsigned int[num_nodes];

    for (int i = 0; i < num_nodes; i++)
    {
        dist[i] = DIST_INFINITY;
    }
    dist[arguments.sourceNode] = 0;

    unsigned int *d_nodePointer;
    unsigned int *d_edgeList;
    unsigned int *d_dist;
    PartPointer *d_partNodePointer;

    bool finished;
    bool *d_finished;

    gpuErrorcheck(hipMalloc(&d_dist, num_nodes * sizeof(unsigned int)));
    gpuErrorcheck(hipMemcpy(d_dist, dist, num_nodes * sizeof(unsigned int), hipMemcpyHostToDevice));

    Timer t3;
    t3.Start();

    gpuErrorcheck(hipMallocManaged(&d_nodePointer, num_nodes * sizeof(unsigned int)));
    gpuErrorcheck(hipMallocManaged(&d_edgeList, (num_edges + num_nodes) * sizeof(unsigned int)));
    gpuErrorcheck(hipMalloc(&d_finished, sizeof(bool)));
    gpuErrorcheck(hipMallocManaged(&d_partNodePointer, vGraph.numParts * sizeof(PartPointer)));

    gpuErrorcheck(hipMemAdvise(d_nodePointer, num_nodes * sizeof(uint), hipMemAdviseSetPreferredLocation, arguments.deviceID));
    gpuErrorcheck(hipMemAdvise(d_edgeList, (num_edges + num_nodes) * sizeof(uint), hipMemAdviseSetPreferredLocation, arguments.deviceID));
    gpuErrorcheck(hipMemAdvise(d_partNodePointer, vGraph.numParts * sizeof(PartPointer), hipMemAdviseSetPreferredLocation, arguments.deviceID));

    memcpy(d_nodePointer, vGraph.nodePointer, num_nodes * sizeof(unsigned int));
    memcpy(d_edgeList, vGraph.edgeList, (num_edges + num_nodes) * sizeof(unsigned int));
    memcpy(d_partNodePointer, vGraph.partNodePointer, vGraph.numParts * sizeof(PartPointer));

    Timer t;
    t.Start();

    int itr = 0;
    int level = 0;

    do
    {
        itr++;
        finished = true;
        gpuErrorcheck(hipMemcpy(d_finished, &finished, sizeof(bool), hipMemcpyHostToDevice));

        if (itr % 2 == 1)
        {
            hipLaunchKernelGGL(( kernel), dim3(vGraph.numParts / 512 + 1), dim3(512), 0, 0,
                               vGraph.numParts, d_nodePointer, d_partNodePointer, d_edgeList,
                               d_dist, d_finished, level);
        }
        else
        {
            hipLaunchKernelGGL(( kernel), dim3(vGraph.numParts / 512 + 1), dim3(512), 0, 0,
                               vGraph.numParts, d_nodePointer, d_partNodePointer, d_edgeList,
                               d_dist, d_finished, level);
        }

        gpuErrorcheck(hipPeekAtLastError());
        gpuErrorcheck(hipDeviceSynchronize());

        gpuErrorcheck(hipMemcpy(&finished, d_finished, sizeof(bool), hipMemcpyDeviceToHost));

        level++;
    } while (!(finished));

    cout << "Number of iterations = " << itr << endl;

    float runtime = t.Finish();
    cout << "Processing finished in " << runtime << " (ms).\n";
    cout << "Total time in " << t3.Finish() << " (ms).\n";

    gpuErrorcheck(hipMemcpy(dist, d_dist, num_nodes * sizeof(unsigned int), hipMemcpyDeviceToHost));

    utilities::PrintResults(dist, 30);

    if (arguments.hasOutput)
        utilities::SaveResults(arguments.output, dist, num_nodes);

    gpuErrorcheck(hipFree(d_nodePointer));
    gpuErrorcheck(hipFree(d_edgeList));
    gpuErrorcheck(hipFree(d_dist));
    gpuErrorcheck(hipFree(d_finished));
    gpuErrorcheck(hipFree(d_partNodePointer));
}
aa5dfd10047cec2d41b95a0bbc50efb71525410e.cu
#include "../shared/timer.hpp" #include "../shared/tigr_utilities.hpp" #include "../shared/graph.hpp" #include "../shared/virtual_graph.hpp" #include "../shared/globals.hpp" #include "../shared/argument_parsing.hpp" #include "../shared/gpu_error_check.cuh" __global__ void kernel(unsigned int numParts, unsigned int *nodePointer, PartPointer *partNodePointer, unsigned int *edgeList, unsigned int *dist, bool *finished, int level) { unsigned int partId = blockDim.x * blockIdx.x + threadIdx.x; if (partId < numParts) { unsigned int id = partNodePointer[partId].node; unsigned int part = partNodePointer[partId].part; if (dist[id] != level) return; unsigned int thisPointer = nodePointer[id]; unsigned int degree = edgeList[thisPointer]; unsigned int numParts; if (degree % Part_Size == 0) numParts = degree / Part_Size; else numParts = degree / Part_Size + 1; unsigned int end; unsigned int ofs = thisPointer + part + 1; for (int i = 0; i < Part_Size; i++) { if (part + i * numParts >= degree) break; end = ofs + i * numParts; if (dist[edgeList[end]] > level + 1) { dist[edgeList[end]] = level + 1; *finished = false; } } } } __global__ void clearLabel(bool *label, unsigned int size) { unsigned int id = blockDim.x * blockIdx.x + threadIdx.x; if (id < size) label[id] = false; } int main(int argc, char **argv) { ArgumentParser arguments(argc, argv, true, false); Graph graph(arguments.input, false); graph.ReadGraph(); VirtualGraph vGraph(graph); Timer t22; t22.Start(); vGraph.MakeUGraph(); cout << "Pre-processing time in " << t22.Finish() << " (ms).\n"; uint num_nodes = graph.num_nodes; uint num_edges = graph.num_edges; if (arguments.hasDeviceID) cudaSetDevice(arguments.deviceID); cudaFree(0); unsigned int *dist; dist = new unsigned int[num_nodes]; for (int i = 0; i < num_nodes; i++) { dist[i] = DIST_INFINITY; } dist[arguments.sourceNode] = 0; unsigned int *d_nodePointer; unsigned int *d_edgeList; unsigned int *d_dist; PartPointer *d_partNodePointer; bool finished; bool *d_finished; gpuErrorcheck(cudaMalloc(&d_dist, num_nodes * sizeof(unsigned int))); gpuErrorcheck(cudaMemcpy(d_dist, dist, num_nodes * sizeof(unsigned int), cudaMemcpyHostToDevice)); Timer t3; t3.Start(); gpuErrorcheck(cudaMallocManaged(&d_nodePointer, num_nodes * sizeof(unsigned int))); gpuErrorcheck(cudaMallocManaged(&d_edgeList, (num_edges + num_nodes) * sizeof(unsigned int))); gpuErrorcheck(cudaMalloc(&d_finished, sizeof(bool))); gpuErrorcheck(cudaMallocManaged(&d_partNodePointer, vGraph.numParts * sizeof(PartPointer))); gpuErrorcheck(cudaMemAdvise(d_nodePointer, num_nodes * sizeof(uint), cudaMemAdviseSetPreferredLocation, arguments.deviceID)); gpuErrorcheck(cudaMemAdvise(d_edgeList, (num_edges + num_nodes) * sizeof(uint), cudaMemAdviseSetPreferredLocation, arguments.deviceID)); gpuErrorcheck(cudaMemAdvise(d_partNodePointer, vGraph.numParts * sizeof(PartPointer), cudaMemAdviseSetPreferredLocation, arguments.deviceID)); memcpy(d_nodePointer, vGraph.nodePointer, num_nodes * sizeof(unsigned int)); memcpy(d_edgeList, vGraph.edgeList, (num_edges + num_nodes) * sizeof(unsigned int)); memcpy(d_partNodePointer, vGraph.partNodePointer, vGraph.numParts * sizeof(PartPointer)); Timer t; t.Start(); int itr = 0; int level = 0; do { itr++; finished = true; gpuErrorcheck(cudaMemcpy(d_finished, &finished, sizeof(bool), cudaMemcpyHostToDevice)); if (itr % 2 == 1) { kernel<<<vGraph.numParts / 512 + 1, 512>>>(vGraph.numParts, d_nodePointer, d_partNodePointer, d_edgeList, d_dist, d_finished, level); } else { kernel<<<vGraph.numParts / 512 + 1, 
512>>>(vGraph.numParts, d_nodePointer, d_partNodePointer, d_edgeList, d_dist, d_finished, level); } gpuErrorcheck(cudaPeekAtLastError()); gpuErrorcheck(cudaDeviceSynchronize()); gpuErrorcheck(cudaMemcpy(&finished, d_finished, sizeof(bool), cudaMemcpyDeviceToHost)); level++; } while (!(finished)); cout << "Number of iterations = " << itr << endl; float runtime = t.Finish(); cout << "Processing finished in " << runtime << " (ms).\n"; cout << "Total time in " << t3.Finish() << " (ms).\n"; gpuErrorcheck(cudaMemcpy(dist, d_dist, num_nodes * sizeof(unsigned int), cudaMemcpyDeviceToHost)); utilities::PrintResults(dist, 30); if (arguments.hasOutput) utilities::SaveResults(arguments.output, dist, num_nodes); gpuErrorcheck(cudaFree(d_nodePointer)); gpuErrorcheck(cudaFree(d_edgeList)); gpuErrorcheck(cudaFree(d_dist)); gpuErrorcheck(cudaFree(d_finished)); gpuErrorcheck(cudaFree(d_partNodePointer)); }
85fa1f4eeed95e0cffdfea92516d8080e44ac83c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "IPSet.cuh"
#include <fstream>
#include <string>
#include <vector>
#include <thrust/sequence.h>
#include <thrust/execution_policy.h>
#include "device_launch_parameters.h"

IPSet& IPSet::operator=(const IPSet& other)
{
    if (this == &other)
        return *this;

    Size = other.Size;
    Setup = other.Setup;

    GpuAssert(hipMalloc(reinterpret_cast<void**>(&d_IPs), Size * sizeof(unsigned int)), "Cannot init IPs device memory.");
    GpuAssert(hipMalloc(reinterpret_cast<void**>(&d_Lenghts), Size * sizeof(int)), "Cannot init Lenghts device memory.");
    GpuAssert(hipMemcpy(d_IPs, other.d_IPs, Size * sizeof(unsigned int), hipMemcpyDeviceToDevice), "Cannot copy IPs to device memory in = operator.");
    GpuAssert(hipMemcpy(d_Lenghts, other.d_Lenghts, Size * sizeof(int), hipMemcpyDeviceToDevice), "Cannot copy Lenghts to device memory in = operator.");

    return *this;
}

IPSet::IPSet(const IPSet& other)
{
    if (this == &other)
        return;

    Size = other.Size;
    Setup = other.Setup;

    GpuAssert(hipMalloc(reinterpret_cast<void**>(&d_IPs), Size * sizeof(unsigned int)), "Cannot init IPs device memory.");
    GpuAssert(hipMalloc(reinterpret_cast<void**>(&d_Lenghts), Size * sizeof(int)), "Cannot init Lenghts device memory.");
    GpuAssert(hipMemcpy(d_IPs, other.d_IPs, Size * sizeof(unsigned int), hipMemcpyDeviceToDevice), "Cannot copy IPs to device memory in copy operator.");
    GpuAssert(hipMemcpy(d_Lenghts, other.d_Lenghts, Size * sizeof(int), hipMemcpyDeviceToDevice), "Cannot copy Lenghts to device memory in copy operator.");
}

void IPSet::Dispose()
{
    if (d_IPs != NULL)
    {
        GpuAssert(hipFree(d_IPs), "Cannot free IPs memory in IPSet destructor");
        GpuAssert(hipFree(d_Lenghts), "Cannot free Lenghts memory in IPSet destructor");
        d_IPs = NULL;
        d_Lenghts = NULL;
    }
}

__global__ void BuildIPs(unsigned char * ipData, unsigned int * ips, int *lenghts, int size)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned char b1, b2, b3, b4;

    while (i < size)
    {
        b1 = ipData[i * 5];
        b2 = ipData[i * 5 + 1];
        b3 = ipData[i * 5 + 2];
        b4 = ipData[i * 5 + 3];

        ips[i] = (b1 << 24) + (b2 << 16) + (b3 << 8) + b4;
        lenghts[i] = ipData[i * 5 + 4];

        i += blockDim.x * gridDim.x;
    }
}

void IPSet::Load(GpuSetup &setup, string path, int count)
{
    Setup = setup;

    ifstream file(path);
    string line;
    string delims = ";.";
    vector<string> parts;
    int pos;
    int iteration = 0;

    while (!file.eof() && iteration < count)
    {
        file >> line;

        line = line.substr(4, line.size());
        pos = line.find(".");
        parts.push_back(line.substr(0, pos));
        line = line.substr(pos+1, line.size());
        pos = line.find(".");
        parts.push_back(line.substr(0, pos));
        line = line.substr(pos+1, line.size());
        pos = line.find(".");
        parts.push_back(line.substr(0, pos));
        line = line.substr(pos+1, line.size());
        pos = line.find(";");
        parts.push_back(line.substr(0, pos));
        line = line.substr(pos+1, line.size());
        parts.push_back(line);

        ++iteration;
    }
    file.close();

    Size = parts.size() / 5;
    unsigned char *IPData = new unsigned char[Size * 5];

    for (int i = 0; i < Size * 5; ++i)
        IPData[i] = static_cast<unsigned char>(stoi(parts[i]));

    unsigned char *d_IPData;
    GpuAssert(hipMalloc(reinterpret_cast<void**>(&d_IPData), 5 * Size * sizeof(unsigned char)), "Cannot init ip masks device memory.");
    GpuAssert(hipMemcpy(d_IPData, IPData, 5 * Size * sizeof(unsigned char), hipMemcpyHostToDevice), "Cannot copy ip masks to device memory.");
    GpuAssert(hipMalloc(reinterpret_cast<void**>(&d_IPs), Size * sizeof(unsigned int)), "Cannot init IPs device memory.");
    GpuAssert(hipMalloc(reinterpret_cast<void**>(&d_Lenghts), Size * sizeof(int)), "Cannot init Lenghts device memory.");

    BuildIPs << < Setup.Blocks, Setup.Threads >> > (d_IPData, d_IPs, d_Lenghts, Size);
    GpuAssert(hipPeekAtLastError(), "Error while launching BuildIPs kernel");
    GpuAssert(hipDeviceSynchronize(), "Error while running BuildIPs kernel");

    delete[] IPData;
    GpuAssert(hipFree(d_IPData), "Cannot free d_IPData in Load");
}

void IPSet::Generate(GpuSetup& setup, int count)
{
    Setup = setup;
    Size = count;

    unsigned char *IPData = new unsigned char[Size * 5];
    int maskLenght;
    int mask;

    for (int i = 0; i < Size; ++i)
    {
        if ((rand() % 100) <= 60)
            maskLenght = 24;
        else if ((rand() % 100) <= 25)
            maskLenght = 16;
        else if ((rand() % 100) <= 33)
            maskLenght = 8;
        else
            maskLenght = rand() % 32;

        mask = (rand() | (rand() << 16)) << (32 - maskLenght);

        IPData[i * 5] = (mask >> 24) & 0xFF;
        IPData[i * 5 + 1] = (mask >> 16) & 0xFF;
        IPData[i * 5 + 2] = (mask >> 8) & 0xFF;
        IPData[i * 5 + 3] = mask & 0xFF;
        IPData[i * 5 + 4] = maskLenght;
    }

    unsigned char *d_IPData;
    GpuAssert(hipMalloc(reinterpret_cast<void**>(&d_IPData), 5 * Size * sizeof(unsigned char)), "Cannot init ip masks device memory.");
    GpuAssert(hipMemcpy(d_IPData, IPData, 5 * Size * sizeof(unsigned char), hipMemcpyHostToDevice), "Cannot copy ip masks to device memory.");
    GpuAssert(hipMalloc(reinterpret_cast<void**>(&d_IPs), Size * sizeof(unsigned int)), "Cannot init IPs device memory.");
    GpuAssert(hipMalloc(reinterpret_cast<void**>(&d_Lenghts), Size * sizeof(int)), "Cannot init Lenghts device memory.");

    BuildIPs << < Setup.Blocks, Setup.Threads >> > (d_IPData, d_IPs, d_Lenghts, Size);
    GpuAssert(hipPeekAtLastError(), "Error while launching BuildIPs kernel");
    GpuAssert(hipDeviceSynchronize(), "Error while running BuildIPs kernel");

    delete[] IPData;
    GpuAssert(hipFree(d_IPData), "Cannot free d_IPData in Load");
}

__global__ void CopyIPsToSubset(int *indexes, int subsetSize, unsigned int *IPs, int *Lenghts, unsigned int *sourceSetIPs, int * sourceSetLenghts)
{
    int iteration = blockIdx.x * blockDim.x + threadIdx.x;

    while (iteration < subsetSize)
    {
        int sourceInd = indexes[iteration];
        IPs[iteration] = sourceSetIPs[sourceInd];
        Lenghts[iteration] = sourceSetLenghts[sourceInd];

        iteration += blockDim.x * gridDim.x;
    }
}

void IPSet::RandomSubset(int subsetSize, IPSet& sourceSet)
{
    Setup = sourceSet.Setup;
    Size = subsetSize;

    int *d_Indexes;
    int *d_RandomValues;

    GpuAssert(hipMalloc(reinterpret_cast<void**>(&d_IPs), Size * sizeof(unsigned int)), "Cannot init IPs device memory.");
    GpuAssert(hipMalloc(reinterpret_cast<void**>(&d_Lenghts), Size * sizeof(int)), "Cannot init Lenghts device memory.");

    int maxSize = subsetSize > sourceSet.Size ? subsetSize : sourceSet.Size;
    GpuAssert(hipMalloc(reinterpret_cast<void**>(&d_Indexes), maxSize * sizeof(int)), "Cannot init indexes device memory.");
    GpuAssert(hipMalloc(reinterpret_cast<void**>(&d_RandomValues), maxSize * sizeof(int)), "Cannot init random values device memory.");

    thrust::sequence(thrust::device, d_Indexes, d_Indexes + sourceSet.Size, 0);
    thrust::generate_n(thrust::device, d_Indexes + sourceSet.Size, maxSize - sourceSet.Size, Rnd(0, sourceSet.Size));
    thrust::generate_n(thrust::device, d_RandomValues, maxSize, Rnd(0, maxSize));
    thrust::stable_sort_by_key(thrust::device, d_RandomValues, d_RandomValues + maxSize, d_Indexes);
    thrust::sort(thrust::device, d_Indexes, d_Indexes + Size);

    CopyIPsToSubset << < Setup.Blocks, Setup.Threads >> > (d_Indexes, Size, d_IPs, d_Lenghts, sourceSet.d_IPs, sourceSet.d_Lenghts);
    GpuAssert(hipPeekAtLastError(), "Error while launching CopyIPsToSubset kernel");
    GpuAssert(hipDeviceSynchronize(), "Error while running CopyIPsToSubset kernel");

    GpuAssert(hipFree(d_Indexes), "Cannot free indexes memory.");
    GpuAssert(hipFree(d_RandomValues), "Cannot free random values memory.");
}

void IPSet::Sort()
{
    thrust::sort_by_key(thrust::device, d_IPs, d_IPs + Size, d_Lenghts);
}

void IPSet::Randomize()
{
    int *d_Indexes;
    int *d_RandomValues;

    GpuAssert(hipMalloc(reinterpret_cast<void**>(&d_Indexes), Size * sizeof(int)), "Cannot init indexes device memory.");
    GpuAssert(hipMalloc(reinterpret_cast<void**>(&d_RandomValues), Size * sizeof(int)), "Cannot init random values device memory.");

    thrust::sequence(thrust::device, d_Indexes, d_Indexes + Size, 0);
    thrust::generate_n(thrust::device, d_RandomValues, Size, Rnd(0, Size));

    unsigned int *new_IPs;
    int *new_Lenghts;
    GpuAssert(hipMalloc(reinterpret_cast<void**>(&new_IPs), Size * sizeof(unsigned int)), "Cannot init IPs device memory.");
    GpuAssert(hipMalloc(reinterpret_cast<void**>(&new_Lenghts), Size * sizeof(int)), "Cannot init Lenghts device memory.");

    CopyIPsToSubset << < Setup.Blocks, Setup.Threads >> > (d_Indexes, Size, new_IPs, new_Lenghts, d_IPs, d_Lenghts);
    GpuAssert(hipPeekAtLastError(), "Error while launching CopyIPsToSubset kernel");
    GpuAssert(hipDeviceSynchronize(), "Error while running CopyIPsToSubset kernel");

    GpuAssert(hipFree(d_IPs), "Cannot free IPs memory.");
    GpuAssert(hipFree(d_Lenghts), "Cannot free Lenghts values memory.");

    d_IPs = new_IPs;
    d_Lenghts = new_Lenghts;

    GpuAssert(hipFree(d_Indexes), "Cannot free indexes memory.");
    GpuAssert(hipFree(d_RandomValues), "Cannot free random values memory.");
}

IPSet operator+(IPSet& l, IPSet& r)
{
    IPSet set;

    if (l.Setup.DeviceID != r.Setup.DeviceID)
        throw runtime_error("Cannot add set from different devices");

    set.Size = l.Size + r.Size;
    set.Setup = l.Setup;

    GpuAssert(hipMalloc(reinterpret_cast<void**>(&set.d_IPs), set.Size * sizeof(unsigned int)), "Cannot init IPs device memory.");
    GpuAssert(hipMalloc(reinterpret_cast<void**>(&set.d_Lenghts), set.Size * sizeof(int)), "Cannot init Lenghts device memory.");

    GpuAssert(hipMemcpy(set.d_IPs, l.d_IPs, l.Size * sizeof(unsigned int), hipMemcpyDeviceToDevice), "Cannot copy IPs to device memory in + operator.");
    GpuAssert(hipMemcpy(set.d_Lenghts, l.d_Lenghts, l.Size * sizeof(int), hipMemcpyDeviceToDevice), "Cannot copy Lenghts to device memory in + operator.");
    GpuAssert(hipMemcpy(set.d_IPs + l.Size, r.d_IPs, r.Size * sizeof(unsigned int), hipMemcpyDeviceToDevice), "Cannot copy IPs to device memory in + operator.");
    GpuAssert(hipMemcpy(set.d_Lenghts + l.Size, r.d_Lenghts, r.Size * sizeof(int), hipMemcpyDeviceToDevice), "Cannot copy Lenghts to device memory in + operator.");

    return set;
}
85fa1f4eeed95e0cffdfea92516d8080e44ac83c.cu
#include "IPSet.cuh" #include <fstream> #include <string> #include <vector> #include <thrust/sequence.h> #include <thrust/execution_policy.h> #include "device_launch_parameters.h" IPSet& IPSet::operator=(const IPSet& other) { if (this == &other) return *this; Size = other.Size; Setup = other.Setup; GpuAssert(cudaMalloc(reinterpret_cast<void**>(&d_IPs), Size * sizeof(unsigned int)), "Cannot init IPs device memory."); GpuAssert(cudaMalloc(reinterpret_cast<void**>(&d_Lenghts), Size * sizeof(int)), "Cannot init Lenghts device memory."); GpuAssert(cudaMemcpy(d_IPs, other.d_IPs, Size * sizeof(unsigned int), cudaMemcpyDeviceToDevice), "Cannot copy IPs to device memory in = operator."); GpuAssert(cudaMemcpy(d_Lenghts, other.d_Lenghts, Size * sizeof(int), cudaMemcpyDeviceToDevice), "Cannot copy Lenghts to device memory in = operator."); return *this; } IPSet::IPSet(const IPSet& other) { if (this == &other) return; Size = other.Size; Setup = other.Setup; GpuAssert(cudaMalloc(reinterpret_cast<void**>(&d_IPs), Size * sizeof(unsigned int)), "Cannot init IPs device memory."); GpuAssert(cudaMalloc(reinterpret_cast<void**>(&d_Lenghts), Size * sizeof(int)), "Cannot init Lenghts device memory."); GpuAssert(cudaMemcpy(d_IPs, other.d_IPs, Size * sizeof(unsigned int), cudaMemcpyDeviceToDevice), "Cannot copy IPs to device memory in copy operator."); GpuAssert(cudaMemcpy(d_Lenghts, other.d_Lenghts, Size * sizeof(int), cudaMemcpyDeviceToDevice), "Cannot copy Lenghts to device memory in copy operator."); } void IPSet::Dispose() { if(d_IPs != NULL) { GpuAssert(cudaFree(d_IPs), "Cannot free IPs memory in IPSet destructor"); GpuAssert(cudaFree(d_Lenghts), "Cannot free Lenghts memory in IPSet destructor"); d_IPs = NULL; d_Lenghts = NULL; } } __global__ void BuildIPs(unsigned char * ipData, unsigned int * ips, int *lenghts, int size) { int i = blockIdx.x * blockDim.x + threadIdx.x; unsigned char b1, b2, b3, b4; while (i < size) { b1 = ipData[i * 5]; b2 = ipData[i * 5 + 1]; b3 = ipData[i * 5 + 2]; b4 = ipData[i * 5 + 3]; ips[i] = (b1 << 24) + (b2 << 16) + (b3 << 8) + b4; lenghts[i] = ipData[i * 5 + 4]; i += blockDim.x * gridDim.x; } } void IPSet::Load(GpuSetup &setup, string path, int count) { Setup = setup; ifstream file(path); string line; string delims = ";."; vector<string> parts; int pos; int iteration = 0; while (!file.eof() && iteration < count) { file >> line; line = line.substr(4, line.size()); pos = line.find("."); parts.push_back(line.substr(0, pos)); line = line.substr(pos+1, line.size()); pos = line.find("."); parts.push_back(line.substr(0, pos)); line = line.substr(pos+1, line.size()); pos = line.find("."); parts.push_back(line.substr(0, pos)); line = line.substr(pos+1, line.size()); pos = line.find(";"); parts.push_back(line.substr(0, pos)); line = line.substr(pos+1, line.size()); parts.push_back(line); ++iteration; } file.close(); Size = parts.size() / 5; unsigned char *IPData = new unsigned char[Size * 5]; for(int i = 0; i < Size * 5; ++i) IPData[i] = static_cast<unsigned char>(stoi(parts[i])); unsigned char *d_IPData; GpuAssert(cudaMalloc(reinterpret_cast<void**>(&d_IPData), 5 * Size * sizeof(unsigned char)), "Cannot init ip masks device memory."); GpuAssert(cudaMemcpy(d_IPData, IPData, 5 * Size * sizeof(unsigned char), cudaMemcpyHostToDevice), "Cannot copy ip masks to device memory."); GpuAssert(cudaMalloc(reinterpret_cast<void**>(&d_IPs), Size * sizeof(unsigned int)), "Cannot init IPs device memory."); GpuAssert(cudaMalloc(reinterpret_cast<void**>(&d_Lenghts), Size * sizeof(int)), "Cannot init 
Lenghts device memory."); BuildIPs << < Setup.Blocks, Setup.Threads >> > (d_IPData, d_IPs, d_Lenghts, Size); GpuAssert(cudaPeekAtLastError(), "Error while launching BuildIPs kernel"); GpuAssert(cudaDeviceSynchronize(), "Error while running BuildIPs kernel"); delete[] IPData; GpuAssert(cudaFree(d_IPData), "Cannot free d_IPData in Load"); } void IPSet::Generate(GpuSetup& setup, int count) { Setup = setup; Size = count; unsigned char *IPData = new unsigned char[Size * 5]; int maskLenght; int mask; for (int i = 0; i < Size; ++i) { if ((rand() % 100) <= 60) maskLenght = 24; else if ((rand() % 100) <= 25) maskLenght = 16; else if ((rand() % 100) <= 33) maskLenght = 8; else maskLenght = rand() % 32; mask = (rand() | (rand() << 16)) << (32 - maskLenght); IPData[i * 5] = (mask >> 24) & 0xFF; IPData[i * 5 + 1] = (mask >> 16) & 0xFF; IPData[i * 5 + 2] = (mask >> 8) & 0xFF; IPData[i * 5 + 3] = mask & 0xFF; IPData[i * 5 + 4] = maskLenght; } unsigned char *d_IPData; GpuAssert(cudaMalloc(reinterpret_cast<void**>(&d_IPData), 5 * Size * sizeof(unsigned char)), "Cannot init ip masks device memory."); GpuAssert(cudaMemcpy(d_IPData, IPData, 5 * Size * sizeof(unsigned char), cudaMemcpyHostToDevice), "Cannot copy ip masks to device memory."); GpuAssert(cudaMalloc(reinterpret_cast<void**>(&d_IPs), Size * sizeof(unsigned int)), "Cannot init IPs device memory."); GpuAssert(cudaMalloc(reinterpret_cast<void**>(&d_Lenghts), Size * sizeof(int)), "Cannot init Lenghts device memory."); BuildIPs << < Setup.Blocks, Setup.Threads >> > (d_IPData, d_IPs, d_Lenghts, Size); GpuAssert(cudaPeekAtLastError(), "Error while launching BuildIPs kernel"); GpuAssert(cudaDeviceSynchronize(), "Error while running BuildIPs kernel"); delete[] IPData; GpuAssert(cudaFree(d_IPData), "Cannot free d_IPData in Load"); } __global__ void CopyIPsToSubset(int *indexes, int subsetSize, unsigned int *IPs, int *Lenghts, unsigned int *sourceSetIPs, int * sourceSetLenghts) { int iteration = blockIdx.x * blockDim.x + threadIdx.x; while(iteration < subsetSize) { int sourceInd = indexes[iteration]; IPs[iteration] = sourceSetIPs[sourceInd]; Lenghts[iteration] = sourceSetLenghts[sourceInd]; iteration += blockDim.x * gridDim.x; } } void IPSet::RandomSubset(int subsetSize, IPSet& sourceSet) { Setup = sourceSet.Setup; Size = subsetSize; int *d_Indexes; int *d_RandomValues; GpuAssert(cudaMalloc(reinterpret_cast<void**>(&d_IPs), Size * sizeof(unsigned int)), "Cannot init IPs device memory."); GpuAssert(cudaMalloc(reinterpret_cast<void**>(&d_Lenghts), Size * sizeof(int)), "Cannot init Lenghts device memory."); int maxSize = subsetSize > sourceSet.Size ? 
subsetSize : sourceSet.Size; GpuAssert(cudaMalloc(reinterpret_cast<void**>(&d_Indexes), maxSize * sizeof(int)), "Cannot init indexes device memory."); GpuAssert(cudaMalloc(reinterpret_cast<void**>(&d_RandomValues), maxSize * sizeof(int)), "Cannot init random values device memory."); thrust::sequence(thrust::device, d_Indexes, d_Indexes + sourceSet.Size, 0); thrust::generate_n(thrust::device, d_Indexes + sourceSet.Size, maxSize - sourceSet.Size, Rnd(0, sourceSet.Size)); thrust::generate_n(thrust::device, d_RandomValues, maxSize, Rnd(0, maxSize)); thrust::stable_sort_by_key(thrust::device, d_RandomValues, d_RandomValues + maxSize, d_Indexes); thrust::sort(thrust::device, d_Indexes, d_Indexes + Size); CopyIPsToSubset << < Setup.Blocks, Setup.Threads >> > (d_Indexes, Size, d_IPs, d_Lenghts, sourceSet.d_IPs, sourceSet.d_Lenghts); GpuAssert(cudaPeekAtLastError(), "Error while launching CopyIPsToSubset kernel"); GpuAssert(cudaDeviceSynchronize(), "Error while running CopyIPsToSubset kernel"); GpuAssert(cudaFree(d_Indexes), "Cannot free indexes memory."); GpuAssert(cudaFree(d_RandomValues), "Cannot free random values memory."); } void IPSet::Sort() { thrust::sort_by_key(thrust::device, d_IPs, d_IPs + Size, d_Lenghts); } void IPSet::Randomize() { int *d_Indexes; int *d_RandomValues; GpuAssert(cudaMalloc(reinterpret_cast<void**>(&d_Indexes), Size * sizeof(int)), "Cannot init indexes device memory."); GpuAssert(cudaMalloc(reinterpret_cast<void**>(&d_RandomValues), Size * sizeof(int)), "Cannot init random values device memory."); thrust::sequence(thrust::device, d_Indexes, d_Indexes + Size, 0); thrust::generate_n(thrust::device, d_RandomValues, Size, Rnd(0, Size)); unsigned int *new_IPs; int *new_Lenghts; GpuAssert(cudaMalloc(reinterpret_cast<void**>(&new_IPs), Size * sizeof(unsigned int)), "Cannot init IPs device memory."); GpuAssert(cudaMalloc(reinterpret_cast<void**>(&new_Lenghts), Size * sizeof(int)), "Cannot init Lenghts device memory."); CopyIPsToSubset << < Setup.Blocks, Setup.Threads >> > (d_Indexes, Size, new_IPs, new_Lenghts, d_IPs, d_Lenghts); GpuAssert(cudaPeekAtLastError(), "Error while launching CopyIPsToSubset kernel"); GpuAssert(cudaDeviceSynchronize(), "Error while running CopyIPsToSubset kernel"); GpuAssert(cudaFree(d_IPs), "Cannot free IPs memory."); GpuAssert(cudaFree(d_Lenghts), "Cannot free Lenghts values memory."); d_IPs = new_IPs; d_Lenghts = new_Lenghts; GpuAssert(cudaFree(d_Indexes), "Cannot free indexes memory."); GpuAssert(cudaFree(d_RandomValues), "Cannot free random values memory."); } IPSet operator+(IPSet& l, IPSet& r) { IPSet set; if (l.Setup.DeviceID != r.Setup.DeviceID) throw runtime_error("Cannot add set from different devices"); set.Size = l.Size + r.Size; set.Setup = l.Setup; GpuAssert(cudaMalloc(reinterpret_cast<void**>(&set.d_IPs), set.Size * sizeof(unsigned int)), "Cannot init IPs device memory."); GpuAssert(cudaMalloc(reinterpret_cast<void**>(&set.d_Lenghts), set.Size * sizeof(int)), "Cannot init Lenghts device memory."); GpuAssert(cudaMemcpy(set.d_IPs, l.d_IPs, l.Size * sizeof(unsigned int), cudaMemcpyDeviceToDevice), "Cannot copy IPs to device memory in + operator."); GpuAssert(cudaMemcpy(set.d_Lenghts, l.d_Lenghts, l.Size * sizeof(int), cudaMemcpyDeviceToDevice), "Cannot copy Lenghts to device memory in + operator."); GpuAssert(cudaMemcpy(set.d_IPs + l.Size, r.d_IPs, r.Size * sizeof(unsigned int), cudaMemcpyDeviceToDevice), "Cannot copy IPs to device memory in + operator."); GpuAssert(cudaMemcpy(set.d_Lenghts + l.Size, r.d_Lenghts, r.Size * sizeof(int), 
cudaMemcpyDeviceToDevice), "Cannot copy Lenghts to device memory in + operator."); return set; }
32dc83c8d79451aa022b25d6ca2289fd216d79ed.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <c10/util/Exception.h>
#include <THH/THHGeneral.h>
#include <THH/THHNumerics.cuh>

#include <algorithm>
#include <cfloat>
#include <cmath>

namespace at {
namespace native {

namespace {

__device__ inline int start_index(int a, int b, int c) {
  return (int)::floor((float)(a * c) / b);
}

__device__ inline int end_index(int a, int b, int c) {
  return (int)::ceil((float)((a + 1) * c) / b);
}

// 5d tensor B x D x T x H x W

/*
 * Description:
 *    this function adaptively maxpools an input 4D tensor along dimensions 2 and 3
 *    4D input, 4D output, 4D argmax x and y
 */
template <typename T>
__global__ void adaptivemaxpool(
    T *input, T *output, int64_t *indices,
    int isizeT, int isizeH, int isizeW,
    int osizeT, int osizeH, int osizeW,
    int64_t istrideD, int64_t istrideT, int64_t istrideH, int64_t istrideW,
    int64_t offsetZ)
{
  // iterators on output pixels
  int ot, oh, ow;

  // compute offsets based on thread/block ID
  int ostartH = blockIdx.y * blockDim.y + threadIdx.y;
  int oendH = osizeH;
  int ostepH = gridDim.y * blockDim.y;
  int ostartW = threadIdx.x;
  int oendW = osizeW;
  int ostepW = blockDim.x;

  // select output plane
  int64_t o_plane = blockIdx.x + offsetZ;
  ot = o_plane % osizeT;     // output frame/time
  int d = o_plane / osizeT;  // slice/feature

  // input frame/time ramge is fixed.
  int istartT = start_index(ot, osizeT, isizeT);
  int iendT = end_index(ot, osizeT, isizeT);
  int kT = iendT - istartT;

  // input offset by slice/feature and earliest relevant frame/time
  T *input_dt = input + d*istrideD + istartT*istrideT;
  // output offset by slice/feature and frame/time
  T *output_dt = output + o_plane*osizeH*osizeW;
  // indices offset by slice/feature and frame/time
  int64_t *indices_dt = indices + o_plane*osizeH*osizeW;

  // For all output pixels...
  for (oh = ostartH; oh < oendH; oh += ostepH) {
    int istartH = start_index(oh, osizeH, isizeH);
    int iendH = end_index(oh, osizeH, isizeH);
    int kH = iendH - istartH;

    for (ow = ostartW; ow < oendW; ow += ostepW) {
      int istartW = start_index(ow, osizeW, isizeW);
      int iendW = end_index(ow, osizeW, isizeW);
      int kW = iendW - istartW;

      // Compute the average pooling from corresponding input pixels
      T *ptr_input = input_dt + istartH*istrideH + istartW*istrideW;
      T *ptr_output = output_dt + oh*osizeW + ow;
      int64_t *ptr_ind = indices_dt + oh*osizeW + ow;
      int64_t argmax = -1;
      T max = THCNumerics<T>::min();

      int it, ih, iw;
      for (it = 0; it < kT; ++it) {
        for (ih = 0; ih < kH; ++ih) {
          for (iw = 0; iw < kW; ++iw) {
            T val = ptr_input[ih*istrideH + iw*istrideW];
            if ((val > max) || THCNumerics<T>::isnan(val)) {
              max = val;
              argmax = (it+istartT)*isizeH*isizeW + (ih+istartH)*isizeW + iw+istartW;
            }
          }
        }
        ptr_input += istrideT;  // next input frame
      }
      // Update output and argmax
      *ptr_output = max;
      *ptr_ind = argmax;
    }
  }
}

template <typename scalar_t>
void adaptivemaxpool_loop(
    scalar_t *input_data, scalar_t *output_data, int64_t *indices_data,
    int64_t totalZ,
    int isizeT, int isizeH, int isizeW,
    int osizeT, int osizeH, int osizeW,
    int64_t istrideD, int64_t istrideT, int64_t istrideH, int64_t istrideW)
{
  int64_t offsetZ = 0;
  dim3 threads(32, 8);
  // each H*W plane is processed by blocksH thread blocks
  int blocksH = ::max((int)(16L / totalZ), 1);
  while (totalZ > 0) {
    dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH);
    hipLaunchKernelGGL(( adaptivemaxpool), dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
      input_data, output_data, indices_data,
      isizeT, isizeH, isizeW, osizeT, osizeH, osizeW,
      istrideD, istrideT, istrideH, istrideW, offsetZ);

    totalZ -= 65535;
    offsetZ += 65535;
    THCudaCheck(hipGetLastError());
  }
}

/*
 * Description:
 *    This function computes the gradInput from gradOutput.
 *
 *    gridDim.y blocks work together on a single 2D output plane specified by
 *    (blockIdx.x + offsetZ).
 *
 *    Assumes that input size can be perfectly divided by output size, i.e.
 *    each input pixel can only be argmax of one output pixel.
 */
template <typename T>
__global__ void adaptivemaxgradinput(
    T *gradInput, T *gradOutput, int64_t *indices,
    int isizeT, int isizeH, int isizeW,
    int osizeT, int osizeH, int osizeW,
    int64_t offsetZ
)
{
  // iterators on output pixels
  int oh, ow;

  // compute offsets based on thread/block ID
  int ostartH = blockIdx.y * blockDim.y + threadIdx.y;
  int oendH = osizeH;
  int ostepH = gridDim.y * blockDim.y;
  int ostartW = threadIdx.x;
  int oendW = osizeW;
  int ostepW = blockDim.x;

  // select output plane
  int64_t o_plane = blockIdx.x + offsetZ;
  int d = o_plane / osizeT;  // output slice/feature

  // gradInput offset by slice/feature
  T *gradInput_d = gradInput + d*isizeT*isizeH*isizeW;
  // gradOutput offset by slice/feature and frame/otme
  T *gradOutput_dt = gradOutput + o_plane*osizeH*osizeW;
  // indices offset by slice/feature and frame/otme
  int64_t *indices_dt = indices + o_plane*osizeH*osizeW;

  // For all output pixels...
  for (oh = ostartH; oh < oendH; oh += ostepH) {
    for (ow = ostartW; ow < oendW; ow += ostepW) {
      // Compute the gradients for the argmax input pixel
      T *ptr_gradOutput = gradOutput_dt + oh*osizeW + ow;
      int64_t *ptr_ind = indices_dt + oh*osizeW + ow;
      T grad_delta = *ptr_gradOutput;
      int argmax = (*ptr_ind);
      gradInput_d[argmax] += grad_delta;
    }
  }
}

template <typename scalar_t>
void adaptivemaxgradinput_loop(
    scalar_t *gradInput_data, scalar_t *gradOutput_data, int64_t *indices_data,
    int64_t totalZ,
    int isizeT, int isizeH, int isizeW,
    int osizeT, int osizeH, int osizeW)
{
  int64_t offsetZ = 0;
  dim3 threads(32, 8);
  // each H*W plane is processed by blocksH thread blocks
  int blocksH = ::max((int)(16L / totalZ), 1);
  while (totalZ > 0) {
    dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH);
    hipLaunchKernelGGL(( adaptivemaxgradinput), dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
      gradInput_data, gradOutput_data, indices_data,
      isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, offsetZ);

    totalZ -= 65535;
    offsetZ += 65535;
    THCudaCheck(hipGetLastError());
  }
}

/*
 * Description:
 *    This function computes the gradInput from gradOutput.
 *
 *    gridDim.y blocks work together on a single 2D output plane specified by
 *    (blockIdx.x + offsetZ).
 *
 *    Uses atomic add.
 */
template <typename T>
__global__ void atomicadaptivemaxgradinput(
    T *gradInput, T *gradOutput, int64_t *indices,
    int isizeT, int isizeH, int isizeW,
    int osizeT, int osizeH, int osizeW,
    int64_t offsetZ
)
{
  // iterators on output pixels
  int oh, ow;

  // compute offsets based on thread/block ID
  int ostartH = blockIdx.y * blockDim.y + threadIdx.y;
  int oendH = osizeH;
  int ostepH = gridDim.y * blockDim.y;
  int ostartW = threadIdx.x;
  int oendW = osizeW;
  int ostepW = blockDim.x;

  // select output plane
  int64_t o_plane = blockIdx.x + offsetZ;
  int d = o_plane / osizeT;  // output slice/feature

  // gradInput offset by slice/feature
  T *gradInput_d = gradInput + d*isizeT*isizeH*isizeW;
  // gradOutput offset by slice/feature and frame/otme
  T *gradOutput_dt = gradOutput + o_plane*osizeH*osizeW;
  // indices offset by slice/feature and frame/otme
  int64_t *indices_dt = indices + o_plane*osizeH*osizeW;

  // For all output pixels...
  for (oh = ostartH; oh < oendH; oh += ostepH) {
    for (ow = ostartW; ow < oendW; ow += ostepW) {
      // Compute the gradients for the argmax input pixel
      T *ptr_gradOutput = gradOutput_dt + oh*osizeW + ow;
      int64_t *ptr_ind = indices_dt + oh*osizeW + ow;
      T grad_delta = *ptr_gradOutput;
      int64_t argmax = (*ptr_ind);
      atomicAdd(&(gradInput_d[argmax]), grad_delta);
    }
  }
}

template <typename scalar_t>
void atomicadaptivemaxgradinput_loop(
    scalar_t *gradInput_data, scalar_t *gradOutput_data, int64_t *indices_data,
    int64_t totalZ,
    int isizeT, int isizeH, int isizeW,
    int osizeT, int osizeH, int osizeW)
{
  int64_t offsetZ = 0;
  dim3 threads(32, 8);
  // each H*W plane is processed by blocksH thread blocks
  int blocksH = ::max((int)(16L / totalZ), 1);
  while (totalZ > 0) {
    dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH);
    hipLaunchKernelGGL(( atomicadaptivemaxgradinput), dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
      gradInput_data, gradOutput_data, indices_data,
      isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, offsetZ);

    totalZ -= 65535;
    offsetZ += 65535;
    THCudaCheck(hipGetLastError());
  }
}

// 5d tensor B x D x T x H x W

void adaptive_max_pool3d_out_cuda_template(
    Tensor& output,
    Tensor& indices,
    const Tensor& input_,
    IntArrayRef output_size)
{
  TensorArg output_arg{ output, "output", 1 };
  TensorArg indices_arg{ indices, "indices", 2 };
  TensorArg input_arg{ input_, "input_", 3 };

  checkAllSameGPU("adaptive_max_pool3d_cuda", {output_arg, indices_arg, input_arg});

  for (int64_t i = 0; i < input_.ndimension(); i++) {
    AT_CHECK(input_.size(i) > 0,
      "adaptive_max_pool3d_cuda(): expected input to have non-empty spatial dimensions, "
      "but input has sizes ", input_.sizes(), " with dimension ", i, " being "
      "empty");
  }

  AT_CHECK((input_.ndimension() == 4 || input_.ndimension() == 5),
    "non-empty 4D or 5D (batch mode) tensor expected for input");

  // Issue #20215: the JIT sometimes passes output_size.size() == 1.
  AT_CHECK(output_size.size() == 1 || output_size.size() == 3,
    "adaptive_max_pool3d: internal error: output_size.size() must be 1 or 3");

  int64_t osizeT = output_size[0];
  int64_t osizeH = output_size.size() == 1 ? output_size[0] : output_size[1];
  int64_t osizeW = output_size.size() == 1 ? output_size[0] : output_size[2];

  int64_t sizeD, isizeT, isizeH, isizeW;
  int64_t istrideD, istrideT, istrideH, istrideW;
  int64_t totalZ;

  const Tensor& input = input_.ndimension() == 4 ? input_ : input_.contiguous();

  if (input.ndimension() == 4) {
    sizeD = input.size(0);
    isizeT = input.size(1);
    isizeH = input.size(2);
    isizeW = input.size(3);

    istrideD = input.stride(0);
    istrideT = input.stride(1);
    istrideH = input.stride(2);
    istrideW = input.stride(3);

    output.resize_({sizeD, osizeT, osizeH, osizeW});
    indices.resize_({sizeD, osizeT, osizeH, osizeW});

    totalZ = sizeD * osizeT;
  } else {
    int64_t sizeB = input.size(0);
    sizeD = input.size(1);
    isizeT = input.size(2);
    isizeH = input.size(3);
    isizeW = input.size(4);

    istrideD = input.stride(1);
    istrideT = input.stride(2);
    istrideH = input.stride(3);
    istrideW = input.stride(4);

    output.resize_({sizeB, sizeD, osizeT, osizeH, osizeW});
    indices.resize_({sizeB, sizeD, osizeT, osizeH, osizeW});

    totalZ = sizeB * sizeD * osizeT;
  }

  AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "adaptive_max_pool3d_cuda", [&] {
    scalar_t *input_data = input.data<scalar_t>();
    scalar_t *output_data = output.data<scalar_t>();
    int64_t *indices_data = indices.data<int64_t>();

    adaptivemaxpool_loop(
      input_data, output_data, indices_data,
      totalZ,
      isizeT, isizeH, isizeW, osizeT, osizeH, osizeW,
      istrideD, istrideT, istrideH, istrideW);
  } );
}

void adaptive_max_pool3d_backward_out_cuda_template(
    Tensor& gradInput,
    const Tensor& gradOutput_,
    const Tensor& input,
    const Tensor& indices)
{
  TensorArg grad_input_arg{ gradInput, "gradInput", 1 };
  TensorArg grad_output_arg{ gradOutput_, "gradOutput_", 2 };
  TensorArg input_arg{ input, "input", 3 };
  TensorArg indices_arg{ indices, "indices", 4 };

  checkAllSameGPU("adaptive_max_pool3d_out_cuda",
                  {grad_input_arg, grad_output_arg, input_arg, indices_arg});

  const Tensor gradOutput = gradOutput_.contiguous();

  gradInput.resize_as_(input);
  gradInput.zero_();

  int64_t sizeD, isizeT, isizeH, isizeW;
  int64_t osizeT, osizeH, osizeW;
  int64_t totalZ;

  if (input.ndimension() == 4) {
    sizeD = input.size(0);
    isizeT = input.size(1);
    isizeH = input.size(2);
    isizeW = input.size(3);

    osizeT = gradOutput.size(1);
    osizeH = gradOutput.size(2);
    osizeW = gradOutput.size(3);
  } else {
    sizeD = input.size(1);
    isizeT = input.size(2);
    isizeH = input.size(3);
    isizeW = input.size(4);

    osizeT = gradOutput.size(2);
    osizeH = gradOutput.size(3);
    osizeW = gradOutput.size(4);
  }

  bool atomic = (isizeW%osizeW != 0) || (isizeH%osizeH != 0) || (isizeT%osizeT != 0);

  if (input.ndimension() == 4) {
    totalZ = sizeD * osizeT;
  } else {
    int sizeB = input.size(0);
    totalZ = sizeB * sizeD * osizeT;
  }

  if (atomic) {
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "adaptive_max_pool3d_backward_cuda", [&] {
      scalar_t *gradInput_data = gradInput.data<scalar_t>();
      scalar_t *gradOutput_data = gradOutput.data<scalar_t>();
      int64_t *indices_data = indices.data<int64_t>();

      atomicadaptivemaxgradinput_loop(
        gradInput_data, gradOutput_data, indices_data,
        totalZ,
        isizeT, isizeH, isizeW, osizeT, osizeH, osizeW);
    } );
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "adaptive_max_pool3d_backward_cuda", [&] {
      scalar_t *gradInput_data = gradInput.data<scalar_t>();
      scalar_t *gradOutput_data = gradOutput.data<scalar_t>();
      int64_t *indices_data = indices.data<int64_t>();

      adaptivemaxgradinput_loop(
        gradInput_data, gradOutput_data, indices_data,
        totalZ,
        isizeT, isizeH, isizeW, osizeT, osizeH, osizeW);
    } );
  }
}

} // namespace

std::tuple<Tensor&, Tensor&> adaptive_max_pool3d_out_cuda(
    Tensor& output,
    Tensor& indices,
    const Tensor& input,
    IntArrayRef output_size)
{
  adaptive_max_pool3d_out_cuda_template(output, indices, input, output_size);
  return std::tuple<Tensor&, Tensor&>(output, indices);
}

std::tuple<Tensor, Tensor> adaptive_max_pool3d_cuda(
    const Tensor& input,
    IntArrayRef output_size)
{
  Tensor output = at::empty({0}, input.options());
  Tensor indices = at::empty({0}, input.options().dtype(kLong));
  adaptive_max_pool3d_out_cuda_template(output, indices, input, output_size);
  return std::tuple<Tensor, Tensor>(output, indices);
}

Tensor& adaptive_max_pool3d_backward_out_cuda(
    Tensor& gradInput,
    const Tensor& gradOutput_,
    const Tensor& input,
    const Tensor& indices)
{
  adaptive_max_pool3d_backward_out_cuda_template(gradInput, gradOutput_, input, indices);
  return gradInput;
}

Tensor adaptive_max_pool3d_backward_cuda(
    const Tensor& gradOutput_,
    const Tensor& input,
    const Tensor& indices)
{
  auto gradInput = at::zeros_like(input);
  adaptive_max_pool3d_backward_out_cuda_template(gradInput, gradOutput_, input, indices);
  return gradInput;
}

} // at::native
} // at
32dc83c8d79451aa022b25d6ca2289fd216d79ed.cu
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <c10/util/Exception.h>
#include <THC/THCGeneral.h>
#include <THC/THCNumerics.cuh>

#include <algorithm>
#include <cfloat>
#include <cmath>

namespace at {
namespace native {

namespace {

__device__ inline int start_index(int a, int b, int c) {
  return (int)std::floor((float)(a * c) / b);
}

__device__ inline int end_index(int a, int b, int c) {
  return (int)std::ceil((float)((a + 1) * c) / b);
}

// 5d tensor B x D x T x H x W

/*
 * Description:
 *    this function adaptively maxpools an input 4D tensor along dimensions 2 and 3
 *    4D input, 4D output, 4D argmax x and y
 */
template <typename T>
__global__ void adaptivemaxpool(
    T *input, T *output, int64_t *indices,
    int isizeT, int isizeH, int isizeW,
    int osizeT, int osizeH, int osizeW,
    int64_t istrideD, int64_t istrideT, int64_t istrideH, int64_t istrideW,
    int64_t offsetZ)
{
  // iterators on output pixels
  int ot, oh, ow;

  // compute offsets based on thread/block ID
  int ostartH = blockIdx.y * blockDim.y + threadIdx.y;
  int oendH = osizeH;
  int ostepH = gridDim.y * blockDim.y;
  int ostartW = threadIdx.x;
  int oendW = osizeW;
  int ostepW = blockDim.x;

  // select output plane
  int64_t o_plane = blockIdx.x + offsetZ;
  ot = o_plane % osizeT;     // output frame/time
  int d = o_plane / osizeT;  // slice/feature

  // input frame/time ramge is fixed.
  int istartT = start_index(ot, osizeT, isizeT);
  int iendT = end_index(ot, osizeT, isizeT);
  int kT = iendT - istartT;

  // input offset by slice/feature and earliest relevant frame/time
  T *input_dt = input + d*istrideD + istartT*istrideT;
  // output offset by slice/feature and frame/time
  T *output_dt = output + o_plane*osizeH*osizeW;
  // indices offset by slice/feature and frame/time
  int64_t *indices_dt = indices + o_plane*osizeH*osizeW;

  // For all output pixels...
  for (oh = ostartH; oh < oendH; oh += ostepH) {
    int istartH = start_index(oh, osizeH, isizeH);
    int iendH = end_index(oh, osizeH, isizeH);
    int kH = iendH - istartH;

    for (ow = ostartW; ow < oendW; ow += ostepW) {
      int istartW = start_index(ow, osizeW, isizeW);
      int iendW = end_index(ow, osizeW, isizeW);
      int kW = iendW - istartW;

      // Compute the average pooling from corresponding input pixels
      T *ptr_input = input_dt + istartH*istrideH + istartW*istrideW;
      T *ptr_output = output_dt + oh*osizeW + ow;
      int64_t *ptr_ind = indices_dt + oh*osizeW + ow;
      int64_t argmax = -1;
      T max = THCNumerics<T>::min();

      int it, ih, iw;
      for (it = 0; it < kT; ++it) {
        for (ih = 0; ih < kH; ++ih) {
          for (iw = 0; iw < kW; ++iw) {
            T val = ptr_input[ih*istrideH + iw*istrideW];
            if ((val > max) || THCNumerics<T>::isnan(val)) {
              max = val;
              argmax = (it+istartT)*isizeH*isizeW + (ih+istartH)*isizeW + iw+istartW;
            }
          }
        }
        ptr_input += istrideT;  // next input frame
      }
      // Update output and argmax
      *ptr_output = max;
      *ptr_ind = argmax;
    }
  }
}

template <typename scalar_t>
void adaptivemaxpool_loop(
    scalar_t *input_data, scalar_t *output_data, int64_t *indices_data,
    int64_t totalZ,
    int isizeT, int isizeH, int isizeW,
    int osizeT, int osizeH, int osizeW,
    int64_t istrideD, int64_t istrideT, int64_t istrideH, int64_t istrideW)
{
  int64_t offsetZ = 0;
  dim3 threads(32, 8);
  // each H*W plane is processed by blocksH thread blocks
  int blocksH = std::max((int)(16L / totalZ), 1);
  while (totalZ > 0) {
    dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH);
    adaptivemaxpool<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
      input_data, output_data, indices_data,
      isizeT, isizeH, isizeW, osizeT, osizeH, osizeW,
      istrideD, istrideT, istrideH, istrideW, offsetZ);

    totalZ -= 65535;
    offsetZ += 65535;
    THCudaCheck(cudaGetLastError());
  }
}

/*
 * Description:
 *    This function computes the gradInput from gradOutput.
 *
 *    gridDim.y blocks work together on a single 2D output plane specified by
 *    (blockIdx.x + offsetZ).
 *
 *    Assumes that input size can be perfectly divided by output size, i.e.
 *    each input pixel can only be argmax of one output pixel.
 */
template <typename T>
__global__ void adaptivemaxgradinput(
    T *gradInput, T *gradOutput, int64_t *indices,
    int isizeT, int isizeH, int isizeW,
    int osizeT, int osizeH, int osizeW,
    int64_t offsetZ
)
{
  // iterators on output pixels
  int oh, ow;

  // compute offsets based on thread/block ID
  int ostartH = blockIdx.y * blockDim.y + threadIdx.y;
  int oendH = osizeH;
  int ostepH = gridDim.y * blockDim.y;
  int ostartW = threadIdx.x;
  int oendW = osizeW;
  int ostepW = blockDim.x;

  // select output plane
  int64_t o_plane = blockIdx.x + offsetZ;
  int d = o_plane / osizeT;  // output slice/feature

  // gradInput offset by slice/feature
  T *gradInput_d = gradInput + d*isizeT*isizeH*isizeW;
  // gradOutput offset by slice/feature and frame/otme
  T *gradOutput_dt = gradOutput + o_plane*osizeH*osizeW;
  // indices offset by slice/feature and frame/otme
  int64_t *indices_dt = indices + o_plane*osizeH*osizeW;

  // For all output pixels...
  for (oh = ostartH; oh < oendH; oh += ostepH) {
    for (ow = ostartW; ow < oendW; ow += ostepW) {
      // Compute the gradients for the argmax input pixel
      T *ptr_gradOutput = gradOutput_dt + oh*osizeW + ow;
      int64_t *ptr_ind = indices_dt + oh*osizeW + ow;
      T grad_delta = *ptr_gradOutput;
      int argmax = (*ptr_ind);
      gradInput_d[argmax] += grad_delta;
    }
  }
}

template <typename scalar_t>
void adaptivemaxgradinput_loop(
    scalar_t *gradInput_data, scalar_t *gradOutput_data, int64_t *indices_data,
    int64_t totalZ,
    int isizeT, int isizeH, int isizeW,
    int osizeT, int osizeH, int osizeW)
{
  int64_t offsetZ = 0;
  dim3 threads(32, 8);
  // each H*W plane is processed by blocksH thread blocks
  int blocksH = std::max((int)(16L / totalZ), 1);
  while (totalZ > 0) {
    dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH);
    adaptivemaxgradinput<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
      gradInput_data, gradOutput_data, indices_data,
      isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, offsetZ);

    totalZ -= 65535;
    offsetZ += 65535;
    THCudaCheck(cudaGetLastError());
  }
}

/*
 * Description:
 *    This function computes the gradInput from gradOutput.
 *
 *    gridDim.y blocks work together on a single 2D output plane specified by
 *    (blockIdx.x + offsetZ).
 *
 *    Uses atomic add.
 */
template <typename T>
__global__ void atomicadaptivemaxgradinput(
    T *gradInput, T *gradOutput, int64_t *indices,
    int isizeT, int isizeH, int isizeW,
    int osizeT, int osizeH, int osizeW,
    int64_t offsetZ
)
{
  // iterators on output pixels
  int oh, ow;

  // compute offsets based on thread/block ID
  int ostartH = blockIdx.y * blockDim.y + threadIdx.y;
  int oendH = osizeH;
  int ostepH = gridDim.y * blockDim.y;
  int ostartW = threadIdx.x;
  int oendW = osizeW;
  int ostepW = blockDim.x;

  // select output plane
  int64_t o_plane = blockIdx.x + offsetZ;
  int d = o_plane / osizeT;  // output slice/feature

  // gradInput offset by slice/feature
  T *gradInput_d = gradInput + d*isizeT*isizeH*isizeW;
  // gradOutput offset by slice/feature and frame/otme
  T *gradOutput_dt = gradOutput + o_plane*osizeH*osizeW;
  // indices offset by slice/feature and frame/otme
  int64_t *indices_dt = indices + o_plane*osizeH*osizeW;

  // For all output pixels...
  for (oh = ostartH; oh < oendH; oh += ostepH) {
    for (ow = ostartW; ow < oendW; ow += ostepW) {
      // Compute the gradients for the argmax input pixel
      T *ptr_gradOutput = gradOutput_dt + oh*osizeW + ow;
      int64_t *ptr_ind = indices_dt + oh*osizeW + ow;
      T grad_delta = *ptr_gradOutput;
      int64_t argmax = (*ptr_ind);
      atomicAdd(&(gradInput_d[argmax]), grad_delta);
    }
  }
}

template <typename scalar_t>
void atomicadaptivemaxgradinput_loop(
    scalar_t *gradInput_data, scalar_t *gradOutput_data, int64_t *indices_data,
    int64_t totalZ,
    int isizeT, int isizeH, int isizeW,
    int osizeT, int osizeH, int osizeW)
{
  int64_t offsetZ = 0;
  dim3 threads(32, 8);
  // each H*W plane is processed by blocksH thread blocks
  int blocksH = std::max((int)(16L / totalZ), 1);
  while (totalZ > 0) {
    dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH);
    atomicadaptivemaxgradinput<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
      gradInput_data, gradOutput_data, indices_data,
      isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, offsetZ);

    totalZ -= 65535;
    offsetZ += 65535;
    THCudaCheck(cudaGetLastError());
  }
}

// 5d tensor B x D x T x H x W

void adaptive_max_pool3d_out_cuda_template(
    Tensor& output,
    Tensor& indices,
    const Tensor& input_,
    IntArrayRef output_size)
{
  TensorArg output_arg{ output, "output", 1 };
  TensorArg indices_arg{ indices, "indices", 2 };
  TensorArg input_arg{ input_, "input_", 3 };

  checkAllSameGPU("adaptive_max_pool3d_cuda", {output_arg, indices_arg, input_arg});

  for (int64_t i = 0; i < input_.ndimension(); i++) {
    AT_CHECK(input_.size(i) > 0,
      "adaptive_max_pool3d_cuda(): expected input to have non-empty spatial dimensions, "
      "but input has sizes ", input_.sizes(), " with dimension ", i, " being "
      "empty");
  }

  AT_CHECK((input_.ndimension() == 4 || input_.ndimension() == 5),
    "non-empty 4D or 5D (batch mode) tensor expected for input");

  // Issue #20215: the JIT sometimes passes output_size.size() == 1.
  AT_CHECK(output_size.size() == 1 || output_size.size() == 3,
    "adaptive_max_pool3d: internal error: output_size.size() must be 1 or 3");

  int64_t osizeT = output_size[0];
  int64_t osizeH = output_size.size() == 1 ? output_size[0] : output_size[1];
  int64_t osizeW = output_size.size() == 1 ? output_size[0] : output_size[2];

  int64_t sizeD, isizeT, isizeH, isizeW;
  int64_t istrideD, istrideT, istrideH, istrideW;
  int64_t totalZ;

  const Tensor& input = input_.ndimension() == 4 ? input_ : input_.contiguous();

  if (input.ndimension() == 4) {
    sizeD = input.size(0);
    isizeT = input.size(1);
    isizeH = input.size(2);
    isizeW = input.size(3);

    istrideD = input.stride(0);
    istrideT = input.stride(1);
    istrideH = input.stride(2);
    istrideW = input.stride(3);

    output.resize_({sizeD, osizeT, osizeH, osizeW});
    indices.resize_({sizeD, osizeT, osizeH, osizeW});

    totalZ = sizeD * osizeT;
  } else {
    int64_t sizeB = input.size(0);
    sizeD = input.size(1);
    isizeT = input.size(2);
    isizeH = input.size(3);
    isizeW = input.size(4);

    istrideD = input.stride(1);
    istrideT = input.stride(2);
    istrideH = input.stride(3);
    istrideW = input.stride(4);

    output.resize_({sizeB, sizeD, osizeT, osizeH, osizeW});
    indices.resize_({sizeB, sizeD, osizeT, osizeH, osizeW});

    totalZ = sizeB * sizeD * osizeT;
  }

  AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "adaptive_max_pool3d_cuda", [&] {
    scalar_t *input_data = input.data<scalar_t>();
    scalar_t *output_data = output.data<scalar_t>();
    int64_t *indices_data = indices.data<int64_t>();

    adaptivemaxpool_loop(
      input_data, output_data, indices_data,
      totalZ,
      isizeT, isizeH, isizeW, osizeT, osizeH, osizeW,
      istrideD, istrideT, istrideH, istrideW);
  } );
}

void adaptive_max_pool3d_backward_out_cuda_template(
    Tensor& gradInput,
    const Tensor& gradOutput_,
    const Tensor& input,
    const Tensor& indices)
{
  TensorArg grad_input_arg{ gradInput, "gradInput", 1 };
  TensorArg grad_output_arg{ gradOutput_, "gradOutput_", 2 };
  TensorArg input_arg{ input, "input", 3 };
  TensorArg indices_arg{ indices, "indices", 4 };

  checkAllSameGPU("adaptive_max_pool3d_out_cuda",
                  {grad_input_arg, grad_output_arg, input_arg, indices_arg});

  const Tensor gradOutput = gradOutput_.contiguous();

  gradInput.resize_as_(input);
  gradInput.zero_();

  int64_t sizeD, isizeT, isizeH, isizeW;
  int64_t osizeT, osizeH, osizeW;
  int64_t totalZ;

  if (input.ndimension() == 4) {
    sizeD = input.size(0);
    isizeT = input.size(1);
    isizeH = input.size(2);
    isizeW = input.size(3);

    osizeT = gradOutput.size(1);
    osizeH = gradOutput.size(2);
    osizeW = gradOutput.size(3);
  } else {
    sizeD = input.size(1);
    isizeT = input.size(2);
    isizeH = input.size(3);
    isizeW = input.size(4);

    osizeT = gradOutput.size(2);
    osizeH = gradOutput.size(3);
    osizeW = gradOutput.size(4);
  }

  bool atomic = (isizeW%osizeW != 0) || (isizeH%osizeH != 0) || (isizeT%osizeT != 0);

  if (input.ndimension() == 4) {
    totalZ = sizeD * osizeT;
  } else {
    int sizeB = input.size(0);
    totalZ = sizeB * sizeD * osizeT;
  }

  if (atomic) {
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "adaptive_max_pool3d_backward_cuda", [&] {
      scalar_t *gradInput_data = gradInput.data<scalar_t>();
      scalar_t *gradOutput_data = gradOutput.data<scalar_t>();
      int64_t *indices_data = indices.data<int64_t>();

      atomicadaptivemaxgradinput_loop(
        gradInput_data, gradOutput_data, indices_data,
        totalZ,
        isizeT, isizeH, isizeW, osizeT, osizeH, osizeW);
    } );
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "adaptive_max_pool3d_backward_cuda", [&] {
      scalar_t *gradInput_data = gradInput.data<scalar_t>();
      scalar_t *gradOutput_data = gradOutput.data<scalar_t>();
      int64_t *indices_data = indices.data<int64_t>();

      adaptivemaxgradinput_loop(
        gradInput_data, gradOutput_data, indices_data,
        totalZ,
        isizeT, isizeH, isizeW, osizeT, osizeH, osizeW);
    } );
  }
}

} // namespace

std::tuple<Tensor&, Tensor&> adaptive_max_pool3d_out_cuda(
    Tensor& output,
    Tensor& indices,
    const Tensor& input,
    IntArrayRef output_size)
{
  adaptive_max_pool3d_out_cuda_template(output, indices, input, output_size);
  return std::tuple<Tensor&, Tensor&>(output,
indices); } std::tuple<Tensor, Tensor> adaptive_max_pool3d_cuda( const Tensor& input, IntArrayRef output_size) { Tensor output = at::empty({0}, input.options()); Tensor indices = at::empty({0}, input.options().dtype(kLong)); adaptive_max_pool3d_out_cuda_template( output, indices, input, output_size); return std::tuple<Tensor, Tensor>(output, indices); } Tensor& adaptive_max_pool3d_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input, const Tensor& indices) { adaptive_max_pool3d_backward_out_cuda_template( gradInput, gradOutput_, input, indices); return gradInput; } Tensor adaptive_max_pool3d_backward_cuda( const Tensor& gradOutput_, const Tensor& input, const Tensor& indices) { auto gradInput = at::zeros_like(input); adaptive_max_pool3d_backward_out_cuda_template( gradInput, gradOutput_, input, indices); return gradInput; } } // at::native } // at
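A note on the window arithmetic used by the adaptive pooling kernels above: output cell o along an axis with input size isize and output size osize reads the half-open range [floor(o*isize/osize), ceil((o+1)*isize/osize)). When isize is not a multiple of osize these ranges overlap, which is exactly the condition under which the backward path above switches to the atomicAdd kernel. The host-side snippet below is only an illustration of that arithmetic; the function names mirror the device helpers but the program itself is ours.

// Illustrative host-side check of the adaptive-pooling window math used above.
// start_index/end_index mirror the __device__ helpers in the kernel.
#include <cmath>
#include <cstdio>

static int start_index(int o, int osize, int isize) {
  return (int)std::floor((float)(o * isize) / osize);
}
static int end_index(int o, int osize, int isize) {
  return (int)std::ceil((float)((o + 1) * isize) / osize);
}

int main() {
  const int isize = 5, osize = 3;   // isize % osize != 0, so windows overlap
  for (int o = 0; o < osize; ++o) {
    printf("output %d reads input [%d, %d)\n",
           o, start_index(o, osize, isize), end_index(o, osize, isize));
  }
  // Prints [0,2), [1,4), [3,5): input element 1 is shared by outputs 0 and 1,
  // which is why the backward kernel falls back to atomicAdd in that case.
  return 0;
}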
fa3b16f84772601f2a149455369ee83c7bb2767e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <time.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" __global__ void misaligned_write_test(float* a, float* b, float *c, int size, int offset) { int gid = blockIdx.x * blockDim.x + threadIdx.x; int k = gid + offset; if (k < size) c[k] = a[gid] + b[gid]; } //int main(int argc, char** argv) //{ // printf("Runing 1D grid \n"); // int size = 1 << 25; // int block_size = 128; // unsigned int byte_size = size * sizeof(float); // int offset = 0; // // if (argc > 1) // offset = atoi(argv[1]); // // printf("Input size : %d \n", size); // // float * h_a, *h_b, *h_ref; // h_a = (float*)malloc(byte_size); // h_b = (float*)malloc(byte_size); // h_ref = (float*)malloc(byte_size); // // // if (!h_a) // printf("host memory allocation error \n"); // // for (size_t i = 0; i < size; i++) // { // h_a[i] = i % 10; // h_b[i] = i % 7; // } // // dim3 block(block_size); // dim3 grid((size + block.x - 1) / block.x); // // printf("Kernel is lauch with grid(%d,%d,%d) and block(%d,%d,%d) \n", // grid.x, grid.y, grid.z, block.x, block.y, block.z); // // float *d_a, *d_b, *d_c; // // hipMalloc((void**)&d_a, byte_size); // hipMalloc((void**)&d_b, byte_size); // hipMalloc((void**)&d_c, byte_size); // hipMemset(d_c, 0, byte_size); // // hipMemcpy(d_a, h_a, byte_size, hipMemcpyHostToDevice); // hipMemcpy(d_b, h_b, byte_size, hipMemcpyHostToDevice); // // misaligned_write_test << <grid, block >> > (d_a, d_b, d_c, size, offset); // // hipDeviceSynchronize(); // hipMemcpy(h_ref, d_c, byte_size, hipMemcpyDeviceToHost); // // hipFree(d_c); // hipFree(d_b); // hipFree(d_a); // free(h_ref); // free(h_b); // free(h_a); //}
fa3b16f84772601f2a149455369ee83c7bb2767e.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" __global__ void misaligned_write_test(float* a, float* b, float *c, int size, int offset) { int gid = blockIdx.x * blockDim.x + threadIdx.x; int k = gid + offset; if (k < size) c[k] = a[gid] + b[gid]; } //int main(int argc, char** argv) //{ // printf("Runing 1D grid \n"); // int size = 1 << 25; // int block_size = 128; // unsigned int byte_size = size * sizeof(float); // int offset = 0; // // if (argc > 1) // offset = atoi(argv[1]); // // printf("Input size : %d \n", size); // // float * h_a, *h_b, *h_ref; // h_a = (float*)malloc(byte_size); // h_b = (float*)malloc(byte_size); // h_ref = (float*)malloc(byte_size); // // // if (!h_a) // printf("host memory allocation error \n"); // // for (size_t i = 0; i < size; i++) // { // h_a[i] = i % 10; // h_b[i] = i % 7; // } // // dim3 block(block_size); // dim3 grid((size + block.x - 1) / block.x); // // printf("Kernel is lauch with grid(%d,%d,%d) and block(%d,%d,%d) \n", // grid.x, grid.y, grid.z, block.x, block.y, block.z); // // float *d_a, *d_b, *d_c; // // cudaMalloc((void**)&d_a, byte_size); // cudaMalloc((void**)&d_b, byte_size); // cudaMalloc((void**)&d_c, byte_size); // cudaMemset(d_c, 0, byte_size); // // cudaMemcpy(d_a, h_a, byte_size, cudaMemcpyHostToDevice); // cudaMemcpy(d_b, h_b, byte_size, cudaMemcpyHostToDevice); // // misaligned_write_test << <grid, block >> > (d_a, d_b, d_c, size, offset); // // cudaDeviceSynchronize(); // cudaMemcpy(h_ref, d_c, byte_size, cudaMemcpyDeviceToHost); // // cudaFree(d_c); // cudaFree(d_b); // cudaFree(d_a); // free(h_ref); // free(h_b); // free(h_a); //}
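The commented-out driver above already sketches the intended usage; the only interesting parameter is offset, which shifts the store index k = gid + offset so that writes to c no longer start on a 128-byte segment boundary. As a hedged addition (not part of the original file; time_offset is a name introduced here), one way to measure the effect is to wrap the launch in CUDA events:

// Sketch: time misaligned_write_test for a given offset using CUDA events.
// Assumes d_a, d_b, d_c are already allocated and initialized as in the
// commented-out main() above.
float time_offset(float* d_a, float* d_b, float* d_c, int size, int offset)
{
    dim3 block(128);
    dim3 grid((size + block.x - 1) / block.x);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    misaligned_write_test<<<grid, block>>>(d_a, d_b, d_c, size, offset);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;   // offsets that break 128-byte alignment tend to run measurably slower
}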
8d074722b63110f259a8a5911dbadd4fd2da4d7e.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <benchmark/benchmark.h> #include <thrust/iterator/counting_iterator.h> #include <cudf/column/column_factories.hpp> #include <cudf/join.hpp> #include <cudf/table/table.hpp> #include <cudf/table/table_view.hpp> #include <cudf/utilities/error.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/column_wrapper.hpp> #include <fixture/benchmark_fixture.hpp> #include <synchronization/synchronization.hpp> #include <vector> #include "generate_input_tables_hip.cuh" template <typename key_type, typename payload_type> class Join : public cudf::benchmark { }; template <typename key_type, typename payload_type, bool Nullable> static void BM_join(benchmark::State &state) { const cudf::size_type build_table_size{(cudf::size_type)state.range(0)}; const cudf::size_type probe_table_size{(cudf::size_type)state.range(1)}; const cudf::size_type rand_max_val{build_table_size * 2}; const double selectivity = 0.3; const bool is_build_table_key_unique = true; // Generate build and probe tables cudf::test::UniformRandomGenerator<cudf::size_type> rand_gen(0, build_table_size); auto build_random_null_mask = [&rand_gen](int size) { if (Nullable) { // roughly 25% nulls auto validity = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [&rand_gen](auto i) { return (rand_gen.generate() & 3) == 0; }); return cudf::test::detail::make_null_mask(validity, validity + size); } else { return cudf::create_null_mask(size, cudf::mask_state::UNINITIALIZED); } }; std::unique_ptr<cudf::column> build_key_column = [&]() { return Nullable ? cudf::make_numeric_column(cudf::data_type(cudf::type_to_id<key_type>()), build_table_size, build_random_null_mask(build_table_size)) : cudf::make_numeric_column(cudf::data_type(cudf::type_to_id<key_type>()), build_table_size); }(); std::unique_ptr<cudf::column> probe_key_column = [&]() { return Nullable ? 
cudf::make_numeric_column(cudf::data_type(cudf::type_to_id<key_type>()), probe_table_size, build_random_null_mask(probe_table_size)) : cudf::make_numeric_column(cudf::data_type(cudf::type_to_id<key_type>()), probe_table_size); }(); generate_input_tables<key_type, cudf::size_type>( build_key_column->mutable_view().data<key_type>(), build_table_size, probe_key_column->mutable_view().data<key_type>(), probe_table_size, selectivity, rand_max_val, is_build_table_key_unique); auto payload_data_it = thrust::make_counting_iterator(0); cudf::test::fixed_width_column_wrapper<payload_type> build_payload_column( payload_data_it, payload_data_it + build_table_size); cudf::test::fixed_width_column_wrapper<payload_type> probe_payload_column( payload_data_it, payload_data_it + probe_table_size); CHECK_CUDA(0); cudf::table_view build_table({build_key_column->view(), build_payload_column}); cudf::table_view probe_table({probe_key_column->view(), probe_payload_column}); // Setup join parameters and result table std::vector<cudf::size_type> columns_to_join = {0}; // Benchmark the inner join operation for (auto _ : state) { cuda_event_timer raii(state, true, 0); auto result = cudf::inner_join(probe_table, build_table, columns_to_join, columns_to_join, {{0, 0}}, cudf::null_equality::UNEQUAL); } } #define JOIN_BENCHMARK_DEFINE(name, key_type, payload_type, nullable) \ BENCHMARK_TEMPLATE_DEFINE_F(Join, name, key_type, payload_type) \ (::benchmark::State & st) { BM_join<key_type, payload_type, nullable>(st); } JOIN_BENCHMARK_DEFINE(join_32bit, int32_t, int32_t, false); JOIN_BENCHMARK_DEFINE(join_64bit, int64_t, int64_t, false); JOIN_BENCHMARK_DEFINE(join_32bit_nulls, int32_t, int32_t, true); JOIN_BENCHMARK_DEFINE(join_64bit_nulls, int64_t, int64_t, true); BENCHMARK_REGISTER_F(Join, join_32bit) ->Unit(benchmark::kMillisecond) ->Args({100'000, 100'000}) ->Args({100'000, 400'000}) ->Args({100'000, 1'000'000}) ->Args({10'000'000, 10'000'000}) ->Args({10'000'000, 40'000'000}) ->Args({10'000'000, 100'000'000}) ->Args({100'000'000, 100'000'000}) ->Args({80'000'000, 240'000'000}) ->UseManualTime(); BENCHMARK_REGISTER_F(Join, join_64bit) ->Unit(benchmark::kMillisecond) ->Args({50'000'000, 50'000'000}) ->Args({40'000'000, 120'000'000}) ->UseManualTime(); BENCHMARK_REGISTER_F(Join, join_32bit_nulls) ->Unit(benchmark::kMillisecond) ->Args({100'000, 100'000}) ->Args({100'000, 400'000}) ->Args({100'000, 1'000'000}) ->Args({10'000'000, 10'000'000}) ->Args({10'000'000, 40'000'000}) ->Args({10'000'000, 100'000'000}) ->Args({100'000'000, 100'000'000}) ->Args({80'000'000, 240'000'000}) ->UseManualTime(); BENCHMARK_REGISTER_F(Join, join_64bit_nulls) ->Unit(benchmark::kMillisecond) ->Args({50'000'000, 50'000'000}) ->Args({40'000'000, 120'000'000}) ->UseManualTime();
8d074722b63110f259a8a5911dbadd4fd2da4d7e.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <benchmark/benchmark.h> #include <thrust/iterator/counting_iterator.h> #include <cudf/column/column_factories.hpp> #include <cudf/join.hpp> #include <cudf/table/table.hpp> #include <cudf/table/table_view.hpp> #include <cudf/utilities/error.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/column_wrapper.hpp> #include <fixture/benchmark_fixture.hpp> #include <synchronization/synchronization.hpp> #include <vector> #include "generate_input_tables.cuh" template <typename key_type, typename payload_type> class Join : public cudf::benchmark { }; template <typename key_type, typename payload_type, bool Nullable> static void BM_join(benchmark::State &state) { const cudf::size_type build_table_size{(cudf::size_type)state.range(0)}; const cudf::size_type probe_table_size{(cudf::size_type)state.range(1)}; const cudf::size_type rand_max_val{build_table_size * 2}; const double selectivity = 0.3; const bool is_build_table_key_unique = true; // Generate build and probe tables cudf::test::UniformRandomGenerator<cudf::size_type> rand_gen(0, build_table_size); auto build_random_null_mask = [&rand_gen](int size) { if (Nullable) { // roughly 25% nulls auto validity = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [&rand_gen](auto i) { return (rand_gen.generate() & 3) == 0; }); return cudf::test::detail::make_null_mask(validity, validity + size); } else { return cudf::create_null_mask(size, cudf::mask_state::UNINITIALIZED); } }; std::unique_ptr<cudf::column> build_key_column = [&]() { return Nullable ? cudf::make_numeric_column(cudf::data_type(cudf::type_to_id<key_type>()), build_table_size, build_random_null_mask(build_table_size)) : cudf::make_numeric_column(cudf::data_type(cudf::type_to_id<key_type>()), build_table_size); }(); std::unique_ptr<cudf::column> probe_key_column = [&]() { return Nullable ? 
cudf::make_numeric_column(cudf::data_type(cudf::type_to_id<key_type>()), probe_table_size, build_random_null_mask(probe_table_size)) : cudf::make_numeric_column(cudf::data_type(cudf::type_to_id<key_type>()), probe_table_size); }(); generate_input_tables<key_type, cudf::size_type>( build_key_column->mutable_view().data<key_type>(), build_table_size, probe_key_column->mutable_view().data<key_type>(), probe_table_size, selectivity, rand_max_val, is_build_table_key_unique); auto payload_data_it = thrust::make_counting_iterator(0); cudf::test::fixed_width_column_wrapper<payload_type> build_payload_column( payload_data_it, payload_data_it + build_table_size); cudf::test::fixed_width_column_wrapper<payload_type> probe_payload_column( payload_data_it, payload_data_it + probe_table_size); CHECK_CUDA(0); cudf::table_view build_table({build_key_column->view(), build_payload_column}); cudf::table_view probe_table({probe_key_column->view(), probe_payload_column}); // Setup join parameters and result table std::vector<cudf::size_type> columns_to_join = {0}; // Benchmark the inner join operation for (auto _ : state) { cuda_event_timer raii(state, true, 0); auto result = cudf::inner_join(probe_table, build_table, columns_to_join, columns_to_join, {{0, 0}}, cudf::null_equality::UNEQUAL); } } #define JOIN_BENCHMARK_DEFINE(name, key_type, payload_type, nullable) \ BENCHMARK_TEMPLATE_DEFINE_F(Join, name, key_type, payload_type) \ (::benchmark::State & st) { BM_join<key_type, payload_type, nullable>(st); } JOIN_BENCHMARK_DEFINE(join_32bit, int32_t, int32_t, false); JOIN_BENCHMARK_DEFINE(join_64bit, int64_t, int64_t, false); JOIN_BENCHMARK_DEFINE(join_32bit_nulls, int32_t, int32_t, true); JOIN_BENCHMARK_DEFINE(join_64bit_nulls, int64_t, int64_t, true); BENCHMARK_REGISTER_F(Join, join_32bit) ->Unit(benchmark::kMillisecond) ->Args({100'000, 100'000}) ->Args({100'000, 400'000}) ->Args({100'000, 1'000'000}) ->Args({10'000'000, 10'000'000}) ->Args({10'000'000, 40'000'000}) ->Args({10'000'000, 100'000'000}) ->Args({100'000'000, 100'000'000}) ->Args({80'000'000, 240'000'000}) ->UseManualTime(); BENCHMARK_REGISTER_F(Join, join_64bit) ->Unit(benchmark::kMillisecond) ->Args({50'000'000, 50'000'000}) ->Args({40'000'000, 120'000'000}) ->UseManualTime(); BENCHMARK_REGISTER_F(Join, join_32bit_nulls) ->Unit(benchmark::kMillisecond) ->Args({100'000, 100'000}) ->Args({100'000, 400'000}) ->Args({100'000, 1'000'000}) ->Args({10'000'000, 10'000'000}) ->Args({10'000'000, 40'000'000}) ->Args({10'000'000, 100'000'000}) ->Args({100'000'000, 100'000'000}) ->Args({80'000'000, 240'000'000}) ->UseManualTime(); BENCHMARK_REGISTER_F(Join, join_64bit_nulls) ->Unit(benchmark::kMillisecond) ->Args({50'000'000, 50'000'000}) ->Args({40'000'000, 120'000'000}) ->UseManualTime();
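One small point worth making explicit about the benchmark above: the predicate (rand_gen.generate() & 3) == 0 inside build_random_null_mask is true for roughly one value in four, which is where the "roughly 25% nulls" comment comes from. A stand-alone host check of that probability (illustrative only, using plain rand() instead of cudf's generator):

// Illustrative: verify that "(x & 3) == 0" holds for about 25% of uniformly
// distributed integers, matching the "roughly 25% nulls" comment above.
#include <cstdio>
#include <cstdlib>

int main() {
  const int n = 1000000;
  int hits = 0;
  for (int i = 0; i < n; ++i) {
    if ((rand() & 3) == 0) ++hits;   // true for 1 of the 4 possible low-bit patterns
  }
  printf("fraction selected: %.3f\n", (double)hits / n);   // ~0.25
  return 0;
}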
119a226cfb6dc6d48d192fbc2ad59f71fb1a1d3d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Rippling.h"

#include <iostream>
#include <assert.h>

using std::cout;
using std::endl;

/*----------------------------------------------------------------------*\
 |*                        Declaration                                   *|
 \*---------------------------------------------------------------------*/

/*--------------------------------------*\
 |*             Imported                *|
 \*-------------------------------------*/

extern __global__ void rippling(uchar4* tabPixelsGM, uint w, uint h, float t);

/*----------------------------------------------------------------------*\
 |*                        Implementation                                *|
 \*---------------------------------------------------------------------*/

/*--------------------------------------*\
 |*             Public                  *|
 \*-------------------------------------*/

/*-------------------------*\
 |*     Constructor        *|
 \*-------------------------*/

Rippling::Rippling(const Grid& grid, uint w, uint h, float dt, bool isVerbose) :
	Animable_I<uchar4>(grid, w, h, "Rippling-Cuda-uchar4", isVerbose) // super class
    {
    assert(w == h); // specific rippling

    // Animation
    this->dt = dt;
    this->t = 0; // protected in Animable
    }

Rippling::~Rippling()
    {
    // nothing
    }

/*-------------------------*\
 |*     Method             *|
 \*-------------------------*/

/**
 * Override
 * Called periodically by the API
 *
 * Note: domaineMath is not used because the image is not zoomable
 */
void Rippling::process(uchar4* tabPixelsGM, uint w, uint h, const DomaineMath& domaineMath)
    {
    hipLaunchKernelGGL(( rippling), dim3(Animable_I::dg),dim3(Animable_I::db), 0, 0, tabPixelsGM, w, h, t);
    }

/**
 * Override
 * Called periodically by the API
 */
void Rippling::animationStep()
    {
    t += dt; // why do the fps in image mode drop sharply when dt is smaller?
    }

/*----------------------------------------------------------------------*\
 |*                        End                                           *|
 \*---------------------------------------------------------------------*/
119a226cfb6dc6d48d192fbc2ad59f71fb1a1d3d.cu
#include "Rippling.h" #include <iostream> #include <assert.h> #include <assert.h> using std::cout; using std::endl; /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ extern __global__ void rippling(uchar4* tabPixelsGM,uint w, uint h,float t); /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*-------------------------*\ |* Constructeur *| \*-------------------------*/ Rippling::Rippling(const Grid& grid, uint w, uint h, float dt,bool isVerbose) : Animable_I<uchar4>(grid, w, h, "Rippling-Cuda-uchar4",isVerbose)// super classe { assert(w == h); // specific rippling // Animation this->dt = dt; this->t = 0; // protected dans Animable } Rippling::~Rippling() { // rien } /*-------------------------*\ |* Methode *| \*-------------------------*/ /** * Override * Call periodicly by the API * * Note : domaineMath pas use car image pas zoomable */ void Rippling::process(uchar4* tabPixelsGM, uint w, uint h, const DomaineMath& domaineMath) { rippling<<<Animable_I::dg,Animable_I::db>>>(tabPixelsGM, w, h, t); } /** * Override * Call periodicly by the API */ void Rippling::animationStep() { t += dt; // pourquoi si dt plus petit, les fps en mode image diminue fortement ? } /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
46196e1a30b72f9b52111ebefb64738a2d1b1dc8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. #include <ATen/ATen.h> #include <THH/THH.h> #include <THH/THHDeviceUtils.cuh> #include <vector> #include <iostream> int const threadsPerBlock = sizeof(unsigned long long) * 8; __device__ inline float devIoU(float const * const a, float const * const b) { float left = max(a[0], b[0]), right = min(a[2], b[2]); float top = max(a[1], b[1]), bottom = min(a[3], b[3]); float width = max(right - left, 0.f), height = max(bottom - top, 0.f); float interS = width * height; float Sa = (a[2] - a[0]) * (a[3] - a[1]); float Sb = (b[2] - b[0]) * (b[3] - b[1]); return interS / (Sa + Sb - interS); } __global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh, const float *dev_boxes, unsigned long long *dev_mask) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 5; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } // boxes is a N x 5 tensor at::Tensor nms_gpu(const at::Tensor& boxes, float nms_overlap_thresh, int64_t top_k) { using scalar_t = float; AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor"); int boxes_num = ::min(boxes.size(0), top_k); auto scores = boxes.select(1, 4); auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); auto boxes_sorted = boxes.index_select(0, order_t); const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock); scalar_t* boxes_dev = boxes_sorted.data<scalar_t>(); THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState unsigned long long* mask_dev = reinterpret_cast<unsigned long long*>(THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long))); // unsigned long long* mask_dev = NULL; // THCudaCheck(THCudaMalloc(state, (void**) &mask_dev, // boxes_num * col_blocks * sizeof(unsigned long long))); dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock), THCCeilDiv(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_overlap_thresh, boxes_dev, mask_dev); std::vector<unsigned long long> mask_host(boxes_num * col_blocks); THCudaCheck(hipMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * boxes_num * 
col_blocks, hipMemcpyDeviceToHost)); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU)); int64_t* keep_out = keep.data<int64_t>(); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep_out[num_to_keep++] = i; unsigned long long *p = &mask_host[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } THCudaFree(state, mask_dev); // TODO improve this part return std::get<0>(order_t.index({keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep)}).sort(0, false)); }
46196e1a30b72f9b52111ebefb64738a2d1b1dc8.cu
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. #include <ATen/ATen.h> #include <THC/THC.h> #include <THC/THCDeviceUtils.cuh> #include <vector> #include <iostream> int const threadsPerBlock = sizeof(unsigned long long) * 8; __device__ inline float devIoU(float const * const a, float const * const b) { float left = max(a[0], b[0]), right = min(a[2], b[2]); float top = max(a[1], b[1]), bottom = min(a[3], b[3]); float width = max(right - left, 0.f), height = max(bottom - top, 0.f); float interS = width * height; float Sa = (a[2] - a[0]) * (a[3] - a[1]); float Sb = (b[2] - b[0]) * (b[3] - b[1]); return interS / (Sa + Sb - interS); } __global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh, const float *dev_boxes, unsigned long long *dev_mask) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 5; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } // boxes is a N x 5 tensor at::Tensor nms_gpu(const at::Tensor& boxes, float nms_overlap_thresh, int64_t top_k) { using scalar_t = float; AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor"); int boxes_num = std::min(boxes.size(0), top_k); auto scores = boxes.select(1, 4); auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); auto boxes_sorted = boxes.index_select(0, order_t); const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock); scalar_t* boxes_dev = boxes_sorted.data<scalar_t>(); THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState unsigned long long* mask_dev = reinterpret_cast<unsigned long long*>(THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long))); // unsigned long long* mask_dev = NULL; // THCudaCheck(THCudaMalloc(state, (void**) &mask_dev, // boxes_num * col_blocks * sizeof(unsigned long long))); dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock), THCCeilDiv(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); nms_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes_dev, mask_dev); std::vector<unsigned long long> mask_host(boxes_num * col_blocks); THCudaCheck(cudaMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * boxes_num * col_blocks, cudaMemcpyDeviceToHost)); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned 
long long) * col_blocks); at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU)); int64_t* keep_out = keep.data<int64_t>(); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep_out[num_to_keep++] = i; unsigned long long *p = &mask_host[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } THCudaFree(state, mask_dev); // TODO improve this part return std::get<0>(order_t.index({keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep)}).sort(0, false)); }
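The host loop at the end of nms_gpu is compact enough to misread: every box owns col_blocks 64-bit words in mask_host, and bit (j % 64) of word (j / 64) in box i's row is set when box i suppresses box j (boxes are already sorted by score). The remv words accumulate the suppression sets of the boxes kept so far. A stripped-down host version of the same bookkeeping (select_kept is a name introduced here, not part of the original file):

// Sketch of the suppression bookkeeping used by nms_gpu above, on plain host
// data.  mask[i * col_blocks + b] has bit k set when box i suppresses box
// (b * 64 + k); boxes are assumed to be sorted by descending score.
#include <cstddef>
#include <cstdint>
#include <vector>

std::vector<int> select_kept(const std::vector<uint64_t>& mask,
                             int n_boxes, int col_blocks)
{
    std::vector<uint64_t> remv(col_blocks, 0);   // bits of already-suppressed boxes
    std::vector<int> keep;

    for (int i = 0; i < n_boxes; ++i) {
        int block = i / 64;
        int bit   = i % 64;
        if (!(remv[block] & (1ULL << bit))) {    // box i not suppressed by any kept box
            keep.push_back(i);
            const uint64_t* row = &mask[(size_t)i * col_blocks];
            for (int b = block; b < col_blocks; ++b)
                remv[b] |= row[b];               // everything i suppresses is now removed
        }
    }
    return keep;
}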
963417fbbf953f2370d737d987487ca1b5a30c99.hip
// !!! This is a file automatically generated by hipify!!! /*----------------------------------------------------------- ** gaussian.cu -- The program is to solve a linear system Ax = b ** by using Gaussian Elimination. The algorithm on page 101 ** ("Foundations of Parallel Programming") is used. ** The sequential version is gaussian.c. This parallel ** implementation converts three independent for() loops ** into three Fans. Use the data file ge_3.dat to verify ** the correction of the output. ** ** Written by Andreas Kura, 02/15/95 ** Modified by Chong-wei Xu, 04/20/95 ** Modified by Chris Gregg for CUDA, 07/20/2009 **----------------------------------------------------------- */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include "hip/hip_runtime.h" #include <string.h> #define MAXBLOCKSIZE 512 int Size; float *a, *b, *finalVec; float *m; FILE *fp; void InitProblemOnce(char *filename); void InitPerRun(); void ForwardSub(); void BackSub(); void InitMat(float *ary, int nrow, int ncol); void InitAry(float *ary, int ary_size); void PrintMat(float *ary, int nrow, int ncolumn); void PrintAry(float *ary, int ary_size); void PrintDeviceProperties(); void checkCUDAError(const char *msg); unsigned int totalKernelTime = 0; int main(int argc, char *argv[]) { int verbose = 1; if (argc < 2) { printf("Usage: gaussian matrix.txt [-q]\n\n"); printf("-q (quiet) suppresses printing the matrix and result values.\n"); printf("The first line of the file contains the dimension of the matrix, n."); printf("The second line of the file is a newline.\n"); printf("The next n lines contain n tab separated values for the matrix."); printf("The next line of the file is a newline.\n"); printf("The next line of the file is a 1xn vector with tab separated values.\n"); printf("The next line of the file is a newline. (optional)\n"); printf("The final line of the file is the pre-computed solution. 
(optional)\n"); printf("Example: matrix4.txt:\n"); printf("4\n"); printf("\n"); printf("-0.6 -0.5 0.7 0.3\n"); printf("-0.3 -0.9 0.3 0.7\n"); printf("-0.4 -0.5 -0.3 -0.8\n"); printf("0.0 -0.1 0.2 0.9\n"); printf("\n"); printf("-0.85 -0.68 0.24 -0.53\n"); printf("\n"); printf("0.7 0.0 -0.4 -0.5\n"); exit(0); } //PrintDeviceProperties(); //char filename[100]; //sprintf(filename,"matrices/matrix%d.txt",size); InitProblemOnce(argv[1]); if (argc > 2) { if (!strcmp(argv[2],"-q")) verbose = 0; } //InitProblemOnce(filename); InitPerRun(); //begin timing struct timeval time_start; gettimeofday(&time_start, NULL); // run kernels ForwardSub(); //end timing struct timeval time_end; gettimeofday(&time_end, NULL); unsigned int time_total = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec); if (verbose) { printf("Matrix m is: \n"); PrintMat(m, Size, Size); printf("Matrix a is: \n"); PrintMat(a, Size, Size); printf("Array b is: \n"); PrintAry(b, Size); } BackSub(); if (verbose) { printf("The final solution is: \n"); PrintAry(finalVec,Size); } printf("\nTime total (including memory transfers)\t%f sec\n", time_total * 1e-6); printf("Time for CUDA kernels:\t%f sec\n",totalKernelTime * 1e-6); /*printf("%d,%d\n",size,time_total); fprintf(stderr,"%d,%d\n",size,time_total);*/ free(m); free(a); free(b); } /*------------------------------------------------------ ** PrintDeviceProperties **----------------------------------------------------- */ void PrintDeviceProperties(){ hipDeviceProp_t deviceProp; int nDevCount = 0; hipGetDeviceCount( &nDevCount ); printf( "Total Device found: %d", nDevCount ); for (int nDeviceIdx = 0; nDeviceIdx < nDevCount; ++nDeviceIdx ) { memset( &deviceProp, 0, sizeof(deviceProp)); if( hipSuccess == hipGetDeviceProperties(&deviceProp, nDeviceIdx)) { printf( "\nDevice Name \t\t - %s ", deviceProp.name ); printf( "\n**************************************"); printf( "\nTotal Global Memory\t\t\t - %lu KB", deviceProp.totalGlobalMem/1024 ); printf( "\nShared memory available per block \t - %lu KB", deviceProp.sharedMemPerBlock/1024 ); printf( "\nNumber of registers per thread block \t - %d", deviceProp.regsPerBlock ); printf( "\nWarp size in threads \t\t\t - %d", deviceProp.warpSize ); //printf( "\nMemory Pitch \t\t\t\t - %zu bytes", deviceProp.memPitch ); printf( "\nMaximum threads per block \t\t - %d", deviceProp.maxThreadsPerBlock ); printf( "\nMaximum Thread Dimension (block) \t - %d %d %d", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2] ); printf( "\nMaximum Thread Dimension (grid) \t - %d %d %d", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2] ); printf( "\nTotal constant memory \t\t\t - %zu bytes", deviceProp.totalConstMem ); printf( "\nCUDA ver \t\t\t\t - %d.%d", deviceProp.major, deviceProp.minor ); printf( "\nClock rate \t\t\t\t - %d KHz", deviceProp.clockRate ); //printf( "\nTexture Alignment \t\t\t - %zu bytes", deviceProp.textureAlignment ); //printf( "\nDevice Overlap \t\t\t\t - %s", deviceProp. deviceOverlap?"Allowed":"Not Allowed" ); printf( "\nNumber of Multi processors \t\t - %d\n\n", deviceProp.multiProcessorCount ); } else printf( "\n%s", hipGetErrorString(hipGetLastError())); } } /*------------------------------------------------------ ** InitProblemOnce -- Initialize all of matrices and ** vectors by opening a data file specified by the user. ** ** We used dynamic array *a, *b, and *m to allocate ** the memory storages. 
**------------------------------------------------------ */ void InitProblemOnce(char *filename) { //char *filename = argv[1]; //printf("Enter the data file name: "); //scanf("%s", filename); //printf("The file name is: %s\n", filename); fp = fopen(filename, "r"); fscanf(fp, "%d", &Size); a = (float *) malloc(Size * Size * sizeof(float)); InitMat(a, Size, Size); //printf("The input matrix a is:\n"); //PrintMat(a, Size, Size); b = (float *) malloc(Size * sizeof(float)); InitAry(b, Size); //printf("The input array b is:\n"); //PrintAry(b, Size); m = (float *) malloc(Size * Size * sizeof(float)); } /*------------------------------------------------------ ** InitPerRun() -- Initialize the contents of the ** multipier matrix **m **------------------------------------------------------ */ void InitPerRun() { int i; for (i=0; i<Size*Size; i++) *(m+i) = 0.0; } /*------------------------------------------------------- ** Fan1() -- Calculate multiplier matrix ** Pay attention to the index. Index i give the range ** which starts from 0 to range-1. The real values of ** the index should be adjust and related with the value ** of t which is defined on the ForwardSub(). **------------------------------------------------------- */ __global__ void Fan1(float *m_cuda, float *a_cuda, int size, int t){ int gid = threadIdx.x + blockIdx.x * blockDim.x; if(gid < size-1-t){ *(m_cuda+size*(gid + t + 1)+t) = *(a_cuda+size*(gid + t + 1) + t) / *(a_cuda + size * t + t); } } /*------------------------------------------------------- ** Fan2() -- Modify the matrix A into LUD **------------------------------------------------------- */ __global__ void Fan2(float *m_cuda, float *a_cuda, float *b_cuda,int Size, int j1, int t) { if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) return; if(threadIdx.y + blockIdx.y * blockDim.y >= Size-t) return; int xidx = blockIdx.x * blockDim.x + threadIdx.x; int yidx = blockIdx.y * blockDim.y + threadIdx.y; //printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y); a_cuda[Size*(xidx+1+t)+(yidx+t)] -= m_cuda[Size*(xidx+1+t)+t] * a_cuda[Size*t+(yidx+t)]; //a_cuda[xidx+1+t][yidx+t] -= m_cuda[xidx+1+t][t] * a_cuda[t][yidx+t]; if(yidx == 0){ //printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y); //printf("xidx:%d,yidx:%d\n",xidx,yidx); b_cuda[xidx+1+t] -= m_cuda[Size*(xidx+1+t)+(yidx+t)] * b_cuda[t]; } } /*------------------------------------------------------ ** ForwardSub() -- Forward substitution of Gaussian ** elimination. **------------------------------------------------------ */ void ForwardSub() { int t; float *m_cuda,*a_cuda,*b_cuda; // allocate memory on GPU hipMalloc((void **) &m_cuda, Size * Size * sizeof(float)); hipMalloc((void **) &a_cuda, Size * Size * sizeof(float)); hipMalloc((void **) &b_cuda, Size * sizeof(float)); // copy memory to GPU hipMemcpy(m_cuda, m, Size * Size * sizeof(float),hipMemcpyHostToDevice ); hipMemcpy(a_cuda, a, Size * Size * sizeof(float),hipMemcpyHostToDevice ); hipMemcpy(b_cuda, b, Size * sizeof(float),hipMemcpyHostToDevice ); int block_size,grid_size; //if we have smaller work to do than the max block size, just run 1 block of that size block_size = (Size % MAXBLOCKSIZE == 0) ? MAXBLOCKSIZE : Size; grid_size = (Size/block_size) + (!(Size%block_size)? 
0:1); //block_size = 1; //grid_size = 1; //printf("1d grid size: %d\n",grid_size); dim3 dimBlock(block_size); dim3 dimGrid(grid_size); //dim3 dimGrid( (N/dimBlock.x) + (!(N%dimBlock.x)?0:1) ); int blockSize2d, gridSize2d; blockSize2d = 4; gridSize2d = (Size/blockSize2d) + (!(Size%blockSize2d?0:1)); dim3 dimBlockXY(blockSize2d,blockSize2d); dim3 dimGridXY(gridSize2d,gridSize2d); printf("sizing info: %dx%d blocks, in a %dx%d grid\n",blockSize2d,blockSize2d,gridSize2d,gridSize2d); // begin timing kernels struct timeval time_start; gettimeofday(&time_start, NULL); for (t=0; t<(Size-1); t++) { hipLaunchKernelGGL(( Fan1), dim3(dimGrid),dim3(dimBlock), 0, 0, m_cuda,a_cuda,Size,t); hipDeviceSynchronize(); hipLaunchKernelGGL(( Fan2), dim3(dimGridXY),dim3(dimBlockXY), 0, 0, m_cuda,a_cuda,b_cuda,Size,Size-t,t); hipDeviceSynchronize(); checkCUDAError("Fan2"); } // end timing kernels struct timeval time_end; gettimeofday(&time_end, NULL); totalKernelTime = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec); // copy memory back to CPU hipMemcpy(m, m_cuda, Size * Size * sizeof(float),hipMemcpyDeviceToHost ); hipMemcpy(a, a_cuda, Size * Size * sizeof(float),hipMemcpyDeviceToHost ); hipMemcpy(b, b_cuda, Size * sizeof(float),hipMemcpyDeviceToHost ); hipFree(m_cuda); hipFree(a_cuda); hipFree(b_cuda); } /*------------------------------------------------------ ** BackSub() -- Backward substitution **------------------------------------------------------ */ void BackSub() { // create a new vector to hold the final answer finalVec = (float *) malloc(Size * sizeof(float)); // solve "bottom up" int i,j; for(i=0;i<Size;i++){ finalVec[Size-i-1]=b[Size-i-1]; for(j=0;j<i;j++) { finalVec[Size-i-1]-=*(a+Size*(Size-i-1)+(Size-j-1)) * finalVec[Size-j-1]; } finalVec[Size-i-1]=finalVec[Size-i-1]/ *(a+Size*(Size-i-1)+(Size-i-1)); } } void InitMat(float *ary, int nrow, int ncol) { int i, j; for (i=0; i<nrow; i++) { for (j=0; j<ncol; j++) { fscanf(fp, "%f", ary+Size*i+j); } } } /*------------------------------------------------------ ** PrintMat() -- Print the contents of the matrix **------------------------------------------------------ */ void PrintMat(float *ary, int nrow, int ncol) { int i, j; for (i=0; i<nrow; i++) { for (j=0; j<ncol; j++) { printf("%8.2f ", *(ary+Size*i+j)); } printf("\n"); } printf("\n"); } /*------------------------------------------------------ ** InitAry() -- Initialize the array (vector) by reading ** data from the data file **------------------------------------------------------ */ void InitAry(float *ary, int ary_size) { int i; for (i=0; i<ary_size; i++) { fscanf(fp, "%f", &ary[i]); } } /*------------------------------------------------------ ** PrintAry() -- Print the contents of the array (vector) **------------------------------------------------------ */ void PrintAry(float *ary, int ary_size) { int i; for (i=0; i<ary_size; i++) { printf("%.2f ", ary[i]); } printf("\n\n"); } void checkCUDAError(const char *msg) { hipError_t err = hipGetLastError(); if( hipSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) ); exit(EXIT_FAILURE); } }
963417fbbf953f2370d737d987487ca1b5a30c99.cu
/*----------------------------------------------------------- ** gaussian.cu -- The program is to solve a linear system Ax = b ** by using Gaussian Elimination. The algorithm on page 101 ** ("Foundations of Parallel Programming") is used. ** The sequential version is gaussian.c. This parallel ** implementation converts three independent for() loops ** into three Fans. Use the data file ge_3.dat to verify ** the correction of the output. ** ** Written by Andreas Kura, 02/15/95 ** Modified by Chong-wei Xu, 04/20/95 ** Modified by Chris Gregg for CUDA, 07/20/2009 **----------------------------------------------------------- */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include "cuda.h" #include <string.h> #define MAXBLOCKSIZE 512 int Size; float *a, *b, *finalVec; float *m; FILE *fp; void InitProblemOnce(char *filename); void InitPerRun(); void ForwardSub(); void BackSub(); void InitMat(float *ary, int nrow, int ncol); void InitAry(float *ary, int ary_size); void PrintMat(float *ary, int nrow, int ncolumn); void PrintAry(float *ary, int ary_size); void PrintDeviceProperties(); void checkCUDAError(const char *msg); unsigned int totalKernelTime = 0; int main(int argc, char *argv[]) { int verbose = 1; if (argc < 2) { printf("Usage: gaussian matrix.txt [-q]\n\n"); printf("-q (quiet) suppresses printing the matrix and result values.\n"); printf("The first line of the file contains the dimension of the matrix, n."); printf("The second line of the file is a newline.\n"); printf("The next n lines contain n tab separated values for the matrix."); printf("The next line of the file is a newline.\n"); printf("The next line of the file is a 1xn vector with tab separated values.\n"); printf("The next line of the file is a newline. (optional)\n"); printf("The final line of the file is the pre-computed solution. 
(optional)\n"); printf("Example: matrix4.txt:\n"); printf("4\n"); printf("\n"); printf("-0.6 -0.5 0.7 0.3\n"); printf("-0.3 -0.9 0.3 0.7\n"); printf("-0.4 -0.5 -0.3 -0.8\n"); printf("0.0 -0.1 0.2 0.9\n"); printf("\n"); printf("-0.85 -0.68 0.24 -0.53\n"); printf("\n"); printf("0.7 0.0 -0.4 -0.5\n"); exit(0); } //PrintDeviceProperties(); //char filename[100]; //sprintf(filename,"matrices/matrix%d.txt",size); InitProblemOnce(argv[1]); if (argc > 2) { if (!strcmp(argv[2],"-q")) verbose = 0; } //InitProblemOnce(filename); InitPerRun(); //begin timing struct timeval time_start; gettimeofday(&time_start, NULL); // run kernels ForwardSub(); //end timing struct timeval time_end; gettimeofday(&time_end, NULL); unsigned int time_total = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec); if (verbose) { printf("Matrix m is: \n"); PrintMat(m, Size, Size); printf("Matrix a is: \n"); PrintMat(a, Size, Size); printf("Array b is: \n"); PrintAry(b, Size); } BackSub(); if (verbose) { printf("The final solution is: \n"); PrintAry(finalVec,Size); } printf("\nTime total (including memory transfers)\t%f sec\n", time_total * 1e-6); printf("Time for CUDA kernels:\t%f sec\n",totalKernelTime * 1e-6); /*printf("%d,%d\n",size,time_total); fprintf(stderr,"%d,%d\n",size,time_total);*/ free(m); free(a); free(b); } /*------------------------------------------------------ ** PrintDeviceProperties **----------------------------------------------------- */ void PrintDeviceProperties(){ cudaDeviceProp deviceProp; int nDevCount = 0; cudaGetDeviceCount( &nDevCount ); printf( "Total Device found: %d", nDevCount ); for (int nDeviceIdx = 0; nDeviceIdx < nDevCount; ++nDeviceIdx ) { memset( &deviceProp, 0, sizeof(deviceProp)); if( cudaSuccess == cudaGetDeviceProperties(&deviceProp, nDeviceIdx)) { printf( "\nDevice Name \t\t - %s ", deviceProp.name ); printf( "\n**************************************"); printf( "\nTotal Global Memory\t\t\t - %lu KB", deviceProp.totalGlobalMem/1024 ); printf( "\nShared memory available per block \t - %lu KB", deviceProp.sharedMemPerBlock/1024 ); printf( "\nNumber of registers per thread block \t - %d", deviceProp.regsPerBlock ); printf( "\nWarp size in threads \t\t\t - %d", deviceProp.warpSize ); //printf( "\nMemory Pitch \t\t\t\t - %zu bytes", deviceProp.memPitch ); printf( "\nMaximum threads per block \t\t - %d", deviceProp.maxThreadsPerBlock ); printf( "\nMaximum Thread Dimension (block) \t - %d %d %d", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2] ); printf( "\nMaximum Thread Dimension (grid) \t - %d %d %d", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2] ); printf( "\nTotal constant memory \t\t\t - %zu bytes", deviceProp.totalConstMem ); printf( "\nCUDA ver \t\t\t\t - %d.%d", deviceProp.major, deviceProp.minor ); printf( "\nClock rate \t\t\t\t - %d KHz", deviceProp.clockRate ); //printf( "\nTexture Alignment \t\t\t - %zu bytes", deviceProp.textureAlignment ); //printf( "\nDevice Overlap \t\t\t\t - %s", deviceProp. deviceOverlap?"Allowed":"Not Allowed" ); printf( "\nNumber of Multi processors \t\t - %d\n\n", deviceProp.multiProcessorCount ); } else printf( "\n%s", cudaGetErrorString(cudaGetLastError())); } } /*------------------------------------------------------ ** InitProblemOnce -- Initialize all of matrices and ** vectors by opening a data file specified by the user. ** ** We used dynamic array *a, *b, and *m to allocate ** the memory storages. 
**------------------------------------------------------ */ void InitProblemOnce(char *filename) { //char *filename = argv[1]; //printf("Enter the data file name: "); //scanf("%s", filename); //printf("The file name is: %s\n", filename); fp = fopen(filename, "r"); fscanf(fp, "%d", &Size); a = (float *) malloc(Size * Size * sizeof(float)); InitMat(a, Size, Size); //printf("The input matrix a is:\n"); //PrintMat(a, Size, Size); b = (float *) malloc(Size * sizeof(float)); InitAry(b, Size); //printf("The input array b is:\n"); //PrintAry(b, Size); m = (float *) malloc(Size * Size * sizeof(float)); } /*------------------------------------------------------ ** InitPerRun() -- Initialize the contents of the ** multipier matrix **m **------------------------------------------------------ */ void InitPerRun() { int i; for (i=0; i<Size*Size; i++) *(m+i) = 0.0; } /*------------------------------------------------------- ** Fan1() -- Calculate multiplier matrix ** Pay attention to the index. Index i give the range ** which starts from 0 to range-1. The real values of ** the index should be adjust and related with the value ** of t which is defined on the ForwardSub(). **------------------------------------------------------- */ __global__ void Fan1(float *m_cuda, float *a_cuda, int size, int t){ int gid = threadIdx.x + blockIdx.x * blockDim.x; if(gid < size-1-t){ *(m_cuda+size*(gid + t + 1)+t) = *(a_cuda+size*(gid + t + 1) + t) / *(a_cuda + size * t + t); } } /*------------------------------------------------------- ** Fan2() -- Modify the matrix A into LUD **------------------------------------------------------- */ __global__ void Fan2(float *m_cuda, float *a_cuda, float *b_cuda,int Size, int j1, int t) { if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) return; if(threadIdx.y + blockIdx.y * blockDim.y >= Size-t) return; int xidx = blockIdx.x * blockDim.x + threadIdx.x; int yidx = blockIdx.y * blockDim.y + threadIdx.y; //printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y); a_cuda[Size*(xidx+1+t)+(yidx+t)] -= m_cuda[Size*(xidx+1+t)+t] * a_cuda[Size*t+(yidx+t)]; //a_cuda[xidx+1+t][yidx+t] -= m_cuda[xidx+1+t][t] * a_cuda[t][yidx+t]; if(yidx == 0){ //printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y); //printf("xidx:%d,yidx:%d\n",xidx,yidx); b_cuda[xidx+1+t] -= m_cuda[Size*(xidx+1+t)+(yidx+t)] * b_cuda[t]; } } /*------------------------------------------------------ ** ForwardSub() -- Forward substitution of Gaussian ** elimination. **------------------------------------------------------ */ void ForwardSub() { int t; float *m_cuda,*a_cuda,*b_cuda; // allocate memory on GPU cudaMalloc((void **) &m_cuda, Size * Size * sizeof(float)); cudaMalloc((void **) &a_cuda, Size * Size * sizeof(float)); cudaMalloc((void **) &b_cuda, Size * sizeof(float)); // copy memory to GPU cudaMemcpy(m_cuda, m, Size * Size * sizeof(float),cudaMemcpyHostToDevice ); cudaMemcpy(a_cuda, a, Size * Size * sizeof(float),cudaMemcpyHostToDevice ); cudaMemcpy(b_cuda, b, Size * sizeof(float),cudaMemcpyHostToDevice ); int block_size,grid_size; //if we have smaller work to do than the max block size, just run 1 block of that size block_size = (Size % MAXBLOCKSIZE == 0) ? MAXBLOCKSIZE : Size; grid_size = (Size/block_size) + (!(Size%block_size)? 
0:1); //block_size = 1; //grid_size = 1; //printf("1d grid size: %d\n",grid_size); dim3 dimBlock(block_size); dim3 dimGrid(grid_size); //dim3 dimGrid( (N/dimBlock.x) + (!(N%dimBlock.x)?0:1) ); int blockSize2d, gridSize2d; blockSize2d = 4; gridSize2d = (Size/blockSize2d) + (!(Size%blockSize2d?0:1)); dim3 dimBlockXY(blockSize2d,blockSize2d); dim3 dimGridXY(gridSize2d,gridSize2d); printf("sizing info: %dx%d blocks, in a %dx%d grid\n",blockSize2d,blockSize2d,gridSize2d,gridSize2d); // begin timing kernels struct timeval time_start; gettimeofday(&time_start, NULL); for (t=0; t<(Size-1); t++) { Fan1<<<dimGrid,dimBlock>>>(m_cuda,a_cuda,Size,t); cudaDeviceSynchronize(); Fan2<<<dimGridXY,dimBlockXY>>>(m_cuda,a_cuda,b_cuda,Size,Size-t,t); cudaDeviceSynchronize(); checkCUDAError("Fan2"); } // end timing kernels struct timeval time_end; gettimeofday(&time_end, NULL); totalKernelTime = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec); // copy memory back to CPU cudaMemcpy(m, m_cuda, Size * Size * sizeof(float),cudaMemcpyDeviceToHost ); cudaMemcpy(a, a_cuda, Size * Size * sizeof(float),cudaMemcpyDeviceToHost ); cudaMemcpy(b, b_cuda, Size * sizeof(float),cudaMemcpyDeviceToHost ); cudaFree(m_cuda); cudaFree(a_cuda); cudaFree(b_cuda); } /*------------------------------------------------------ ** BackSub() -- Backward substitution **------------------------------------------------------ */ void BackSub() { // create a new vector to hold the final answer finalVec = (float *) malloc(Size * sizeof(float)); // solve "bottom up" int i,j; for(i=0;i<Size;i++){ finalVec[Size-i-1]=b[Size-i-1]; for(j=0;j<i;j++) { finalVec[Size-i-1]-=*(a+Size*(Size-i-1)+(Size-j-1)) * finalVec[Size-j-1]; } finalVec[Size-i-1]=finalVec[Size-i-1]/ *(a+Size*(Size-i-1)+(Size-i-1)); } } void InitMat(float *ary, int nrow, int ncol) { int i, j; for (i=0; i<nrow; i++) { for (j=0; j<ncol; j++) { fscanf(fp, "%f", ary+Size*i+j); } } } /*------------------------------------------------------ ** PrintMat() -- Print the contents of the matrix **------------------------------------------------------ */ void PrintMat(float *ary, int nrow, int ncol) { int i, j; for (i=0; i<nrow; i++) { for (j=0; j<ncol; j++) { printf("%8.2f ", *(ary+Size*i+j)); } printf("\n"); } printf("\n"); } /*------------------------------------------------------ ** InitAry() -- Initialize the array (vector) by reading ** data from the data file **------------------------------------------------------ */ void InitAry(float *ary, int ary_size) { int i; for (i=0; i<ary_size; i++) { fscanf(fp, "%f", &ary[i]); } } /*------------------------------------------------------ ** PrintAry() -- Print the contents of the array (vector) **------------------------------------------------------ */ void PrintAry(float *ary, int ary_size) { int i; for (i=0; i<ary_size; i++) { printf("%.2f ", ary[i]); } printf("\n\n"); } void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) ); exit(EXIT_FAILURE); } }
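For reference, Fan1 computes the multipliers m[i][t] = a[i][t] / a[t][t] for the rows below pivot t, and Fan2 subtracts m[i][t] times the pivot row from row i of a (and the corresponding amount from b), so the t loop in ForwardSub is plain forward elimination without pivoting. A sequential sketch of the same computation, useful for checking the GPU output on small matrices (forward_sub_cpu is a helper name introduced here, not part of the benchmark):

// Sequential forward elimination matching what Fan1/Fan2 do on the GPU
// (no pivoting, row-major a of dimension n x n).  Intended only as a
// host-side reference for small test matrices.
void forward_sub_cpu(float* a, float* b, float* m, int n)
{
    for (int t = 0; t < n - 1; ++t) {                        // pivot column, as in ForwardSub()
        for (int i = t + 1; i < n; ++i) {
            m[n * i + t] = a[n * i + t] / a[n * t + t];      // Fan1: multiplier column
        }
        for (int i = t + 1; i < n; ++i) {                    // Fan2: update trailing submatrix
            for (int j = t; j < n; ++j) {
                a[n * i + j] -= m[n * i + t] * a[n * t + j];
            }
            b[i] -= m[n * i + t] * b[t];
        }
    }
}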
1653149f33c164fece4201e08e0bd91dee8f6473.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/column/column_factories.hpp> #include <cudf/detail/iterator.cuh> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/sorting.hpp> #include <cudf/sorting.hpp> #include <cudf/table/row_operators.cuh> #include <cudf/table/table.hpp> #include <cudf/table/table_device_view.cuh> #include <cudf/table/table_view.hpp> #include <cudf/utilities/error.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/iterator/discard_iterator.h> #include <thrust/iterator/permutation_iterator.h> #include <thrust/sequence.h> namespace cudf { namespace detail { namespace { // Functor to identify unique elements in a sorted order table/column template <typename ReturnType, typename Iterator> struct unique_comparator { unique_comparator(table_device_view device_table, Iterator const sorted_order, bool has_nulls) : comparator(nullate::DYNAMIC{has_nulls}, device_table, device_table, null_equality::EQUAL), permute(sorted_order) { } __device__ ReturnType operator()(size_type index) const noexcept { return index == 0 || not comparator(permute[index], permute[index - 1]); }; private: row_equality_comparator<nullate::DYNAMIC> comparator; Iterator const permute; }; // Assign rank from 1 to n unique values. Equal values get same rank value. rmm::device_uvector<size_type> sorted_dense_rank(column_view input_col, column_view sorted_order_view, rmm::cuda_stream_view stream) { auto device_table = table_device_view::create(table_view{{input_col}}, stream); auto const input_size = input_col.size(); rmm::device_uvector<size_type> dense_rank_sorted(input_size, stream); auto sorted_index_order = thrust::make_permutation_iterator( sorted_order_view.begin<size_type>(), thrust::make_counting_iterator<size_type>(0)); auto conv = unique_comparator<size_type, decltype(sorted_index_order)>( *device_table, sorted_index_order, input_col.has_nulls()); auto unique_it = cudf::detail::make_counting_transform_iterator(0, conv); thrust::inclusive_scan( rmm::exec_policy(stream), unique_it, unique_it + input_size, dense_rank_sorted.data()); return dense_rank_sorted; } /** * @brief Breaks the ties among equal value groups using binary operator and * transform this tied value to final rank. * * @param dense_rank dense rank of sorted input column (acts as key for value * groups). * @param tie_iter iterator of rank to break ties among equal value groups. * @param sorted_order_view sorted order indices of input column * @param rank_iter output rank iterator * @param tie_breaker tie breaking operator. For example, maximum & minimum. * @param transformer transform after tie breaking (useful for average). * @param stream CUDA stream used for device memory operations and kernel launches. 
*/ template <typename TieType, typename outputIterator, typename TieBreaker, typename Transformer, typename TieIterator> void tie_break_ranks_transform(cudf::device_span<size_type const> dense_rank_sorted, TieIterator tie_iter, column_view const& sorted_order_view, outputIterator rank_iter, TieBreaker tie_breaker, Transformer transformer, rmm::cuda_stream_view stream) { auto const input_size = sorted_order_view.size(); // algorithm: reduce_by_key(dense_rank, 1, n, reduction_tie_breaker) // reduction_tie_breaker = min, max, min_count rmm::device_uvector<TieType> tie_sorted(sorted_order_view.size(), stream); thrust::reduce_by_key(rmm::exec_policy(stream), dense_rank_sorted.begin(), dense_rank_sorted.end(), tie_iter, thrust::make_discard_iterator(), tie_sorted.begin(), thrust::equal_to{}, tie_breaker); auto sorted_tied_rank = thrust::make_transform_iterator( dense_rank_sorted.begin(), [tied_rank = tie_sorted.begin(), transformer] __device__(auto dense_pos) { return transformer(tied_rank[dense_pos - 1]); }); thrust::scatter(rmm::exec_policy(stream), sorted_tied_rank, sorted_tied_rank + input_size, sorted_order_view.begin<size_type>(), rank_iter); } template <typename outputType> void rank_first(column_view sorted_order_view, mutable_column_view rank_mutable_view, rmm::cuda_stream_view stream) { // stable sort order ranking (no ties) thrust::scatter(rmm::exec_policy(stream), thrust::make_counting_iterator<size_type>(1), thrust::make_counting_iterator<size_type>(rank_mutable_view.size() + 1), sorted_order_view.begin<size_type>(), rank_mutable_view.begin<outputType>()); } template <typename outputType> void rank_dense(cudf::device_span<size_type const> dense_rank_sorted, column_view sorted_order_view, mutable_column_view rank_mutable_view, rmm::cuda_stream_view stream) { // All equal values have same rank and rank always increases by 1 between groups thrust::scatter(rmm::exec_policy(stream), dense_rank_sorted.begin(), dense_rank_sorted.end(), sorted_order_view.begin<size_type>(), rank_mutable_view.begin<outputType>()); } template <typename outputType> void rank_min(cudf::device_span<size_type const> group_keys, column_view sorted_order_view, mutable_column_view rank_mutable_view, rmm::cuda_stream_view stream) { // min of first in the group // All equal values have min of ranks among them. // algorithm: reduce_by_key(dense_rank, 1, n, min), scatter tie_break_ranks_transform<size_type>(group_keys, thrust::make_counting_iterator<size_type>(1), sorted_order_view, rank_mutable_view.begin<outputType>(), thrust::minimum{}, thrust::identity{}, stream); } template <typename outputType> void rank_max(cudf::device_span<size_type const> group_keys, column_view sorted_order_view, mutable_column_view rank_mutable_view, rmm::cuda_stream_view stream) { // max of first in the group // All equal values have max of ranks among them. // algorithm: reduce_by_key(dense_rank, 1, n, max), scatter tie_break_ranks_transform<size_type>(group_keys, thrust::make_counting_iterator<size_type>(1), sorted_order_view, rank_mutable_view.begin<outputType>(), thrust::maximum{}, thrust::identity{}, stream); } // Returns index, count template <typename T> struct index_counter { __device__ T operator()(size_type i) { return T{i, 1}; } }; void rank_average(cudf::device_span<size_type const> group_keys, column_view sorted_order_view, mutable_column_view rank_mutable_view, rmm::cuda_stream_view stream) { // k, k+1, .. 
k+n-1 // average = (n*k+ n*(n-1)/2)/n // average = k + (n-1)/2 = min + (count-1)/2 // Calculate Min of ranks and Count of equal values // algorithm: reduce_by_key(dense_rank, 1, n, min_count) // transform(min+(count-1)/2), scatter using MinCount = thrust::pair<size_type, size_type>; tie_break_ranks_transform<MinCount>( group_keys, // Use device functor with return type. Cannot use device lambda due to limitation. // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#extended-lambda-restrictions cudf::detail::make_counting_transform_iterator(1, index_counter<MinCount>{}), sorted_order_view, rank_mutable_view.begin<double>(), [] __device__(auto rank_count1, auto rank_count2) { return MinCount{::min(rank_count1.first, rank_count2.first), rank_count1.second + rank_count2.second}; }, [] __device__(MinCount minrank_count) { // min+(count-1)/2 return static_cast<double>(thrust::get<0>(minrank_count)) + (static_cast<double>(thrust::get<1>(minrank_count)) - 1) / 2.0; }, stream); } } // anonymous namespace std::unique_ptr<column> rank(column_view const& input, rank_method method, order column_order, null_policy null_handling, null_order null_precedence, bool percentage, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { data_type const output_type = (percentage or method == rank_method::AVERAGE) ? data_type(type_id::FLOAT64) : data_type(type_to_id<size_type>()); std::unique_ptr<column> rank_column = [&null_handling, &output_type, &input, &stream, &mr] { // na_option=keep assign NA to NA values if (null_handling == null_policy::EXCLUDE) return make_numeric_column(output_type, input.size(), detail::copy_bitmask(input, stream, mr), input.null_count(), stream, mr); else return make_numeric_column(output_type, input.size(), mask_state::UNALLOCATED, stream, mr); }(); auto rank_mutable_view = rank_column->mutable_view(); std::unique_ptr<column> sorted_order = (method == rank_method::FIRST) ? 
detail::stable_sorted_order( table_view{{input}}, {column_order}, {null_precedence}, stream, mr) : detail::sorted_order(table_view{{input}}, {column_order}, {null_precedence}, stream, mr); column_view sorted_order_view = sorted_order->view(); // dense: All equal values have same rank and rank always increases by 1 between groups // acts as key for min, max, average to denote equal value groups rmm::device_uvector<size_type> const dense_rank_sorted = [&method, &input, &sorted_order_view, &stream] { if (method != rank_method::FIRST) return sorted_dense_rank(input, sorted_order_view, stream); else return rmm::device_uvector<size_type>(0, stream); }(); if (output_type.id() == type_id::FLOAT64) { switch (method) { case rank_method::FIRST: rank_first<double>(sorted_order_view, rank_mutable_view, stream); break; case rank_method::DENSE: rank_dense<double>(dense_rank_sorted, sorted_order_view, rank_mutable_view, stream); break; case rank_method::MIN: rank_min<double>(dense_rank_sorted, sorted_order_view, rank_mutable_view, stream); break; case rank_method::MAX: rank_max<double>(dense_rank_sorted, sorted_order_view, rank_mutable_view, stream); break; case rank_method::AVERAGE: rank_average(dense_rank_sorted, sorted_order_view, rank_mutable_view, stream); break; default: CUDF_FAIL("Unexpected rank_method for rank()"); } } else { switch (method) { case rank_method::FIRST: rank_first<size_type>(sorted_order_view, rank_mutable_view, stream); break; case rank_method::DENSE: rank_dense<size_type>(dense_rank_sorted, sorted_order_view, rank_mutable_view, stream); break; case rank_method::MIN: rank_min<size_type>(dense_rank_sorted, sorted_order_view, rank_mutable_view, stream); break; case rank_method::MAX: rank_max<size_type>(dense_rank_sorted, sorted_order_view, rank_mutable_view, stream); break; case rank_method::AVERAGE: rank_average(dense_rank_sorted, sorted_order_view, rank_mutable_view, stream); break; default: CUDF_FAIL("Unexpected rank_method for rank()"); } } // pct inplace transform if (percentage) { auto rank_iter = rank_mutable_view.begin<double>(); size_type const count = (null_handling == null_policy::EXCLUDE) ? input.size() - input.null_count() : input.size(); auto drs = dense_rank_sorted.data(); bool const is_dense = (method == rank_method::DENSE); thrust::transform(rmm::exec_policy(stream), rank_iter, rank_iter + input.size(), rank_iter, [is_dense, drs, count] __device__(double r) -> double { return is_dense ? r / drs[count - 1] : r / count; }); } return rank_column; } } // namespace detail std::unique_ptr<column> rank(column_view const& input, rank_method method, order column_order, null_policy null_handling, null_order null_precedence, bool percentage, rmm::mr::device_memory_resource* mr) { return detail::rank(input, method, column_order, null_handling, null_precedence, percentage, rmm::cuda_stream_default, mr); } } // namespace cudf
1653149f33c164fece4201e08e0bd91dee8f6473.cu
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/column/column_factories.hpp> #include <cudf/detail/iterator.cuh> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/sorting.hpp> #include <cudf/sorting.hpp> #include <cudf/table/row_operators.cuh> #include <cudf/table/table.hpp> #include <cudf/table/table_device_view.cuh> #include <cudf/table/table_view.hpp> #include <cudf/utilities/error.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/iterator/discard_iterator.h> #include <thrust/iterator/permutation_iterator.h> #include <thrust/sequence.h> namespace cudf { namespace detail { namespace { // Functor to identify unique elements in a sorted order table/column template <typename ReturnType, typename Iterator> struct unique_comparator { unique_comparator(table_device_view device_table, Iterator const sorted_order, bool has_nulls) : comparator(nullate::DYNAMIC{has_nulls}, device_table, device_table, null_equality::EQUAL), permute(sorted_order) { } __device__ ReturnType operator()(size_type index) const noexcept { return index == 0 || not comparator(permute[index], permute[index - 1]); }; private: row_equality_comparator<nullate::DYNAMIC> comparator; Iterator const permute; }; // Assign rank from 1 to n unique values. Equal values get same rank value. rmm::device_uvector<size_type> sorted_dense_rank(column_view input_col, column_view sorted_order_view, rmm::cuda_stream_view stream) { auto device_table = table_device_view::create(table_view{{input_col}}, stream); auto const input_size = input_col.size(); rmm::device_uvector<size_type> dense_rank_sorted(input_size, stream); auto sorted_index_order = thrust::make_permutation_iterator( sorted_order_view.begin<size_type>(), thrust::make_counting_iterator<size_type>(0)); auto conv = unique_comparator<size_type, decltype(sorted_index_order)>( *device_table, sorted_index_order, input_col.has_nulls()); auto unique_it = cudf::detail::make_counting_transform_iterator(0, conv); thrust::inclusive_scan( rmm::exec_policy(stream), unique_it, unique_it + input_size, dense_rank_sorted.data()); return dense_rank_sorted; } /** * @brief Breaks the ties among equal value groups using binary operator and * transform this tied value to final rank. * * @param dense_rank dense rank of sorted input column (acts as key for value * groups). * @param tie_iter iterator of rank to break ties among equal value groups. * @param sorted_order_view sorted order indices of input column * @param rank_iter output rank iterator * @param tie_breaker tie breaking operator. For example, maximum & minimum. * @param transformer transform after tie breaking (useful for average). * @param stream CUDA stream used for device memory operations and kernel launches. 
*/ template <typename TieType, typename outputIterator, typename TieBreaker, typename Transformer, typename TieIterator> void tie_break_ranks_transform(cudf::device_span<size_type const> dense_rank_sorted, TieIterator tie_iter, column_view const& sorted_order_view, outputIterator rank_iter, TieBreaker tie_breaker, Transformer transformer, rmm::cuda_stream_view stream) { auto const input_size = sorted_order_view.size(); // algorithm: reduce_by_key(dense_rank, 1, n, reduction_tie_breaker) // reduction_tie_breaker = min, max, min_count rmm::device_uvector<TieType> tie_sorted(sorted_order_view.size(), stream); thrust::reduce_by_key(rmm::exec_policy(stream), dense_rank_sorted.begin(), dense_rank_sorted.end(), tie_iter, thrust::make_discard_iterator(), tie_sorted.begin(), thrust::equal_to{}, tie_breaker); auto sorted_tied_rank = thrust::make_transform_iterator( dense_rank_sorted.begin(), [tied_rank = tie_sorted.begin(), transformer] __device__(auto dense_pos) { return transformer(tied_rank[dense_pos - 1]); }); thrust::scatter(rmm::exec_policy(stream), sorted_tied_rank, sorted_tied_rank + input_size, sorted_order_view.begin<size_type>(), rank_iter); } template <typename outputType> void rank_first(column_view sorted_order_view, mutable_column_view rank_mutable_view, rmm::cuda_stream_view stream) { // stable sort order ranking (no ties) thrust::scatter(rmm::exec_policy(stream), thrust::make_counting_iterator<size_type>(1), thrust::make_counting_iterator<size_type>(rank_mutable_view.size() + 1), sorted_order_view.begin<size_type>(), rank_mutable_view.begin<outputType>()); } template <typename outputType> void rank_dense(cudf::device_span<size_type const> dense_rank_sorted, column_view sorted_order_view, mutable_column_view rank_mutable_view, rmm::cuda_stream_view stream) { // All equal values have same rank and rank always increases by 1 between groups thrust::scatter(rmm::exec_policy(stream), dense_rank_sorted.begin(), dense_rank_sorted.end(), sorted_order_view.begin<size_type>(), rank_mutable_view.begin<outputType>()); } template <typename outputType> void rank_min(cudf::device_span<size_type const> group_keys, column_view sorted_order_view, mutable_column_view rank_mutable_view, rmm::cuda_stream_view stream) { // min of first in the group // All equal values have min of ranks among them. // algorithm: reduce_by_key(dense_rank, 1, n, min), scatter tie_break_ranks_transform<size_type>(group_keys, thrust::make_counting_iterator<size_type>(1), sorted_order_view, rank_mutable_view.begin<outputType>(), thrust::minimum{}, thrust::identity{}, stream); } template <typename outputType> void rank_max(cudf::device_span<size_type const> group_keys, column_view sorted_order_view, mutable_column_view rank_mutable_view, rmm::cuda_stream_view stream) { // max of first in the group // All equal values have max of ranks among them. // algorithm: reduce_by_key(dense_rank, 1, n, max), scatter tie_break_ranks_transform<size_type>(group_keys, thrust::make_counting_iterator<size_type>(1), sorted_order_view, rank_mutable_view.begin<outputType>(), thrust::maximum{}, thrust::identity{}, stream); } // Returns index, count template <typename T> struct index_counter { __device__ T operator()(size_type i) { return T{i, 1}; } }; void rank_average(cudf::device_span<size_type const> group_keys, column_view sorted_order_view, mutable_column_view rank_mutable_view, rmm::cuda_stream_view stream) { // k, k+1, .. 
k+n-1 // average = (n*k+ n*(n-1)/2)/n // average = k + (n-1)/2 = min + (count-1)/2 // Calculate Min of ranks and Count of equal values // algorithm: reduce_by_key(dense_rank, 1, n, min_count) // transform(min+(count-1)/2), scatter using MinCount = thrust::pair<size_type, size_type>; tie_break_ranks_transform<MinCount>( group_keys, // Use device functor with return type. Cannot use device lambda due to limitation. // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#extended-lambda-restrictions cudf::detail::make_counting_transform_iterator(1, index_counter<MinCount>{}), sorted_order_view, rank_mutable_view.begin<double>(), [] __device__(auto rank_count1, auto rank_count2) { return MinCount{std::min(rank_count1.first, rank_count2.first), rank_count1.second + rank_count2.second}; }, [] __device__(MinCount minrank_count) { // min+(count-1)/2 return static_cast<double>(thrust::get<0>(minrank_count)) + (static_cast<double>(thrust::get<1>(minrank_count)) - 1) / 2.0; }, stream); } } // anonymous namespace std::unique_ptr<column> rank(column_view const& input, rank_method method, order column_order, null_policy null_handling, null_order null_precedence, bool percentage, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { data_type const output_type = (percentage or method == rank_method::AVERAGE) ? data_type(type_id::FLOAT64) : data_type(type_to_id<size_type>()); std::unique_ptr<column> rank_column = [&null_handling, &output_type, &input, &stream, &mr] { // na_option=keep assign NA to NA values if (null_handling == null_policy::EXCLUDE) return make_numeric_column(output_type, input.size(), detail::copy_bitmask(input, stream, mr), input.null_count(), stream, mr); else return make_numeric_column(output_type, input.size(), mask_state::UNALLOCATED, stream, mr); }(); auto rank_mutable_view = rank_column->mutable_view(); std::unique_ptr<column> sorted_order = (method == rank_method::FIRST) ? 
detail::stable_sorted_order( table_view{{input}}, {column_order}, {null_precedence}, stream, mr) : detail::sorted_order(table_view{{input}}, {column_order}, {null_precedence}, stream, mr); column_view sorted_order_view = sorted_order->view(); // dense: All equal values have same rank and rank always increases by 1 between groups // acts as key for min, max, average to denote equal value groups rmm::device_uvector<size_type> const dense_rank_sorted = [&method, &input, &sorted_order_view, &stream] { if (method != rank_method::FIRST) return sorted_dense_rank(input, sorted_order_view, stream); else return rmm::device_uvector<size_type>(0, stream); }(); if (output_type.id() == type_id::FLOAT64) { switch (method) { case rank_method::FIRST: rank_first<double>(sorted_order_view, rank_mutable_view, stream); break; case rank_method::DENSE: rank_dense<double>(dense_rank_sorted, sorted_order_view, rank_mutable_view, stream); break; case rank_method::MIN: rank_min<double>(dense_rank_sorted, sorted_order_view, rank_mutable_view, stream); break; case rank_method::MAX: rank_max<double>(dense_rank_sorted, sorted_order_view, rank_mutable_view, stream); break; case rank_method::AVERAGE: rank_average(dense_rank_sorted, sorted_order_view, rank_mutable_view, stream); break; default: CUDF_FAIL("Unexpected rank_method for rank()"); } } else { switch (method) { case rank_method::FIRST: rank_first<size_type>(sorted_order_view, rank_mutable_view, stream); break; case rank_method::DENSE: rank_dense<size_type>(dense_rank_sorted, sorted_order_view, rank_mutable_view, stream); break; case rank_method::MIN: rank_min<size_type>(dense_rank_sorted, sorted_order_view, rank_mutable_view, stream); break; case rank_method::MAX: rank_max<size_type>(dense_rank_sorted, sorted_order_view, rank_mutable_view, stream); break; case rank_method::AVERAGE: rank_average(dense_rank_sorted, sorted_order_view, rank_mutable_view, stream); break; default: CUDF_FAIL("Unexpected rank_method for rank()"); } } // pct inplace transform if (percentage) { auto rank_iter = rank_mutable_view.begin<double>(); size_type const count = (null_handling == null_policy::EXCLUDE) ? input.size() - input.null_count() : input.size(); auto drs = dense_rank_sorted.data(); bool const is_dense = (method == rank_method::DENSE); thrust::transform(rmm::exec_policy(stream), rank_iter, rank_iter + input.size(), rank_iter, [is_dense, drs, count] __device__(double r) -> double { return is_dense ? r / drs[count - 1] : r / count; }); } return rank_column; } } // namespace detail std::unique_ptr<column> rank(column_view const& input, rank_method method, order column_order, null_policy null_handling, null_order null_precedence, bool percentage, rmm::mr::device_memory_resource* mr) { return detail::rank(input, method, column_order, null_handling, null_precedence, percentage, rmm::cuda_stream_default, mr); } } // namespace cudf
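/*
 * Illustrative host-side sketch (not part of libcudf): shows, on a tiny
 * already-sorted example, how the formulas in the comments above combine --
 * dense rank as an inclusive scan of "new group" flags, per-group min/count
 * as produced by reduce_by_key, and average = min + (count - 1) / 2 for
 * tied values.  Plain C++ only; no device code or thrust required.
 */
#include <cstdio>
#include <vector>

int main()
{
    // values already in sorted order, with ties
    std::vector<int> sorted = {10, 20, 20, 20, 30};
    int n = static_cast<int>(sorted.size());

    // dense rank: 1 + inclusive scan of (value differs from previous)
    std::vector<int> dense(n);
    dense[0] = 1;
    for (int i = 1; i < n; i++)
        dense[i] = dense[i - 1] + (sorted[i] != sorted[i - 1] ? 1 : 0);

    // per-group min rank and count (the reduce_by_key step above)
    std::vector<int> group_min(n + 1, 0), group_cnt(n + 1, 0);
    for (int i = 0; i < n; i++) {
        if (group_cnt[dense[i]] == 0) group_min[dense[i]] = i + 1;  // 1-based rank
        group_cnt[dense[i]]++;
    }

    for (int i = 0; i < n; i++) {
        int    g   = dense[i];
        double avg = group_min[g] + (group_cnt[g] - 1) / 2.0;       // min + (count-1)/2
        std::printf("value %2d  dense %d  min %d  max %d  average %.1f\n",
                    sorted[i], g, group_min[g],
                    group_min[g] + group_cnt[g] - 1, avg);
    }
    return 0;
}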
c9d17facc90ad85573af1f251bef9e78c0014a3d.hip
// !!! This is a file automatically generated by hipify!!! // random generator includes #include <thrust/random/linear_congruential_engine.h> #include <thrust/random/xor_combine_engine.h> #include <thrust/random.h> #include <hiprand/hiprand_kernel.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/for_each.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/transform.h> #include <thrust/sequence.h> #include <thrust/copy.h> #include <thrust/fill.h> #include <thrust/replace.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> #include <thrust/device_ptr.h> #include <thrust/transform_reduce.h> #include <thrust/binary_search.h> #include <thrust/adjacent_difference.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/counting_iterator.h> #include <map> #include <iostream> #include <cstdlib> #include <cmath> #include <math.h> #include <string> #include <boost/math/tools/roots.hpp> #include <thrust/tuple.h> #include "STE_DataStructures.cuh" #include <vector> // to write 6-vector to screen // __host__ std::ostream& operator<< (std::ostream& os, const float6& p) // { // os << std::setw(15) << "x" << std::setw(15) << "y" << std::setw(15) << "z" << std::endl; // os << std::setw(15) << p.x << std::setw(15) << p.px << std::setw(15) << p.y << std::endl; // os << std::setw(15) << p.py << std::setw(15) << p.t <<std::setw(15) << p.delta << std::endl;; // return os; // }; // // to write 2-vector to screen // __host__ std::ostream& operator<< (std::ostream& os, const float2& p) // { // os << std::setw(21) << p.x << std::setw(21) << p.y; // // os << printf("%.16f",p.x) << "\t" << printf("%.16f",p.y) << std::endl; // // os << printf("%.16f \t %.16f\n",p.x,p.y); // return os; // };
c9d17facc90ad85573af1f251bef9e78c0014a3d.cu
// random generator includes #include <thrust/random/linear_congruential_engine.h> #include <thrust/random/xor_combine_engine.h> #include <thrust/random.h> #include <curand_kernel.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/for_each.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/transform.h> #include <thrust/sequence.h> #include <thrust/copy.h> #include <thrust/fill.h> #include <thrust/replace.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> #include <thrust/device_ptr.h> #include <thrust/transform_reduce.h> #include <thrust/binary_search.h> #include <thrust/adjacent_difference.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/counting_iterator.h> #include <map> #include <iostream> #include <cstdlib> #include <cmath> #include <math.h> #include <string> #include <boost/math/tools/roots.hpp> #include <thrust/tuple.h> #include "STE_DataStructures.cuh" #include <vector> // to write 6-vector to screen // __host__ std::ostream& operator<< (std::ostream& os, const float6& p) // { // os << std::setw(15) << "x" << std::setw(15) << "y" << std::setw(15) << "z" << std::endl; // os << std::setw(15) << p.x << std::setw(15) << p.px << std::setw(15) << p.y << std::endl; // os << std::setw(15) << p.py << std::setw(15) << p.t <<std::setw(15) << p.delta << std::endl;; // return os; // }; // // to write 2-vector to screen // __host__ std::ostream& operator<< (std::ostream& os, const float2& p) // { // os << std::setw(21) << p.x << std::setw(21) << p.y; // // os << printf("%.16f",p.x) << "\t" << printf("%.16f",p.y) << std::endl; // // os << printf("%.16f \t %.16f\n",p.x,p.y); // return os; // };
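/*
 * Illustrative sketch (not part of the original source): a working variant of
 * the stream operator that is commented out above, assuming float6 is declared
 * in STE_DataStructures.cuh with fields x, px, y, py, t, delta as the comment
 * suggests.  The struct name here is hypothetical and stands in for that type.
 */
#include <iomanip>
#include <iostream>

// hypothetical layout matching the commented-out printer above
struct float6_example { float x, px, y, py, t, delta; };

std::ostream& operator<<(std::ostream& os, const float6_example& p)
{
    os << std::setw(15) << p.x  << std::setw(15) << p.px
       << std::setw(15) << p.y  << std::setw(15) << p.py
       << std::setw(15) << p.t  << std::setw(15) << p.delta << '\n';
    return os;
}

int main()
{
    float6_example p = {1.f, 0.1f, 2.f, 0.2f, 3.f, 0.01f};
    std::cout << p;   // prints the six phase-space coordinates in columns
    return 0;
}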
514cdca217ba7926c4fc7080aedbb4cabfb8e21b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // // Copyright (c) 2010, Paul Furgale, Chi Hay Tong // // The original code was written by Paul Furgale and Chi Hay Tong // and later optimized and prepared for integration into OpenCV by Itseez. 
// //M*/ #include <thrust/sort.h> #include "opencv2/gpu/device/common.hpp" #include "opencv2/gpu/device/utility.hpp" #include "opencv2/gpu/device/functional.hpp" namespace cv { namespace gpu { namespace device { namespace orb { //////////////////////////////////////////////////////////////////////////////////////////////////////// // cull int cull_gpu(int* loc, float* response, int size, int n_points) { thrust::device_ptr<int> loc_ptr(loc); thrust::device_ptr<float> response_ptr(response); thrust::sort_by_key(response_ptr, response_ptr + size, loc_ptr, thrust::greater<float>()); return n_points; } //////////////////////////////////////////////////////////////////////////////////////////////////////// // HarrisResponses __global__ void HarrisResponses(const PtrStepb img, const short2* loc_, float* response, const int npoints, const int blockSize, const float harris_k) { __shared__ int smem[8 * 32]; volatile int* srow = smem + threadIdx.y * blockDim.x; const int ptidx = blockIdx.x * blockDim.y + threadIdx.y; if (ptidx < npoints) { const short2 loc = loc_[ptidx]; const int r = blockSize / 2; const int x0 = loc.x - r; const int y0 = loc.y - r; int a = 0, b = 0, c = 0; for (int ind = threadIdx.x; ind < blockSize * blockSize; ind += blockDim.x) { const int i = ind / blockSize; const int j = ind % blockSize; int Ix = (img(y0 + i, x0 + j + 1) - img(y0 + i, x0 + j - 1)) * 2 + (img(y0 + i - 1, x0 + j + 1) - img(y0 + i - 1, x0 + j - 1)) + (img(y0 + i + 1, x0 + j + 1) - img(y0 + i + 1, x0 + j - 1)); int Iy = (img(y0 + i + 1, x0 + j) - img(y0 + i - 1, x0 + j)) * 2 + (img(y0 + i + 1, x0 + j - 1) - img(y0 + i - 1, x0 + j - 1)) + (img(y0 + i + 1, x0 + j + 1) - img(y0 + i - 1, x0 + j + 1)); a += Ix * Ix; b += Iy * Iy; c += Ix * Iy; } reduce<32>(srow, a, threadIdx.x, plus<volatile int>()); reduce<32>(srow, b, threadIdx.x, plus<volatile int>()); reduce<32>(srow, c, threadIdx.x, plus<volatile int>()); if (threadIdx.x == 0) { float scale = (1 << 2) * blockSize * 255.0f; scale = 1.0f / scale; const float scale_sq_sq = scale * scale * scale * scale; response[ptidx] = ((float)a * b - (float)c * c - harris_k * ((float)a + b) * ((float)a + b)) * scale_sq_sq; } } } void HarrisResponses_gpu(DevMem2Db img, const short2* loc, float* response, const int npoints, int blockSize, float harris_k, hipStream_t stream) { dim3 block(32, 8); dim3 grid; grid.x = divUp(npoints, block.y); hipLaunchKernelGGL(( HarrisResponses), dim3(grid), dim3(block), 0, stream, img, loc, response, npoints, blockSize, harris_k); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////////////////////////////////////// // IC_Angle __constant__ int c_u_max[32]; void loadUMax(const int* u_max, int count) { cudaSafeCall( hipMemcpyToSymbol(c_u_max, u_max, count * sizeof(int)) ); } __global__ void IC_Angle(const PtrStepb image, const short2* loc_, float* angle, const int npoints, const int half_k) { __shared__ int smem[8 * 32]; volatile int* srow = smem + threadIdx.y * blockDim.x; const int ptidx = blockIdx.x * blockDim.y + threadIdx.y; if (ptidx < npoints) { int m_01 = 0, m_10 = 0; const short2 loc = loc_[ptidx]; // Treat the center line differently, v=0 for (int u = threadIdx.x - half_k; u <= half_k; u += blockDim.x) m_10 += u * image(loc.y, loc.x + u); reduce<32>(srow, m_10, threadIdx.x, plus<volatile int>()); for (int v = 1; v <= half_k; ++v) { // Proceed over the two lines int v_sum = 0; int m_sum = 0; const int d = c_u_max[v]; for (int u = 
threadIdx.x - d; u <= d; u += blockDim.x) { int val_plus = image(loc.y + v, loc.x + u); int val_minus = image(loc.y - v, loc.x + u); v_sum += (val_plus - val_minus); m_sum += u * (val_plus + val_minus); } reduce<32>(srow, v_sum, threadIdx.x, plus<volatile int>()); reduce<32>(srow, m_sum, threadIdx.x, plus<volatile int>()); m_10 += m_sum; m_01 += v * v_sum; } if (threadIdx.x == 0) { float kp_dir = ::atan2f((float)m_01, (float)m_10); kp_dir += (kp_dir < 0) * (2.0f * CV_PI); kp_dir *= 180.0f / CV_PI; angle[ptidx] = kp_dir; } } } void IC_Angle_gpu(DevMem2Db image, const short2* loc, float* angle, int npoints, int half_k, hipStream_t stream) { dim3 block(32, 8); dim3 grid; grid.x = divUp(npoints, block.y); hipLaunchKernelGGL(( IC_Angle), dim3(grid), dim3(block), 0, stream, image, loc, angle, npoints, half_k); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////////////////////////////////////// // computeOrbDescriptor template <int WTA_K> struct OrbDescriptor; #define GET_VALUE(idx) \ img(loc.y + __float2int_rn(pattern_x[idx] * sina + pattern_y[idx] * cosa), \ loc.x + __float2int_rn(pattern_x[idx] * cosa - pattern_y[idx] * sina)) template <> struct OrbDescriptor<2> { __device__ static int calc(const PtrStepb& img, short2 loc, const int* pattern_x, const int* pattern_y, float sina, float cosa, int i) { pattern_x += 16 * i; pattern_y += 16 * i; int t0, t1, val; t0 = GET_VALUE(0); t1 = GET_VALUE(1); val = t0 < t1; t0 = GET_VALUE(2); t1 = GET_VALUE(3); val |= (t0 < t1) << 1; t0 = GET_VALUE(4); t1 = GET_VALUE(5); val |= (t0 < t1) << 2; t0 = GET_VALUE(6); t1 = GET_VALUE(7); val |= (t0 < t1) << 3; t0 = GET_VALUE(8); t1 = GET_VALUE(9); val |= (t0 < t1) << 4; t0 = GET_VALUE(10); t1 = GET_VALUE(11); val |= (t0 < t1) << 5; t0 = GET_VALUE(12); t1 = GET_VALUE(13); val |= (t0 < t1) << 6; t0 = GET_VALUE(14); t1 = GET_VALUE(15); val |= (t0 < t1) << 7; return val; } }; template <> struct OrbDescriptor<3> { __device__ static int calc(const PtrStepb& img, short2 loc, const int* pattern_x, const int* pattern_y, float sina, float cosa, int i) { pattern_x += 12 * i; pattern_y += 12 * i; int t0, t1, t2, val; t0 = GET_VALUE(0); t1 = GET_VALUE(1); t2 = GET_VALUE(2); val = t2 > t1 ? (t2 > t0 ? 2 : 0) : (t1 > t0); t0 = GET_VALUE(3); t1 = GET_VALUE(4); t2 = GET_VALUE(5); val |= (t2 > t1 ? (t2 > t0 ? 2 : 0) : (t1 > t0)) << 2; t0 = GET_VALUE(6); t1 = GET_VALUE(7); t2 = GET_VALUE(8); val |= (t2 > t1 ? (t2 > t0 ? 2 : 0) : (t1 > t0)) << 4; t0 = GET_VALUE(9); t1 = GET_VALUE(10); t2 = GET_VALUE(11); val |= (t2 > t1 ? (t2 > t0 ? 2 : 0) : (t1 > t0)) << 6; return val; } }; template <> struct OrbDescriptor<4> { __device__ static int calc(const PtrStepb& img, short2 loc, const int* pattern_x, const int* pattern_y, float sina, float cosa, int i) { pattern_x += 16 * i; pattern_y += 16 * i; int t0, t1, t2, t3, k, val; int a, b; t0 = GET_VALUE(0); t1 = GET_VALUE(1); t2 = GET_VALUE(2); t3 = GET_VALUE(3); a = 0, b = 2; if( t1 > t0 ) t0 = t1, a = 1; if( t3 > t2 ) t2 = t3, b = 3; k = t0 > t2 ? a : b; val = k; t0 = GET_VALUE(4); t1 = GET_VALUE(5); t2 = GET_VALUE(6); t3 = GET_VALUE(7); a = 0, b = 2; if( t1 > t0 ) t0 = t1, a = 1; if( t3 > t2 ) t2 = t3, b = 3; k = t0 > t2 ? a : b; val |= k << 2; t0 = GET_VALUE(8); t1 = GET_VALUE(9); t2 = GET_VALUE(10); t3 = GET_VALUE(11); a = 0, b = 2; if( t1 > t0 ) t0 = t1, a = 1; if( t3 > t2 ) t2 = t3, b = 3; k = t0 > t2 ? 
a : b; val |= k << 4; t0 = GET_VALUE(12); t1 = GET_VALUE(13); t2 = GET_VALUE(14); t3 = GET_VALUE(15); a = 0, b = 2; if( t1 > t0 ) t0 = t1, a = 1; if( t3 > t2 ) t2 = t3, b = 3; k = t0 > t2 ? a : b; val |= k << 6; return val; } }; #undef GET_VALUE template <int WTA_K> __global__ void computeOrbDescriptor(const PtrStepb img, const short2* loc, const float* angle_, const int npoints, const int* pattern_x, const int* pattern_y, PtrStepb desc, int dsize) { const int descidx = blockIdx.x * blockDim.x + threadIdx.x; const int ptidx = blockIdx.y * blockDim.y + threadIdx.y; if (ptidx < npoints && descidx < dsize) { float angle = angle_[ptidx]; angle *= (float)(CV_PI / 180.f); float sina, cosa; ::sincosf(angle, &sina, &cosa); desc.ptr(ptidx)[descidx] = OrbDescriptor<WTA_K>::calc(img, loc[ptidx], pattern_x, pattern_y, sina, cosa, descidx); } } void computeOrbDescriptor_gpu(PtrStepb img, const short2* loc, const float* angle, const int npoints, const int* pattern_x, const int* pattern_y, PtrStepb desc, int dsize, int WTA_K, hipStream_t stream) { dim3 block(32, 8); dim3 grid; grid.x = divUp(dsize, block.x); grid.y = divUp(npoints, block.y); switch (WTA_K) { case 2: hipLaunchKernelGGL(( computeOrbDescriptor<2>), dim3(grid), dim3(block), 0, stream, img, loc, angle, npoints, pattern_x, pattern_y, desc, dsize); break; case 3: hipLaunchKernelGGL(( computeOrbDescriptor<3>), dim3(grid), dim3(block), 0, stream, img, loc, angle, npoints, pattern_x, pattern_y, desc, dsize); break; case 4: hipLaunchKernelGGL(( computeOrbDescriptor<4>), dim3(grid), dim3(block), 0, stream, img, loc, angle, npoints, pattern_x, pattern_y, desc, dsize); break; } cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////////////////////////////////////// // mergeLocation __global__ void mergeLocation(const short2* loc_, float* x, float* y, const int npoints, float scale) { const int ptidx = blockIdx.x * blockDim.x + threadIdx.x; if (ptidx < npoints) { short2 loc = loc_[ptidx]; x[ptidx] = loc.x * scale; y[ptidx] = loc.y * scale; } } void mergeLocation_gpu(const short2* loc, float* x, float* y, int npoints, float scale, hipStream_t stream) { dim3 block(256); dim3 grid; grid.x = divUp(npoints, block.x); hipLaunchKernelGGL(( mergeLocation), dim3(grid), dim3(block), 0, stream, loc, x, y, npoints, scale); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } } }}}
514cdca217ba7926c4fc7080aedbb4cabfb8e21b.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // // Copyright (c) 2010, Paul Furgale, Chi Hay Tong // // The original code was written by Paul Furgale and Chi Hay Tong // and later optimized and prepared for integration into OpenCV by Itseez. 
// //M*/ #include <thrust/sort.h> #include "opencv2/gpu/device/common.hpp" #include "opencv2/gpu/device/utility.hpp" #include "opencv2/gpu/device/functional.hpp" namespace cv { namespace gpu { namespace device { namespace orb { //////////////////////////////////////////////////////////////////////////////////////////////////////// // cull int cull_gpu(int* loc, float* response, int size, int n_points) { thrust::device_ptr<int> loc_ptr(loc); thrust::device_ptr<float> response_ptr(response); thrust::sort_by_key(response_ptr, response_ptr + size, loc_ptr, thrust::greater<float>()); return n_points; } //////////////////////////////////////////////////////////////////////////////////////////////////////// // HarrisResponses __global__ void HarrisResponses(const PtrStepb img, const short2* loc_, float* response, const int npoints, const int blockSize, const float harris_k) { __shared__ int smem[8 * 32]; volatile int* srow = smem + threadIdx.y * blockDim.x; const int ptidx = blockIdx.x * blockDim.y + threadIdx.y; if (ptidx < npoints) { const short2 loc = loc_[ptidx]; const int r = blockSize / 2; const int x0 = loc.x - r; const int y0 = loc.y - r; int a = 0, b = 0, c = 0; for (int ind = threadIdx.x; ind < blockSize * blockSize; ind += blockDim.x) { const int i = ind / blockSize; const int j = ind % blockSize; int Ix = (img(y0 + i, x0 + j + 1) - img(y0 + i, x0 + j - 1)) * 2 + (img(y0 + i - 1, x0 + j + 1) - img(y0 + i - 1, x0 + j - 1)) + (img(y0 + i + 1, x0 + j + 1) - img(y0 + i + 1, x0 + j - 1)); int Iy = (img(y0 + i + 1, x0 + j) - img(y0 + i - 1, x0 + j)) * 2 + (img(y0 + i + 1, x0 + j - 1) - img(y0 + i - 1, x0 + j - 1)) + (img(y0 + i + 1, x0 + j + 1) - img(y0 + i - 1, x0 + j + 1)); a += Ix * Ix; b += Iy * Iy; c += Ix * Iy; } reduce<32>(srow, a, threadIdx.x, plus<volatile int>()); reduce<32>(srow, b, threadIdx.x, plus<volatile int>()); reduce<32>(srow, c, threadIdx.x, plus<volatile int>()); if (threadIdx.x == 0) { float scale = (1 << 2) * blockSize * 255.0f; scale = 1.0f / scale; const float scale_sq_sq = scale * scale * scale * scale; response[ptidx] = ((float)a * b - (float)c * c - harris_k * ((float)a + b) * ((float)a + b)) * scale_sq_sq; } } } void HarrisResponses_gpu(DevMem2Db img, const short2* loc, float* response, const int npoints, int blockSize, float harris_k, cudaStream_t stream) { dim3 block(32, 8); dim3 grid; grid.x = divUp(npoints, block.y); HarrisResponses<<<grid, block, 0, stream>>>(img, loc, response, npoints, blockSize, harris_k); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////////////////////////////////////// // IC_Angle __constant__ int c_u_max[32]; void loadUMax(const int* u_max, int count) { cudaSafeCall( cudaMemcpyToSymbol(c_u_max, u_max, count * sizeof(int)) ); } __global__ void IC_Angle(const PtrStepb image, const short2* loc_, float* angle, const int npoints, const int half_k) { __shared__ int smem[8 * 32]; volatile int* srow = smem + threadIdx.y * blockDim.x; const int ptidx = blockIdx.x * blockDim.y + threadIdx.y; if (ptidx < npoints) { int m_01 = 0, m_10 = 0; const short2 loc = loc_[ptidx]; // Treat the center line differently, v=0 for (int u = threadIdx.x - half_k; u <= half_k; u += blockDim.x) m_10 += u * image(loc.y, loc.x + u); reduce<32>(srow, m_10, threadIdx.x, plus<volatile int>()); for (int v = 1; v <= half_k; ++v) { // Proceed over the two lines int v_sum = 0; int m_sum = 0; const int d = c_u_max[v]; for (int u = threadIdx.x - d; u <= d; u += 
blockDim.x) { int val_plus = image(loc.y + v, loc.x + u); int val_minus = image(loc.y - v, loc.x + u); v_sum += (val_plus - val_minus); m_sum += u * (val_plus + val_minus); } reduce<32>(srow, v_sum, threadIdx.x, plus<volatile int>()); reduce<32>(srow, m_sum, threadIdx.x, plus<volatile int>()); m_10 += m_sum; m_01 += v * v_sum; } if (threadIdx.x == 0) { float kp_dir = ::atan2f((float)m_01, (float)m_10); kp_dir += (kp_dir < 0) * (2.0f * CV_PI); kp_dir *= 180.0f / CV_PI; angle[ptidx] = kp_dir; } } } void IC_Angle_gpu(DevMem2Db image, const short2* loc, float* angle, int npoints, int half_k, cudaStream_t stream) { dim3 block(32, 8); dim3 grid; grid.x = divUp(npoints, block.y); IC_Angle<<<grid, block, 0, stream>>>(image, loc, angle, npoints, half_k); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////////////////////////////////////// // computeOrbDescriptor template <int WTA_K> struct OrbDescriptor; #define GET_VALUE(idx) \ img(loc.y + __float2int_rn(pattern_x[idx] * sina + pattern_y[idx] * cosa), \ loc.x + __float2int_rn(pattern_x[idx] * cosa - pattern_y[idx] * sina)) template <> struct OrbDescriptor<2> { __device__ static int calc(const PtrStepb& img, short2 loc, const int* pattern_x, const int* pattern_y, float sina, float cosa, int i) { pattern_x += 16 * i; pattern_y += 16 * i; int t0, t1, val; t0 = GET_VALUE(0); t1 = GET_VALUE(1); val = t0 < t1; t0 = GET_VALUE(2); t1 = GET_VALUE(3); val |= (t0 < t1) << 1; t0 = GET_VALUE(4); t1 = GET_VALUE(5); val |= (t0 < t1) << 2; t0 = GET_VALUE(6); t1 = GET_VALUE(7); val |= (t0 < t1) << 3; t0 = GET_VALUE(8); t1 = GET_VALUE(9); val |= (t0 < t1) << 4; t0 = GET_VALUE(10); t1 = GET_VALUE(11); val |= (t0 < t1) << 5; t0 = GET_VALUE(12); t1 = GET_VALUE(13); val |= (t0 < t1) << 6; t0 = GET_VALUE(14); t1 = GET_VALUE(15); val |= (t0 < t1) << 7; return val; } }; template <> struct OrbDescriptor<3> { __device__ static int calc(const PtrStepb& img, short2 loc, const int* pattern_x, const int* pattern_y, float sina, float cosa, int i) { pattern_x += 12 * i; pattern_y += 12 * i; int t0, t1, t2, val; t0 = GET_VALUE(0); t1 = GET_VALUE(1); t2 = GET_VALUE(2); val = t2 > t1 ? (t2 > t0 ? 2 : 0) : (t1 > t0); t0 = GET_VALUE(3); t1 = GET_VALUE(4); t2 = GET_VALUE(5); val |= (t2 > t1 ? (t2 > t0 ? 2 : 0) : (t1 > t0)) << 2; t0 = GET_VALUE(6); t1 = GET_VALUE(7); t2 = GET_VALUE(8); val |= (t2 > t1 ? (t2 > t0 ? 2 : 0) : (t1 > t0)) << 4; t0 = GET_VALUE(9); t1 = GET_VALUE(10); t2 = GET_VALUE(11); val |= (t2 > t1 ? (t2 > t0 ? 2 : 0) : (t1 > t0)) << 6; return val; } }; template <> struct OrbDescriptor<4> { __device__ static int calc(const PtrStepb& img, short2 loc, const int* pattern_x, const int* pattern_y, float sina, float cosa, int i) { pattern_x += 16 * i; pattern_y += 16 * i; int t0, t1, t2, t3, k, val; int a, b; t0 = GET_VALUE(0); t1 = GET_VALUE(1); t2 = GET_VALUE(2); t3 = GET_VALUE(3); a = 0, b = 2; if( t1 > t0 ) t0 = t1, a = 1; if( t3 > t2 ) t2 = t3, b = 3; k = t0 > t2 ? a : b; val = k; t0 = GET_VALUE(4); t1 = GET_VALUE(5); t2 = GET_VALUE(6); t3 = GET_VALUE(7); a = 0, b = 2; if( t1 > t0 ) t0 = t1, a = 1; if( t3 > t2 ) t2 = t3, b = 3; k = t0 > t2 ? a : b; val |= k << 2; t0 = GET_VALUE(8); t1 = GET_VALUE(9); t2 = GET_VALUE(10); t3 = GET_VALUE(11); a = 0, b = 2; if( t1 > t0 ) t0 = t1, a = 1; if( t3 > t2 ) t2 = t3, b = 3; k = t0 > t2 ? 
a : b; val |= k << 4; t0 = GET_VALUE(12); t1 = GET_VALUE(13); t2 = GET_VALUE(14); t3 = GET_VALUE(15); a = 0, b = 2; if( t1 > t0 ) t0 = t1, a = 1; if( t3 > t2 ) t2 = t3, b = 3; k = t0 > t2 ? a : b; val |= k << 6; return val; } }; #undef GET_VALUE template <int WTA_K> __global__ void computeOrbDescriptor(const PtrStepb img, const short2* loc, const float* angle_, const int npoints, const int* pattern_x, const int* pattern_y, PtrStepb desc, int dsize) { const int descidx = blockIdx.x * blockDim.x + threadIdx.x; const int ptidx = blockIdx.y * blockDim.y + threadIdx.y; if (ptidx < npoints && descidx < dsize) { float angle = angle_[ptidx]; angle *= (float)(CV_PI / 180.f); float sina, cosa; ::sincosf(angle, &sina, &cosa); desc.ptr(ptidx)[descidx] = OrbDescriptor<WTA_K>::calc(img, loc[ptidx], pattern_x, pattern_y, sina, cosa, descidx); } } void computeOrbDescriptor_gpu(PtrStepb img, const short2* loc, const float* angle, const int npoints, const int* pattern_x, const int* pattern_y, PtrStepb desc, int dsize, int WTA_K, cudaStream_t stream) { dim3 block(32, 8); dim3 grid; grid.x = divUp(dsize, block.x); grid.y = divUp(npoints, block.y); switch (WTA_K) { case 2: computeOrbDescriptor<2><<<grid, block, 0, stream>>>(img, loc, angle, npoints, pattern_x, pattern_y, desc, dsize); break; case 3: computeOrbDescriptor<3><<<grid, block, 0, stream>>>(img, loc, angle, npoints, pattern_x, pattern_y, desc, dsize); break; case 4: computeOrbDescriptor<4><<<grid, block, 0, stream>>>(img, loc, angle, npoints, pattern_x, pattern_y, desc, dsize); break; } cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////////////////////////////////////// // mergeLocation __global__ void mergeLocation(const short2* loc_, float* x, float* y, const int npoints, float scale) { const int ptidx = blockIdx.x * blockDim.x + threadIdx.x; if (ptidx < npoints) { short2 loc = loc_[ptidx]; x[ptidx] = loc.x * scale; y[ptidx] = loc.y * scale; } } void mergeLocation_gpu(const short2* loc, float* x, float* y, int npoints, float scale, cudaStream_t stream) { dim3 block(256); dim3 grid; grid.x = divUp(npoints, block.x); mergeLocation<<<grid, block, 0, stream>>>(loc, x, y, npoints, scale); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } } }}}
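/*
 * Illustrative host-side sketch (not part of OpenCV): the intensity-centroid
 * orientation that IC_Angle accumulates with warp reductions, written
 * sequentially for one small patch.  u_max[] plays the same role as the
 * c_u_max constant table above (circular patch boundary); the bounds and the
 * test image here are made up for a radius-3 patch.
 */
#include <cmath>
#include <cstdio>

int main()
{
    const int half_k = 3;
    const int u_max[half_k + 1] = {3, 3, 2, 1};      // hypothetical circular bounds
    unsigned char img[7][7];                         // tiny patch, centre at (3,3)
    for (int y = 0; y < 7; y++)
        for (int x = 0; x < 7; x++)
            img[y][x] = static_cast<unsigned char>(10 * x);   // gradient along +x

    long m_01 = 0, m_10 = 0;                         // image moments, as in the kernel
    for (int v = -half_k; v <= half_k; v++) {
        int av = v < 0 ? -v : v;                     // |v| selects the row bound
        for (int u = -u_max[av]; u <= u_max[av]; u++) {
            m_10 += u * img[3 + v][3 + u];
            m_01 += v * img[3 + v][3 + u];
        }
    }

    float deg = std::atan2((float)m_01, (float)m_10) * 180.0f / 3.14159265f;
    if (deg < 0) deg += 360.0f;
    // a pure +x gradient gives m_01 == 0, so the reported angle is 0 degrees
    std::printf("m_10=%ld m_01=%ld angle=%.1f deg\n", m_10, m_01, deg);
    return 0;
}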
5f140d5ddd356c21a0b46c06b92ee100b5ffecb9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel4_plus_2_front; int xdim0_update_halo_kernel4_plus_2_front_h = -1; __constant__ int ydim0_update_halo_kernel4_plus_2_front; int ydim0_update_halo_kernel4_plus_2_front_h = -1; __constant__ int xdim1_update_halo_kernel4_plus_2_front; int xdim1_update_halo_kernel4_plus_2_front_h = -1; __constant__ int ydim1_update_halo_kernel4_plus_2_front; int ydim1_update_halo_kernel4_plus_2_front_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel4_plus_2_front * (y) + \ xdim0_update_halo_kernel4_plus_2_front * \ ydim0_update_halo_kernel4_plus_2_front * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel4_plus_2_front * (y) + \ xdim1_update_halo_kernel4_plus_2_front * \ ydim1_update_halo_kernel4_plus_2_front * (z)) // user function __device__ inline void update_halo_kernel4_plus_2_front_gpu(double *vol_flux_y, double *mass_flux_y, const int *fields) { if (fields[FIELD_VOL_FLUX_Y] == 1) vol_flux_y[OPS_ACC0(0, 0, 0)] = vol_flux_y[OPS_ACC0(0, 0, -2)]; if (fields[FIELD_MASS_FLUX_Y] == 1) mass_flux_y[OPS_ACC1(0, 0, 0)] = mass_flux_y[OPS_ACC1(0, 0, -2)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel4_plus_2_front(double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel4_plus_2_front + idx_z * 1 * 1 * xdim0_update_halo_kernel4_plus_2_front * ydim0_update_halo_kernel4_plus_2_front; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel4_plus_2_front + idx_z * 1 * 1 * xdim1_update_halo_kernel4_plus_2_front * ydim1_update_halo_kernel4_plus_2_front; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel4_plus_2_front_gpu(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel4_plus_2_front(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 128)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(128, "update_halo_kernel4_plus_2_front"); OPS_kernels[128].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = 
args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel4_plus_2_front_h || ydim0 != ydim0_update_halo_kernel4_plus_2_front_h || xdim1 != xdim1_update_halo_kernel4_plus_2_front_h || ydim1 != ydim1_update_halo_kernel4_plus_2_front_h) { hipMemcpyToSymbol(xdim0_update_halo_kernel4_plus_2_front, &xdim0, sizeof(int)); xdim0_update_halo_kernel4_plus_2_front_h = xdim0; hipMemcpyToSymbol(ydim0_update_halo_kernel4_plus_2_front, &ydim0, sizeof(int)); ydim0_update_halo_kernel4_plus_2_front_h = ydim0; hipMemcpyToSymbol(xdim1_update_halo_kernel4_plus_2_front, &xdim1, sizeof(int)); xdim1_update_halo_kernel4_plus_2_front_h = xdim1; hipMemcpyToSymbol(ydim1_update_halo_kernel4_plus_2_front, &ydim1, sizeof(int)); ydim1_update_halo_kernel4_plus_2_front_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[128].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel4_plus_2_front), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[128].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[128].mpi_time += t2 - t1; OPS_kernels[128].transfer += ops_compute_transfer(dim, start, end, &arg0); 
OPS_kernels[128].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
5f140d5ddd356c21a0b46c06b92ee100b5ffecb9.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel4_plus_2_front; int xdim0_update_halo_kernel4_plus_2_front_h = -1; __constant__ int ydim0_update_halo_kernel4_plus_2_front; int ydim0_update_halo_kernel4_plus_2_front_h = -1; __constant__ int xdim1_update_halo_kernel4_plus_2_front; int xdim1_update_halo_kernel4_plus_2_front_h = -1; __constant__ int ydim1_update_halo_kernel4_plus_2_front; int ydim1_update_halo_kernel4_plus_2_front_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel4_plus_2_front * (y) + \ xdim0_update_halo_kernel4_plus_2_front * \ ydim0_update_halo_kernel4_plus_2_front * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel4_plus_2_front * (y) + \ xdim1_update_halo_kernel4_plus_2_front * \ ydim1_update_halo_kernel4_plus_2_front * (z)) // user function __device__ inline void update_halo_kernel4_plus_2_front_gpu(double *vol_flux_y, double *mass_flux_y, const int *fields) { if (fields[FIELD_VOL_FLUX_Y] == 1) vol_flux_y[OPS_ACC0(0, 0, 0)] = vol_flux_y[OPS_ACC0(0, 0, -2)]; if (fields[FIELD_MASS_FLUX_Y] == 1) mass_flux_y[OPS_ACC1(0, 0, 0)] = mass_flux_y[OPS_ACC1(0, 0, -2)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel4_plus_2_front(double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel4_plus_2_front + idx_z * 1 * 1 * xdim0_update_halo_kernel4_plus_2_front * ydim0_update_halo_kernel4_plus_2_front; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel4_plus_2_front + idx_z * 1 * 1 * xdim1_update_halo_kernel4_plus_2_front * ydim1_update_halo_kernel4_plus_2_front; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel4_plus_2_front_gpu(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel4_plus_2_front(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 128)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(128, "update_halo_kernel4_plus_2_front"); OPS_kernels[128].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = 
args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel4_plus_2_front_h || ydim0 != ydim0_update_halo_kernel4_plus_2_front_h || xdim1 != xdim1_update_halo_kernel4_plus_2_front_h || ydim1 != ydim1_update_halo_kernel4_plus_2_front_h) { cudaMemcpyToSymbol(xdim0_update_halo_kernel4_plus_2_front, &xdim0, sizeof(int)); xdim0_update_halo_kernel4_plus_2_front_h = xdim0; cudaMemcpyToSymbol(ydim0_update_halo_kernel4_plus_2_front, &ydim0, sizeof(int)); ydim0_update_halo_kernel4_plus_2_front_h = ydim0; cudaMemcpyToSymbol(xdim1_update_halo_kernel4_plus_2_front, &xdim1, sizeof(int)); xdim1_update_halo_kernel4_plus_2_front_h = xdim1; cudaMemcpyToSymbol(ydim1_update_halo_kernel4_plus_2_front, &ydim1, sizeof(int)); ydim1_update_halo_kernel4_plus_2_front_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[128].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_update_halo_kernel4_plus_2_front<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[128].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[128].mpi_time += t2 - t1; OPS_kernels[128].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[128].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
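Apart from the hipify banner and include, the .hip/.cu pair above differs only in host-side spellings: cudaMemcpyToSymbol becomes hipMemcpyToSymbol, cutilSafeCall(cudaDeviceSynchronize()) becomes cutilSafeCall(hipDeviceSynchronize()), and the triple-chevron launch becomes hipLaunchKernelGGL. A minimal sketch of that launch mapping on a made-up kernel (toy_halo_copy is illustrative and not part of the generated files):

__global__ void toy_halo_copy(double *dst, const double *src, int n) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < n) dst[i] = src[i];   // copy one halo element per thread
}

void launch_toy_halo_copy(double *dst, const double *src, int n) {
  dim3 grid((n - 1) / 256 + 1);
  dim3 block(256);
  // CUDA spelling, as in the .cu version:
  toy_halo_copy<<<grid, block, 0, 0>>>(dst, src, n);
  // hipify emits the equivalent HIP spelling, as in the .hip version:
  //   hipLaunchKernelGGL(toy_halo_copy, grid, block, 0, 0, dst, src, n);
}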
2fc9c54169667c620b12cdb6d77004ed2fb76da4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "RiemannFitOnGPU.h" #include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h" template <typename TrackerTraits> void HelixFitOnGPU<TrackerTraits>::launchRiemannKernels(HitsView const *hv, uint32_t nhits, uint32_t maxNumberOfTuples, hipStream_t stream) { assert(tuples_); auto blockSize = 64; auto numberOfBlocks = (maxNumberOfConcurrentFits_ + blockSize - 1) / blockSize; // Fit internals auto hitsGPU = cms::cuda::make_device_unique<double[]>( maxNumberOfConcurrentFits_ * sizeof(riemannFit::Matrix3xNd<4>) / sizeof(double), stream); auto hits_geGPU = cms::cuda::make_device_unique<float[]>( maxNumberOfConcurrentFits_ * sizeof(riemannFit::Matrix6x4f) / sizeof(float), stream); auto fast_fit_resultsGPU = cms::cuda::make_device_unique<double[]>( maxNumberOfConcurrentFits_ * sizeof(riemannFit::Vector4d) / sizeof(double), stream); auto circle_fit_resultsGPU_holder = cms::cuda::make_device_unique<char[]>(maxNumberOfConcurrentFits_ * sizeof(riemannFit::CircleFit), stream); riemannFit::CircleFit *circle_fit_resultsGPU_ = (riemannFit::CircleFit *)(circle_fit_resultsGPU_holder.get()); for (uint32_t offset = 0; offset < maxNumberOfTuples; offset += maxNumberOfConcurrentFits_) { // triplets hipLaunchKernelGGL(( kernel_FastFit<3, TrackerTraits>), dim3(numberOfBlocks), dim3(blockSize), 0, stream, tuples_, tupleMultiplicity_, 3, hv, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), offset); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( kernel_CircleFit<3, TrackerTraits>), dim3(numberOfBlocks), dim3(blockSize), 0, stream, tupleMultiplicity_, 3, bField_, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), circle_fit_resultsGPU_, offset); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( kernel_LineFit<3, TrackerTraits>), dim3(numberOfBlocks), dim3(blockSize), 0, stream, tupleMultiplicity_, 3, bField_, outputSoa_, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), circle_fit_resultsGPU_, offset); cudaCheck(hipGetLastError()); // quads hipLaunchKernelGGL(( kernel_FastFit<4, TrackerTraits>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tuples_, tupleMultiplicity_, 4, hv, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), offset); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( kernel_CircleFit<4, TrackerTraits>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tupleMultiplicity_, 4, bField_, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), circle_fit_resultsGPU_, offset); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( kernel_LineFit<4, TrackerTraits>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tupleMultiplicity_, 4, bField_, outputSoa_, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), circle_fit_resultsGPU_, offset); cudaCheck(hipGetLastError()); if (fitNas4_) { // penta hipLaunchKernelGGL(( kernel_FastFit<4, TrackerTraits>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tuples_, tupleMultiplicity_, 5, hv, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), offset); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( kernel_CircleFit<4, TrackerTraits>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tupleMultiplicity_, 5, bField_, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), circle_fit_resultsGPU_, offset); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( kernel_LineFit<4, TrackerTraits>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tupleMultiplicity_, 5, 
bField_, outputSoa_, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), circle_fit_resultsGPU_, offset); cudaCheck(hipGetLastError()); } else { // penta all 5 hipLaunchKernelGGL(( kernel_FastFit<5, TrackerTraits>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tuples_, tupleMultiplicity_, 5, hv, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), offset); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( kernel_CircleFit<5, TrackerTraits>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tupleMultiplicity_, 5, bField_, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), circle_fit_resultsGPU_, offset); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( kernel_LineFit<5, TrackerTraits>), dim3(numberOfBlocks / 4), dim3(blockSize), 0, stream, tupleMultiplicity_, 5, bField_, outputSoa_, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), circle_fit_resultsGPU_, offset); cudaCheck(hipGetLastError()); } } } template class HelixFitOnGPU<pixelTopology::Phase1>; template class HelixFitOnGPU<pixelTopology::Phase2>;
2fc9c54169667c620b12cdb6d77004ed2fb76da4.cu
#include "RiemannFitOnGPU.h" #include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h" template <typename TrackerTraits> void HelixFitOnGPU<TrackerTraits>::launchRiemannKernels(HitsView const *hv, uint32_t nhits, uint32_t maxNumberOfTuples, cudaStream_t stream) { assert(tuples_); auto blockSize = 64; auto numberOfBlocks = (maxNumberOfConcurrentFits_ + blockSize - 1) / blockSize; // Fit internals auto hitsGPU = cms::cuda::make_device_unique<double[]>( maxNumberOfConcurrentFits_ * sizeof(riemannFit::Matrix3xNd<4>) / sizeof(double), stream); auto hits_geGPU = cms::cuda::make_device_unique<float[]>( maxNumberOfConcurrentFits_ * sizeof(riemannFit::Matrix6x4f) / sizeof(float), stream); auto fast_fit_resultsGPU = cms::cuda::make_device_unique<double[]>( maxNumberOfConcurrentFits_ * sizeof(riemannFit::Vector4d) / sizeof(double), stream); auto circle_fit_resultsGPU_holder = cms::cuda::make_device_unique<char[]>(maxNumberOfConcurrentFits_ * sizeof(riemannFit::CircleFit), stream); riemannFit::CircleFit *circle_fit_resultsGPU_ = (riemannFit::CircleFit *)(circle_fit_resultsGPU_holder.get()); for (uint32_t offset = 0; offset < maxNumberOfTuples; offset += maxNumberOfConcurrentFits_) { // triplets kernel_FastFit<3, TrackerTraits><<<numberOfBlocks, blockSize, 0, stream>>>( tuples_, tupleMultiplicity_, 3, hv, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), offset); cudaCheck(cudaGetLastError()); kernel_CircleFit<3, TrackerTraits><<<numberOfBlocks, blockSize, 0, stream>>>(tupleMultiplicity_, 3, bField_, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), circle_fit_resultsGPU_, offset); cudaCheck(cudaGetLastError()); kernel_LineFit<3, TrackerTraits><<<numberOfBlocks, blockSize, 0, stream>>>(tupleMultiplicity_, 3, bField_, outputSoa_, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), circle_fit_resultsGPU_, offset); cudaCheck(cudaGetLastError()); // quads kernel_FastFit<4, TrackerTraits><<<numberOfBlocks / 4, blockSize, 0, stream>>>( tuples_, tupleMultiplicity_, 4, hv, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), offset); cudaCheck(cudaGetLastError()); kernel_CircleFit<4, TrackerTraits><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tupleMultiplicity_, 4, bField_, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), circle_fit_resultsGPU_, offset); cudaCheck(cudaGetLastError()); kernel_LineFit<4, TrackerTraits><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tupleMultiplicity_, 4, bField_, outputSoa_, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), circle_fit_resultsGPU_, offset); cudaCheck(cudaGetLastError()); if (fitNas4_) { // penta kernel_FastFit<4, TrackerTraits><<<numberOfBlocks / 4, blockSize, 0, stream>>>( tuples_, tupleMultiplicity_, 5, hv, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), offset); cudaCheck(cudaGetLastError()); kernel_CircleFit<4, TrackerTraits><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tupleMultiplicity_, 5, bField_, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), circle_fit_resultsGPU_, offset); cudaCheck(cudaGetLastError()); kernel_LineFit<4, TrackerTraits><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tupleMultiplicity_, 5, bField_, outputSoa_, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), circle_fit_resultsGPU_, offset); cudaCheck(cudaGetLastError()); } else { // penta all 5 kernel_FastFit<5, TrackerTraits><<<numberOfBlocks / 4, blockSize, 0, stream>>>( tuples_, tupleMultiplicity_, 5, hv, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), offset); 
cudaCheck(cudaGetLastError()); kernel_CircleFit<5, TrackerTraits><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tupleMultiplicity_, 5, bField_, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), circle_fit_resultsGPU_, offset); cudaCheck(cudaGetLastError()); kernel_LineFit<5, TrackerTraits><<<numberOfBlocks / 4, blockSize, 0, stream>>>(tupleMultiplicity_, 5, bField_, outputSoa_, hitsGPU.get(), hits_geGPU.get(), fast_fit_resultsGPU.get(), circle_fit_resultsGPU_, offset); cudaCheck(cudaGetLastError()); } } } template class HelixFitOnGPU<pixelTopology::Phase1>; template class HelixFitOnGPU<pixelTopology::Phase2>;
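Both versions above launch the same templated fit kernels in chunks of maxNumberOfConcurrentFits_ tuples, passing a running offset into every launch and checking cudaGetLastError/hipGetLastError after each one. A reduced sketch of that chunked-launch pattern; toy_fit, maxConcurrent and the averaging body are placeholders, not CMSSW code:

#include <cstdint>
#include <cstdio>

template <int N>
__global__ void toy_fit(const float *in, float *out, uint32_t total, uint32_t offset) {
  uint32_t idx = offset + blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= total) return;
  float acc = 0.f;
  for (int k = 0; k < N; ++k)      // N plays the role of hits-per-track
    acc += in[idx * N + k];
  out[idx] = acc / N;
}

void launch_in_chunks(const float *in, float *out, uint32_t total,
                      uint32_t maxConcurrent, cudaStream_t stream) {
  const int blockSize = 64;
  const int blocks = (maxConcurrent + blockSize - 1) / blockSize;
  for (uint32_t offset = 0; offset < total; offset += maxConcurrent) {
    toy_fit<4><<<blocks, blockSize, 0, stream>>>(in, out, total, offset);
    cudaError_t err = cudaGetLastError();   // same per-launch check as above
    if (err != cudaSuccess)
      std::printf("launch failed: %s\n", cudaGetErrorString(err));
  }
}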
3b10cc323d8c8d81276f9f6807a50cdb6cdc5105.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <cutil.h> #include <math.h> // Includes #include <stdio.h> // includes, project #include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples #include "../include/ContAcq-IntClk.h" //#include <shrQATest.h> //#include <shrUtils.h> // includes CUDA #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 240 #define ITERATIONS 30 // Variables float* h_A; float* h_B; float* h_C; float* d_A; float* d_B; float* d_C; bool noprompt = false; unsigned int my_timer; // Functions void CleanupResources(void); void RandomInit(float*, int); void ParseArguments(int, char**); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(hipError_t err, const char *file, const int line ) { if(hipSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling hipGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { hipError_t err = hipGetLastError(); if (hipSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions // Device code __global__ void PowerKernal1(const float* A, const float* B, float* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation float Value1=A[i]; float Value2=0; float Value3=0; float Value=0; float I1=A[i]; float I2=B[i]; // exponential function for(unsigned k=0; k<ITERATIONS*(blockDim.x/blockDim.x+50);k++) { Value2=exp(Value1); Value3=exp(Value2); Value1=exp(Value3); Value2=exp(Value1); } Value=Value3-Value2; C[i]=Value; __syncthreads(); } __global__ void PowerKernal2(const float* A, const float* B, float* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation float Value1=A[i]; float Value2=0; float Value3=0; float Value=0; float I1=A[i]; float I2=B[i]; //sinusoidal functions for(unsigned k=0; k<ITERATIONS*(blockDim.x/blockDim.x+50);k++) { Value2=cos(Value1); Value3=sin(Value2); Value2=cos(Value1); Value1=sin(Value2); } Value=Value3-Value2; C[i]=Value; __syncthreads(); } __global__ void PowerKernal3(const float* A, const float* B, float* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation float Value1=0; float Value2=99999; float Value3=0; float Value=0; float I1=A[i]; float I2=B[i]; //square root for(unsigned long k=0; k<ITERATIONS*(blockDim.x/blockDim.x+100);k++) { Value1=Value2*Value2; Value1=sqrt(abs(Value1)); Value2=sqrt(abs(I2))*sqrt(abs(I2)); Value3=sqrt(abs(Value2)); Value2=sqrt(abs(Value1)); } Value=Value3-Value2; C[i]=Value; __syncthreads(); } __global__ void PowerKernalEmpty(const float* A, const float* B, float* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation unsigned Value1=0; unsigned Value2=0; unsigned Value3=0; unsigned Value=0; unsigned I1=A[i]; unsigned I2=B[i]; __syncthreads(); // Excessive Mod/Div Operations 
for(unsigned long k=0; k<ITERATIONS*(blockDim.x+299);k++) { Value1=(I1)+k; Value2=(I2)+k; Value3=(Value2)+k; Value2=(Value1)+k; __asm volatile ( "B0: bra.uni B1;\n\t" "B1: bra.uni B2;\n\t" "B2: bra.uni B3;\n\t" "B3: bra.uni B4;\n\t" "B4: bra.uni B5;\n\t" "B5: bra.uni B6;\n\t" "B6: bra.uni B7;\n\t" "B7: bra.uni B8;\n\t" "B8: bra.uni B9;\n\t" "B9: bra.uni B10;\n\t" "B10: bra.uni B11;\n\t" "B11: bra.uni B12;\n\t" "B12: bra.uni B13;\n\t" "B13: bra.uni B14;\n\t" "B14: bra.uni B15;\n\t" "B15: bra.uni B16;\n\t" "B16: bra.uni B17;\n\t" "B17: bra.uni B18;\n\t" "B18: bra.uni B19;\n\t" "B19: bra.uni B20;\n\t" "B20: bra.uni B21;\n\t" "B21: bra.uni B22;\n\t" "B22: bra.uni B23;\n\t" "B23: bra.uni B24;\n\t" "B24: bra.uni B25;\n\t" "B25: bra.uni B26;\n\t" "B26: bra.uni B27;\n\t" "B27: bra.uni B28;\n\t" "B28: bra.uni B29;\n\t" "B29: bra.uni B30;\n\t" "B30: bra.uni B31;\n\t" "B31: bra.uni LOOP;\n\t" "LOOP:" ); } C[i]=I1; __syncthreads(); } __global__ void PowerKernal4(const float* A, const float* B, float* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation float Value1=0; float Value2=0; float Value3=0; float Value=0; float I1=A[i]; float I2=B[i]; // logarithmic for(unsigned k=0; k<ITERATIONS*(blockDim.x+50);k++) { Value1=log2((I1)); Value2=log2((I2)); Value3=log2((Value2)); Value2=log2((Value1)); } Value=Value3-Value2; C[i]=Value; __syncthreads(); } // Host code int main() { printf("Power Microbenchmarks\n"); int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS; size_t size = N * sizeof(float); // Allocate input vectors h_A and h_B in host memory h_A = (float*)malloc(size); if (h_A == 0) CleanupResources(); h_B = (float*)malloc(size); if (h_B == 0) CleanupResources(); h_C = (float*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); RandomInit(h_B, N); // Allocate vectors in device memory printf("before malloc in GPU0\n"); checkCudaErrors( hipMalloc((void**)&d_A, size) ); printf("before malloc in GPU1\n"); checkCudaErrors( hipMalloc((void**)&d_B, size) ); printf("before malloc in GPU2\n"); checkCudaErrors( hipMalloc((void**)&d_C, size) ); printf("after malloc in GPU\n"); // Copy vectors from host memory to device memory checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) ); //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); dim3 dimGrid2(1,1); dim3 dimBlock2(1,1); CUT_SAFE_CALL(cutCreateTimer(&my_timer)); TaskHandle taskhandle = LaunchDAQ(); CUT_SAFE_CALL(cutStartTimer(my_timer)); hipLaunchKernelGGL(( PowerKernalEmpty), dim3(dimGrid2),dim3(dimBlock2), 0, 0, d_A, d_B, d_C, N); CUDA_SAFE_CALL( hipDeviceSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); //sleep(0.5); dimGrid.y = NUM_OF_BLOCKS; for (int i=0; i<3; i++) { dimGrid.y /= 3; hipLaunchKernelGGL(( PowerKernal1), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, N); CUDA_SAFE_CALL( hipDeviceSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); hipLaunchKernelGGL(( PowerKernalEmpty), dim3(dimGrid2),dim3(dimBlock2), 0, 0, d_A, d_B, d_C, N); CUDA_SAFE_CALL( hipDeviceSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); } dimGrid.y = NUM_OF_BLOCKS; for (int i=0; i<3; i++) { dimGrid.y /= 3; hipLaunchKernelGGL(( PowerKernal1), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, N); hipLaunchKernelGGL(( PowerKernal2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, 
d_C, N); CUDA_SAFE_CALL( hipDeviceSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); hipLaunchKernelGGL(( PowerKernalEmpty), dim3(dimGrid2),dim3(dimBlock2), 0, 0, d_A, d_B, d_C, N); CUDA_SAFE_CALL( hipDeviceSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); } dimGrid.y = NUM_OF_BLOCKS; for (int i=0; i<3; i++) { dimGrid.y /= 3; hipLaunchKernelGGL(( PowerKernal3), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, N); CUDA_SAFE_CALL( hipDeviceSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); hipLaunchKernelGGL(( PowerKernalEmpty), dim3(dimGrid2),dim3(dimBlock2), 0, 0, d_A, d_B, d_C, N); //sleep(0.5); CUDA_SAFE_CALL( hipDeviceSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); } dimGrid.y = NUM_OF_BLOCKS; for (int i=0; i<3; i++) { dimGrid.y /= 3; hipLaunchKernelGGL(( PowerKernal4), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, N); CUDA_SAFE_CALL( hipDeviceSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); hipLaunchKernelGGL(( PowerKernalEmpty), dim3(dimGrid2),dim3(dimBlock2), 0, 0, d_A, d_B, d_C, N); CUDA_SAFE_CALL( hipDeviceSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); } getLastCudaError("kernel launch failure"); CUDA_SAFE_CALL( hipDeviceSynchronize() ); CUT_SAFE_CALL(cutStopTimer(my_timer)); TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer)); printf("execution time = %f\n", cutGetTimerValue(my_timer)); CUT_SAFE_CALL(cutDeleteTimer(my_timer)); #ifdef _DEBUG checkCudaErrors( hipDeviceSynchronize() ); #endif // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) ); CleanupResources(); return 0; } void CleanupResources(void) { // Free device memory if (d_A) hipFree(d_A); if (d_B) hipFree(d_B); if (d_C) hipFree(d_C); // Free host memory if (h_A) free(h_A); if (h_B) free(h_B); if (h_C) free(h_C); } // Allocates an array with random float entries. void RandomInit(float* data, int n) { for (int i = 0; i < n; ++i) data[i] = rand() / (float)RAND_MAX; }
3b10cc323d8c8d81276f9f6807a50cdb6cdc5105.cu
#include <stdio.h> #include <stdlib.h> #include <cutil.h> #include <math.h> // Includes #include <stdio.h> // includes, project #include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples #include "../include/ContAcq-IntClk.h" //#include <shrQATest.h> //#include <shrUtils.h> // includes CUDA #include <cuda_runtime.h> #include <cuda_runtime.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 240 #define ITERATIONS 30 // Variables float* h_A; float* h_B; float* h_C; float* d_A; float* d_B; float* d_C; bool noprompt = false; unsigned int my_timer; // Functions void CleanupResources(void); void RandomInit(float*, int); void ParseArguments(int, char**); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line ) { if(cudaSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions // Device code __global__ void PowerKernal1(const float* A, const float* B, float* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation float Value1=A[i]; float Value2=0; float Value3=0; float Value=0; float I1=A[i]; float I2=B[i]; // exponential function for(unsigned k=0; k<ITERATIONS*(blockDim.x/blockDim.x+50);k++) { Value2=exp(Value1); Value3=exp(Value2); Value1=exp(Value3); Value2=exp(Value1); } Value=Value3-Value2; C[i]=Value; __syncthreads(); } __global__ void PowerKernal2(const float* A, const float* B, float* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation float Value1=A[i]; float Value2=0; float Value3=0; float Value=0; float I1=A[i]; float I2=B[i]; //sinusoidal functions for(unsigned k=0; k<ITERATIONS*(blockDim.x/blockDim.x+50);k++) { Value2=cos(Value1); Value3=sin(Value2); Value2=cos(Value1); Value1=sin(Value2); } Value=Value3-Value2; C[i]=Value; __syncthreads(); } __global__ void PowerKernal3(const float* A, const float* B, float* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation float Value1=0; float Value2=99999; float Value3=0; float Value=0; float I1=A[i]; float I2=B[i]; //square root for(unsigned long k=0; k<ITERATIONS*(blockDim.x/blockDim.x+100);k++) { Value1=Value2*Value2; Value1=sqrt(abs(Value1)); Value2=sqrt(abs(I2))*sqrt(abs(I2)); Value3=sqrt(abs(Value2)); Value2=sqrt(abs(Value1)); } Value=Value3-Value2; C[i]=Value; __syncthreads(); } __global__ void PowerKernalEmpty(const float* A, const float* B, float* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation unsigned Value1=0; unsigned Value2=0; unsigned Value3=0; unsigned Value=0; unsigned I1=A[i]; unsigned I2=B[i]; __syncthreads(); // Excessive Mod/Div Operations for(unsigned long k=0; k<ITERATIONS*(blockDim.x+299);k++) { 
Value1=(I1)+k; Value2=(I2)+k; Value3=(Value2)+k; Value2=(Value1)+k; __asm volatile ( "B0: bra.uni B1;\n\t" "B1: bra.uni B2;\n\t" "B2: bra.uni B3;\n\t" "B3: bra.uni B4;\n\t" "B4: bra.uni B5;\n\t" "B5: bra.uni B6;\n\t" "B6: bra.uni B7;\n\t" "B7: bra.uni B8;\n\t" "B8: bra.uni B9;\n\t" "B9: bra.uni B10;\n\t" "B10: bra.uni B11;\n\t" "B11: bra.uni B12;\n\t" "B12: bra.uni B13;\n\t" "B13: bra.uni B14;\n\t" "B14: bra.uni B15;\n\t" "B15: bra.uni B16;\n\t" "B16: bra.uni B17;\n\t" "B17: bra.uni B18;\n\t" "B18: bra.uni B19;\n\t" "B19: bra.uni B20;\n\t" "B20: bra.uni B21;\n\t" "B21: bra.uni B22;\n\t" "B22: bra.uni B23;\n\t" "B23: bra.uni B24;\n\t" "B24: bra.uni B25;\n\t" "B25: bra.uni B26;\n\t" "B26: bra.uni B27;\n\t" "B27: bra.uni B28;\n\t" "B28: bra.uni B29;\n\t" "B29: bra.uni B30;\n\t" "B30: bra.uni B31;\n\t" "B31: bra.uni LOOP;\n\t" "LOOP:" ); } C[i]=I1; __syncthreads(); } __global__ void PowerKernal4(const float* A, const float* B, float* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation float Value1=0; float Value2=0; float Value3=0; float Value=0; float I1=A[i]; float I2=B[i]; // logarithmic for(unsigned k=0; k<ITERATIONS*(blockDim.x+50);k++) { Value1=log2((I1)); Value2=log2((I2)); Value3=log2((Value2)); Value2=log2((Value1)); } Value=Value3-Value2; C[i]=Value; __syncthreads(); } // Host code int main() { printf("Power Microbenchmarks\n"); int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS; size_t size = N * sizeof(float); // Allocate input vectors h_A and h_B in host memory h_A = (float*)malloc(size); if (h_A == 0) CleanupResources(); h_B = (float*)malloc(size); if (h_B == 0) CleanupResources(); h_C = (float*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); RandomInit(h_B, N); // Allocate vectors in device memory printf("before malloc in GPU0\n"); checkCudaErrors( cudaMalloc((void**)&d_A, size) ); printf("before malloc in GPU1\n"); checkCudaErrors( cudaMalloc((void**)&d_B, size) ); printf("before malloc in GPU2\n"); checkCudaErrors( cudaMalloc((void**)&d_C, size) ); printf("after malloc in GPU\n"); // Copy vectors from host memory to device memory checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) ); //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); dim3 dimGrid2(1,1); dim3 dimBlock2(1,1); CUT_SAFE_CALL(cutCreateTimer(&my_timer)); TaskHandle taskhandle = LaunchDAQ(); CUT_SAFE_CALL(cutStartTimer(my_timer)); PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A, d_B, d_C, N); CUDA_SAFE_CALL( cudaThreadSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); //sleep(0.5); dimGrid.y = NUM_OF_BLOCKS; for (int i=0; i<3; i++) { dimGrid.y /= 3; PowerKernal1<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N); CUDA_SAFE_CALL( cudaThreadSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A, d_B, d_C, N); CUDA_SAFE_CALL( cudaThreadSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); } dimGrid.y = NUM_OF_BLOCKS; for (int i=0; i<3; i++) { dimGrid.y /= 3; PowerKernal1<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N); PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N); CUDA_SAFE_CALL( cudaThreadSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A, d_B, d_C, N); CUDA_SAFE_CALL( cudaThreadSynchronize() ); printf("execution 
time = %f\n", cutGetTimerValue(my_timer)); } dimGrid.y = NUM_OF_BLOCKS; for (int i=0; i<3; i++) { dimGrid.y /= 3; PowerKernal3<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N); CUDA_SAFE_CALL( cudaThreadSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A, d_B, d_C, N); //sleep(0.5); CUDA_SAFE_CALL( cudaThreadSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); } dimGrid.y = NUM_OF_BLOCKS; for (int i=0; i<3; i++) { dimGrid.y /= 3; PowerKernal4<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N); CUDA_SAFE_CALL( cudaThreadSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A, d_B, d_C, N); CUDA_SAFE_CALL( cudaThreadSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); } getLastCudaError("kernel launch failure"); CUDA_SAFE_CALL( cudaThreadSynchronize() ); CUT_SAFE_CALL(cutStopTimer(my_timer)); TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer)); printf("execution time = %f\n", cutGetTimerValue(my_timer)); CUT_SAFE_CALL(cutDeleteTimer(my_timer)); #ifdef _DEBUG checkCudaErrors( cudaDeviceSynchronize() ); #endif // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) ); CleanupResources(); return 0; } void CleanupResources(void) { // Free device memory if (d_A) cudaFree(d_A); if (d_B) cudaFree(d_B); if (d_C) cudaFree(d_C); // Free host memory if (h_A) free(h_A); if (h_B) free(h_B); if (h_C) free(h_C); } // Allocates an array with random float entries. void RandomInit(float* data, int n) { for (int i = 0; i < n; ++i) data[i] = rand() / (float)RAND_MAX; }
721db38dd49b0ec1a3049dfde18cd22bc6bd0c1b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //--blockDim=4 --gridDim=1 /* * This kernel suffers from barrier divergence. * Can you see why? */ __global__ void inloop(/* no inputs or outputs in this illustrative example */) { __shared__ int A[2][4]; int buf, i, j; int tid = threadIdx.x; int x = tid == 0 ? 4 : 1; int y = tid == 0 ? 1 : 4; buf = 0; for(int i = 0; i < x; i++) { for(int j = 0; j < y; j++) { __syncthreads(); A[1-buf][tid] = A[buf][(tid+1)%4]; buf = 1 - buf; } } }
721db38dd49b0ec1a3049dfde18cd22bc6bd0c1b.cu
//--blockDim=4 --gridDim=1 /* * This kernel suffers from barrier divergence. * Can you see why? */ __global__ void inloop(/* no inputs or outputs in this illustrative example */) { __shared__ int A[2][4]; int buf, i, j; int tid = threadIdx.x; int x = tid == 0 ? 4 : 1; int y = tid == 0 ? 1 : 4; buf = 0; for(int i = 0; i < x; i++) { for(int j = 0; j < y; j++) { __syncthreads(); A[1-buf][tid] = A[buf][(tid+1)%4]; buf = 1 - buf; } } }
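Why the kernel above is flagged: thread 0 executes the loop nest as 4 outer by 1 inner iterations while every other thread executes 1 by 4, so although each thread hits __syncthreads() four times in total, the barrier sits inside loops whose bounds depend on threadIdx.x and is therefore reached under divergent control flow. One possible divergence-free restructuring, offered as a sketch rather than the canonical answer to the file's question:

__global__ void inloop_uniform() {
  __shared__ int A[2][4];
  int tid = threadIdx.x;
  int buf = 0;
  A[buf][tid] = tid;                      // give the working buffer defined contents
  __syncthreads();
  for (int step = 0; step < 4; ++step) {  // 4 == x*y, identical for every thread
    A[1 - buf][tid] = A[buf][(tid + 1) % 4];
    buf = 1 - buf;
    __syncthreads();                      // reached uniformly: same trip count for all threads
  }
}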
9614725854c767e64fa4ea72e97f8ec8721e17a2.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>

#define NUM_BLOCKS 1024

__global__ void primecuda(int *dev_num, int *arr) {
  int blckid = blockIdx.x; // Handle the data at the block index
  int tid = threadIdx.x;
  int gid = 1 + (*dev_num / 1024);
  int bid = (blckid * gid) + tid;  // candidate tested by this thread
  int flag, i;
  flag = 0;
  if (bid < *dev_num) {            // keep writes inside arr[0..num-1]
    if (bid % 2 != 0) {
      for (i = 3; i <= sqrtf(bid); i++) {
        if (bid % i == 0) { flag = 1; break; }
      }
      if (flag == 0) {
        if (bid == 0 || bid == 1 || bid == 2) {
          // Do nothing
        } else {
          arr[bid] = bid;
        }
      }
    }
  }
}

int main(int argc, char **argv) {
  if (argc < 3 || strcmp("-t", argv[1]) != 0) {
    printf("Error\n");
    return 0;
  }
  int *dev_num;
  int num, count, biggest;
  num = atoi(argv[2]);
  int i;
  int *dev_arr;
  int arr[num];
  int NUM_THREADS;
  clock_t begin, end;
  begin = clock();
  for (i = 0; i < num; i++) {
    arr[i] = 0;
  }
  NUM_THREADS = 1 + (num / NUM_BLOCKS);
  count = 1;    // 2 is prime but the kernel skips even candidates
  biggest = 2;
  hipMalloc((void **)&dev_num, sizeof(int));
  hipMalloc((void **)&dev_arr, num * sizeof(int));
  hipMemcpy(dev_num, &num, sizeof(int), hipMemcpyHostToDevice);
  hipMemcpy(dev_arr, arr, num * sizeof(int), hipMemcpyHostToDevice);
  hipLaunchKernelGGL(( primecuda), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, dev_num, dev_arr);
  hipMemcpy(arr, dev_arr, num * sizeof(int), hipMemcpyDeviceToHost);
  for (i = 0; i < num; i++) {
    if (arr[i] > 0) {
      // printf("%d ", arr[i]);
      count++;
      if (arr[i] > biggest) biggest = arr[i];
    }
  }
  end = clock();
  printf("The largest prime number is: %d\n", biggest);
  printf("The number of prime numbers are: %d\n", count);
  printf("The time is: %f\n", (double)(end - begin) / CLOCKS_PER_SEC);
  hipFree(dev_num);
  hipFree(dev_arr);
  return 0;
}
9614725854c767e64fa4ea72e97f8ec8721e17a2.cu
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>

#define NUM_BLOCKS 1024

__global__ void primecuda(int *dev_num, int *arr) {
  int blckid = blockIdx.x; // Handle the data at the block index
  int tid = threadIdx.x;
  int gid = 1 + (*dev_num / 1024);
  int bid = (blckid * gid) + tid;  // candidate tested by this thread
  int flag, i;
  flag = 0;
  if (bid < *dev_num) {            // keep writes inside arr[0..num-1]
    if (bid % 2 != 0) {
      for (i = 3; i <= sqrtf(bid); i++) {
        if (bid % i == 0) { flag = 1; break; }
      }
      if (flag == 0) {
        if (bid == 0 || bid == 1 || bid == 2) {
          // Do nothing
        } else {
          arr[bid] = bid;
        }
      }
    }
  }
}

int main(int argc, char **argv) {
  if (argc < 3 || strcmp("-t", argv[1]) != 0) {
    printf("Error\n");
    return 0;
  }
  int *dev_num;
  int num, count, biggest;
  num = atoi(argv[2]);
  int i;
  int *dev_arr;
  int arr[num];
  int NUM_THREADS;
  clock_t begin, end;
  begin = clock();
  for (i = 0; i < num; i++) {
    arr[i] = 0;
  }
  NUM_THREADS = 1 + (num / NUM_BLOCKS);
  count = 1;    // 2 is prime but the kernel skips even candidates
  biggest = 2;
  cudaMalloc((void **)&dev_num, sizeof(int));
  cudaMalloc((void **)&dev_arr, num * sizeof(int));
  cudaMemcpy(dev_num, &num, sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_arr, arr, num * sizeof(int), cudaMemcpyHostToDevice);
  primecuda<<<NUM_BLOCKS, NUM_THREADS>>> (dev_num, dev_arr);
  cudaMemcpy(arr, dev_arr, num * sizeof(int), cudaMemcpyDeviceToHost);
  for (i = 0; i < num; i++) {
    if (arr[i] > 0) {
      // printf("%d ", arr[i]);
      count++;
      if (arr[i] > biggest) biggest = arr[i];
    }
  }
  end = clock();
  printf("The largest prime number is: %d\n", biggest);
  printf("The number of prime numbers are: %d\n", count);
  printf("The time is: %f\n", (double)(end - begin) / CLOCKS_PER_SEC);
  cudaFree(dev_num);
  cudaFree(dev_arr);
  return 0;
}
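For comparison, the same trial-division test written with the flat global index that most other kernels in this collection use, one candidate per thread; prime_flag is an illustrative name, not part of the files above:

__global__ void prime_flag(const int *dev_num, int *arr) {
  int n = blockIdx.x * blockDim.x + threadIdx.x;     // candidate tested by this thread
  if (n >= *dev_num || n < 3 || n % 2 == 0) return;  // evens and 0..2 skipped, as above
  for (int i = 3; (long long)i * i <= n; i += 2)     // odd trial divisors up to sqrt(n)
    if (n % i == 0) return;                          // composite
  arr[n] = n;                                        // record the prime
}

// launched against the same num-sized arr as above:
//   prime_flag<<<(num + 255) / 256, 256>>>(dev_num, dev_arr);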
70d64ed1d6c6762c1f36325f6d839b43da6d960e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cuUtils_hip.cuh" // wczeniej nazywao si normalizeVectorSum __global__ void reciprocal(double * v, int n){ // inverse values of elements in a vector // grid stride loop for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x){ if (v[i] != 0.0){ v[i] = 1.0 / v[i]; } } } __global__ void saxdotpy(double a, double * x, double *y, double n, double *z){ // perform following operation // z = z + a*(x.*y); // grid stride loop for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x){ z[i] += a*x[i]*y[i]; } } __global__ void elemByElem(int n, double *x, double *y, double *z){ // grid stride loop for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x){ z[i] = x[i]*y[i]; } } __global__ void absComplex(hipfftDoubleComplex * idata, double *odata, int n){ /* Instead of completely eliminating the loop when parallelizing the computation, a grid-stride loop approach is used here */ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x){ hipfftDoubleComplex c = idata[i]; double x2 = c.x*c.x; // pow2 double y2 = c.y*c.y; // pow2 odata[i] = sqrt(x2+y2); } } /*compute sqrt root of complex c Newtow's method for computing sqrt */ __device__ __inline__ hipDoubleComplex sqrtComplex(hipDoubleComplex c){ //Csub - subtract two double complex number: x - y //Cmul - multiplicate two double complex number: x*y hipDoubleComplex x = c; hipDoubleComplex real2 = make_cuDoubleComplex (2.0, 0.0); /* for(unsigned iter=0; iter<10; iter++){ x = cuCsub(x,cuCdivf(cuCsub(cuCmul(x,x), c), cuCmul(real2,x))); // }*/ //we can unroll the loop - czy na pewno?? /*1*/ x = cuCsub(x,cuCdiv(cuCsub(cuCmul(x,x), c), cuCmul(real2,x))); /*2*/ x = cuCsub(x,cuCdiv(cuCsub(cuCmul(x,x), c), cuCmul(real2,x))); /*3*/ x = cuCsub(x,cuCdiv(cuCsub(cuCmul(x,x), c), cuCmul(real2,x))); /*4*/ x = cuCsub(x,cuCdiv(cuCsub(cuCmul(x,x), c), cuCmul(real2,x))); /*5*/ x = cuCsub(x,cuCdiv(cuCsub(cuCmul(x,x), c), cuCmul(real2,x))); /*6*/ x = cuCsub(x,cuCdiv(cuCsub(cuCmul(x,x), c), cuCmul(real2,x))); /*7*/ x = cuCsub(x,cuCdiv(cuCsub(cuCmul(x,x), c), cuCmul(real2,x))); /*8*/ x = cuCsub(x,cuCdiv(cuCsub(cuCmul(x,x), c), cuCmul(real2,x))); /*9*/ x = cuCsub(x,cuCdiv(cuCsub(cuCmul(x,x), c), cuCmul(real2,x))); /*10*/ x = cuCsub(x,cuCdiv(cuCsub(cuCmul(x,x), c), cuCmul(real2,x))); /* int iter; for(iter=0; iter<10; iter++){ x = cuCsubf(x,cuCdivf(cuCsubf(cuCmulf(x,x), c), cuCmulf(real2,x))); // } */ return x; } __global__ void copyRealFromComplexCufft(hipDoubleComplex* complex, double* real, int m, int n){ /* int x = threadIdx.x; int y = blockIdx.x; int real_index = x + y*m; int cufft_height = m/2+1; int complex_index; if (x < cufft_height){ //dane trzymane kolumnowo complex_index = y*cufft_height + x; } else{ // indeks y wychodzi poza macierz skompresowan cufft x = m - x; y = n - y; //dane trzymane kolumnowo complex_index = y*cufft_height + x; } real[real_index] = complex[complex_index].x; */ int x = threadIdx.x; int y = blockIdx.x; int x2 = (m - x) % m; // indeksowanie int y2 = (n - y) % n; // indeksowanie int cut_cols = n/2+1; int out_index = x + y*m; // pytanie czy czym to si rni od threadIdx.x + blockIdx.x*blockDim.x - rni si jak wida fft2_m a blockDim.x int in_index = (x + y*cut_cols)*(x < cut_cols) + (x2 + y2*cut_cols)*(x >= cut_cols); // ale ale kolego!! dla fft2 nie tylko zmieniamy indeks ale bierzemy // sprzenie wartoci zespolonej ! 
// tylko, e tutaj to nie ma znaczenia - obliczamy modu liczby zespolonej if(in_index < cut_cols*m){ //real[out_index] = complex[in_index].x; real[out_index] = complex[in_index].x; } } __global__ void copy_real_from_cufft_1d(hipDoubleComplex* complex, double* real, int n){ int cufft_width = n/2+1; int index = threadIdx.x + blockIdx.x*blockDim.x; int cufft_index = index *(index < cufft_width) + (n-index)*(index >= cufft_width); real[index] = complex[cufft_index].x; } __global__ void copy_with_comparison(double * d_U, double * d_xk, double * d_max_X, double * d_min_X, int n){ //rec(rec<minx) = minx; rec(rec>maxx) = maxx; //xk = rec; // fctr = 1/URange; URange = Umax - Umin double max = d_max_X[0]; double min = d_min_X[0]; double range = max - min; // grid stride loop for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x){ double val = d_U[i]*range; d_xk[i] = min*(val<min) + max*(val>max) + val*((!(val<min)) && (!(val>max))); //d_xk[i] = val; } } __global__ void normalize_ifft_result(double* ifft_vector, double denominator, int n){ // grid stride loop for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x){ ifft_vector[i] /= denominator; } } __global__ void simple_copy_from_complex(hipDoubleComplex* complex, double* real, int n){ // grid stride loop for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x){ real[i] = complex[i].y; } } __global__ void generate_dct_matrix_coefficients(double *A, double *AT, double N){ int x = threadIdx.x + blockIdx.x*blockDim.x; int y = threadIdx.y + blockIdx.y*blockDim.y; double lx = 1.0 + (1.0)*(x>0); double ly = 1.0 + (1.0)*(y>0); int n = N; // row major order // A[x + y*N] = cospi((2*x+1)*y/(2*N)); // column major order AT[x + y*n] = sqrt(lx/N) * cospi((2.0*y+1.0)*x/(2.0*N)); A[x + y*n] = sqrt(ly/N) * cospi((2.0*x+1.0)*y/(2.0*N)); }
70d64ed1d6c6762c1f36325f6d839b43da6d960e.cu
#include "cuUtils.cuh" // wcześniej nazywało się normalizeVectorSum __global__ void reciprocal(double * v, int n){ // inverse values of elements in a vector // grid stride loop for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x){ if (v[i] != 0.0){ v[i] = 1.0 / v[i]; } } } __global__ void saxdotpy(double a, double * x, double *y, double n, double *z){ // perform following operation // z = z + a*(x.*y); // grid stride loop for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x){ z[i] += a*x[i]*y[i]; } } __global__ void elemByElem(int n, double *x, double *y, double *z){ // grid stride loop for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x){ z[i] = x[i]*y[i]; } } __global__ void absComplex(cufftDoubleComplex * idata, double *odata, int n){ /* Instead of completely eliminating the loop when parallelizing the computation, a grid-stride loop approach is used here */ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x){ cufftDoubleComplex c = idata[i]; double x2 = c.x*c.x; // pow2 double y2 = c.y*c.y; // pow2 odata[i] = sqrt(x2+y2); } } /*compute sqrt root of complex c Newtow's method for computing sqrt */ __device__ __inline__ cuDoubleComplex sqrtComplex(cuDoubleComplex c){ //Csub - subtract two double complex number: x - y //Cmul - multiplicate two double complex number: x*y cuDoubleComplex x = c; cuDoubleComplex real2 = make_cuDoubleComplex (2.0, 0.0); /* for(unsigned iter=0; iter<10; iter++){ x = cuCsub(x,cuCdivf(cuCsub(cuCmul(x,x), c), cuCmul(real2,x))); // }*/ //we can unroll the loop - czy na pewno?? /*1*/ x = cuCsub(x,cuCdiv(cuCsub(cuCmul(x,x), c), cuCmul(real2,x))); /*2*/ x = cuCsub(x,cuCdiv(cuCsub(cuCmul(x,x), c), cuCmul(real2,x))); /*3*/ x = cuCsub(x,cuCdiv(cuCsub(cuCmul(x,x), c), cuCmul(real2,x))); /*4*/ x = cuCsub(x,cuCdiv(cuCsub(cuCmul(x,x), c), cuCmul(real2,x))); /*5*/ x = cuCsub(x,cuCdiv(cuCsub(cuCmul(x,x), c), cuCmul(real2,x))); /*6*/ x = cuCsub(x,cuCdiv(cuCsub(cuCmul(x,x), c), cuCmul(real2,x))); /*7*/ x = cuCsub(x,cuCdiv(cuCsub(cuCmul(x,x), c), cuCmul(real2,x))); /*8*/ x = cuCsub(x,cuCdiv(cuCsub(cuCmul(x,x), c), cuCmul(real2,x))); /*9*/ x = cuCsub(x,cuCdiv(cuCsub(cuCmul(x,x), c), cuCmul(real2,x))); /*10*/ x = cuCsub(x,cuCdiv(cuCsub(cuCmul(x,x), c), cuCmul(real2,x))); /* int iter; for(iter=0; iter<10; iter++){ x = cuCsubf(x,cuCdivf(cuCsubf(cuCmulf(x,x), c), cuCmulf(real2,x))); // } */ return x; } __global__ void copyRealFromComplexCufft(cuDoubleComplex* complex, double* real, int m, int n){ /* int x = threadIdx.x; int y = blockIdx.x; int real_index = x + y*m; int cufft_height = m/2+1; int complex_index; if (x < cufft_height){ //dane trzymane kolumnowo complex_index = y*cufft_height + x; } else{ // indeks y wychodzi poza macierz skompresowaną cufft x = m - x; y = n - y; //dane trzymane kolumnowo complex_index = y*cufft_height + x; } real[real_index] = complex[complex_index].x; */ int x = threadIdx.x; int y = blockIdx.x; int x2 = (m - x) % m; // indeksowanie int y2 = (n - y) % n; // indeksowanie int cut_cols = n/2+1; int out_index = x + y*m; // pytanie czy czymś to się różni od threadIdx.x + blockIdx.x*blockDim.x - różni się jak widać fft2_m a blockDim.x int in_index = (x + y*cut_cols)*(x < cut_cols) + (x2 + y2*cut_cols)*(x >= cut_cols); // ale ale kolego!! dla fft2 nie tylko zmieniamy indeks ale bierzemy // sprzężenie wartości zespolonej ! 
// tylko, że tutaj to nie ma znaczenia - obliczamy moduł liczby zespolonej if(in_index < cut_cols*m){ //real[out_index] = complex[in_index].x; real[out_index] = complex[in_index].x; } } __global__ void copy_real_from_cufft_1d(cuDoubleComplex* complex, double* real, int n){ int cufft_width = n/2+1; int index = threadIdx.x + blockIdx.x*blockDim.x; int cufft_index = index *(index < cufft_width) + (n-index)*(index >= cufft_width); real[index] = complex[cufft_index].x; } __global__ void copy_with_comparison(double * d_U, double * d_xk, double * d_max_X, double * d_min_X, int n){ //rec(rec<minx) = minx; rec(rec>maxx) = maxx; //xk = rec; // fctr = 1/URange; URange = Umax - Umin double max = d_max_X[0]; double min = d_min_X[0]; double range = max - min; // grid stride loop for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x){ double val = d_U[i]*range; d_xk[i] = min*(val<min) + max*(val>max) + val*((!(val<min)) && (!(val>max))); //d_xk[i] = val; } } __global__ void normalize_ifft_result(double* ifft_vector, double denominator, int n){ // grid stride loop for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x){ ifft_vector[i] /= denominator; } } __global__ void simple_copy_from_complex(cuDoubleComplex* complex, double* real, int n){ // grid stride loop for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x){ real[i] = complex[i].y; } } __global__ void generate_dct_matrix_coefficients(double *A, double *AT, double N){ int x = threadIdx.x + blockIdx.x*blockDim.x; int y = threadIdx.y + blockIdx.y*blockDim.y; double lx = 1.0 + (1.0)*(x>0); double ly = 1.0 + (1.0)*(y>0); int n = N; // row major order // A[x + y*N] = cospi((2*x+1)*y/(2*N)); // column major order AT[x + y*n] = sqrt(lx/N) * cospi((2.0*y+1.0)*x/(2.0*N)); A[x + y*n] = sqrt(ly/N) * cospi((2.0*x+1.0)*y/(2.0*N)); }
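reciprocal, saxdotpy, elemByElem, absComplex and several of the copy/normalize helpers above all use the grid-stride loop their comments mention, so a fixed grid can cover any n. A stand-alone sketch of the idiom with a typical capped launch (scale_inplace and the 256/1024 sizing are illustrative):

__global__ void scale_inplace(double *v, double a, int n) {
  // each thread starts at its global index and strides by the whole grid
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    v[i] *= a;
  }
}

void scale_on_device(double *d_v, double a, int n) {
  int blocks = (n + 255) / 256;
  if (blocks > 1024) blocks = 1024;   // cap the grid; the stride loop covers the rest
  scale_inplace<<<blocks, 256>>>(d_v, a, n);
}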
7af2aa2caf210c9c728da8d1477729d13d27cfac.hip
// !!! This is a file automatically generated by hipify!!! /** @internal ** @file: quickshift.cpp ** @author: Brian Fulkerson ** @author: Andrea Vedaldi ** @brief: Quickshift command line **/ #include <math.h> #include <string.h> #include <assert.h> #include <stdio.h> #include "hip/hip_runtime.h" #include "quickshift_common.h" #include "my_functions.h" texture<float, 3, hipReadModeElementType> texI; texture<float, 2, hipReadModeElementType> texE; #define USE_TEX_E 1 #define USE_TEX_I 1 #if USE_TEX_I #define TEXI(x,y,c) tex3D(texI, x + 0.5f, y + 0.5f, c + 0.5f) #else #define TEXI(x,y,c) I [ (x) + N1*(y) + N2*N1*k ] #endif #if USE_TEX_E #define TEXE(x,y) tex2D(texE, x + 0.5f, y + 0.5f) #else #define TEXE(x,y) E [ (x) + N1* (y)] #endif #define distance(I,N1,N2,K,v,j1,j2,dist) \ { \ dist = 0 ; \ int d1 = j1 - i1 ; \ int d2 = j2 - i2 ; \ int k ; \ dist += d1*d1 + d2*d2 ; \ for (k = 0 ; k < K ; ++k) { \ float d = v[k] - TEXI(j1,j2,k); \ dist += d*d ; \ } \ } extern "C" int iDivUp(int num, int denom) { return (num % denom != 0) ? (num / denom + 1) : (num / denom); } extern "C" __global__ void find_neighbors_gpu(const float * I, int N1, int N2, int K, float * E, float tau2, int tR, float * map, float * gaps) { int i1 = blockIdx.y * blockDim.y + threadIdx.y; int i2 = blockIdx.x * blockDim.x + threadIdx.x; if (i1 >= N1 || i2 >= N2) return; // out of bounds int j1,j2; /* Quickshift assigns each i to the closest j which has an increase in the * density (E). If there is no j s.t. Ej > Ei, then gaps_i == inf (a root * node in one of the trees of merges). */ float E0 = TEXE(i1, i2) ; float d_best = INF ; float j1_best = i1 ; float j2_best = i2 ; int j1min = VL_MAX(i1 - tR, 0 ) ; int j1max = VL_MIN(i1 + tR, N1-1) ; int j2min = VL_MAX(i2 - tR, 0 ) ; int j2max = VL_MIN(i2 + tR, N2-1) ; /* Cache the center value in registers */ float v[3]; for (int k = 0 ; k < K ; ++k) { v[k] = TEXI(i1,i2,k); } for (j2 = j2min ; j2 <= j2max ; ++ j2) { for (j1 = j1min ; j1 <= j1max ; ++ j1) { if (TEXE(j1,j2) > E0) { float Dij; distance(I,N1,N2,K, v, j1,j2,Dij) ; if (Dij <= tau2 && Dij < d_best) { d_best = Dij ; j1_best = j1 ; j2_best = j2 ; } } } } /* map is the index of the best pair */ /* gaps_i is the minimal distance, inf implies no Ej > Ei within * distance tau from the point */ map [i1 + N1 * i2] = j1_best + N1 * j2_best ; /* + 1 ; */ if (map[i1 + N1 * i2] != i1 + N1 * i2) gaps[i1 + N1 * i2] = sqrt(d_best) ; else gaps[i1 + N1 * i2] = d_best; /* inf */ } extern "C" __global__ void compute_E_gpu(const float * I, int N1, int N2, int K, int R, float sigma, float * E, float * n, float * M) { int i1 = blockIdx.y * blockDim.y + threadIdx.y; int i2 = blockIdx.x * blockDim.x + threadIdx.x; if (i1 >= N1 || i2 >= N2) return; // out of bounds int j1,j2; /* ----------------------------------------------------------------- * E = - [oN'*F]', M * -------------------------------------------------------------- */ /* D_ij = d(x_i,x_j) E_ij = exp(- .5 * D_ij / sigma^2) ; F_ij = - E_ij E_i = sum_j E_ij M_di = sum_j X_j F_ij E is the parzen window estimate of the density 0 = dissimilar to everything, windowsize = identical */ int j1min = VL_MAX(i1 - R, 0 ) ; int j1max = VL_MIN(i1 + R, N1-1) ; int j2min = VL_MAX(i2 - R, 0 ) ; int j2max = VL_MIN(i2 + R, N2-1) ; float Ei = 0; /* Cache the center value in registers */ float v[3]; for (int k = 0 ; k < K ; ++k) { v[k] = TEXI(i1,i2,k); } /* For each pixel in the window compute the distance between it and the * source pixel */ for (j2 = j2min ; j2 <= j2max ; ++ j2) { for (j1 = j1min ; j1 <= j1max ; 
++ j1) { float Dij; distance(I, N1, N2, K,v ,j1, j2, Dij) ; /* Make distance a similarity */ float Fij = - exp(- Dij / (2*sigma*sigma)) ; /* E is E_i above */ Ei += -Fij; } /* j1 */ } /* j2 */ /* Normalize */ E [i1 + N1 * i2] = Ei / ((j1max-j1min)*(j2max-j2min)); } extern "C" void quickshift_gpu(image_t im, float sigma, float tau, float * map, float * gaps, float * E) { #if USE_TEX_I hipArray * cu_array_I; // Allocate array hipChannelFormatDesc descriptionI = hipCreateChannelDesc<float>(); hipExtent const ext = {im.N1, im.N2, im.K}; hipMalloc3DArray(&cu_array_I, &descriptionI, ext); hipMemcpy3DParms copyParams = {0}; copyParams.extent = make_hipExtent(im.N1, im.N2, im.K); copyParams.kind = hipMemcpyHostToDevice; copyParams.dstArray = cu_array_I; // The pitched pointer is really tricky to get right. We give the // pitch of a row, then the number of elements in a row, then the // height, and we omit the 3rd dimension. copyParams.srcPtr = make_hipPitchedPtr( (void*)&im.I[0], ext.width*sizeof(float), ext.width, ext.height); hipMemcpy3D(&copyParams); hipBindTextureToArray(texI, cu_array_I, descriptionI); texI.normalized = false; texI.filterMode = hipFilterModePoint; #endif float *map_d, *E_d, *gaps_d, *I; int verb = 0 ; float tau2; int K; int N1,N2, R, tR; N1 = im.N1; N2 = im.N2; K = im.K; //d = 2 + K ; /* Total dimensions include spatial component (x,y) */ tau2 = tau*tau; unsigned int size = im.N1*im.N2 * sizeof(float); hipMalloc( (void**) &I, size*im.K); hipMalloc( (void**) &map_d, size); hipMalloc( (void**) &gaps_d, size); hipMalloc( (void**) &E_d, size); hipMemcpy( I, im.I, size*im.K, hipMemcpyHostToDevice); hipMemset( E_d, 0, size); R = (int) ceil (3 * sigma) ; tR = (int) ceil (tau) ; if (verb) { printf("quickshiftGPU: [N1,N2,K]: [%d,%d,%d]\n", N1,N2,K) ; printf("quickshiftGPU: type: quick\n"); printf("quickshiftGPU: sigma: %g\n", sigma) ; /* R is ceil(3 * sigma) and determines the window size to accumulate * similarity */ printf("quickshiftGPU: R: %d\n", R) ; printf("quickshiftGPU: tau: %g\n", tau) ; printf("quickshiftGPU: tR: %d\n", tR) ; } dim3 dimBlock(32,4,1); dim3 dimGrid(iDivUp(N2, dimBlock.x), iDivUp(N1, dimBlock.y), 1); hipDeviceSynchronize(); { Timer stopwatch("Compute E"); compute_E_gpu << <dimGrid, dimBlock >> > (I, N1, N2, K, R, sigma, E_d, 0, 0); hipDeviceSynchronize(); } hipDeviceSynchronize(); hipMemcpy(E, E_d, size, hipMemcpyDeviceToHost); /* Texture map E */ #if USE_TEX_E /*printf("quickshiftGPU: using texture for E\n");*/ hipChannelFormatDesc descriptionE = hipCreateChannelDesc<float>(); hipArray * cu_array_E; hipMallocArray(&cu_array_E, &descriptionE, im.N1, im.N2); hipMemcpyToArray(cu_array_E, 0, 0, E, sizeof(float)*im.N1*im.N2, hipMemcpyHostToDevice); texE.normalized = false; texE.filterMode = hipFilterModePoint; hipBindTextureToArray(texE, cu_array_E, descriptionE); hipDeviceSynchronize(); #endif /* ----------------------------------------------------------------- * Find best neighbors * -------------------------------------------------------------- */ hipDeviceSynchronize(); { Timer stopwatch("find neighbours"); find_neighbors_gpu << <dimGrid, dimBlock >> > (I, N1, N2, K, E_d, tau2, tR, map_d, gaps_d); hipDeviceSynchronize(); } hipDeviceSynchronize(); hipMemcpy(map, map_d, size, hipMemcpyDeviceToHost); hipMemcpy(gaps, gaps_d, size, hipMemcpyDeviceToHost); hipFree(I); hipFree(map_d); hipFree(gaps_d); hipFree(E_d); hipUnbindTexture(texI); hipFreeArray(cu_array_I); hipUnbindTexture(texE); hipFreeArray(cu_array_E); }
7af2aa2caf210c9c728da8d1477729d13d27cfac.cu
/** @internal ** @file: quickshift.cpp ** @author: Brian Fulkerson ** @author: Andrea Vedaldi ** @brief: Quickshift command line **/ #include <math.h> #include <string.h> #include <assert.h> #include <stdio.h> #include "cuda_runtime.h" #include "quickshift_common.h" #include "my_functions.h" texture<float, 3, cudaReadModeElementType> texI; texture<float, 2, cudaReadModeElementType> texE; #define USE_TEX_E 1 #define USE_TEX_I 1 #if USE_TEX_I #define TEXI(x,y,c) tex3D(texI, x + 0.5f, y + 0.5f, c + 0.5f) #else #define TEXI(x,y,c) I [ (x) + N1*(y) + N2*N1*k ] #endif #if USE_TEX_E #define TEXE(x,y) tex2D(texE, x + 0.5f, y + 0.5f) #else #define TEXE(x,y) E [ (x) + N1* (y)] #endif #define distance(I,N1,N2,K,v,j1,j2,dist) \ { \ dist = 0 ; \ int d1 = j1 - i1 ; \ int d2 = j2 - i2 ; \ int k ; \ dist += d1*d1 + d2*d2 ; \ for (k = 0 ; k < K ; ++k) { \ float d = v[k] - TEXI(j1,j2,k); \ dist += d*d ; \ } \ } extern "C" int iDivUp(int num, int denom) { return (num % denom != 0) ? (num / denom + 1) : (num / denom); } extern "C" __global__ void find_neighbors_gpu(const float * I, int N1, int N2, int K, float * E, float tau2, int tR, float * map, float * gaps) { int i1 = blockIdx.y * blockDim.y + threadIdx.y; int i2 = blockIdx.x * blockDim.x + threadIdx.x; if (i1 >= N1 || i2 >= N2) return; // out of bounds int j1,j2; /* Quickshift assigns each i to the closest j which has an increase in the * density (E). If there is no j s.t. Ej > Ei, then gaps_i == inf (a root * node in one of the trees of merges). */ float E0 = TEXE(i1, i2) ; float d_best = INF ; float j1_best = i1 ; float j2_best = i2 ; int j1min = VL_MAX(i1 - tR, 0 ) ; int j1max = VL_MIN(i1 + tR, N1-1) ; int j2min = VL_MAX(i2 - tR, 0 ) ; int j2max = VL_MIN(i2 + tR, N2-1) ; /* Cache the center value in registers */ float v[3]; for (int k = 0 ; k < K ; ++k) { v[k] = TEXI(i1,i2,k); } for (j2 = j2min ; j2 <= j2max ; ++ j2) { for (j1 = j1min ; j1 <= j1max ; ++ j1) { if (TEXE(j1,j2) > E0) { float Dij; distance(I,N1,N2,K, v, j1,j2,Dij) ; if (Dij <= tau2 && Dij < d_best) { d_best = Dij ; j1_best = j1 ; j2_best = j2 ; } } } } /* map is the index of the best pair */ /* gaps_i is the minimal distance, inf implies no Ej > Ei within * distance tau from the point */ map [i1 + N1 * i2] = j1_best + N1 * j2_best ; /* + 1 ; */ if (map[i1 + N1 * i2] != i1 + N1 * i2) gaps[i1 + N1 * i2] = sqrt(d_best) ; else gaps[i1 + N1 * i2] = d_best; /* inf */ } extern "C" __global__ void compute_E_gpu(const float * I, int N1, int N2, int K, int R, float sigma, float * E, float * n, float * M) { int i1 = blockIdx.y * blockDim.y + threadIdx.y; int i2 = blockIdx.x * blockDim.x + threadIdx.x; if (i1 >= N1 || i2 >= N2) return; // out of bounds int j1,j2; /* ----------------------------------------------------------------- * E = - [oN'*F]', M * -------------------------------------------------------------- */ /* D_ij = d(x_i,x_j) E_ij = exp(- .5 * D_ij / sigma^2) ; F_ij = - E_ij E_i = sum_j E_ij M_di = sum_j X_j F_ij E is the parzen window estimate of the density 0 = dissimilar to everything, windowsize = identical */ int j1min = VL_MAX(i1 - R, 0 ) ; int j1max = VL_MIN(i1 + R, N1-1) ; int j2min = VL_MAX(i2 - R, 0 ) ; int j2max = VL_MIN(i2 + R, N2-1) ; float Ei = 0; /* Cache the center value in registers */ float v[3]; for (int k = 0 ; k < K ; ++k) { v[k] = TEXI(i1,i2,k); } /* For each pixel in the window compute the distance between it and the * source pixel */ for (j2 = j2min ; j2 <= j2max ; ++ j2) { for (j1 = j1min ; j1 <= j1max ; ++ j1) { float Dij; distance(I, N1, N2, K,v ,j1, j2, Dij) ; 
/* Make distance a similarity */ float Fij = - exp(- Dij / (2*sigma*sigma)) ; /* E is E_i above */ Ei += -Fij; } /* j1 */ } /* j2 */ /* Normalize */ E [i1 + N1 * i2] = Ei / ((j1max-j1min)*(j2max-j2min)); } extern "C" void quickshift_gpu(image_t im, float sigma, float tau, float * map, float * gaps, float * E) { #if USE_TEX_I cudaArray * cu_array_I; // Allocate array cudaChannelFormatDesc descriptionI = cudaCreateChannelDesc<float>(); cudaExtent const ext = {im.N1, im.N2, im.K}; cudaMalloc3DArray(&cu_array_I, &descriptionI, ext); cudaMemcpy3DParms copyParams = {0}; copyParams.extent = make_cudaExtent(im.N1, im.N2, im.K); copyParams.kind = cudaMemcpyHostToDevice; copyParams.dstArray = cu_array_I; // The pitched pointer is really tricky to get right. We give the // pitch of a row, then the number of elements in a row, then the // height, and we omit the 3rd dimension. copyParams.srcPtr = make_cudaPitchedPtr( (void*)&im.I[0], ext.width*sizeof(float), ext.width, ext.height); cudaMemcpy3D(&copyParams); cudaBindTextureToArray(texI, cu_array_I, descriptionI); texI.normalized = false; texI.filterMode = cudaFilterModePoint; #endif float *map_d, *E_d, *gaps_d, *I; int verb = 0 ; float tau2; int K; int N1,N2, R, tR; N1 = im.N1; N2 = im.N2; K = im.K; //d = 2 + K ; /* Total dimensions include spatial component (x,y) */ tau2 = tau*tau; unsigned int size = im.N1*im.N2 * sizeof(float); cudaMalloc( (void**) &I, size*im.K); cudaMalloc( (void**) &map_d, size); cudaMalloc( (void**) &gaps_d, size); cudaMalloc( (void**) &E_d, size); cudaMemcpy( I, im.I, size*im.K, cudaMemcpyHostToDevice); cudaMemset( E_d, 0, size); R = (int) ceil (3 * sigma) ; tR = (int) ceil (tau) ; if (verb) { printf("quickshiftGPU: [N1,N2,K]: [%d,%d,%d]\n", N1,N2,K) ; printf("quickshiftGPU: type: quick\n"); printf("quickshiftGPU: sigma: %g\n", sigma) ; /* R is ceil(3 * sigma) and determines the window size to accumulate * similarity */ printf("quickshiftGPU: R: %d\n", R) ; printf("quickshiftGPU: tau: %g\n", tau) ; printf("quickshiftGPU: tR: %d\n", tR) ; } dim3 dimBlock(32,4,1); dim3 dimGrid(iDivUp(N2, dimBlock.x), iDivUp(N1, dimBlock.y), 1); cudaDeviceSynchronize(); { Timer stopwatch("Compute E"); compute_E_gpu << <dimGrid, dimBlock >> > (I, N1, N2, K, R, sigma, E_d, 0, 0); cudaDeviceSynchronize(); } cudaThreadSynchronize(); cudaMemcpy(E, E_d, size, cudaMemcpyDeviceToHost); /* Texture map E */ #if USE_TEX_E /*printf("quickshiftGPU: using texture for E\n");*/ cudaChannelFormatDesc descriptionE = cudaCreateChannelDesc<float>(); cudaArray * cu_array_E; cudaMallocArray(&cu_array_E, &descriptionE, im.N1, im.N2); cudaMemcpyToArray(cu_array_E, 0, 0, E, sizeof(float)*im.N1*im.N2, cudaMemcpyHostToDevice); texE.normalized = false; texE.filterMode = cudaFilterModePoint; cudaBindTextureToArray(texE, cu_array_E, descriptionE); cudaThreadSynchronize(); #endif /* ----------------------------------------------------------------- * Find best neighbors * -------------------------------------------------------------- */ cudaDeviceSynchronize(); { Timer stopwatch("find neighbours"); find_neighbors_gpu << <dimGrid, dimBlock >> > (I, N1, N2, K, E_d, tau2, tR, map_d, gaps_d); cudaDeviceSynchronize(); } cudaThreadSynchronize(); cudaMemcpy(map, map_d, size, cudaMemcpyDeviceToHost); cudaMemcpy(gaps, gaps_d, size, cudaMemcpyDeviceToHost); cudaFree(I); cudaFree(map_d); cudaFree(gaps_d); cudaFree(E_d); cudaUnbindTexture(texI); cudaFreeArray(cu_array_I); cudaUnbindTexture(texE); cudaFreeArray(cu_array_E); }
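For reference, the neighbor-linking rule that find_neighbors_gpu implements can be written compactly on the host. The sketch below is not part of the source files above; it assumes a single-channel image (K == 1) stored as I[x + N1*y], matching the non-texture TEXI fallback, and uses FLT_MAX where the kernel uses INF.

#include <algorithm>
#include <cmath>
#include <cfloat>

/* Host sketch of the quickshift linking rule: each pixel i is attached to the closest
 * pixel j within radius tR whose density E[j] exceeds E[i], using the squared
 * spatial + feature distance; pixels with no such j stay their own root. */
void find_neighbors_cpu(const float *I, const float *E, float *map, float *gaps,
                        int N1, int N2, float tau, int tR)
{
    const float tau2 = tau * tau;
    for (int i2 = 0; i2 < N2; ++i2)
        for (int i1 = 0; i1 < N1; ++i1) {
            const float E0 = E[i1 + N1 * i2];
            float d_best = FLT_MAX;
            int j_best = i1 + N1 * i2;  /* default: the point is its own root */
            const int j1min = std::max(i1 - tR, 0), j1max = std::min(i1 + tR, N1 - 1);
            const int j2min = std::max(i2 - tR, 0), j2max = std::min(i2 + tR, N2 - 1);
            for (int j2 = j2min; j2 <= j2max; ++j2)
                for (int j1 = j1min; j1 <= j1max; ++j1) {
                    if (E[j1 + N1 * j2] <= E0) continue;  /* only move uphill in density */
                    const float d1 = (float)(j1 - i1), d2 = (float)(j2 - i2);
                    const float dI = I[j1 + N1 * j2] - I[i1 + N1 * i2];
                    const float dist = d1 * d1 + d2 * d2 + dI * dI;
                    if (dist <= tau2 && dist < d_best) { d_best = dist; j_best = j1 + N1 * j2; }
                }
            map[i1 + N1 * i2]  = (float)j_best;
            gaps[i1 + N1 * i2] = (j_best != i1 + N1 * i2) ? std::sqrt(d_best) : d_best;
        }
}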
4f14a84bcf0e1febe26b6741a49ea054f8df7040.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// boundary.cu
//
// CUDA kernels for applying periodic boundary conditions.
//

__global__ void periodic_bc_x_kernel(real *f)
{
	int yi = threadIdx.x + blockDim.x * blockIdx.x + NGHOST;
	int zi = threadIdx.y + blockDim.y * blockIdx.y + NGHOST;

	f[vfidx(0, yi, zi)] = f[vfidx(NX + NGHOST - 4, yi, zi)];
	f[vfidx(1, yi, zi)] = f[vfidx(NX + NGHOST - 3, yi, zi)];
	f[vfidx(2, yi, zi)] = f[vfidx(NX + NGHOST - 2, yi, zi)];
	f[vfidx(NX + NGHOST, yi, zi)] = f[vfidx(NGHOST + 1, yi, zi)];
	f[vfidx(NX + NGHOST + 1, yi, zi)] = f[vfidx(NGHOST + 2, yi, zi)];
	f[vfidx(NX + NGHOST + 2, yi, zi)] = f[vfidx(NGHOST + 3, yi, zi)];
}

__global__ void periodic_bc_y_kernel(real *f)
{
	int xi = threadIdx.x + blockDim.x * blockIdx.x + NGHOST;
	int zi = threadIdx.y + blockDim.y * blockIdx.y + NGHOST;

	f[vfidx(xi, 0, zi)] = f[vfidx(xi, NY + NGHOST - 4, zi)];
	f[vfidx(xi, 1, zi)] = f[vfidx(xi, NY + NGHOST - 3, zi)];
	f[vfidx(xi, 2, zi)] = f[vfidx(xi, NY + NGHOST - 2, zi)];
	f[vfidx(xi, NY + NGHOST, zi)] = f[vfidx(xi, NGHOST + 1, zi)];
	f[vfidx(xi, NY + NGHOST + 1, zi)] = f[vfidx(xi, NGHOST + 2, zi)];
	f[vfidx(xi, NY + NGHOST + 2, zi)] = f[vfidx(xi, NGHOST + 3, zi)];
}

__global__ void periodic_bc_z_kernel(real *f)
{
	int xi = threadIdx.x + blockDim.x * blockIdx.x + NGHOST;
	int yi = threadIdx.y + blockDim.y * blockIdx.y + NGHOST;

	f[vfidx(xi, yi, 0)] = f[vfidx(xi, yi, NZ + NGHOST - 4)];
	f[vfidx(xi, yi, 1)] = f[vfidx(xi, yi, NZ + NGHOST - 3)];
	f[vfidx(xi, yi, 2)] = f[vfidx(xi, yi, NZ + NGHOST - 2)];
	f[vfidx(xi, yi, NZ + NGHOST)] = f[vfidx(xi, yi, NGHOST + 1)];
	f[vfidx(xi, yi, NZ + NGHOST + 1)] = f[vfidx(xi, yi, NGHOST + 2)];
	f[vfidx(xi, yi, NZ + NGHOST + 2)] = f[vfidx(xi, yi, NGHOST + 3)];
}

void apply_periodic_bc(vf3dgpu &vf)
{
	dim3 nblocks;
	dim3 nthreads;

	for (int vi = 0; vi < vf.varcount(); vi++) {
		vf3dgpu vfi = vf.subfield(vi, 1);
		nblocks = dim3(1, NZ);
		nthreads = dim3(NY);
		hipLaunchKernelGGL(( periodic_bc_x_kernel), dim3(nblocks), dim3(nthreads), 0, 0, vfi.mem());
		nblocks = dim3(1, NZ);
		nthreads = dim3(NX);
		hipLaunchKernelGGL(( periodic_bc_y_kernel), dim3(nblocks), dim3(nthreads), 0, 0, vfi.mem());
		nblocks = dim3(1, NY);
		nthreads = dim3(NX);
		hipLaunchKernelGGL(( periodic_bc_z_kernel), dim3(nblocks), dim3(nthreads), 0, 0, vfi.mem());
	}
}
4f14a84bcf0e1febe26b6741a49ea054f8df7040.cu
// boundary.cu
//
// CUDA kernels for applying periodic boundary conditions.
//

__global__ void periodic_bc_x_kernel(real *f)
{
	int yi = threadIdx.x + blockDim.x * blockIdx.x + NGHOST;
	int zi = threadIdx.y + blockDim.y * blockIdx.y + NGHOST;

	f[vfidx(0, yi, zi)] = f[vfidx(NX + NGHOST - 4, yi, zi)];
	f[vfidx(1, yi, zi)] = f[vfidx(NX + NGHOST - 3, yi, zi)];
	f[vfidx(2, yi, zi)] = f[vfidx(NX + NGHOST - 2, yi, zi)];
	f[vfidx(NX + NGHOST, yi, zi)] = f[vfidx(NGHOST + 1, yi, zi)];
	f[vfidx(NX + NGHOST + 1, yi, zi)] = f[vfidx(NGHOST + 2, yi, zi)];
	f[vfidx(NX + NGHOST + 2, yi, zi)] = f[vfidx(NGHOST + 3, yi, zi)];
}

__global__ void periodic_bc_y_kernel(real *f)
{
	int xi = threadIdx.x + blockDim.x * blockIdx.x + NGHOST;
	int zi = threadIdx.y + blockDim.y * blockIdx.y + NGHOST;

	f[vfidx(xi, 0, zi)] = f[vfidx(xi, NY + NGHOST - 4, zi)];
	f[vfidx(xi, 1, zi)] = f[vfidx(xi, NY + NGHOST - 3, zi)];
	f[vfidx(xi, 2, zi)] = f[vfidx(xi, NY + NGHOST - 2, zi)];
	f[vfidx(xi, NY + NGHOST, zi)] = f[vfidx(xi, NGHOST + 1, zi)];
	f[vfidx(xi, NY + NGHOST + 1, zi)] = f[vfidx(xi, NGHOST + 2, zi)];
	f[vfidx(xi, NY + NGHOST + 2, zi)] = f[vfidx(xi, NGHOST + 3, zi)];
}

__global__ void periodic_bc_z_kernel(real *f)
{
	int xi = threadIdx.x + blockDim.x * blockIdx.x + NGHOST;
	int yi = threadIdx.y + blockDim.y * blockIdx.y + NGHOST;

	f[vfidx(xi, yi, 0)] = f[vfidx(xi, yi, NZ + NGHOST - 4)];
	f[vfidx(xi, yi, 1)] = f[vfidx(xi, yi, NZ + NGHOST - 3)];
	f[vfidx(xi, yi, 2)] = f[vfidx(xi, yi, NZ + NGHOST - 2)];
	f[vfidx(xi, yi, NZ + NGHOST)] = f[vfidx(xi, yi, NGHOST + 1)];
	f[vfidx(xi, yi, NZ + NGHOST + 1)] = f[vfidx(xi, yi, NGHOST + 2)];
	f[vfidx(xi, yi, NZ + NGHOST + 2)] = f[vfidx(xi, yi, NGHOST + 3)];
}

void apply_periodic_bc(vf3dgpu &vf)
{
	dim3 nblocks;
	dim3 nthreads;

	for (int vi = 0; vi < vf.varcount(); vi++) {
		vf3dgpu vfi = vf.subfield(vi, 1);
		nblocks = dim3(1, NZ);
		nthreads = dim3(NY);
		periodic_bc_x_kernel<<<nblocks, nthreads>>>(vfi.mem());
		nblocks = dim3(1, NZ);
		nthreads = dim3(NX);
		periodic_bc_y_kernel<<<nblocks, nthreads>>>(vfi.mem());
		nblocks = dim3(1, NY);
		nthreads = dim3(NX);
		periodic_bc_z_kernel<<<nblocks, nthreads>>>(vfi.mem());
	}
}
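These kernels rely on vfidx, NX/NY/NZ, and NGHOST being defined elsewhere in the project. The helper below is only an illustration of the layout those names appear to imply (x-fastest storage with NGHOST ghost layers per face); it is an assumption, not the project's actual definition.

// Hypothetical index helper -- an assumption about the layout, not code from the project.
// Each dimension spans N + 2*NGHOST cells, stored x-fastest, so ghost cells occupy
// indices [0, NGHOST) and [N + NGHOST, N + 2*NGHOST) along every axis.
#define NGHOST 3
#define NX 64
#define NY 64
#define NZ 64
typedef float real;

__host__ __device__ inline int vfidx(int x, int y, int z)
{
	const int sx = NX + 2 * NGHOST;
	const int sy = NY + 2 * NGHOST;
	return x + sx * (y + sy * z);
}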
29ac929eefb419a6e83e82ff355ba608f4bd1ce6.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include "kernels_hip.cuh" __device__ __forceinline__ int get_polarity(int id) { // If id is an even number, 1 will be returned. // If id is an odd number, -1 will be returned. return 1 - (2 * (id & 1)); } __device__ __forceinline__ bool automata_action(unsigned int automata_state, unsigned int max_state) { // Returns true if the automata is in an include state, while false if the automata is in an exclude state return (automata_state > (static_cast<unsigned int>(max_state / 2))); } __global__ void validate_clauses(unsigned int* model, bool* clauses_output, unsigned int* x_data, unsigned int sample_id, unsigned int clauses_amount, unsigned int features_amount, unsigned int automatas_amount, unsigned int class_id, unsigned int max_state, bool prediction) { // Declare some shared variables // shared[0] = The output of the clause // shared[1] = Boolean flag if all is in exclude mode __shared__ bool shared[2]; // Calculate the clause id to work on const int thread_id = threadIdx.x; // Initialize some "private variables" unsigned int sample_value; unsigned int automata_value; bool action; int automata_polarity; for (unsigned int clause_id = blockIdx.x; clause_id < clauses_amount; clause_id += gridDim.x) { // Set the clause output to be true if(thread_id == 0) { shared[0] = true; shared[1] = true; } // Wait until all threads are ready __syncthreads(); // Loop over each of the automata and "stride" through for(unsigned int automata_id = thread_id; automata_id < automatas_amount; automata_id += blockDim.x) { // Check if any of the other threads have evaluated the clause to false. This way we could skip checking. if(shared[0] == false) { break; } // Get the automatas value automata_value = model[(class_id * clauses_amount * automatas_amount) + (clause_id * automatas_amount) + automata_id]; // Get the action of the automata action = automata_action(automata_value, max_state); // Check if the automata is in an include state, if so, investigate further... if(action == true) { // Calculate the polarity of the automata automata_polarity = get_polarity(automata_id); // Get the sample's value sample_value = x_data[(sample_id * features_amount) + (automata_id / 2)]; // Flip the flag that says that all automatas are in exclude mode shared[1] = false; // Since the automata is in an include state, lets check if the DOES NOT match the desired value if(((automata_polarity == 1) && (sample_value != 1)) || ((automata_polarity == -1) && (sample_value != 0))){ // A condition has been met that would falsify the entire clause. 
Therefore, evaluate the entire clause to false shared[0] = false; break; } } } // Wait until all threads to evaluate until finished __syncthreads(); // Check if we are thread id 0 if(thread_id == 0) { // Check if the clause was, when finished evaluating, evaluated to false if(shared[0] == false || (prediction == true && shared[1] == true)) { clauses_output[clause_id] = false; } // Assuming it was not false, then it is true else { clauses_output[clause_id] = true; } } } } __global__ void reduce_votes(int* scores, unsigned int scores_index, bool* clauses_output, unsigned int clauses_amount, unsigned int threshold) { // Tempromary shared results extern __shared__ int results[]; // Declare some private variables int thread_result = 0; for(unsigned int clause_id = threadIdx.x; clause_id < clauses_amount; clause_id += blockDim.x) { // Add the score to this threads tempromary score thread_result += (get_polarity(clause_id) * clauses_output[clause_id]); } // Move the threads result into shared memory results[threadIdx.x] = thread_result; // Wait until all the threads have completed the summation of all clause outputs __syncthreads(); // Start to reduce the threads and score for(unsigned int offset = blockDim.x / 2; offset > 0; offset /= 2) { // Check if this thread is doing some reduction if(threadIdx.x < offset) { results[threadIdx.x] += results[threadIdx.x + offset]; } __syncthreads(); } // Thread 0 will store the result in the scores list if(threadIdx.x == 0) { if(threshold != 0) { if(results[threadIdx.x] > threshold) { results[threadIdx.x] = static_cast<int>(threshold); } else if(results[threadIdx.x] < -threshold) { results[threadIdx.x] = -static_cast<int>(threshold); } } scores[scores_index] = results[threadIdx.x]; } } __global__ void calculate_feedback(unsigned int* clauses_feedback, int* scores, unsigned int threshold, float s, unsigned int class_id, bool correct_class, unsigned int clauses_amount, hiprandState_t* random_states) { // Calculate the position of the thread unsigned int global_thread_id = (blockIdx.x * blockDim.x) + threadIdx.x; // Declare some private variables hiprandState_t rnd_state = random_states[global_thread_id]; float clause_polarity; int class_score = scores[0]; // Loop all clauses for (unsigned int clause_id = global_thread_id; clause_id < clauses_amount; clause_id += gridDim.x) { // Determine the polarity of the clause clause_polarity = static_cast<float>(get_polarity(clause_id)); // Check if we are on the correct class if (correct_class == true) { // Check if we are to skip feedback for this clause if(hiprand_uniform(&rnd_state) > (((1.0f * threshold) - class_score) / (2.0f * threshold))) { // No feedback will be given to this clause clauses_feedback[clause_id] = 0; } else { // A small performant operation that calculates that will return the following // Clauses for = Type 1 feedback // Clauses against = Type 2 feedback clauses_feedback[clause_id] = 1 + static_cast<int>(signbit(clause_polarity)); } } else { // Check if we are to skip feedback for this clause if(hiprand_uniform(&rnd_state) > (((1.0f * threshold) + class_score) / (2.0f * threshold))) { // No feedback will be given to this clause clauses_feedback[clause_id] = 0; } else { // A small performant operation that calculates that will return the following // Clauses for = Type 2 feedback // Clauses against = Type 1 feedback clauses_feedback[clause_id] = 2 - static_cast<int>(signbit(clause_polarity)); } } } // Copy the random state back to global memory random_states[global_thread_id] = rnd_state; } 
__global__ void give_feedback_to_clauses(unsigned int* model, unsigned int* clauses_feedback, unsigned int* x_data, bool* clauses_output, unsigned int class_id, unsigned int sample_id, const bool correct_class, unsigned int clauses_amount, unsigned int features_amount, unsigned int automatas_amount, unsigned int max_state, unsigned int threshold, float s, hiprandState_t* random_states) { // Calculate and declare some "private variables" // Get the clause id, based on the block id in the grid unsigned int global_thread_id = (blockIdx.x * blockDim.x) + threadIdx.x; // Used to calculate the absolute index of an automata unsigned int automata_model_index; unsigned int automata_temp; // Used to tempromary store whether an automata is in include or exclude state bool action; // Used to tempromary store the polarity of an automata int automata_polarity; // Used to tempromary store the feature id of which feature an automata is associated with unsigned int sample_value; // Get the random state from the random values matrix (used to generate "random" numbers) hiprandState_t rnd_state = random_states[global_thread_id]; // In case there are more clauses than blocks, we need to loop them for(unsigned int clause_id = blockIdx.x; clause_id < clauses_amount; clause_id += gridDim.x) { // Check if we are to do type 1 feedback if(clauses_feedback[clause_id] == 1){ // If the clause output was evaluated to false if(clauses_output[clause_id] == 0) { // Loop and potentially punish all automatas for(unsigned int automata_index = threadIdx.x; automata_index < automatas_amount; automata_index += blockDim.x) { // Calculate the position of the current automata automata_model_index = (class_id * clauses_amount * automatas_amount) + (clause_id * automatas_amount) + automata_index; // Get the value for the automata automata_temp = model[automata_model_index]; if((automata_temp > 1) && (hiprand_uniform(&rnd_state) <= (1.0 / s))) { model[automata_model_index] = automata_temp - 1; } } } else { // Loop over each of the automatas for(unsigned int automata_index = threadIdx.x; automata_index < automatas_amount; automata_index += blockDim.x){ // Calculate the position of the current automata automata_model_index = (class_id * clauses_amount * automatas_amount) + (clause_id * automatas_amount) + automata_index; // Get the value of the sample for the current automata sample_value = x_data[(sample_id * features_amount) + static_cast<unsigned int>(automata_index / 2)]; // Calculate the polarity of the automata automata_polarity = get_polarity(automata_index); // Get the value for the automata automata_temp = model[automata_model_index]; // Check if the sample was False if(sample_value == 0) { // Check if the automata is an against automata if(automata_polarity == -1){ // Increment state if((hiprand_uniform(&rnd_state) <= ((s - 1.0) / s)) && (automata_temp < max_state)) { model[automata_model_index] = automata_temp + 1; } } // Assumes that the automata is a for automata (since it is not an against automata) else { // Decrement state if((hiprand_uniform(&rnd_state) <= (1.0 / s)) && automata_temp > 1) { model[automata_model_index] = automata_temp - 1; } } } // Assumes that the sample is 1 (since it was not 0) else { // Check if the automata is a for automata if(automata_polarity == 1) { // Decrement the state if((hiprand_uniform(&rnd_state) <= ((s - 1.0) / s)) && (automata_temp < max_state)) { model[automata_model_index] = automata_temp + 1; } } // Assumes that the automata is an against automata (since it is not an for automata) 
else { // Decrement state if((hiprand_uniform(&rnd_state) <= (1.0 / s)) && automata_temp > 1) { model[automata_model_index] = automata_temp - 1; } } } } } } // Check if we are to do type 2 feedback else if(clauses_feedback[clause_id] == 2) { // Check if the clause was evaluated to true in the evaluation phase. if(clauses_output[clause_id] == 1) { // Loop over all the automatas for(unsigned int automata_id = threadIdx.x; automata_id < automatas_amount; automata_id += blockDim.x) { // Calculate the automata model index automata_model_index = (class_id * clauses_amount * automatas_amount) + (clause_id * automatas_amount) + automata_id; // Get the automata value automata_temp = model[automata_model_index]; // Get the sample's value sample_value = x_data[(sample_id * features_amount) + (automata_id / 2)]; // Calculate the polarity of the automata automata_polarity = get_polarity(automata_id); // Get the include/exclude action for the automata action = automata_action(automata_temp, max_state); // Check if the automata is an for automata and that the feature is 0 if((automata_polarity == 1) && (sample_value == 0) && (action == false) && (automata_temp < max_state)){ model[automata_model_index] = automata_temp + 1; } else if((automata_polarity == -1) && (sample_value == 1) && (action == false) && (automata_temp < max_state)){ model[automata_model_index] = automata_temp + 1; } } } } } // Some cleanup and persistence before exiting // Copy back the random state random_states[global_thread_id] = rnd_state; } __global__ void improved_feedback(unsigned int* model, unsigned int* clauses_feedback, unsigned int* x_data, bool* clauses_output, unsigned int class_id, unsigned int sample_id, const bool correct_class, unsigned int clauses_amount, unsigned int features_amount, unsigned int automatas_amount, unsigned int max_state, unsigned int threshold, float s, hiprandState_t* random_states) { // Data indexing unsigned int global_thread_id = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int clause_id; unsigned int automata_id; unsigned int feature_id; // Model indexing unsigned int model_index; // Used to tempromary store the polarity of an automata int automata_polarity; unsigned int clause_feedback_type; bool clause_output; unsigned int automata_value; unsigned int sample_value; // Get the random state from the random values matrix (used to generate "random" numbers) hiprandState_t rnd_state = random_states[global_thread_id]; // In case there are more clauses than blocks, we need to loop them for(unsigned int model_relative_index = global_thread_id; model_relative_index < (clauses_amount * automatas_amount); model_relative_index += gridDim.x) { // Calculate the position in the model model_index = (class_id * clauses_amount * automatas_amount) + model_relative_index; clause_id = model_relative_index / automatas_amount; automata_id = model_relative_index % automatas_amount; feature_id = automata_id / 2; // Get the feedback type clause_feedback_type = clauses_feedback[clause_id]; clause_output = clauses_output[clause_id]; // Get the clause value and sample value automata_polarity = get_polarity(automata_id); automata_value = model[model_index]; sample_value = x_data[(sample_id * features_amount) + feature_id]; // Check if we are to do type 1 feedback if(clause_feedback_type == 1){ // If the clause output was evaluated to false if(clause_output == false) { // Punish the automata if the following conditions are met model[model_index] = automata_value - static_cast<int>((hiprand_uniform(&rnd_state) <= (1.0f 
/ s)) && (automata_value > 1)); } else { // Check if the sample was False if(sample_value == 0) { // Check if the automata is an against automata if(automata_polarity == -1){ // Increment state if the conditions are met model[model_index] = automata_value + static_cast<int>((hiprand_uniform(&rnd_state) <= ((s - 1.0f) / s)) && (automata_value < max_state)); } // Assumes that the automata is a for automata (since it is not an against automata) else { // Decrement the state if the conditions are met model[model_index] = automata_value - static_cast<int>((hiprand_uniform(&rnd_state) <= (1.0f / s)) && automata_value > 1); } } // Assumes that the sample is 1 (since it was not 0) else { // Check if the automata is a for automata if(automata_polarity == 1) { // Decrement the state if the conditions are met model[model_index] = automata_value + static_cast<int>((hiprand_uniform(&rnd_state) <= ((s - 1.0f) / s)) && (automata_value < max_state)); } // Assumes that the automata is an against automata (since it is not an for automata) else { // Increment state if the conditions are met model[model_index] = automata_value - static_cast<int>((hiprand_uniform(&rnd_state) <= (1.0f / s)) && automata_value > 1); } } } } // Check if we are to do type 2 feedback else if(clause_feedback_type == 2) { // Check if the clause was evaluated to true in the evaluation phase. if(clause_output == true) { // Increment the state if the conditions are met model[model_index] = automata_value + static_cast<int>(((automata_polarity == 1) && (sample_value == 0)) || ((automata_polarity == -1) && (sample_value == 1)) && (automata_action(automata_value, max_state) == false) && (automata_value < max_state)); } } } // Some cleanup and persistence before exiting // Copy back the random state random_states[global_thread_id] = rnd_state; } __global__ void initialize_random_states(hiprandState_t* states, int seed, unsigned int amount_of_states) { // Calculate the global thread id unsigned int global_thread_id = ((blockIdx.x * blockDim.x) + threadIdx.x); // Calculate the offset (to make it a "bit more random") int offset = seed+global_thread_id; for(unsigned int index = global_thread_id; index < amount_of_states; index += gridDim.x) { // Initialize the random state hiprand_init(seed, index, offset, &states[index]); } }
29ac929eefb419a6e83e82ff355ba608f4bd1ce6.cu
#include <stdio.h> #include <cuda.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> #include <curand.h> #include <curand_kernel.h> #include "kernels.cuh" __device__ __forceinline__ int get_polarity(int id) { // If id is an even number, 1 will be returned. // If id is an odd number, -1 will be returned. return 1 - (2 * (id & 1)); } __device__ __forceinline__ bool automata_action(unsigned int automata_state, unsigned int max_state) { // Returns true if the automata is in an include state, while false if the automata is in an exclude state return (automata_state > (static_cast<unsigned int>(max_state / 2))); } __global__ void validate_clauses(unsigned int* model, bool* clauses_output, unsigned int* x_data, unsigned int sample_id, unsigned int clauses_amount, unsigned int features_amount, unsigned int automatas_amount, unsigned int class_id, unsigned int max_state, bool prediction) { // Declare some shared variables // shared[0] = The output of the clause // shared[1] = Boolean flag if all is in exclude mode __shared__ bool shared[2]; // Calculate the clause id to work on const int thread_id = threadIdx.x; // Initialize some "private variables" unsigned int sample_value; unsigned int automata_value; bool action; int automata_polarity; for (unsigned int clause_id = blockIdx.x; clause_id < clauses_amount; clause_id += gridDim.x) { // Set the clause output to be true if(thread_id == 0) { shared[0] = true; shared[1] = true; } // Wait until all threads are ready __syncthreads(); // Loop over each of the automata and "stride" through for(unsigned int automata_id = thread_id; automata_id < automatas_amount; automata_id += blockDim.x) { // Check if any of the other threads have evaluated the clause to false. This way we could skip checking. if(shared[0] == false) { break; } // Get the automatas value automata_value = model[(class_id * clauses_amount * automatas_amount) + (clause_id * automatas_amount) + automata_id]; // Get the action of the automata action = automata_action(automata_value, max_state); // Check if the automata is in an include state, if so, investigate further... if(action == true) { // Calculate the polarity of the automata automata_polarity = get_polarity(automata_id); // Get the sample's value sample_value = x_data[(sample_id * features_amount) + (automata_id / 2)]; // Flip the flag that says that all automatas are in exclude mode shared[1] = false; // Since the automata is in an include state, lets check if the DOES NOT match the desired value if(((automata_polarity == 1) && (sample_value != 1)) || ((automata_polarity == -1) && (sample_value != 0))){ // A condition has been met that would falsify the entire clause. 
Therefore, evaluate the entire clause to false shared[0] = false; break; } } } // Wait until all threads to evaluate until finished __syncthreads(); // Check if we are thread id 0 if(thread_id == 0) { // Check if the clause was, when finished evaluating, evaluated to false if(shared[0] == false || (prediction == true && shared[1] == true)) { clauses_output[clause_id] = false; } // Assuming it was not false, then it is true else { clauses_output[clause_id] = true; } } } } __global__ void reduce_votes(int* scores, unsigned int scores_index, bool* clauses_output, unsigned int clauses_amount, unsigned int threshold) { // Tempromary shared results extern __shared__ int results[]; // Declare some private variables int thread_result = 0; for(unsigned int clause_id = threadIdx.x; clause_id < clauses_amount; clause_id += blockDim.x) { // Add the score to this threads tempromary score thread_result += (get_polarity(clause_id) * clauses_output[clause_id]); } // Move the threads result into shared memory results[threadIdx.x] = thread_result; // Wait until all the threads have completed the summation of all clause outputs __syncthreads(); // Start to reduce the threads and score for(unsigned int offset = blockDim.x / 2; offset > 0; offset /= 2) { // Check if this thread is doing some reduction if(threadIdx.x < offset) { results[threadIdx.x] += results[threadIdx.x + offset]; } __syncthreads(); } // Thread 0 will store the result in the scores list if(threadIdx.x == 0) { if(threshold != 0) { if(results[threadIdx.x] > threshold) { results[threadIdx.x] = static_cast<int>(threshold); } else if(results[threadIdx.x] < -threshold) { results[threadIdx.x] = -static_cast<int>(threshold); } } scores[scores_index] = results[threadIdx.x]; } } __global__ void calculate_feedback(unsigned int* clauses_feedback, int* scores, unsigned int threshold, float s, unsigned int class_id, bool correct_class, unsigned int clauses_amount, curandState* random_states) { // Calculate the position of the thread unsigned int global_thread_id = (blockIdx.x * blockDim.x) + threadIdx.x; // Declare some private variables curandState rnd_state = random_states[global_thread_id]; float clause_polarity; int class_score = scores[0]; // Loop all clauses for (unsigned int clause_id = global_thread_id; clause_id < clauses_amount; clause_id += gridDim.x) { // Determine the polarity of the clause clause_polarity = static_cast<float>(get_polarity(clause_id)); // Check if we are on the correct class if (correct_class == true) { // Check if we are to skip feedback for this clause if(curand_uniform(&rnd_state) > (((1.0f * threshold) - class_score) / (2.0f * threshold))) { // No feedback will be given to this clause clauses_feedback[clause_id] = 0; } else { // A small performant operation that calculates that will return the following // Clauses for = Type 1 feedback // Clauses against = Type 2 feedback clauses_feedback[clause_id] = 1 + static_cast<int>(signbit(clause_polarity)); } } else { // Check if we are to skip feedback for this clause if(curand_uniform(&rnd_state) > (((1.0f * threshold) + class_score) / (2.0f * threshold))) { // No feedback will be given to this clause clauses_feedback[clause_id] = 0; } else { // A small performant operation that calculates that will return the following // Clauses for = Type 2 feedback // Clauses against = Type 1 feedback clauses_feedback[clause_id] = 2 - static_cast<int>(signbit(clause_polarity)); } } } // Copy the random state back to global memory random_states[global_thread_id] = rnd_state; } __global__ void 
give_feedback_to_clauses(unsigned int* model, unsigned int* clauses_feedback, unsigned int* x_data, bool* clauses_output, unsigned int class_id, unsigned int sample_id, const bool correct_class, unsigned int clauses_amount, unsigned int features_amount, unsigned int automatas_amount, unsigned int max_state, unsigned int threshold, float s, curandState* random_states) { // Calculate and declare some "private variables" // Get the clause id, based on the block id in the grid unsigned int global_thread_id = (blockIdx.x * blockDim.x) + threadIdx.x; // Used to calculate the absolute index of an automata unsigned int automata_model_index; unsigned int automata_temp; // Used to tempromary store whether an automata is in include or exclude state bool action; // Used to tempromary store the polarity of an automata int automata_polarity; // Used to tempromary store the feature id of which feature an automata is associated with unsigned int sample_value; // Get the random state from the random values matrix (used to generate "random" numbers) curandState rnd_state = random_states[global_thread_id]; // In case there are more clauses than blocks, we need to loop them for(unsigned int clause_id = blockIdx.x; clause_id < clauses_amount; clause_id += gridDim.x) { // Check if we are to do type 1 feedback if(clauses_feedback[clause_id] == 1){ // If the clause output was evaluated to false if(clauses_output[clause_id] == 0) { // Loop and potentially punish all automatas for(unsigned int automata_index = threadIdx.x; automata_index < automatas_amount; automata_index += blockDim.x) { // Calculate the position of the current automata automata_model_index = (class_id * clauses_amount * automatas_amount) + (clause_id * automatas_amount) + automata_index; // Get the value for the automata automata_temp = model[automata_model_index]; if((automata_temp > 1) && (curand_uniform(&rnd_state) <= (1.0 / s))) { model[automata_model_index] = automata_temp - 1; } } } else { // Loop over each of the automatas for(unsigned int automata_index = threadIdx.x; automata_index < automatas_amount; automata_index += blockDim.x){ // Calculate the position of the current automata automata_model_index = (class_id * clauses_amount * automatas_amount) + (clause_id * automatas_amount) + automata_index; // Get the value of the sample for the current automata sample_value = x_data[(sample_id * features_amount) + static_cast<unsigned int>(automata_index / 2)]; // Calculate the polarity of the automata automata_polarity = get_polarity(automata_index); // Get the value for the automata automata_temp = model[automata_model_index]; // Check if the sample was False if(sample_value == 0) { // Check if the automata is an against automata if(automata_polarity == -1){ // Increment state if((curand_uniform(&rnd_state) <= ((s - 1.0) / s)) && (automata_temp < max_state)) { model[automata_model_index] = automata_temp + 1; } } // Assumes that the automata is a for automata (since it is not an against automata) else { // Decrement state if((curand_uniform(&rnd_state) <= (1.0 / s)) && automata_temp > 1) { model[automata_model_index] = automata_temp - 1; } } } // Assumes that the sample is 1 (since it was not 0) else { // Check if the automata is a for automata if(automata_polarity == 1) { // Decrement the state if((curand_uniform(&rnd_state) <= ((s - 1.0) / s)) && (automata_temp < max_state)) { model[automata_model_index] = automata_temp + 1; } } // Assumes that the automata is an against automata (since it is not an for automata) else { // Decrement state 
if((curand_uniform(&rnd_state) <= (1.0 / s)) && automata_temp > 1) { model[automata_model_index] = automata_temp - 1; } } } } } } // Check if we are to do type 2 feedback else if(clauses_feedback[clause_id] == 2) { // Check if the clause was evaluated to true in the evaluation phase. if(clauses_output[clause_id] == 1) { // Loop over all the automatas for(unsigned int automata_id = threadIdx.x; automata_id < automatas_amount; automata_id += blockDim.x) { // Calculate the automata model index automata_model_index = (class_id * clauses_amount * automatas_amount) + (clause_id * automatas_amount) + automata_id; // Get the automata value automata_temp = model[automata_model_index]; // Get the sample's value sample_value = x_data[(sample_id * features_amount) + (automata_id / 2)]; // Calculate the polarity of the automata automata_polarity = get_polarity(automata_id); // Get the include/exclude action for the automata action = automata_action(automata_temp, max_state); // Check if the automata is an for automata and that the feature is 0 if((automata_polarity == 1) && (sample_value == 0) && (action == false) && (automata_temp < max_state)){ model[automata_model_index] = automata_temp + 1; } else if((automata_polarity == -1) && (sample_value == 1) && (action == false) && (automata_temp < max_state)){ model[automata_model_index] = automata_temp + 1; } } } } } // Some cleanup and persistence before exiting // Copy back the random state random_states[global_thread_id] = rnd_state; } __global__ void improved_feedback(unsigned int* model, unsigned int* clauses_feedback, unsigned int* x_data, bool* clauses_output, unsigned int class_id, unsigned int sample_id, const bool correct_class, unsigned int clauses_amount, unsigned int features_amount, unsigned int automatas_amount, unsigned int max_state, unsigned int threshold, float s, curandState* random_states) { // Data indexing unsigned int global_thread_id = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int clause_id; unsigned int automata_id; unsigned int feature_id; // Model indexing unsigned int model_index; // Used to tempromary store the polarity of an automata int automata_polarity; unsigned int clause_feedback_type; bool clause_output; unsigned int automata_value; unsigned int sample_value; // Get the random state from the random values matrix (used to generate "random" numbers) curandState rnd_state = random_states[global_thread_id]; // In case there are more clauses than blocks, we need to loop them for(unsigned int model_relative_index = global_thread_id; model_relative_index < (clauses_amount * automatas_amount); model_relative_index += gridDim.x) { // Calculate the position in the model model_index = (class_id * clauses_amount * automatas_amount) + model_relative_index; clause_id = model_relative_index / automatas_amount; automata_id = model_relative_index % automatas_amount; feature_id = automata_id / 2; // Get the feedback type clause_feedback_type = clauses_feedback[clause_id]; clause_output = clauses_output[clause_id]; // Get the clause value and sample value automata_polarity = get_polarity(automata_id); automata_value = model[model_index]; sample_value = x_data[(sample_id * features_amount) + feature_id]; // Check if we are to do type 1 feedback if(clause_feedback_type == 1){ // If the clause output was evaluated to false if(clause_output == false) { // Punish the automata if the following conditions are met model[model_index] = automata_value - static_cast<int>((curand_uniform(&rnd_state) <= (1.0f / s)) && (automata_value > 1)); } 
else { // Check if the sample was False if(sample_value == 0) { // Check if the automata is an against automata if(automata_polarity == -1){ // Increment state if the conditions are met model[model_index] = automata_value + static_cast<int>((curand_uniform(&rnd_state) <= ((s - 1.0f) / s)) && (automata_value < max_state)); } // Assumes that the automata is a for automata (since it is not an against automata) else { // Decrement the state if the conditions are met model[model_index] = automata_value - static_cast<int>((curand_uniform(&rnd_state) <= (1.0f / s)) && automata_value > 1); } } // Assumes that the sample is 1 (since it was not 0) else { // Check if the automata is a for automata if(automata_polarity == 1) { // Decrement the state if the conditions are met model[model_index] = automata_value + static_cast<int>((curand_uniform(&rnd_state) <= ((s - 1.0f) / s)) && (automata_value < max_state)); } // Assumes that the automata is an against automata (since it is not an for automata) else { // Increment state if the conditions are met model[model_index] = automata_value - static_cast<int>((curand_uniform(&rnd_state) <= (1.0f / s)) && automata_value > 1); } } } } // Check if we are to do type 2 feedback else if(clause_feedback_type == 2) { // Check if the clause was evaluated to true in the evaluation phase. if(clause_output == true) { // Increment the state if the conditions are met model[model_index] = automata_value + static_cast<int>(((automata_polarity == 1) && (sample_value == 0)) || ((automata_polarity == -1) && (sample_value == 1)) && (automata_action(automata_value, max_state) == false) && (automata_value < max_state)); } } } // Some cleanup and persistence before exiting // Copy back the random state random_states[global_thread_id] = rnd_state; } __global__ void initialize_random_states(curandState* states, int seed, unsigned int amount_of_states) { // Calculate the global thread id unsigned int global_thread_id = ((blockIdx.x * blockDim.x) + threadIdx.x); // Calculate the offset (to make it a "bit more random") int offset = seed+global_thread_id; for(unsigned int index = global_thread_id; index < amount_of_states; index += gridDim.x) { // Initialize the random state curand_init(seed, index, offset, &states[index]); } }
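The clause-selection step in calculate_feedback reduces to a single probability per clause: feedback is given with probability (T - score)/(2T) for the target class and (T + score)/(2T) for the sampled negative class, where the class score has already been clamped to [-T, T] in reduce_votes. The standalone host sketch below restates that formula with illustrative names; it is not part of the source files.

#include <algorithm>
#include <cstdio>

/* Probability that a clause of the target class (correct_class == true) or of a
 * negative-sampled class receives feedback, given the class score and threshold T. */
float feedback_probability(int class_score, int threshold, bool correct_class)
{
    const float t = (float)threshold;
    const float s = (float)std::max(-threshold, std::min(threshold, class_score));
    return correct_class ? (t - s) / (2.0f * t) : (t + s) / (2.0f * t);
}

int main()
{
    /* A confidently correct class (score close to +T) is rarely pushed further,
     * while a badly mis-scored one receives feedback almost every time. */
    std::printf("correct, score  90/100 -> p = %.2f\n", feedback_probability(90, 100, true));
    std::printf("correct, score -90/100 -> p = %.2f\n", feedback_probability(-90, 100, true));
    return 0;
}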
8_share_reduce.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<iostream> #include<addTest.h> #define DIM 1024 __global__ void reduceUnroll4(int *src, int *dst, int num) { unsigned int tid = threadIdx.x; unsigned int id = blockIdx.x * blockDim.x * 4 + threadIdx.x; if (tid >= num) return; int* data = src + blockIdx.x * blockDim.x * 4; if (id + 3 * blockDim.x < num) { src[id] += src[id + blockDim.x]; src[id] += src[id + blockDim.x * 2]; src[id] += src[id + blockDim.x * 3]; } __syncthreads(); for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (tid < stride) { data[tid] += data[tid + stride]; } __syncthreads(); } if (tid == 0) dst[blockIdx.x] = data[0]; } __global__ void reduceUnroll8(int *src, int *dst, int num) { unsigned int tid = threadIdx.x; unsigned int id = blockIdx.x * blockDim.x * 8 + threadIdx.x; if (tid >= num) return; int* data = src + blockIdx.x * blockDim.x * 8; if (id + 7 * blockDim.x < num) { src[id] += src[id + blockDim.x]; src[id] += src[id + blockDim.x * 2]; src[id] += src[id + blockDim.x * 3]; src[id] += src[id + blockDim.x * 4]; src[id] += src[id + blockDim.x * 5]; src[id] += src[id + blockDim.x * 6]; src[id] += src[id + blockDim.x * 7]; } __syncthreads(); for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (tid < stride) { data[tid] += data[tid + stride]; } __syncthreads(); } if (tid == 0) dst[blockIdx.x] = data[0]; } __global__ void reduceUnrollWarps8(int *src, int *dst, int num) { unsigned int tid = threadIdx.x; unsigned int id = blockIdx.x * blockDim.x * 8 + threadIdx.x; if (tid >= num) return; int* data = src + blockIdx.x * blockDim.x * 8; if (id + 7 * blockDim.x < num) { src[id] += src[id + blockDim.x]; src[id] += src[id + blockDim.x * 2]; src[id] += src[id + blockDim.x * 3]; src[id] += src[id + blockDim.x * 4]; src[id] += src[id + blockDim.x * 5]; src[id] += src[id + blockDim.x * 6]; src[id] += src[id + blockDim.x * 7]; } __syncthreads(); for (int stride = blockDim.x / 2; stride > 32; stride >>= 1) { if (tid < stride) { data[tid] += data[tid + stride]; } __syncthreads(); } if (tid < 32) { volatile int *vmen = data; vmen[tid] += vmen[tid + 32]; vmen[tid] += vmen[tid + 16]; vmen[tid] += vmen[tid + 8]; vmen[tid] += vmen[tid + 4]; vmen[tid] += vmen[tid + 2]; vmen[tid] += vmen[tid + 1]; } if (tid == 0) dst[blockIdx.x] = data[0]; } __global__ void reduceCompleteUnrollWarps8(int *src, int *dst, int num) { unsigned int tid = threadIdx.x; unsigned int id = blockIdx.x * blockDim.x * 8 + threadIdx.x; if (tid >= num) return; int* data = src + blockIdx.x * blockDim.x * 8; if (id + 7 * blockDim.x < num) { src[id] += src[id + blockDim.x]; src[id] += src[id + blockDim.x * 2]; src[id] += src[id + blockDim.x * 3]; src[id] += src[id + blockDim.x * 4]; src[id] += src[id + blockDim.x * 5]; src[id] += src[id + blockDim.x * 6]; src[id] += src[id + blockDim.x * 7]; } __syncthreads(); if (blockDim.x >= 1024 && tid < 512) data[tid] += data[tid + 512]; __syncthreads(); if (blockDim.x >= 512 && tid < 256) data[tid] += data[tid + 256]; __syncthreads(); if (blockDim.x >= 256 && tid < 128) data[tid] += data[tid + 128]; __syncthreads(); if (blockDim.x >= 128 && tid < 64) data[tid] += data[tid + 64]; __syncthreads(); if (tid < 32) { volatile int *vmen = data; vmen[tid] += vmen[tid + 32]; vmen[tid] += vmen[tid + 16]; vmen[tid] += vmen[tid + 8]; vmen[tid] += vmen[tid + 4]; vmen[tid] += vmen[tid + 2]; vmen[tid] += vmen[tid + 1]; } if (tid == 0) dst[blockIdx.x] = data[0]; } __global__ void reduceCompleteUnroll(int *src, int *dst, int num) { 
unsigned int tid = threadIdx.x; if (tid >= num) return; int* data = src + blockIdx.x * blockDim.x; if (blockDim.x >= 1024 && tid < 512) data[tid] += data[tid + 512]; __syncthreads(); if (blockDim.x >= 512 && tid < 256) data[tid] += data[tid + 256]; __syncthreads(); if (blockDim.x >= 256 && tid < 128) data[tid] += data[tid + 128]; __syncthreads(); if (blockDim.x >= 128 && tid < 64) data[tid] += data[tid + 64]; __syncthreads(); if (tid < 32) { volatile int *vmen = data; vmen[tid] += vmen[tid + 32]; vmen[tid] += vmen[tid + 16]; vmen[tid] += vmen[tid + 8]; vmen[tid] += vmen[tid + 4]; vmen[tid] += vmen[tid + 2]; vmen[tid] += vmen[tid + 1]; } if (tid == 0) dst[blockIdx.x] = data[0]; } __global__ void reduceCompleteShareMem(int *src, int *dst, int num) { __shared__ int mem[DIM]; unsigned int tid = threadIdx.x; if (tid >= num) return; unsigned int id = blockIdx.x * blockDim.x * 8 + threadIdx.x; if (tid >= num) return; int* data = src + blockIdx.x * blockDim.x * 8; if (id + 7 * blockDim.x < num) { src[id] += src[id + blockDim.x]; src[id] += src[id + blockDim.x * 2]; src[id] += src[id + blockDim.x * 3]; src[id] += src[id + blockDim.x * 4]; src[id] += src[id + blockDim.x * 5]; src[id] += src[id + blockDim.x * 6]; src[id] += src[id + blockDim.x * 7]; } mem[tid] = data[tid]; __syncthreads(); // if (blockDim.x >= 1024 && tid < 512) mem[tid] += mem[tid + 512]; __syncthreads(); if (blockDim.x >= 512 && tid < 256) mem[tid] += mem[tid + 256]; __syncthreads(); if (blockDim.x >= 256 && tid < 128) mem[tid] += mem[tid + 128]; __syncthreads(); if (blockDim.x >= 128 && tid < 64) mem[tid] += mem[tid + 64]; __syncthreads(); if (tid < 32) { volatile int *vmen = mem; vmen[tid] += vmen[tid + 32]; vmen[tid] += vmen[tid + 16]; vmen[tid] += vmen[tid + 8]; vmen[tid] += vmen[tid + 4]; vmen[tid] += vmen[tid + 2]; vmen[tid] += vmen[tid + 1]; } if (tid == 0) dst[blockIdx.x] = mem[0]; } __global__ void reduceUnroll2(int *src, int *dst, int num) { unsigned int tid = threadIdx.x; unsigned int id = blockIdx.x * blockDim.x * 2 + threadIdx.x; if (tid >= num) return; int* data = src + blockIdx.x * blockDim.x * 2; if (id + blockDim.x < num) { src[id] += src[id + blockDim.x]; } __syncthreads(); for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (tid < stride) { data[tid] += data[tid + stride]; } __syncthreads(); } if (tid == 0) dst[blockIdx.x] = data[0]; } __global__ void reduceNeighboredLess(int* src, int *dst, int num) { unsigned int tid = threadIdx.x; unsigned int id = blockIdx.x * blockDim.x + threadIdx.x; int* data = src + blockIdx.x * blockDim.x; if (id >= num) return; for (int stride = 1; stride < blockDim.x; stride *= 2) { int idx = 2 * tid * stride; if (idx < blockDim.x) { data[idx] += data[idx + stride]; } __syncthreads(); } if (tid == 0) dst[blockIdx.x] = data[0]; } __global__ void reduceInterieaved(int* src, int *dst, int num) { // set threadId unsigned int tid = threadIdx.x; unsigned int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= num) return; int* data = src + blockIdx.x * blockDim.x; for (int stride = blockDim.x / 2 ; stride > 0; stride >>= 1) { if (tid < stride) { data[tid] += data[tid + stride]; } __syncthreads(); } if (tid == 0) { dst[blockIdx.x] = data[0]; } } __global__ void reduceNeighbored(int* src, int *dst, int num) { // set threadId unsigned int id_thread = threadIdx.x; if (id_thread >= num) return; int* data = src + blockIdx.x * blockDim.x; for (int stride = 1; stride < blockDim.x; stride *= 2) { if ((id_thread % (2 * stride)) == 0) { data[id_thread] += data[stride + id_thread]; } 
__syncthreads(); } if (id_thread == 0) { dst[blockIdx.x] = data[0]; } } // CPU int reduceNeighbored_cpu(int *data, int num) { if (num == 1) return data[0]; int const stride = num / 2; for (int i = 0; i < stride; i++) { data[i] += data[i + stride]; } if (num % 2 == 1) { data[0] += data[num - 1]; } return reduceNeighbored_cpu(data, stride); } int main(void) { int dev = 0; initDevice(dev); int num = 1 << 20; int* x_h = (int *)malloc(num * sizeof(int)); int* dst_cpu = (int *)malloc(num * sizeof(int)); int* dst_dev_cpu = (int *)malloc(num * sizeof(int)); for(int i = 0; i < num; i++) { x_h[i] = i % 3; } int *x_d, *dst_d; CHECK(hipMalloc((int**)&x_d, num * sizeof(int))); CHECK(hipMalloc((int**)&dst_d, num * sizeof(int))); CHECK(hipMemcpy(x_d, x_h, num * sizeof(int), hipMemcpyHostToDevice)); int block = 1024; int grid = (num + block -1) / block; printf("grid : %d , block : %d\n", grid, block); int sum_dev = 0; // // reduceNeighbored<<<grid, block>>>(x_d, dst_d, num); // CHECK(hipMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), hipMemcpyDeviceToHost)); // for (int i = 0; i < grid; i++) { // sum_dev += dst_dev_cpu[i]; // } // reduceNeighbored_cpu(x_h, num); // if (sum_dev != x_h[0]) // printf("Error kernel data device: %d host:%d \n", sum_dev, x_h[0]); // // CHECK(hipMemcpy(x_d, x_h, num * sizeof(int), hipMemcpyHostToDevice)); // reduceNeighboredLess<<<grid, block>>>(x_d, dst_d, num); // CHECK(hipMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), hipMemcpyDeviceToHost)); // sum_dev = 0; // for (int i = 0; i < grid; i++) { // sum_dev += dst_dev_cpu[i]; // } // reduceNeighbored_cpu(x_h, num); // if (sum_dev != x_h[0]) // printf("Error Less kernel data device: %d host:%d \n", sum_dev, x_h[0]); // // CHECK(hipMemcpy(x_d, x_h, num * sizeof(int), hipMemcpyHostToDevice)); // reduceInterieaved<<<grid, block>>>(x_d, dst_d, num); // CHECK(hipMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), hipMemcpyDeviceToHost)); // sum_dev = 0; // for (int i = 0; i < grid; i++) { // sum_dev += dst_dev_cpu[i]; // } // reduceNeighbored_cpu(x_h, num); // if (sum_dev != x_h[0]) // printf("Error interieaved kernel data device: %d host:%d \n", sum_dev, x_h[0]); // // CHECK(hipMemcpy(x_d, x_h, num * sizeof(int), hipMemcpyHostToDevice)); // reduceUnroll2<<<grid / 2, block>>>(x_d, dst_d, num); // CHECK(hipMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), hipMemcpyDeviceToHost)); // sum_dev = 0; // for (int i = 0; i < grid / 2; i++) { // sum_dev += dst_dev_cpu[i]; // } // reduceNeighbored_cpu(x_h, num); // if (sum_dev != x_h[0]) // printf("Error interieaved kernel data device: %d host:%d \n", sum_dev, x_h[0]); // // CHECK(hipMemcpy(x_d, x_h, num * sizeof(int), hipMemcpyHostToDevice)); // reduceUnroll4<<<grid / 4, block>>>(x_d, dst_d, num); // CHECK(hipMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), hipMemcpyDeviceToHost)); // sum_dev = 0; // for (int i = 0; i < grid / 4; i++) { // sum_dev += dst_dev_cpu[i]; // } // reduceNeighbored_cpu(x_h, num); // if (sum_dev != x_h[0]) // printf("Error unroll4 kernel data device: %d host:%d \n", sum_dev, x_h[0]); // // CHECK(hipMemcpy(x_d, x_h, num * sizeof(int), hipMemcpyHostToDevice)); // reduceUnroll8<<<grid / 8, block>>>(x_d, dst_d, num); // CHECK(hipMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), hipMemcpyDeviceToHost)); // sum_dev = 0; // for (int i = 0; i < grid / 8; i++) { // sum_dev += dst_dev_cpu[i]; // } // reduceNeighbored_cpu(x_h, num); // if (sum_dev != x_h[0]) // printf("Error unroll8 kernel data device: %d host:%d \n", sum_dev, x_h[0]); // // CHECK(hipMemcpy(x_d, x_h, num * sizeof(int), 
hipMemcpyHostToDevice)); // reduceUnrollWarps8<<<grid / 8, block>>>(x_d, dst_d, num); // CHECK(hipMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), hipMemcpyDeviceToHost)); // sum_dev = 0; // for (int i = 0; i < grid / 8; i++) { // sum_dev += dst_dev_cpu[i]; // } // reduceNeighbored_cpu(x_h, num); // if (sum_dev != x_h[0]) // printf("Error warps8 kernel data device: %d host:%d \n", sum_dev, x_h[0]); // // CHECK(hipMemcpy(x_d, x_h, num * sizeof(int), hipMemcpyHostToDevice)); // reduceCompleteUnrollWarps8<<<grid / 8, block>>>(x_d, dst_d, num); // CHECK(hipMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), hipMemcpyDeviceToHost)); // sum_dev = 0; // for (int i = 0; i < grid / 8; i++) { // sum_dev += dst_dev_cpu[i]; // } // reduceNeighbored_cpu(x_h, num); // if (sum_dev != x_h[0]) // printf("Error Completewarps8 kernel data device: %d host:%d \n", sum_dev, x_h[0]); CHECK(hipMemcpy(x_d, x_h, num * sizeof(int), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( reduceCompleteShareMem), dim3(grid/8), dim3(block), 0, 0, x_d, dst_d, num); CHECK(hipMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), hipMemcpyDeviceToHost)); sum_dev = 0; for (int i = 0; i < grid/8; i++) { sum_dev += dst_dev_cpu[i]; } reduceNeighbored_cpu(x_h, num); if (sum_dev != x_h[0]) printf("Error warps8 kernel data device: %d host:%d \n", sum_dev, x_h[0]); CHECK(hipMemcpy(x_d, x_h, num * sizeof(int), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( reduceCompleteUnroll), dim3(grid), dim3(block), 0, 0, x_d, dst_d, num); CHECK(hipMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), hipMemcpyDeviceToHost)); sum_dev = 0; for (int i = 0; i < grid; i++) { sum_dev += dst_dev_cpu[i]; } reduceNeighbored_cpu(x_h, num); if (sum_dev != x_h[0]) printf("Error warps8 kernel data device: %d host:%d \n", sum_dev, x_h[0]); hipFree(x_d); hipFree(dst_d); free(x_h); free(dst_cpu); free(dst_dev_cpu); return 0; }
8_share_reduce.cu
#include<iostream> #include<addTest.h> #define DIM 1024 __global__ void reduceUnroll4(int *src, int *dst, int num) { unsigned int tid = threadIdx.x; unsigned int id = blockIdx.x * blockDim.x * 4 + threadIdx.x; if (tid >= num) return; int* data = src + blockIdx.x * blockDim.x * 4; if (id + 3 * blockDim.x < num) { src[id] += src[id + blockDim.x]; src[id] += src[id + blockDim.x * 2]; src[id] += src[id + blockDim.x * 3]; } __syncthreads(); for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (tid < stride) { data[tid] += data[tid + stride]; } __syncthreads(); } if (tid == 0) dst[blockIdx.x] = data[0]; } __global__ void reduceUnroll8(int *src, int *dst, int num) { unsigned int tid = threadIdx.x; unsigned int id = blockIdx.x * blockDim.x * 8 + threadIdx.x; if (tid >= num) return; int* data = src + blockIdx.x * blockDim.x * 8; if (id + 7 * blockDim.x < num) { src[id] += src[id + blockDim.x]; src[id] += src[id + blockDim.x * 2]; src[id] += src[id + blockDim.x * 3]; src[id] += src[id + blockDim.x * 4]; src[id] += src[id + blockDim.x * 5]; src[id] += src[id + blockDim.x * 6]; src[id] += src[id + blockDim.x * 7]; } __syncthreads(); for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (tid < stride) { data[tid] += data[tid + stride]; } __syncthreads(); } if (tid == 0) dst[blockIdx.x] = data[0]; } __global__ void reduceUnrollWarps8(int *src, int *dst, int num) { unsigned int tid = threadIdx.x; unsigned int id = blockIdx.x * blockDim.x * 8 + threadIdx.x; if (tid >= num) return; int* data = src + blockIdx.x * blockDim.x * 8; if (id + 7 * blockDim.x < num) { src[id] += src[id + blockDim.x]; src[id] += src[id + blockDim.x * 2]; src[id] += src[id + blockDim.x * 3]; src[id] += src[id + blockDim.x * 4]; src[id] += src[id + blockDim.x * 5]; src[id] += src[id + blockDim.x * 6]; src[id] += src[id + blockDim.x * 7]; } __syncthreads(); for (int stride = blockDim.x / 2; stride > 32; stride >>= 1) { if (tid < stride) { data[tid] += data[tid + stride]; } __syncthreads(); } if (tid < 32) { volatile int *vmen = data; vmen[tid] += vmen[tid + 32]; vmen[tid] += vmen[tid + 16]; vmen[tid] += vmen[tid + 8]; vmen[tid] += vmen[tid + 4]; vmen[tid] += vmen[tid + 2]; vmen[tid] += vmen[tid + 1]; } if (tid == 0) dst[blockIdx.x] = data[0]; } __global__ void reduceCompleteUnrollWarps8(int *src, int *dst, int num) { unsigned int tid = threadIdx.x; unsigned int id = blockIdx.x * blockDim.x * 8 + threadIdx.x; if (tid >= num) return; int* data = src + blockIdx.x * blockDim.x * 8; if (id + 7 * blockDim.x < num) { src[id] += src[id + blockDim.x]; src[id] += src[id + blockDim.x * 2]; src[id] += src[id + blockDim.x * 3]; src[id] += src[id + blockDim.x * 4]; src[id] += src[id + blockDim.x * 5]; src[id] += src[id + blockDim.x * 6]; src[id] += src[id + blockDim.x * 7]; } __syncthreads(); if (blockDim.x >= 1024 && tid < 512) data[tid] += data[tid + 512]; __syncthreads(); if (blockDim.x >= 512 && tid < 256) data[tid] += data[tid + 256]; __syncthreads(); if (blockDim.x >= 256 && tid < 128) data[tid] += data[tid + 128]; __syncthreads(); if (blockDim.x >= 128 && tid < 64) data[tid] += data[tid + 64]; __syncthreads(); if (tid < 32) { volatile int *vmen = data; vmen[tid] += vmen[tid + 32]; vmen[tid] += vmen[tid + 16]; vmen[tid] += vmen[tid + 8]; vmen[tid] += vmen[tid + 4]; vmen[tid] += vmen[tid + 2]; vmen[tid] += vmen[tid + 1]; } if (tid == 0) dst[blockIdx.x] = data[0]; } __global__ void reduceCompleteUnroll(int *src, int *dst, int num) { unsigned int tid = threadIdx.x; if (tid >= num) return; int* data = src + blockIdx.x * 
blockDim.x; if (blockDim.x >= 1024 && tid < 512) data[tid] += data[tid + 512]; __syncthreads(); if (blockDim.x >= 512 && tid < 256) data[tid] += data[tid + 256]; __syncthreads(); if (blockDim.x >= 256 && tid < 128) data[tid] += data[tid + 128]; __syncthreads(); if (blockDim.x >= 128 && tid < 64) data[tid] += data[tid + 64]; __syncthreads(); if (tid < 32) { volatile int *vmen = data; vmen[tid] += vmen[tid + 32]; vmen[tid] += vmen[tid + 16]; vmen[tid] += vmen[tid + 8]; vmen[tid] += vmen[tid + 4]; vmen[tid] += vmen[tid + 2]; vmen[tid] += vmen[tid + 1]; } if (tid == 0) dst[blockIdx.x] = data[0]; } __global__ void reduceCompleteShareMem(int *src, int *dst, int num) { __shared__ int mem[DIM]; unsigned int tid = threadIdx.x; if (tid >= num) return; unsigned int id = blockIdx.x * blockDim.x * 8 + threadIdx.x; if (tid >= num) return; int* data = src + blockIdx.x * blockDim.x * 8; if (id + 7 * blockDim.x < num) { src[id] += src[id + blockDim.x]; src[id] += src[id + blockDim.x * 2]; src[id] += src[id + blockDim.x * 3]; src[id] += src[id + blockDim.x * 4]; src[id] += src[id + blockDim.x * 5]; src[id] += src[id + blockDim.x * 6]; src[id] += src[id + blockDim.x * 7]; } mem[tid] = data[tid]; __syncthreads(); // 该同步操作保证该线程快的所有线程此处保持一致,即复制完成 if (blockDim.x >= 1024 && tid < 512) mem[tid] += mem[tid + 512]; __syncthreads(); if (blockDim.x >= 512 && tid < 256) mem[tid] += mem[tid + 256]; __syncthreads(); if (blockDim.x >= 256 && tid < 128) mem[tid] += mem[tid + 128]; __syncthreads(); if (blockDim.x >= 128 && tid < 64) mem[tid] += mem[tid + 64]; __syncthreads(); if (tid < 32) { volatile int *vmen = mem; vmen[tid] += vmen[tid + 32]; vmen[tid] += vmen[tid + 16]; vmen[tid] += vmen[tid + 8]; vmen[tid] += vmen[tid + 4]; vmen[tid] += vmen[tid + 2]; vmen[tid] += vmen[tid + 1]; } if (tid == 0) dst[blockIdx.x] = mem[0]; } __global__ void reduceUnroll2(int *src, int *dst, int num) { unsigned int tid = threadIdx.x; unsigned int id = blockIdx.x * blockDim.x * 2 + threadIdx.x; if (tid >= num) return; int* data = src + blockIdx.x * blockDim.x * 2; if (id + blockDim.x < num) { src[id] += src[id + blockDim.x]; } __syncthreads(); for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (tid < stride) { data[tid] += data[tid + stride]; } __syncthreads(); } if (tid == 0) dst[blockIdx.x] = data[0]; } __global__ void reduceNeighboredLess(int* src, int *dst, int num) { unsigned int tid = threadIdx.x; unsigned int id = blockIdx.x * blockDim.x + threadIdx.x; int* data = src + blockIdx.x * blockDim.x; if (id >= num) return; for (int stride = 1; stride < blockDim.x; stride *= 2) { int idx = 2 * tid * stride; if (idx < blockDim.x) { data[idx] += data[idx + stride]; } __syncthreads(); } if (tid == 0) dst[blockIdx.x] = data[0]; } __global__ void reduceInterieaved(int* src, int *dst, int num) { // set threadId unsigned int tid = threadIdx.x; unsigned int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= num) return; int* data = src + blockIdx.x * blockDim.x; for (int stride = blockDim.x / 2 ; stride > 0; stride >>= 1) { if (tid < stride) { data[tid] += data[tid + stride]; } __syncthreads(); } if (tid == 0) { dst[blockIdx.x] = data[0]; } } __global__ void reduceNeighbored(int* src, int *dst, int num) { // set threadId unsigned int id_thread = threadIdx.x; if (id_thread >= num) return; int* data = src + blockIdx.x * blockDim.x; for (int stride = 1; stride < blockDim.x; stride *= 2) { if ((id_thread % (2 * stride)) == 0) { data[id_thread] += data[stride + id_thread]; } __syncthreads(); } if (id_thread == 0) { dst[blockIdx.x] = 
data[0]; } } // CPU int reduceNeighbored_cpu(int *data, int num) { if (num == 1) return data[0]; int const stride = num / 2; for (int i = 0; i < stride; i++) { data[i] += data[i + stride]; } if (num % 2 == 1) { data[0] += data[num - 1]; } return reduceNeighbored_cpu(data, stride); } int main(void) { int dev = 0; initDevice(dev); int num = 1 << 20; int* x_h = (int *)malloc(num * sizeof(int)); int* dst_cpu = (int *)malloc(num * sizeof(int)); int* dst_dev_cpu = (int *)malloc(num * sizeof(int)); for(int i = 0; i < num; i++) { x_h[i] = i % 3; } int *x_d, *dst_d; CHECK(cudaMalloc((int**)&x_d, num * sizeof(int))); CHECK(cudaMalloc((int**)&dst_d, num * sizeof(int))); CHECK(cudaMemcpy(x_d, x_h, num * sizeof(int), cudaMemcpyHostToDevice)); int block = 1024; int grid = (num + block -1) / block; printf("grid : %d , block : %d\n", grid, block); int sum_dev = 0; // // reduceNeighbored<<<grid, block>>>(x_d, dst_d, num); // CHECK(cudaMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), cudaMemcpyDeviceToHost)); // for (int i = 0; i < grid; i++) { // sum_dev += dst_dev_cpu[i]; // } // reduceNeighbored_cpu(x_h, num); // if (sum_dev != x_h[0]) // printf("Error kernel data device: %d host:%d \n", sum_dev, x_h[0]); // // CHECK(cudaMemcpy(x_d, x_h, num * sizeof(int), cudaMemcpyHostToDevice)); // reduceNeighboredLess<<<grid, block>>>(x_d, dst_d, num); // CHECK(cudaMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), cudaMemcpyDeviceToHost)); // sum_dev = 0; // for (int i = 0; i < grid; i++) { // sum_dev += dst_dev_cpu[i]; // } // reduceNeighbored_cpu(x_h, num); // if (sum_dev != x_h[0]) // printf("Error Less kernel data device: %d host:%d \n", sum_dev, x_h[0]); // // CHECK(cudaMemcpy(x_d, x_h, num * sizeof(int), cudaMemcpyHostToDevice)); // reduceInterieaved<<<grid, block>>>(x_d, dst_d, num); // CHECK(cudaMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), cudaMemcpyDeviceToHost)); // sum_dev = 0; // for (int i = 0; i < grid; i++) { // sum_dev += dst_dev_cpu[i]; // } // reduceNeighbored_cpu(x_h, num); // if (sum_dev != x_h[0]) // printf("Error interieaved kernel data device: %d host:%d \n", sum_dev, x_h[0]); // // CHECK(cudaMemcpy(x_d, x_h, num * sizeof(int), cudaMemcpyHostToDevice)); // reduceUnroll2<<<grid / 2, block>>>(x_d, dst_d, num); // CHECK(cudaMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), cudaMemcpyDeviceToHost)); // sum_dev = 0; // for (int i = 0; i < grid / 2; i++) { // sum_dev += dst_dev_cpu[i]; // } // reduceNeighbored_cpu(x_h, num); // if (sum_dev != x_h[0]) // printf("Error interieaved kernel data device: %d host:%d \n", sum_dev, x_h[0]); // // CHECK(cudaMemcpy(x_d, x_h, num * sizeof(int), cudaMemcpyHostToDevice)); // reduceUnroll4<<<grid / 4, block>>>(x_d, dst_d, num); // CHECK(cudaMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), cudaMemcpyDeviceToHost)); // sum_dev = 0; // for (int i = 0; i < grid / 4; i++) { // sum_dev += dst_dev_cpu[i]; // } // reduceNeighbored_cpu(x_h, num); // if (sum_dev != x_h[0]) // printf("Error unroll4 kernel data device: %d host:%d \n", sum_dev, x_h[0]); // // CHECK(cudaMemcpy(x_d, x_h, num * sizeof(int), cudaMemcpyHostToDevice)); // reduceUnroll8<<<grid / 8, block>>>(x_d, dst_d, num); // CHECK(cudaMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), cudaMemcpyDeviceToHost)); // sum_dev = 0; // for (int i = 0; i < grid / 8; i++) { // sum_dev += dst_dev_cpu[i]; // } // reduceNeighbored_cpu(x_h, num); // if (sum_dev != x_h[0]) // printf("Error unroll8 kernel data device: %d host:%d \n", sum_dev, x_h[0]); // // CHECK(cudaMemcpy(x_d, x_h, num * sizeof(int), cudaMemcpyHostToDevice)); // 
reduceUnrollWarps8<<<grid / 8, block>>>(x_d, dst_d, num); // CHECK(cudaMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), cudaMemcpyDeviceToHost)); // sum_dev = 0; // for (int i = 0; i < grid / 8; i++) { // sum_dev += dst_dev_cpu[i]; // } // reduceNeighbored_cpu(x_h, num); // if (sum_dev != x_h[0]) // printf("Error warps8 kernel data device: %d host:%d \n", sum_dev, x_h[0]); // // CHECK(cudaMemcpy(x_d, x_h, num * sizeof(int), cudaMemcpyHostToDevice)); // reduceCompleteUnrollWarps8<<<grid / 8, block>>>(x_d, dst_d, num); // CHECK(cudaMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), cudaMemcpyDeviceToHost)); // sum_dev = 0; // for (int i = 0; i < grid / 8; i++) { // sum_dev += dst_dev_cpu[i]; // } // reduceNeighbored_cpu(x_h, num); // if (sum_dev != x_h[0]) // printf("Error Completewarps8 kernel data device: %d host:%d \n", sum_dev, x_h[0]); CHECK(cudaMemcpy(x_d, x_h, num * sizeof(int), cudaMemcpyHostToDevice)); reduceCompleteShareMem<<<grid/8, block>>>(x_d, dst_d, num); CHECK(cudaMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), cudaMemcpyDeviceToHost)); sum_dev = 0; for (int i = 0; i < grid/8; i++) { sum_dev += dst_dev_cpu[i]; } reduceNeighbored_cpu(x_h, num); if (sum_dev != x_h[0]) printf("Error warps8 kernel data device: %d host:%d \n", sum_dev, x_h[0]); CHECK(cudaMemcpy(x_d, x_h, num * sizeof(int), cudaMemcpyHostToDevice)); reduceCompleteUnroll<<<grid, block>>>(x_d, dst_d, num); CHECK(cudaMemcpy(dst_dev_cpu, dst_d, num * sizeof(int), cudaMemcpyDeviceToHost)); sum_dev = 0; for (int i = 0; i < grid; i++) { sum_dev += dst_dev_cpu[i]; } reduceNeighbored_cpu(x_h, num); if (sum_dev != x_h[0]) printf("Error warps8 kernel data device: %d host:%d \n", sum_dev, x_h[0]); cudaFree(x_d); cudaFree(dst_d); free(x_h); free(dst_cpu); free(dst_dev_cpu); return 0; }
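For reference, the warp-final stage used by reduceUnrollWarps8, reduceCompleteUnrollWarps8, reduceCompleteUnroll and reduceCompleteShareMem above keeps the last 32 threads coherent through a volatile pointer. The kernel below is a minimal sketch of the same block reduction written with warp shuffles instead; it is not part of the original file, the name reduceShuffle is invented here, and it assumes a power-of-two blockDim.x that is a multiple of 32 (such as the 1024-thread blocks launched in main) on a device/toolkit that provides __shfl_down_sync.

// Illustrative only: a block reduction whose last stage uses warp shuffles
// instead of volatile memory accesses. Assumes blockDim.x is a power of two
// and a multiple of 32 (e.g. 1024), matching the launches in main above.
__global__ void reduceShuffle(int *src, int *dst, int num)
{
    unsigned int tid = threadIdx.x;
    unsigned int id  = blockIdx.x * blockDim.x + threadIdx.x;
    int val = (id < num) ? src[id] : 0;

    __shared__ int smem[32];                  // one partial sum per warp

    // Reduce within each warp.
    for (int offset = 16; offset > 0; offset >>= 1)
        val += __shfl_down_sync(0xffffffff, val, offset);

    // Lane 0 of each warp publishes its partial sum.
    if ((tid & 31) == 0) smem[tid >> 5] = val;
    __syncthreads();

    // The first warp reduces the per-warp partial sums.
    if (tid < 32) {
        val = (tid < (blockDim.x >> 5)) ? smem[tid] : 0;
        for (int offset = 16; offset > 0; offset >>= 1)
            val += __shfl_down_sync(0xffffffff, val, offset);
        if (tid == 0) dst[blockIdx.x] = val;
    }
}

A launch of the form reduceShuffle<<<grid, block>>>(x_d, dst_d, num) would leave per-block sums in dst_d in the same way the kernels above do.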
0b3d9ba93cf43c4d1e773128441f4f5d68269cdc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * spmm_csc_driver.cu * Copyright (C) 2020 * Aravind SUKUMARAN RAJAM (asr) <[email protected]> * * Distributed under terms of the GNU LGPL3 license. */ #include "mm_helper.hpp" #include "sparse_representation.hpp" #include <iostream> #define BLK_SIZE 32 void check_dmat(double* a, double *b, unsigned int n, unsigned int K, bool quit_on_err = true ) { for (unsigned int i = 0; i < n; ++i) { for (unsigned int k = 0; k < K; ++k) { if(std::abs(a[i * K + k] - b[i * K + k]) > 1e-1) { std::cerr << "Possible error at " << i << std::endl; if(quit_on_err) { exit(-1); } } } } if(quit_on_err) std::cout << "Verification succeeded\n"; else std::cout << "Check error messages to see if verification succeeded. (No error msg == success)\n"; } static unsigned int g_seed = 0X4B1D; inline int fastrand() { g_seed = (214013 * g_seed + 2531011); return (g_seed >> 16) & 0x7FFF; } void init_dmat(double *a, unsigned int n, unsigned int K, double offset) { for (unsigned int i = 0; i < n; ++i) { for (unsigned int k = 0; k < K; ++k) { a[i * K + k] = i * K + k + offset; //a[i * K + j] = fastrand() + offset; } } } void print_dmat(double *a, unsigned int n, unsigned int K) { for (unsigned int i = 0; i < n; ++i) { for (unsigned int j = 0; j < K; ++j) { std::cout << a[i * K + j] << ' '; } std::cout << '\n'; } } void host_csc_spmm(CSC mat, double * dmat_in, double * dmat_out, unsigned int K) { for (unsigned int r = 0; r < mat.nrows; ++r) { for (unsigned int k = 0; k < K; ++k) { dmat_out[r * K + k] = 0; } } for (unsigned int c = 0; c < mat.ncols; ++c) { unsigned int col_start = mat.col_indx[c]; unsigned int col_end = mat.col_indx[c + 1]; for (unsigned int r = col_start; r < col_end; ++r) { unsigned int row_id = mat.row_id[r]; double val = mat.values[r]; for (unsigned int k = 0; k < K; ++k) { dmat_out[row_id * K + k] += val * dmat_in[c * K + k]; } } } } __global__ void dev_csc_spmm(double *values, int *row_id, int *col_indx, int nnz, int ncols, int nrows, int K, const double *D, double *O){ const int row = blockIdx.y * blockDim.y + threadIdx.y; const int col = blockIdx.x * blockDim.x + threadIdx.x; if(row < K && col < nrows) { double dotProduct = 0; const int col_start = col_indx[col]; const int col_end = col_indx[col+1]; for (int element = col_start; element < col_end; ++element) { dotProduct += values[element] * D[K * row_id[element] + row]; } O[col * K + row] = dotProduct; } } int main(int argc, char *argv[]) { if(argc < 3) { std::cerr << "usage ./exec inputfile K " << std::endl; exit(-1); } unsigned int K = std::atoi(argv[2]); CSC mat = read_matrix_market_to_CSC(argv[1]); std::cout << mat.nrows << ' ' << mat.ncols << ' ' << mat.nnz << ' ' << K << '\n'; double *dmat_in = (double*)malloc(mat.ncols * K * sizeof(double)); double *dmat_out = (double*)malloc(mat.nrows * K * sizeof(double)); init_dmat(dmat_in, mat.ncols, K, 1.0); //print_dmat(dmat_in, mat.ncols, K); host_csc_spmm(mat, dmat_in, dmat_out, K); //device array pointers double *d_values; int *d_row_id; int *d_col_indx; double *d_dmat_in; double *d_dmat_out; hipMalloc(&d_values, sizeof(double)* mat.nnz); hipMalloc(&d_row_id, sizeof(int) * (mat.nnz)); hipMalloc(&d_col_indx, sizeof(int)* mat.ncols+1); hipMalloc(&d_dmat_in, sizeof(double)* K * mat.ncols); hipMalloc(&d_dmat_out, sizeof(double)* K * (mat.nrows)); //----------- Begin kernel call for SpMM_CSR ------------ float time_ms; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); 
//cudamemcopy functions hipMemcpy(d_values, mat.values, sizeof(double) * mat.nnz, hipMemcpyHostToDevice); hipMemcpy(d_col_indx, mat.col_indx, sizeof(int) * mat.ncols+1, hipMemcpyHostToDevice); hipMemcpy(d_row_id, mat.row_id, sizeof(int) * (mat.nnz), hipMemcpyHostToDevice); hipMemcpy(d_dmat_in, dmat_in, sizeof(double)*K*mat.ncols, hipMemcpyHostToDevice); //hipMemcpy(d_dmat_out, dmat_out, sizeof(double)*K*(mat.nrows), hipMemcpyHostToDevice); //define blk and grid size dim3 threads(BLK_SIZE, BLK_SIZE); dim3 grid((int) ceil((float) K/BLK_SIZE), (int) ceil((float) mat.ncols/BLK_SIZE)); //call gpu kernel hipLaunchKernelGGL(( dev_csc_spmm), dim3(grid), dim3(threads), 0, 0, d_values, d_row_id, d_col_indx, mat.nnz, mat.ncols, mat.nrows, K, d_dmat_in, d_dmat_out); hipDeviceSynchronize(); //cudamemcopy gpu result from device to host double *gpu_result = (double*)malloc((mat.nrows) * K * sizeof(double)); hipMemcpy(gpu_result, d_dmat_out, sizeof(double)*K*(mat.nrows), hipMemcpyDeviceToHost); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time_ms, start, stop); //compute GFLOPS double gflop = abs(((2 * K * mat.nnz)/1e9)); double op_time_s = time_ms * 1e-3; double gflops = gflop/op_time_s; printf("Kernel time : %f ms \n", time_ms); printf("GFLOPS : %f \n", gflops); /* for(int i =0; i<mat.nrows*K; i++){ printf("point: %d, gpu: %lf, cpu: %lf \n", i, gpu_result[i], dmat_out[i]); } */ //std::cout << "replace one argument to the below function with the values from gpu " << std::endl; check_dmat(dmat_out, gpu_result, mat.nrows, K); //print_dmat(dmat_out, mat.nrows, K); free(mat.col_indx); free(mat.row_id); free(mat.values); free(gpu_result); hipFree(d_values); hipFree(d_col_indx); hipFree(d_row_id); hipFree(d_dmat_in); hipFree(d_dmat_out); return 0; }
0b3d9ba93cf43c4d1e773128441f4f5d68269cdc.cu
/* * spmm_csc_driver.cu * Copyright (C) 2020 * Aravind SUKUMARAN RAJAM (asr) <[email protected]> * * Distributed under terms of the GNU LGPL3 license. */ #include "mm_helper.hpp" #include "sparse_representation.hpp" #include <iostream> #define BLK_SIZE 32 void check_dmat(double* a, double *b, unsigned int n, unsigned int K, bool quit_on_err = true ) { for (unsigned int i = 0; i < n; ++i) { for (unsigned int k = 0; k < K; ++k) { if(std::abs(a[i * K + k] - b[i * K + k]) > 1e-1) { std::cerr << "Possible error at " << i << std::endl; if(quit_on_err) { exit(-1); } } } } if(quit_on_err) std::cout << "Verification succeeded\n"; else std::cout << "Check error messages to see if verification succeeded. (No error msg == success)\n"; } static unsigned int g_seed = 0X4B1D; inline int fastrand() { g_seed = (214013 * g_seed + 2531011); return (g_seed >> 16) & 0x7FFF; } void init_dmat(double *a, unsigned int n, unsigned int K, double offset) { for (unsigned int i = 0; i < n; ++i) { for (unsigned int k = 0; k < K; ++k) { a[i * K + k] = i * K + k + offset; //a[i * K + j] = fastrand() + offset; } } } void print_dmat(double *a, unsigned int n, unsigned int K) { for (unsigned int i = 0; i < n; ++i) { for (unsigned int j = 0; j < K; ++j) { std::cout << a[i * K + j] << ' '; } std::cout << '\n'; } } void host_csc_spmm(CSC mat, double * dmat_in, double * dmat_out, unsigned int K) { for (unsigned int r = 0; r < mat.nrows; ++r) { for (unsigned int k = 0; k < K; ++k) { dmat_out[r * K + k] = 0; } } for (unsigned int c = 0; c < mat.ncols; ++c) { unsigned int col_start = mat.col_indx[c]; unsigned int col_end = mat.col_indx[c + 1]; for (unsigned int r = col_start; r < col_end; ++r) { unsigned int row_id = mat.row_id[r]; double val = mat.values[r]; for (unsigned int k = 0; k < K; ++k) { dmat_out[row_id * K + k] += val * dmat_in[c * K + k]; } } } } __global__ void dev_csc_spmm(double *values, int *row_id, int *col_indx, int nnz, int ncols, int nrows, int K, const double *D, double *O){ const int row = blockIdx.y * blockDim.y + threadIdx.y; const int col = blockIdx.x * blockDim.x + threadIdx.x; if(row < K && col < nrows) { double dotProduct = 0; const int col_start = col_indx[col]; const int col_end = col_indx[col+1]; for (int element = col_start; element < col_end; ++element) { dotProduct += values[element] * D[K * row_id[element] + row]; } O[col * K + row] = dotProduct; } } int main(int argc, char *argv[]) { if(argc < 3) { std::cerr << "usage ./exec inputfile K " << std::endl; exit(-1); } unsigned int K = std::atoi(argv[2]); CSC mat = read_matrix_market_to_CSC(argv[1]); std::cout << mat.nrows << ' ' << mat.ncols << ' ' << mat.nnz << ' ' << K << '\n'; double *dmat_in = (double*)malloc(mat.ncols * K * sizeof(double)); double *dmat_out = (double*)malloc(mat.nrows * K * sizeof(double)); init_dmat(dmat_in, mat.ncols, K, 1.0); //print_dmat(dmat_in, mat.ncols, K); host_csc_spmm(mat, dmat_in, dmat_out, K); //device array pointers double *d_values; int *d_row_id; int *d_col_indx; double *d_dmat_in; double *d_dmat_out; cudaMalloc(&d_values, sizeof(double)* mat.nnz); cudaMalloc(&d_row_id, sizeof(int) * (mat.nnz)); cudaMalloc(&d_col_indx, sizeof(int)* mat.ncols+1); cudaMalloc(&d_dmat_in, sizeof(double)* K * mat.ncols); cudaMalloc(&d_dmat_out, sizeof(double)* K * (mat.nrows)); //----------- Begin kernel call for SpMM_CSR ------------ float time_ms; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); //cudamemcopy functions cudaMemcpy(d_values, mat.values, sizeof(double) * mat.nnz, 
cudaMemcpyHostToDevice); cudaMemcpy(d_col_indx, mat.col_indx, sizeof(int) * mat.ncols+1, cudaMemcpyHostToDevice); cudaMemcpy(d_row_id, mat.row_id, sizeof(int) * (mat.nnz), cudaMemcpyHostToDevice); cudaMemcpy(d_dmat_in, dmat_in, sizeof(double)*K*mat.ncols, cudaMemcpyHostToDevice); //cudaMemcpy(d_dmat_out, dmat_out, sizeof(double)*K*(mat.nrows), cudaMemcpyHostToDevice); //define blk and grid size dim3 threads(BLK_SIZE, BLK_SIZE); dim3 grid((int) ceil((float) K/BLK_SIZE), (int) ceil((float) mat.ncols/BLK_SIZE)); //call gpu kernel dev_csc_spmm<<<grid, threads>>>(d_values, d_row_id, d_col_indx, mat.nnz, mat.ncols, mat.nrows, K, d_dmat_in, d_dmat_out); cudaDeviceSynchronize(); //cudamemcopy gpu result from device to host double *gpu_result = (double*)malloc((mat.nrows) * K * sizeof(double)); cudaMemcpy(gpu_result, d_dmat_out, sizeof(double)*K*(mat.nrows), cudaMemcpyDeviceToHost); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time_ms, start, stop); //compute GFLOPS double gflop = abs(((2 * K * mat.nnz)/1e9)); double op_time_s = time_ms * 1e-3; double gflops = gflop/op_time_s; printf("Kernel time : %f ms \n", time_ms); printf("GFLOPS : %f \n", gflops); /* for(int i =0; i<mat.nrows*K; i++){ printf("point: %d, gpu: %lf, cpu: %lf \n", i, gpu_result[i], dmat_out[i]); } */ //std::cout << "replace one argument to the below function with the values from gpu " << std::endl; check_dmat(dmat_out, gpu_result, mat.nrows, K); //print_dmat(dmat_out, mat.nrows, K); free(mat.col_indx); free(mat.row_id); free(mat.values); free(gpu_result); cudaFree(d_values); cudaFree(d_col_indx); cudaFree(d_row_id); cudaFree(d_dmat_in); cudaFree(d_dmat_out); return 0; }
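One detail worth flagging in the driver above: the launch builds dim3 grid(ceil(K/BLK_SIZE), ceil(mat.ncols/BLK_SIZE)), while dev_csc_spmm guards with col < nrows on the x index and row < K on the y index. If full coverage of the nrows-by-K output is intended (rather than a sizing tuned to particular inputs), a grid derived from nrows and K would match the kernel's own bounds checks. The helper below is only a sketch and the name spmm_grid is invented here.

#include <cuda_runtime.h>

// Sketch only: a launch grid sized to match dev_csc_spmm's bounds checks,
// where blockIdx.x * BLK_SIZE must reach nrows (the kernel's col index) and
// blockIdx.y * BLK_SIZE must reach K (the kernel's row index).
static dim3 spmm_grid(unsigned int nrows, unsigned int K, unsigned int blk)
{
    return dim3((nrows + blk - 1) / blk, (K + blk - 1) / blk);
}

Used as dim3 grid = spmm_grid(mat.nrows, K, BLK_SIZE); together with the existing dim3 threads(BLK_SIZE, BLK_SIZE) block.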
18274d7d9a24b1b85f03e4ef803798957b97699f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__device__ int f () { return 21; }

__global__ void vDisp(const float *A, const float *B, int ds) {

  int idx = blockIdx.x * block_size + threadIdx.x; // create typical 1D thread index from built-in variables
  printf("idx = %d, ds = %d\n", idx, ds);
  if (idx < ds)
    printf("Device: [%d], \t%f\t%f \n", idx, A[idx], B[idx]); // do the vector (element) add here
}
18274d7d9a24b1b85f03e4ef803798957b97699f.cu
#include "includes.h"

__device__ int f () { return 21; }

__global__ void vDisp(const float *A, const float *B, int ds) {

  int idx = blockIdx.x * block_size + threadIdx.x; // create typical 1D thread index from built-in variables
  printf("idx = %d, ds = %d\n", idx, ds);
  if (idx < ds)
    printf("Device: [%d], \t%f\t%f \n", idx, A[idx], B[idx]); // do the vector (element) add here
}
8674a8c6333d21cabd3a28b4b0bd668bdb0671c1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>

__global__ void thread(float *a)
{
  a[threadIdx.x] = threadIdx.x;
}

int main(void)
{
  const int N = 1025;
  float *a;
  hipMallocManaged(&a, N*sizeof(float));
  hipLaunchKernelGGL(( thread), dim3(4),dim3(N), 0, 0, a);
  hipDeviceSynchronize();
  for (int i=0; i<N; i++) printf("%d %g\n",i,a[i]);
  hipFree(a);
}
8674a8c6333d21cabd3a28b4b0bd668bdb0671c1.cu
#include <cstdio>

__global__ void thread(float *a)
{
  a[threadIdx.x] = threadIdx.x;
}

int main(void)
{
  const int N = 1025;
  float *a;
  cudaMallocManaged(&a, N*sizeof(float));
  thread<<<4,N>>>(a);
  cudaDeviceSynchronize();
  for (int i=0; i<N; i++) printf("%d %g\n",i,a[i]);
  cudaFree(a);
}
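Two properties of this example are easy to miss because it performs no error checking: thread<<<4, N>>> requests 1025 threads per block, which exceeds the 1024-threads-per-block limit of current CUDA devices, so the launch fails and the managed array is printed without ever being written by the kernel; and even with a legal block size, every block would write the same indices 0..blockDim.x-1, since the kernel indexes only by threadIdx.x. The program below is an illustrative variant, not the original file (the kernel is renamed thread_demo), showing how such failures could be surfaced explicitly.

// Illustrative variant (not the original file): the same program with basic
// error checking, so a configuration such as <<<4, 1025>>> reports an
// "invalid configuration argument" error instead of failing silently.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void thread_demo(float *a) { a[threadIdx.x] = threadIdx.x; }

int main(void)
{
    const int N = 1025;
    float *a;
    cudaMallocManaged(&a, N * sizeof(float));

    thread_demo<<<4, N>>>(a);                 // blockDim.x = 1025 > 1024
    cudaError_t err = cudaGetLastError();     // catches launch-time errors
    if (err != cudaSuccess)
        printf("launch failed: %s\n", cudaGetErrorString(err));

    err = cudaDeviceSynchronize();            // catches execution-time errors
    if (err != cudaSuccess)
        printf("execution failed: %s\n", cudaGetErrorString(err));

    cudaFree(a);
    return 0;
}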
98d841bf450717db1ba2b96a5f1cfb1bb2a094ba.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/TensorUtils.h> #include <ATen/hip/Atomic.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/core/TensorAccessor.h> #include <ATen/hip/detail/KernelUtils.h> #include <c10/hip/HIPException.h> #include <c10/macros/Macros.h> #include <ATen/native/Resize.h> #include <ATen/native/hip/block_reduce.cuh> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/empty.h> #include <ATen/ops/empty_like.h> #include <ATen/ops/nll_loss2d_forward_native.h> #include <ATen/ops/nll_loss2d_backward_native.h> #endif namespace at::native { namespace { // Returns a contiguous tensor if the source tensor // is defined. Otherwise returns the undefined // source tensor unmodified. inline Tensor optional_contiguous(const Tensor& source) { return source.defined() ? source.contiguous() : source; } // Returns the address of the first element of a tensor // or nullptr if the tensor is undefined. template <typename scalar_t> inline scalar_t* optional_data(const Tensor& source) { return source.defined() ? source.data_ptr<scalar_t>() : nullptr; } using at::cuda::detail::CUDA_NUM_THREADS; using at::cuda::detail::GET_BLOCKS; // TODO(crcrpar): Think about introducing `canUse32BitIndexMath` and choose int or int64_t for `target`. template <typename scalar_t> C10_LAUNCH_BOUNDS_1(CUDA_NUM_THREADS) __global__ void nll_loss2d_forward_no_reduce_kernel( int64_t n_threads, PackedTensorAccessor64<scalar_t, 4> input, PackedTensorAccessor64<int64_t, 3> target, PackedTensorAccessor64<scalar_t, 3> output, scalar_t* weight, int64_t ignore_index ) { int64_t batch_size = input.size(0); int64_t H = input.size(2); int64_t W = input.size(3); CUDA_KERNEL_LOOP(index, n_threads) { const int64_t b = index % batch_size; const int64_t h = (index / batch_size) % H; const int64_t w = (index / (batch_size * H)) % W; int64_t cur_target = target[b][h][w]; if (cur_target == ignore_index) { output[b][h][w] = static_cast<scalar_t>(0); continue; } scalar_t value = input[b][cur_target][h][w]; scalar_t cur_weight = weight != nullptr ? weight[cur_target] : static_cast<scalar_t>(1); output[b][h][w] = -value * cur_weight; } } template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(CUDA_NUM_THREADS) __global__ void nll_loss2d_forward_kernel( scalar_t* output, scalar_t* total_weight, scalar_t* input, int64_t* target, scalar_t* weight, int n_classes, int map_nelem, int blocks_per_sample, int64_t ignore_index) { scalar_t cur_weight; accscalar_t input_sum = 0; accscalar_t acc_weight = 0; int sample = blockIdx.x / blocks_per_sample; int toffset = sample * map_nelem; int ioffset = sample * map_nelem * n_classes; int step = blockDim.x * blocks_per_sample; for (int i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x; i < map_nelem; i += step) { int64_t t = target[toffset + i]; if (t != ignore_index) { CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes); cur_weight = weight != nullptr ? 
weight[t] : static_cast<scalar_t>(1); const auto input_index = ioffset + i + map_nelem * t; CUDA_KERNEL_ASSERT(input_index >= 0); input_sum -= input[input_index] * cur_weight; acc_weight += cur_weight; } } __shared__ accscalar_t acc_weight_smem[CUDA_NUM_THREADS]; __shared__ accscalar_t input_sum_smem[CUDA_NUM_THREADS]; auto acc_weight_ = cuda_utils::BlockReduceSum(acc_weight, acc_weight_smem); auto input_sum_ = cuda_utils::BlockReduceSum(input_sum, input_sum_smem); if (threadIdx.x == 0) { gpuAtomicAdd(total_weight, static_cast<scalar_t>(acc_weight_)); gpuAtomicAdd(output, static_cast<scalar_t>(input_sum_)); } } template <typename scalar_t> C10_LAUNCH_BOUNDS_1(CUDA_NUM_THREADS) __global__ void nll_loss2d_forward_size_average_kernel( scalar_t* output, scalar_t* total_weight ) { *output /= *total_weight; } template <typename scalar_t> C10_LAUNCH_BOUNDS_1(CUDA_NUM_THREADS) __global__ void nll_loss2d_backward_no_reduce_kernel( int64_t n_threads, PackedTensorAccessor64<int64_t, 3> target, PackedTensorAccessor64<scalar_t, 3> grad_output, PackedTensorAccessor64<scalar_t, 4> grad_input, scalar_t* weight, int64_t ignore_index ) { int64_t batch_size = target.size(0); int64_t H = target.size(1); int64_t W = target.size(2); CUDA_KERNEL_LOOP(index, n_threads) { const int64_t b = index % batch_size; const int64_t h = (index / batch_size) % H; const int64_t w = (index / (batch_size * H)) % W; int64_t cur_target = target[b][h][w]; if (cur_target == ignore_index) { continue; } scalar_t value = -(weight != nullptr ? weight[cur_target] : static_cast<scalar_t>(1)); grad_input[b][cur_target][h][w] = value * grad_output[b][h][w]; } } template <typename scalar_t> C10_LAUNCH_BOUNDS_1(CUDA_NUM_THREADS) __global__ void nll_loss2d_backward_kernel( scalar_t* grad_input, scalar_t* grad_output, int64_t* target, scalar_t* weights, scalar_t* total_weight, bool size_average, int n_classes, int map_nelem, int blocks_per_sample, int64_t ignore_index ) { const auto grad = -(size_average ? *grad_output / *total_weight : *grad_output); const int sample = blockIdx.x / blocks_per_sample; const int step = blockDim.x * blocks_per_sample; const int toffset = sample * map_nelem; const auto* const target_thread = target + toffset; const int ioffset = sample * map_nelem * n_classes; auto* const grad_input_thread = grad_input + ioffset; for (int i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x; i < map_nelem; i += step) { const int64_t t = target_thread[i]; if (t != ignore_index) { CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes); const auto grad_input_index = i + map_nelem * t; CUDA_KERNEL_ASSERT(grad_input_index >= 0); grad_input_thread[i + map_nelem * t] = weights != nullptr ? 
weights[t] * grad : grad; } } } void check_inputs_nll_loss2d( const Tensor& input, const Tensor& target, const Tensor& weight) { TORCH_CHECK( target.dim() == 3, "only batches of spatial targets supported (3D tensors)" " but got targets of size: : ", target.sizes()); TORCH_CHECK( input.dim() == 4, "only batches of spatial inputs supported (4D tensors), " "but got input of size: ", input.sizes()); TORCH_CHECK( !weight.defined() || weight.numel() == input.size(1), "weight tensor should be defined either for all or no classes"); TORCH_CHECK( input.size(0) == target.size(0) && input.size(2) == target.size(1) && input.size(3) == target.size(2), "input and target batch or spatial sizes don't match: target ", target.sizes(), ", input ", input.sizes()); } void nll_loss2d_forward_out_cuda_template( Tensor& output, Tensor& total_weight, const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, int64_t ignore_index) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage in 'sum' or 'mean' reductions. if (reduction != at::Reduction::None) { at::globalContext().alertNotDeterministic("nll_loss2d_forward_out_cuda_template"); } // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; check_inputs_nll_loss2d(input, target, weight); total_weight.resize_({}); if (reduction == at::Reduction::None) { int64_t batch_size = input.size(0); int64_t H = input.size(2); int64_t W = input.size(3); int64_t count = batch_size * H * W; at::native::resize_output(output, {batch_size, H, W}); if (count == 0) { // This guards from unnecessary operations and launching CUDA kernel with // 0 blocks. return; } auto weight_ = optional_contiguous(weight); AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss2d_forward_no_reduce_kernel", [&] { hipLaunchKernelGGL(( nll_loss2d_forward_no_reduce_kernel<scalar_t>) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, input.packed_accessor64<scalar_t, 4>(), target.packed_accessor64<int64_t, 3>(), output.packed_accessor64<scalar_t, 3>(), optional_data<scalar_t>(weight_), ignore_index); C10_HIP_KERNEL_LAUNCH_CHECK(); }); return; } // produce scalar outputs for the reduction case at::native::resize_output(output, {}); if (target.numel() == 0) { // Here target (and input) have zero elements // Mean reduction on empty tensors produces NaN. See the discussion in // https://github.com/pytorch/pytorch/pull/64572#issuecomment-926504162 if (reduction == Reduction::Mean) { output.fill_(std::numeric_limits<double>::quiet_NaN()); } else { output.zero_(); } total_weight.zero_(); return; } auto input_ = input.contiguous(); auto weight_ = optional_contiguous(weight); auto target_ = target.contiguous(); output.zero_(); total_weight.zero_(); auto batch_size = target.size(0); int64_t map_nelem = target.numel() / batch_size; int blocks_per_sample = GET_BLOCKS(map_nelem) / 128; blocks_per_sample = (blocks_per_sample == 0) ? 
1 : blocks_per_sample; int total_blocks = blocks_per_sample * batch_size; AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss2d_forward_kernel", [&] { using accscalar_t = acc_type<scalar_t, true>; hipLaunchKernelGGL(( nll_loss2d_forward_kernel<scalar_t, accscalar_t>) , dim3(total_blocks), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), output.data_ptr<scalar_t>(), total_weight.data_ptr<scalar_t>(), input_.data_ptr<scalar_t>(), target_.data_ptr<int64_t>(), optional_data<scalar_t>(weight_), input_.size(1), input_.size(2) * input_.size(3), blocks_per_sample, ignore_index); C10_HIP_KERNEL_LAUNCH_CHECK(); // Divide by total_weight if (reduction == at::Reduction::Mean) { hipLaunchKernelGGL(( nll_loss2d_forward_size_average_kernel<scalar_t>) , dim3(1), dim3(1), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), output.data_ptr<scalar_t>(), total_weight.data_ptr<scalar_t>()); C10_HIP_KERNEL_LAUNCH_CHECK(); } }); } void nll_loss2d_backward_out_cuda_template( Tensor& grad_input, const Tensor& grad_output, const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, int64_t ignore_index, const Tensor& total_weight) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; check_inputs_nll_loss2d(input, target, weight); grad_input.resize_as_(input); grad_input.zero_(); TORCH_CHECK(grad_input.is_contiguous(), "grad_input must be contiguous"); TORCH_CHECK( total_weight.numel() == 1, "expected total_weight to be a single element tensor, got: ", total_weight.sizes(), " (", total_weight.numel(), " elements)"); if (reduction == at::Reduction::None) { TORCH_CHECK( grad_output.dim() == 3, "grad_output must have same dimension as target (3) but got dimension: ", grad_output.sizes()); TORCH_CHECK( grad_output.size(0) == target.size(0) && grad_output.size(1) == target.size(1) && grad_output.size(2) == target.size(2), "grad_output sizes don't match target sizes: target ", target.sizes(), ", grad_output ", grad_output.sizes()) int64_t batch_size = input.size(0); int64_t H = input.size(2); int64_t W = input.size(3); int64_t count = batch_size * H * W; if (count == 0) { // This guards from unnecessary operations and launching CUDA kernel with // 0 blocks. return; } auto weight_ = optional_contiguous(weight); AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss2d_backward_no_reduce_kernel", [&] { hipLaunchKernelGGL(( nll_loss2d_backward_no_reduce_kernel<scalar_t>) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, target.packed_accessor64<int64_t, 3>(), grad_output.packed_accessor64<scalar_t, 3>(), grad_input.packed_accessor64<scalar_t, 4>(), optional_data<scalar_t>(weight_), ignore_index); C10_HIP_KERNEL_LAUNCH_CHECK(); }); return; } int64_t batch_size = target.size(0); auto target_numel = target.numel(); if (batch_size != 0 && target_numel != 0) { // This guards from unnecessary operations and launching CUDA kernel with 1 // blocks. auto target_ = target.contiguous(); auto weight_ = optional_contiguous(weight); int64_t map_nelem = target_numel / batch_size; int blocks_per_sample = GET_BLOCKS(map_nelem) / 128; blocks_per_sample = (blocks_per_sample == 0) ? 
1 : blocks_per_sample; int total_blocks = blocks_per_sample * batch_size; AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss2d_backward_kernel", [&] { hipLaunchKernelGGL(( nll_loss2d_backward_kernel<scalar_t>) , dim3(total_blocks), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_input.data_ptr<scalar_t>(), grad_output.data_ptr<scalar_t>(), target_.data_ptr<int64_t>(), optional_data<scalar_t>(weight_), total_weight.data_ptr<scalar_t>(), reduction == at::Reduction::Mean, input.size(1), map_nelem, blocks_per_sample, ignore_index); C10_HIP_KERNEL_LAUNCH_CHECK(); }); } } } // namespace std::tuple<Tensor&, Tensor&> nll_loss2d_forward_out_cuda( const Tensor& self, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, int64_t ignore_index, Tensor& output, Tensor& total_weight) { nll_loss2d_forward_out_cuda_template( output, total_weight, self, target, weight_opt, reduction, ignore_index); return std::tuple<Tensor&, Tensor&>(output, total_weight); } std::tuple<Tensor, Tensor> nll_loss2d_forward_cuda( const Tensor& self, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, int64_t ignore_index) { auto output = at::empty({0}, self.options()); auto total_weight = at::empty({0}, self.options()); nll_loss2d_forward_out_cuda_template( output, total_weight, self, target, weight_opt, reduction, ignore_index); return std::make_tuple(output, total_weight); } Tensor& nll_loss2d_backward_out_cuda( const Tensor& grad_output, const Tensor& self, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, int64_t ignore_index, const Tensor& total_weight, Tensor& grad_input) { nll_loss2d_backward_out_cuda_template( grad_input, grad_output, self, target, weight_opt, reduction, ignore_index, total_weight); return grad_input; } Tensor nll_loss2d_backward_cuda( const Tensor& grad_output, const Tensor& self, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, int64_t ignore_index, const Tensor& total_weight) { auto grad_input = at::empty_like(self); nll_loss2d_backward_out_cuda_template( grad_input, grad_output, self, target, weight_opt, reduction, ignore_index, total_weight); return grad_input; } } // namespace at::native
98d841bf450717db1ba2b96a5f1cfb1bb2a094ba.cu
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/TensorUtils.h> #include <ATen/cuda/Atomic.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/core/TensorAccessor.h> #include <ATen/cuda/detail/KernelUtils.h> #include <c10/cuda/CUDAException.h> #include <c10/macros/Macros.h> #include <ATen/native/Resize.h> #include <ATen/native/cuda/block_reduce.cuh> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/empty.h> #include <ATen/ops/empty_like.h> #include <ATen/ops/nll_loss2d_forward_native.h> #include <ATen/ops/nll_loss2d_backward_native.h> #endif namespace at::native { namespace { // Returns a contiguous tensor if the source tensor // is defined. Otherwise returns the undefined // source tensor unmodified. inline Tensor optional_contiguous(const Tensor& source) { return source.defined() ? source.contiguous() : source; } // Returns the address of the first element of a tensor // or nullptr if the tensor is undefined. template <typename scalar_t> inline scalar_t* optional_data(const Tensor& source) { return source.defined() ? source.data_ptr<scalar_t>() : nullptr; } using at::cuda::detail::CUDA_NUM_THREADS; using at::cuda::detail::GET_BLOCKS; // TODO(crcrpar): Think about introducing `canUse32BitIndexMath` and choose int or int64_t for `target`. template <typename scalar_t> C10_LAUNCH_BOUNDS_1(CUDA_NUM_THREADS) __global__ void nll_loss2d_forward_no_reduce_kernel( int64_t n_threads, PackedTensorAccessor64<scalar_t, 4> input, PackedTensorAccessor64<int64_t, 3> target, PackedTensorAccessor64<scalar_t, 3> output, scalar_t* weight, int64_t ignore_index ) { int64_t batch_size = input.size(0); int64_t H = input.size(2); int64_t W = input.size(3); CUDA_KERNEL_LOOP(index, n_threads) { const int64_t b = index % batch_size; const int64_t h = (index / batch_size) % H; const int64_t w = (index / (batch_size * H)) % W; int64_t cur_target = target[b][h][w]; if (cur_target == ignore_index) { output[b][h][w] = static_cast<scalar_t>(0); continue; } scalar_t value = input[b][cur_target][h][w]; scalar_t cur_weight = weight != nullptr ? weight[cur_target] : static_cast<scalar_t>(1); output[b][h][w] = -value * cur_weight; } } template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(CUDA_NUM_THREADS) __global__ void nll_loss2d_forward_kernel( scalar_t* output, scalar_t* total_weight, scalar_t* input, int64_t* target, scalar_t* weight, int n_classes, int map_nelem, int blocks_per_sample, int64_t ignore_index) { scalar_t cur_weight; accscalar_t input_sum = 0; accscalar_t acc_weight = 0; int sample = blockIdx.x / blocks_per_sample; int toffset = sample * map_nelem; int ioffset = sample * map_nelem * n_classes; int step = blockDim.x * blocks_per_sample; for (int i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x; i < map_nelem; i += step) { int64_t t = target[toffset + i]; if (t != ignore_index) { CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes); cur_weight = weight != nullptr ? 
weight[t] : static_cast<scalar_t>(1); const auto input_index = ioffset + i + map_nelem * t; CUDA_KERNEL_ASSERT(input_index >= 0); input_sum -= input[input_index] * cur_weight; acc_weight += cur_weight; } } __shared__ accscalar_t acc_weight_smem[CUDA_NUM_THREADS]; __shared__ accscalar_t input_sum_smem[CUDA_NUM_THREADS]; auto acc_weight_ = cuda_utils::BlockReduceSum(acc_weight, acc_weight_smem); auto input_sum_ = cuda_utils::BlockReduceSum(input_sum, input_sum_smem); if (threadIdx.x == 0) { gpuAtomicAdd(total_weight, static_cast<scalar_t>(acc_weight_)); gpuAtomicAdd(output, static_cast<scalar_t>(input_sum_)); } } template <typename scalar_t> C10_LAUNCH_BOUNDS_1(CUDA_NUM_THREADS) __global__ void nll_loss2d_forward_size_average_kernel( scalar_t* output, scalar_t* total_weight ) { *output /= *total_weight; } template <typename scalar_t> C10_LAUNCH_BOUNDS_1(CUDA_NUM_THREADS) __global__ void nll_loss2d_backward_no_reduce_kernel( int64_t n_threads, PackedTensorAccessor64<int64_t, 3> target, PackedTensorAccessor64<scalar_t, 3> grad_output, PackedTensorAccessor64<scalar_t, 4> grad_input, scalar_t* weight, int64_t ignore_index ) { int64_t batch_size = target.size(0); int64_t H = target.size(1); int64_t W = target.size(2); CUDA_KERNEL_LOOP(index, n_threads) { const int64_t b = index % batch_size; const int64_t h = (index / batch_size) % H; const int64_t w = (index / (batch_size * H)) % W; int64_t cur_target = target[b][h][w]; if (cur_target == ignore_index) { continue; } scalar_t value = -(weight != nullptr ? weight[cur_target] : static_cast<scalar_t>(1)); grad_input[b][cur_target][h][w] = value * grad_output[b][h][w]; } } template <typename scalar_t> C10_LAUNCH_BOUNDS_1(CUDA_NUM_THREADS) __global__ void nll_loss2d_backward_kernel( scalar_t* grad_input, scalar_t* grad_output, int64_t* target, scalar_t* weights, scalar_t* total_weight, bool size_average, int n_classes, int map_nelem, int blocks_per_sample, int64_t ignore_index ) { const auto grad = -(size_average ? *grad_output / *total_weight : *grad_output); const int sample = blockIdx.x / blocks_per_sample; const int step = blockDim.x * blocks_per_sample; const int toffset = sample * map_nelem; const auto* const target_thread = target + toffset; const int ioffset = sample * map_nelem * n_classes; auto* const grad_input_thread = grad_input + ioffset; for (int i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x; i < map_nelem; i += step) { const int64_t t = target_thread[i]; if (t != ignore_index) { CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes); const auto grad_input_index = i + map_nelem * t; CUDA_KERNEL_ASSERT(grad_input_index >= 0); grad_input_thread[i + map_nelem * t] = weights != nullptr ? 
weights[t] * grad : grad; } } } void check_inputs_nll_loss2d( const Tensor& input, const Tensor& target, const Tensor& weight) { TORCH_CHECK( target.dim() == 3, "only batches of spatial targets supported (3D tensors)" " but got targets of size: : ", target.sizes()); TORCH_CHECK( input.dim() == 4, "only batches of spatial inputs supported (4D tensors), " "but got input of size: ", input.sizes()); TORCH_CHECK( !weight.defined() || weight.numel() == input.size(1), "weight tensor should be defined either for all or no classes"); TORCH_CHECK( input.size(0) == target.size(0) && input.size(2) == target.size(1) && input.size(3) == target.size(2), "input and target batch or spatial sizes don't match: target ", target.sizes(), ", input ", input.sizes()); } void nll_loss2d_forward_out_cuda_template( Tensor& output, Tensor& total_weight, const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, int64_t ignore_index) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage in 'sum' or 'mean' reductions. if (reduction != at::Reduction::None) { at::globalContext().alertNotDeterministic("nll_loss2d_forward_out_cuda_template"); } // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; check_inputs_nll_loss2d(input, target, weight); total_weight.resize_({}); if (reduction == at::Reduction::None) { int64_t batch_size = input.size(0); int64_t H = input.size(2); int64_t W = input.size(3); int64_t count = batch_size * H * W; at::native::resize_output(output, {batch_size, H, W}); if (count == 0) { // This guards from unnecessary operations and launching CUDA kernel with // 0 blocks. return; } auto weight_ = optional_contiguous(weight); AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss2d_forward_no_reduce_kernel", [&] { nll_loss2d_forward_no_reduce_kernel<scalar_t> <<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( count, input.packed_accessor64<scalar_t, 4>(), target.packed_accessor64<int64_t, 3>(), output.packed_accessor64<scalar_t, 3>(), optional_data<scalar_t>(weight_), ignore_index); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); return; } // produce scalar outputs for the reduction case at::native::resize_output(output, {}); if (target.numel() == 0) { // Here target (and input) have zero elements // Mean reduction on empty tensors produces NaN. See the discussion in // https://github.com/pytorch/pytorch/pull/64572#issuecomment-926504162 if (reduction == Reduction::Mean) { output.fill_(std::numeric_limits<double>::quiet_NaN()); } else { output.zero_(); } total_weight.zero_(); return; } auto input_ = input.contiguous(); auto weight_ = optional_contiguous(weight); auto target_ = target.contiguous(); output.zero_(); total_weight.zero_(); auto batch_size = target.size(0); int64_t map_nelem = target.numel() / batch_size; int blocks_per_sample = GET_BLOCKS(map_nelem) / 128; blocks_per_sample = (blocks_per_sample == 0) ? 
1 : blocks_per_sample; int total_blocks = blocks_per_sample * batch_size; AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss2d_forward_kernel", [&] { using accscalar_t = acc_type<scalar_t, true>; nll_loss2d_forward_kernel<scalar_t, accscalar_t> <<<total_blocks, CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( output.data_ptr<scalar_t>(), total_weight.data_ptr<scalar_t>(), input_.data_ptr<scalar_t>(), target_.data_ptr<int64_t>(), optional_data<scalar_t>(weight_), input_.size(1), input_.size(2) * input_.size(3), blocks_per_sample, ignore_index); C10_CUDA_KERNEL_LAUNCH_CHECK(); // Divide by total_weight if (reduction == at::Reduction::Mean) { nll_loss2d_forward_size_average_kernel<scalar_t> <<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>( output.data_ptr<scalar_t>(), total_weight.data_ptr<scalar_t>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); } }); } void nll_loss2d_backward_out_cuda_template( Tensor& grad_input, const Tensor& grad_output, const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, int64_t ignore_index, const Tensor& total_weight) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; check_inputs_nll_loss2d(input, target, weight); grad_input.resize_as_(input); grad_input.zero_(); TORCH_CHECK(grad_input.is_contiguous(), "grad_input must be contiguous"); TORCH_CHECK( total_weight.numel() == 1, "expected total_weight to be a single element tensor, got: ", total_weight.sizes(), " (", total_weight.numel(), " elements)"); if (reduction == at::Reduction::None) { TORCH_CHECK( grad_output.dim() == 3, "grad_output must have same dimension as target (3) but got dimension: ", grad_output.sizes()); TORCH_CHECK( grad_output.size(0) == target.size(0) && grad_output.size(1) == target.size(1) && grad_output.size(2) == target.size(2), "grad_output sizes don't match target sizes: target ", target.sizes(), ", grad_output ", grad_output.sizes()) int64_t batch_size = input.size(0); int64_t H = input.size(2); int64_t W = input.size(3); int64_t count = batch_size * H * W; if (count == 0) { // This guards from unnecessary operations and launching CUDA kernel with // 0 blocks. return; } auto weight_ = optional_contiguous(weight); AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss2d_backward_no_reduce_kernel", [&] { nll_loss2d_backward_no_reduce_kernel<scalar_t> <<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( count, target.packed_accessor64<int64_t, 3>(), grad_output.packed_accessor64<scalar_t, 3>(), grad_input.packed_accessor64<scalar_t, 4>(), optional_data<scalar_t>(weight_), ignore_index); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); return; } int64_t batch_size = target.size(0); auto target_numel = target.numel(); if (batch_size != 0 && target_numel != 0) { // This guards from unnecessary operations and launching CUDA kernel with 1 // blocks. auto target_ = target.contiguous(); auto weight_ = optional_contiguous(weight); int64_t map_nelem = target_numel / batch_size; int blocks_per_sample = GET_BLOCKS(map_nelem) / 128; blocks_per_sample = (blocks_per_sample == 0) ? 
1 : blocks_per_sample; int total_blocks = blocks_per_sample * batch_size; AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "nll_loss2d_backward_kernel", [&] { nll_loss2d_backward_kernel<scalar_t> <<<total_blocks, CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( grad_input.data_ptr<scalar_t>(), grad_output.data_ptr<scalar_t>(), target_.data_ptr<int64_t>(), optional_data<scalar_t>(weight_), total_weight.data_ptr<scalar_t>(), reduction == at::Reduction::Mean, input.size(1), map_nelem, blocks_per_sample, ignore_index); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); } } } // namespace std::tuple<Tensor&, Tensor&> nll_loss2d_forward_out_cuda( const Tensor& self, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, int64_t ignore_index, Tensor& output, Tensor& total_weight) { nll_loss2d_forward_out_cuda_template( output, total_weight, self, target, weight_opt, reduction, ignore_index); return std::tuple<Tensor&, Tensor&>(output, total_weight); } std::tuple<Tensor, Tensor> nll_loss2d_forward_cuda( const Tensor& self, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, int64_t ignore_index) { auto output = at::empty({0}, self.options()); auto total_weight = at::empty({0}, self.options()); nll_loss2d_forward_out_cuda_template( output, total_weight, self, target, weight_opt, reduction, ignore_index); return std::make_tuple(output, total_weight); } Tensor& nll_loss2d_backward_out_cuda( const Tensor& grad_output, const Tensor& self, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, int64_t ignore_index, const Tensor& total_weight, Tensor& grad_input) { nll_loss2d_backward_out_cuda_template( grad_input, grad_output, self, target, weight_opt, reduction, ignore_index, total_weight); return grad_input; } Tensor nll_loss2d_backward_cuda( const Tensor& grad_output, const Tensor& self, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, int64_t ignore_index, const Tensor& total_weight) { auto grad_input = at::empty_like(self); nll_loss2d_backward_out_cuda_template( grad_input, grad_output, self, target, weight_opt, reduction, ignore_index, total_weight); return grad_input; } } // namespace at::native
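Both reduction paths above size their grids as GET_BLOCKS(map_nelem) / 128, clamped to at least one block per sample, and then launch blocks_per_sample * batch_size blocks in total. The helper below restates that arithmetic as plain host code; the function name is invented here, GET_BLOCKS is modeled as a ceiling division by CUDA_NUM_THREADS, and the actual value of CUDA_NUM_THREADS comes from ATen's KernelUtils.h rather than being assumed.

#include <cstdint>

// Sketch of the grid-sizing arithmetic used by nll_loss2d_forward_kernel and
// nll_loss2d_backward_kernel above. threads_per_block stands in for
// CUDA_NUM_THREADS; GET_BLOCKS(n) is treated as ceil(n / threads_per_block).
static int blocks_per_sample_for(int64_t map_nelem, int64_t threads_per_block)
{
    int64_t get_blocks = (map_nelem + threads_per_block - 1) / threads_per_block;
    int64_t blocks_per_sample = get_blocks / 128;
    return static_cast<int>(blocks_per_sample == 0 ? 1 : blocks_per_sample);
}

Each of those blocks then strides through its sample with step = blockDim.x * blocks_per_sample, so every element of the per-sample map is visited exactly once.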
8aa4a20dba1e6bf1883c044ac5b21c693254ce99.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "conv_op.hpp" namespace Shadow { namespace Vision { #if defined(USE_ROCM) template <typename T> __global__ void KernelIm2Col(const T *in_data, int offset, int count, int in_c, int in_h, int in_w, int kernel_size, int stride, int pad, int dilation, int zero_point, int out_h, int out_w, T *col_data) { CUDA_KERNEL_LOOP(globalid, count) { int h_index = globalid / out_w; int h_col = h_index % out_h; int w_col = globalid % out_w; int c_im = h_index / out_h; int c_col = c_im * kernel_size * kernel_size; int h_offset = h_col * stride - pad; int w_offset = w_col * stride - pad; col_data += (c_col * out_h + h_col) * out_w + w_col; in_data += offset + (c_im * in_h + h_offset) * in_w + w_offset; for (int i = 0; i < kernel_size; ++i) { for (int j = 0; j < kernel_size; ++j) { int h_im = h_offset + i * dilation; int w_im = w_offset + j * dilation; *col_data = (h_im >= 0 && w_im >= 0 && h_im < in_h && w_im < in_w) ? in_data[i * dilation * in_w + j * dilation] : static_cast<T>(zero_point); col_data += out_h * out_w; } } } } template <typename T> void Im2Col(const T *in_data, const VecInt &in_shape, int offset, int kernel_size, int stride, int pad, int dilation, int zero_point, const VecInt &out_shape, T *col_data) { int in_c = in_shape[1], in_h = in_shape[2], in_w = in_shape[3]; int out_h = out_shape[2], out_w = out_shape[3]; int count = in_c * out_h * out_w; hipLaunchKernelGGL(( KernelIm2Col<T>), dim3(GetBlocks(count)), dim3(NumThreads), 0, 0, in_data, offset, count, in_c, in_h, in_w, kernel_size, stride, pad, dilation, zero_point, out_h, out_w, col_data); CUDA_CHECK(hipPeekAtLastError()); } template void Im2Col(const float *in_data, const VecInt &in_shape, int offset, int kernel_size, int stride, int pad, int dilation, int zero_point, const VecInt &out_shape, float *col_data); template <typename T> __global__ void KernelDepthwise(const T *in_data, int count, const T *weight_data, const T *bias_data, int in_c, int in_h, int in_w, int out_h, int out_w, int kernel_size, int stride, int pad, int bias_term, T *out_data) { CUDA_KERNEL_LOOP(globalid, count) { int w = globalid % out_w; int h = (globalid / out_w) % out_h; int c = (globalid / out_w / out_h) % in_c; int n = globalid / out_w / out_h / in_c; const T *in_offset_data = in_data + (n * in_c + c) * in_h * in_w; const T *weight_offset_data = weight_data + c * kernel_size * kernel_size; int hstart = h * stride - pad, wstart = w * stride - pad; int hend = min(hstart + kernel_size, in_h + pad); int wend = min(wstart + kernel_size, in_w + pad); hstart = max(hstart, 0), wstart = max(wstart, 0); hend = min(hend, in_h), wend = min(wend, in_w); int khstart = hend < kernel_size ? (kernel_size - hend) : 0; int kwstart = wend < kernel_size ? 
(kernel_size - wend) : 0; auto sum_val = T(0); for (int kh = hstart; kh < hend; ++kh) { for (int kw = wstart; kw < wend; ++kw) { sum_val += in_offset_data[kh * in_w + kw] * weight_offset_data[(khstart + kh - hstart) * kernel_size + kwstart + kw - wstart]; } } if (bias_term) { sum_val += bias_data[c]; } out_data[globalid] = sum_val; } } template <typename T> void Depthwise(const T *in_data, const VecInt &in_shape, const T *weight_data, const T *bias_data, int kernel_size, int stride, int pad, int bias_term, const VecInt &out_shape, T *out_data) { int batch = in_shape[0]; int in_c = in_shape[1], in_h = in_shape[2], in_w = in_shape[3]; int out_h = out_shape[2], out_w = out_shape[3]; int count = batch * in_c * out_h * out_w; hipLaunchKernelGGL(( KernelDepthwise<T>), dim3(GetBlocks(count)), dim3(NumThreads), 0, 0, in_data, count, weight_data, bias_data, in_c, in_h, in_w, out_h, out_w, kernel_size, stride, pad, bias_term, out_data); CUDA_CHECK(hipPeekAtLastError()); } template void Depthwise(const float *in_data, const VecInt &in_shape, const float *weight_data, const float *bias_data, int kernel_size, int stride, int pad, int bias_term, const VecInt &out_shape, float *out_data); #endif } // namespace Vision } // namespace Shadow
8aa4a20dba1e6bf1883c044ac5b21c693254ce99.cu
#include "conv_op.hpp" namespace Shadow { namespace Vision { #if defined(USE_CUDA) template <typename T> __global__ void KernelIm2Col(const T *in_data, int offset, int count, int in_c, int in_h, int in_w, int kernel_size, int stride, int pad, int dilation, int zero_point, int out_h, int out_w, T *col_data) { CUDA_KERNEL_LOOP(globalid, count) { int h_index = globalid / out_w; int h_col = h_index % out_h; int w_col = globalid % out_w; int c_im = h_index / out_h; int c_col = c_im * kernel_size * kernel_size; int h_offset = h_col * stride - pad; int w_offset = w_col * stride - pad; col_data += (c_col * out_h + h_col) * out_w + w_col; in_data += offset + (c_im * in_h + h_offset) * in_w + w_offset; for (int i = 0; i < kernel_size; ++i) { for (int j = 0; j < kernel_size; ++j) { int h_im = h_offset + i * dilation; int w_im = w_offset + j * dilation; *col_data = (h_im >= 0 && w_im >= 0 && h_im < in_h && w_im < in_w) ? in_data[i * dilation * in_w + j * dilation] : static_cast<T>(zero_point); col_data += out_h * out_w; } } } } template <typename T> void Im2Col(const T *in_data, const VecInt &in_shape, int offset, int kernel_size, int stride, int pad, int dilation, int zero_point, const VecInt &out_shape, T *col_data) { int in_c = in_shape[1], in_h = in_shape[2], in_w = in_shape[3]; int out_h = out_shape[2], out_w = out_shape[3]; int count = in_c * out_h * out_w; KernelIm2Col<T><<<GetBlocks(count), NumThreads>>>( in_data, offset, count, in_c, in_h, in_w, kernel_size, stride, pad, dilation, zero_point, out_h, out_w, col_data); CUDA_CHECK(cudaPeekAtLastError()); } template void Im2Col(const float *in_data, const VecInt &in_shape, int offset, int kernel_size, int stride, int pad, int dilation, int zero_point, const VecInt &out_shape, float *col_data); template <typename T> __global__ void KernelDepthwise(const T *in_data, int count, const T *weight_data, const T *bias_data, int in_c, int in_h, int in_w, int out_h, int out_w, int kernel_size, int stride, int pad, int bias_term, T *out_data) { CUDA_KERNEL_LOOP(globalid, count) { int w = globalid % out_w; int h = (globalid / out_w) % out_h; int c = (globalid / out_w / out_h) % in_c; int n = globalid / out_w / out_h / in_c; const T *in_offset_data = in_data + (n * in_c + c) * in_h * in_w; const T *weight_offset_data = weight_data + c * kernel_size * kernel_size; int hstart = h * stride - pad, wstart = w * stride - pad; int hend = min(hstart + kernel_size, in_h + pad); int wend = min(wstart + kernel_size, in_w + pad); hstart = max(hstart, 0), wstart = max(wstart, 0); hend = min(hend, in_h), wend = min(wend, in_w); int khstart = hend < kernel_size ? (kernel_size - hend) : 0; int kwstart = wend < kernel_size ? 
(kernel_size - wend) : 0; auto sum_val = T(0); for (int kh = hstart; kh < hend; ++kh) { for (int kw = wstart; kw < wend; ++kw) { sum_val += in_offset_data[kh * in_w + kw] * weight_offset_data[(khstart + kh - hstart) * kernel_size + kwstart + kw - wstart]; } } if (bias_term) { sum_val += bias_data[c]; } out_data[globalid] = sum_val; } } template <typename T> void Depthwise(const T *in_data, const VecInt &in_shape, const T *weight_data, const T *bias_data, int kernel_size, int stride, int pad, int bias_term, const VecInt &out_shape, T *out_data) { int batch = in_shape[0]; int in_c = in_shape[1], in_h = in_shape[2], in_w = in_shape[3]; int out_h = out_shape[2], out_w = out_shape[3]; int count = batch * in_c * out_h * out_w; KernelDepthwise<T><<<GetBlocks(count), NumThreads>>>( in_data, count, weight_data, bias_data, in_c, in_h, in_w, out_h, out_w, kernel_size, stride, pad, bias_term, out_data); CUDA_CHECK(cudaPeekAtLastError()); } template void Depthwise(const float *in_data, const VecInt &in_shape, const float *weight_data, const float *bias_data, int kernel_size, int stride, int pad, int bias_term, const VecInt &out_shape, float *out_data); #endif } // namespace Vision } // namespace Shadow
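Both Im2Col and Depthwise above take out_shape as an input rather than deriving it. For reference, the conventional relationship between the input extent and out_h/out_w for the square kernel, stride, pad and dilation parameters used here is sketched below; the helper name is invented, and this is not code from the Shadow project itself.

// Sketch only: the standard convolution output-size formula a caller would
// typically use to build the out_shape consumed by Im2Col/Depthwise above.
static int conv_out_size(int in_size, int kernel_size, int stride, int pad,
                         int dilation)
{
    int effective_kernel = dilation * (kernel_size - 1) + 1;
    return (in_size + 2 * pad - effective_kernel) / stride + 1;
}

For example, out_h = conv_out_size(in_h, kernel_size, stride, pad, dilation) and likewise for out_w (with dilation = 1 for the Depthwise path, which takes no dilation parameter).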
3f2fb60f4f07b055e56ac58f20a8f9653f14767a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2017-2020 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include "ColorSpace.h" __constant__ float matYuv2Rgb[3][3]; __constant__ float matRgb2Yuv[3][3]; void inline GetConstants(int iMatrix, float &wr, float &wb, int &black, int &white, int &max) { black = 16; white = 235; max = 255; switch (iMatrix) { case ColorSpaceStandard_BT709: default: wr = 0.2126f; wb = 0.0722f; break; case ColorSpaceStandard_FCC: wr = 0.30f; wb = 0.11f; break; case ColorSpaceStandard_BT470: case ColorSpaceStandard_BT601: wr = 0.2990f; wb = 0.1140f; break; case ColorSpaceStandard_SMPTE240M: wr = 0.212f; wb = 0.087f; break; case ColorSpaceStandard_BT2020: case ColorSpaceStandard_BT2020C: wr = 0.2627f; wb = 0.0593f; // 10-bit only black = 64 << 6; white = 940 << 6; max = (1 << 16) - 1; break; } } void SetMatYuv2Rgb(int iMatrix) { float wr, wb; int black, white, max; GetConstants(iMatrix, wr, wb, black, white, max); float mat[3][3] = { 1.0f, 0.0f, (1.0f - wr) / 0.5f, 1.0f, -wb * (1.0f - wb) / 0.5f / (1 - wb - wr), -wr * (1 - wr) / 0.5f / (1 - wb - wr), 1.0f, (1.0f - wb) / 0.5f, 0.0f, }; for (int i = 0; i < 3; i++) { for (int j = 0; j < 3; j++) { mat[i][j] = (float)(1.0 * max / (white - black) * mat[i][j]); } } hipMemcpyToSymbol(matYuv2Rgb, mat, sizeof(mat)); } void SetMatRgb2Yuv(int iMatrix) { float wr, wb; int black, white, max; GetConstants(iMatrix, wr, wb, black, white, max); float mat[3][3] = { wr, 1.0f - wb - wr, wb, -0.5f * wr / (1.0f - wb), -0.5f * (1 - wb - wr) / (1.0f - wb), 0.5f, 0.5f, -0.5f * (1.0f - wb - wr) / (1.0f - wr), -0.5f * wb / (1.0f - wr), }; for (int i = 0; i < 3; i++) { for (int j = 0; j < 3; j++) { mat[i][j] = (float)(1.0 * (white - black) / max * mat[i][j]); } } hipMemcpyToSymbol(matRgb2Yuv, mat, sizeof(mat)); } template<class T> __device__ static T Clamp(T x, T lower, T upper) { return x < lower ? lower : (x > upper ? 
upper : x); } template<class Rgb, class YuvUnit> __device__ inline Rgb YuvToRgbForPixel(YuvUnit y, YuvUnit u, YuvUnit v) { const int low = 1 << (sizeof(YuvUnit) * 8 - 4), mid = 1 << (sizeof(YuvUnit) * 8 - 1); float fy = (int)y - low, fu = (int)u - mid, fv = (int)v - mid; const float maxf = (1 << sizeof(YuvUnit) * 8) - 1.0f; YuvUnit r = (YuvUnit)Clamp(matYuv2Rgb[0][0] * fy + matYuv2Rgb[0][1] * fu + matYuv2Rgb[0][2] * fv, 0.0f, maxf), g = (YuvUnit)Clamp(matYuv2Rgb[1][0] * fy + matYuv2Rgb[1][1] * fu + matYuv2Rgb[1][2] * fv, 0.0f, maxf), b = (YuvUnit)Clamp(matYuv2Rgb[2][0] * fy + matYuv2Rgb[2][1] * fu + matYuv2Rgb[2][2] * fv, 0.0f, maxf); Rgb rgb{}; const int nShift = abs((int)sizeof(YuvUnit) - (int)sizeof(rgb.c.r)) * 8; if (sizeof(YuvUnit) >= sizeof(rgb.c.r)) { rgb.c.r = r >> nShift; rgb.c.g = g >> nShift; rgb.c.b = b >> nShift; } else { rgb.c.r = r << nShift; rgb.c.g = g << nShift; rgb.c.b = b << nShift; } return rgb; } template<class YuvUnitx2, class Rgb, class RgbIntx2> __global__ static void YuvToRgbKernel(uint8_t *pYuv, int nYuvPitch, uint8_t *pRgb, int nRgbPitch, int nWidth, int nHeight) { int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2; int y = (threadIdx.y + blockIdx.y * blockDim.y) * 2; if (x + 1 >= nWidth || y + 1 >= nHeight) { return; } uint8_t *pSrc = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch; uint8_t *pDst = pRgb + x * sizeof(Rgb) + y * nRgbPitch; YuvUnitx2 l0 = *(YuvUnitx2 *)pSrc; YuvUnitx2 l1 = *(YuvUnitx2 *)(pSrc + nYuvPitch); YuvUnitx2 ch = *(YuvUnitx2 *)(pSrc + (nHeight - y / 2) * nYuvPitch); *(RgbIntx2 *)pDst = RgbIntx2 { YuvToRgbForPixel<Rgb>(l0.x, ch.x, ch.y).d, YuvToRgbForPixel<Rgb>(l0.y, ch.x, ch.y).d, }; *(RgbIntx2 *)(pDst + nRgbPitch) = RgbIntx2 { YuvToRgbForPixel<Rgb>(l1.x, ch.x, ch.y).d, YuvToRgbForPixel<Rgb>(l1.y, ch.x, ch.y).d, }; } template<class YuvUnitx2, class Rgb, class RgbIntx2> __global__ static void Yuv444ToRgbKernel(uint8_t *pYuv, int nYuvPitch, uint8_t *pRgb, int nRgbPitch, int nWidth, int nHeight) { int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2; int y = (threadIdx.y + blockIdx.y * blockDim.y); if (x + 1 >= nWidth || y >= nHeight) { return; } uint8_t *pSrc = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch; uint8_t *pDst = pRgb + x * sizeof(Rgb) + y * nRgbPitch; YuvUnitx2 l0 = *(YuvUnitx2 *)pSrc; YuvUnitx2 ch1 = *(YuvUnitx2 *)(pSrc + (nHeight * nYuvPitch)); YuvUnitx2 ch2 = *(YuvUnitx2 *)(pSrc + (2 * nHeight * nYuvPitch)); *(RgbIntx2 *)pDst = RgbIntx2{ YuvToRgbForPixel<Rgb>(l0.x, ch1.x, ch2.x).d, YuvToRgbForPixel<Rgb>(l0.y, ch1.y, ch2.y).d, }; } template<class YuvUnitx2, class Rgb, class RgbUnitx2> __global__ static void YuvToRgbPlanarKernel(uint8_t *pYuv, int nYuvPitch, uint8_t *pRgbp, int nRgbpPitch, int nWidth, int nHeight) { int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2; int y = (threadIdx.y + blockIdx.y * blockDim.y) * 2; if (x + 1 >= nWidth || y + 1 >= nHeight) { return; } uint8_t *pSrc = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch; YuvUnitx2 l0 = *(YuvUnitx2 *)pSrc; YuvUnitx2 l1 = *(YuvUnitx2 *)(pSrc + nYuvPitch); YuvUnitx2 ch = *(YuvUnitx2 *)(pSrc + (nHeight - y / 2) * nYuvPitch); Rgb rgb0 = YuvToRgbForPixel<Rgb>(l0.x, ch.x, ch.y), rgb1 = YuvToRgbForPixel<Rgb>(l0.y, ch.x, ch.y), rgb2 = YuvToRgbForPixel<Rgb>(l1.x, ch.x, ch.y), rgb3 = YuvToRgbForPixel<Rgb>(l1.y, ch.x, ch.y); uint8_t *pDst = pRgbp + x * sizeof(RgbUnitx2) / 2 + y * nRgbpPitch; *(RgbUnitx2 *)pDst = RgbUnitx2 {rgb0.v.x, rgb1.v.x}; *(RgbUnitx2 *)(pDst + nRgbpPitch) = RgbUnitx2 {rgb2.v.x, rgb3.v.x}; pDst += nRgbpPitch * nHeight; *(RgbUnitx2 *)pDst = RgbUnitx2 
{rgb0.v.y, rgb1.v.y}; *(RgbUnitx2 *)(pDst + nRgbpPitch) = RgbUnitx2 {rgb2.v.y, rgb3.v.y}; pDst += nRgbpPitch * nHeight; *(RgbUnitx2 *)pDst = RgbUnitx2 {rgb0.v.z, rgb1.v.z}; *(RgbUnitx2 *)(pDst + nRgbpPitch) = RgbUnitx2 {rgb2.v.z, rgb3.v.z}; } template<class YuvUnitx2, class Rgb, class RgbUnitx2> __global__ static void Yuv444ToRgbPlanarKernel(uint8_t *pYuv, int nYuvPitch, uint8_t *pRgbp, int nRgbpPitch, int nWidth, int nHeight) { int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2; int y = (threadIdx.y + blockIdx.y * blockDim.y); if (x + 1 >= nWidth || y >= nHeight) { return; } uint8_t *pSrc = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch; YuvUnitx2 l0 = *(YuvUnitx2 *)pSrc; YuvUnitx2 ch1 = *(YuvUnitx2 *)(pSrc + (nHeight * nYuvPitch)); YuvUnitx2 ch2 = *(YuvUnitx2 *)(pSrc + (2 * nHeight * nYuvPitch)); Rgb rgb0 = YuvToRgbForPixel<Rgb>(l0.x, ch1.x, ch2.x), rgb1 = YuvToRgbForPixel<Rgb>(l0.y, ch1.y, ch2.y); uint8_t *pDst = pRgbp + x * sizeof(RgbUnitx2) / 2 + y * nRgbpPitch; *(RgbUnitx2 *)pDst = RgbUnitx2{ rgb0.v.x, rgb1.v.x }; pDst += nRgbpPitch * nHeight; *(RgbUnitx2 *)pDst = RgbUnitx2{ rgb0.v.y, rgb1.v.y }; pDst += nRgbpPitch * nHeight; *(RgbUnitx2 *)pDst = RgbUnitx2{ rgb0.v.z, rgb1.v.z }; } template <class COLOR32> void Nv12ToColor32(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) { SetMatYuv2Rgb(iMatrix); hipLaunchKernelGGL(( YuvToRgbKernel<uchar2, COLOR32, uint2>) , dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2)), dim3(dim3(32, 2)), 0, 0, dpNv12, nNv12Pitch, dpBgra, nBgraPitch, nWidth, nHeight); } template <class COLOR64> void Nv12ToColor64(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) { SetMatYuv2Rgb(iMatrix); hipLaunchKernelGGL(( YuvToRgbKernel<uchar2, COLOR64, ulonglong2>) , dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2)), dim3(dim3(32, 2)), 0, 0, dpNv12, nNv12Pitch, dpBgra, nBgraPitch, nWidth, nHeight); } template <class COLOR32> void YUV444ToColor32(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) { SetMatYuv2Rgb(iMatrix); hipLaunchKernelGGL(( Yuv444ToRgbKernel<uchar2, COLOR32, uint2>) , dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2)), dim3(dim3(32, 2)) , 0, 0, dpYUV444, nPitch, dpBgra, nBgraPitch, nWidth, nHeight); } template <class COLOR64> void YUV444ToColor64(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) { SetMatYuv2Rgb(iMatrix); hipLaunchKernelGGL(( Yuv444ToRgbKernel<uchar2, COLOR64, ulonglong2>) , dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2)), dim3(dim3(32, 2)) , 0, 0, dpYUV444, nPitch, dpBgra, nBgraPitch, nWidth, nHeight); } template <class COLOR32> void P016ToColor32(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) { SetMatYuv2Rgb(iMatrix); hipLaunchKernelGGL(( YuvToRgbKernel<ushort2, COLOR32, uint2>) , dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2)), dim3(dim3(32, 2)), 0, 0, dpP016, nP016Pitch, dpBgra, nBgraPitch, nWidth, nHeight); } template <class COLOR64> void P016ToColor64(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) { SetMatYuv2Rgb(iMatrix); hipLaunchKernelGGL(( YuvToRgbKernel<ushort2, COLOR64, ulonglong2>) , dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2)), dim3(dim3(32, 2)), 0, 0, dpP016, nP016Pitch, dpBgra, nBgraPitch, nWidth, nHeight); } template <class COLOR32> void 
YUV444P16ToColor32(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) { SetMatYuv2Rgb(iMatrix); hipLaunchKernelGGL(( Yuv444ToRgbKernel<ushort2, COLOR32, uint2>) , dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2)), dim3(dim3(32, 2)) , 0, 0, dpYUV444, nPitch, dpBgra, nBgraPitch, nWidth, nHeight); } template <class COLOR64> void YUV444P16ToColor64(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) { SetMatYuv2Rgb(iMatrix); hipLaunchKernelGGL(( Yuv444ToRgbKernel<ushort2, COLOR64, ulonglong2>) , dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2)), dim3(dim3(32, 2)) , 0, 0, dpYUV444, nPitch, dpBgra, nBgraPitch, nWidth, nHeight); } template <class COLOR32> void Nv12ToColorPlanar(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix) { SetMatYuv2Rgb(iMatrix); hipLaunchKernelGGL(( YuvToRgbPlanarKernel<uchar2, COLOR32, uchar2>) , dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2)), dim3(dim3(32, 2)), 0, 0, dpNv12, nNv12Pitch, dpBgrp, nBgrpPitch, nWidth, nHeight); } template <class COLOR32> void P016ToColorPlanar(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix) { SetMatYuv2Rgb(iMatrix); hipLaunchKernelGGL(( YuvToRgbPlanarKernel<ushort2, COLOR32, uchar2>) , dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2)), dim3(dim3(32, 2)), 0, 0, dpP016, nP016Pitch, dpBgrp, nBgrpPitch, nWidth, nHeight); } template <class COLOR32> void YUV444ToColorPlanar(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix) { SetMatYuv2Rgb(iMatrix); hipLaunchKernelGGL(( Yuv444ToRgbPlanarKernel<uchar2, COLOR32, uchar2>) , dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2)), dim3(dim3(32, 2)) , 0, 0, dpYUV444, nPitch, dpBgrp, nBgrpPitch, nWidth, nHeight); } template <class COLOR32> void YUV444P16ToColorPlanar(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix) { SetMatYuv2Rgb(iMatrix); Yuv444ToRgbPlanarKernel<ushort2, COLOR32, uchar2> << <dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2), dim3(32, 2) >> > (dpYUV444, nPitch, dpBgrp, nBgrpPitch, nWidth, nHeight); } // Explicit Instantiation template void Nv12ToColor32<BGRA32>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix); template void Nv12ToColor32<RGBA32>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix); template void Nv12ToColor64<BGRA64>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix); template void Nv12ToColor64<RGBA64>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix); template void YUV444ToColor32<BGRA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix); template void YUV444ToColor32<RGBA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix); template void YUV444ToColor64<BGRA64>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix); template void YUV444ToColor64<RGBA64>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix); template void P016ToColor32<BGRA32>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int 
nHeight, int iMatrix); template void P016ToColor32<RGBA32>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix); template void P016ToColor64<BGRA64>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix); template void P016ToColor64<RGBA64>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix); template void YUV444P16ToColor32<BGRA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix); template void YUV444P16ToColor32<RGBA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix); template void YUV444P16ToColor64<BGRA64>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix); template void YUV444P16ToColor64<RGBA64>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix); template void Nv12ToColorPlanar<BGRA32>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix); template void Nv12ToColorPlanar<RGBA32>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix); template void P016ToColorPlanar<BGRA32>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix); template void P016ToColorPlanar<RGBA32>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix); template void YUV444ToColorPlanar<BGRA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix); template void YUV444ToColorPlanar<RGBA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix); template void YUV444P16ToColorPlanar<BGRA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix); template void YUV444P16ToColorPlanar<RGBA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix); template<class YuvUnit, class RgbUnit> __device__ inline YuvUnit RgbToY(RgbUnit r, RgbUnit g, RgbUnit b) { const YuvUnit low = 1 << (sizeof(YuvUnit) * 8 - 4); return matRgb2Yuv[0][0] * r + matRgb2Yuv[0][1] * g + matRgb2Yuv[0][2] * b + low; } template<class YuvUnit, class RgbUnit> __device__ inline YuvUnit RgbToU(RgbUnit r, RgbUnit g, RgbUnit b) { const YuvUnit mid = 1 << (sizeof(YuvUnit) * 8 - 1); return matRgb2Yuv[1][0] * r + matRgb2Yuv[1][1] * g + matRgb2Yuv[1][2] * b + mid; } template<class YuvUnit, class RgbUnit> __device__ inline YuvUnit RgbToV(RgbUnit r, RgbUnit g, RgbUnit b) { const YuvUnit mid = 1 << (sizeof(YuvUnit) * 8 - 1); return matRgb2Yuv[2][0] * r + matRgb2Yuv[2][1] * g + matRgb2Yuv[2][2] * b + mid; } template<class YuvUnitx2, class Rgb, class RgbIntx2> __global__ static void RgbToYuvKernel(uint8_t *pRgb, int nRgbPitch, uint8_t *pYuv, int nYuvPitch, int nWidth, int nHeight) { int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2; int y = (threadIdx.y + blockIdx.y * blockDim.y) * 2; if (x + 1 >= nWidth || y + 1 >= nHeight) { return; } uint8_t *pSrc = pRgb + x * sizeof(Rgb) + y * nRgbPitch; RgbIntx2 int2a = *(RgbIntx2 *)pSrc; RgbIntx2 int2b = *(RgbIntx2 *)(pSrc + nRgbPitch); Rgb rgb[4] = {int2a.x, int2a.y, int2b.x, int2b.y}; decltype(Rgb::c.r) r = (rgb[0].c.r + rgb[1].c.r + rgb[2].c.r + rgb[3].c.r) / 4, g = (rgb[0].c.g + rgb[1].c.g + 
rgb[2].c.g + rgb[3].c.g) / 4, b = (rgb[0].c.b + rgb[1].c.b + rgb[2].c.b + rgb[3].c.b) / 4; uint8_t *pDst = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch; *(YuvUnitx2 *)pDst = YuvUnitx2 { RgbToY<decltype(YuvUnitx2::x)>(rgb[0].c.r, rgb[0].c.g, rgb[0].c.b), RgbToY<decltype(YuvUnitx2::x)>(rgb[1].c.r, rgb[1].c.g, rgb[1].c.b), }; *(YuvUnitx2 *)(pDst + nYuvPitch) = YuvUnitx2 { RgbToY<decltype(YuvUnitx2::x)>(rgb[2].c.r, rgb[2].c.g, rgb[2].c.b), RgbToY<decltype(YuvUnitx2::x)>(rgb[3].c.r, rgb[3].c.g, rgb[3].c.b), }; *(YuvUnitx2 *)(pDst + (nHeight - y / 2) * nYuvPitch) = YuvUnitx2 { RgbToU<decltype(YuvUnitx2::x)>(r, g, b), RgbToV<decltype(YuvUnitx2::x)>(r, g, b), }; } void Bgra64ToP016(uint8_t *dpBgra, int nBgraPitch, uint8_t *dpP016, int nP016Pitch, int nWidth, int nHeight, int iMatrix) { SetMatRgb2Yuv(iMatrix); hipLaunchKernelGGL(( RgbToYuvKernel<ushort2, BGRA64, ulonglong2>) , dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2)), dim3(dim3(32, 2)), 0, 0, dpBgra, nBgraPitch, dpP016, nP016Pitch, nWidth, nHeight); } __global__ static void BGRA32ToBgr24Kernel(const uint8_t* dpBgra, uint8_t* dpBgr, int nWidth, int nHeight, int nStride) { const int x = (blockIdx.x * blockDim.x) + threadIdx.x; const int y = (blockIdx.y * blockDim.y) + threadIdx.y; if(x < nWidth && y < nHeight) { const uint8_t *pSrc = dpBgra + (y * nWidth + x) * 4; uint8_t *pDst = dpBgr + y * nStride + x * 3; pDst[0] = pSrc[0]; pDst[1] = pSrc[1]; pDst[2] = pSrc[2]; } } inline int DivUp(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); } void BGRA32ToBgr24(const uint8_t* dpBgra, uint8_t* dpBgr, int nWidth, int nHeight, int nStride) { const dim3 blockDim(32, 8, 1); const dim3 gridDim(DivUp(nWidth, blockDim.x), DivUp(nHeight, blockDim.y), 1); hipLaunchKernelGGL(( BGRA32ToBgr24Kernel), dim3(gridDim), dim3(blockDim), 0, 0, dpBgra, dpBgr, nWidth, nHeight, nStride); }
3f2fb60f4f07b055e56ac58f20a8f9653f14767a.cu
/* * Copyright 2017-2020 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include "ColorSpace.h" __constant__ float matYuv2Rgb[3][3]; __constant__ float matRgb2Yuv[3][3]; void inline GetConstants(int iMatrix, float &wr, float &wb, int &black, int &white, int &max) { black = 16; white = 235; max = 255; switch (iMatrix) { case ColorSpaceStandard_BT709: default: wr = 0.2126f; wb = 0.0722f; break; case ColorSpaceStandard_FCC: wr = 0.30f; wb = 0.11f; break; case ColorSpaceStandard_BT470: case ColorSpaceStandard_BT601: wr = 0.2990f; wb = 0.1140f; break; case ColorSpaceStandard_SMPTE240M: wr = 0.212f; wb = 0.087f; break; case ColorSpaceStandard_BT2020: case ColorSpaceStandard_BT2020C: wr = 0.2627f; wb = 0.0593f; // 10-bit only black = 64 << 6; white = 940 << 6; max = (1 << 16) - 1; break; } } void SetMatYuv2Rgb(int iMatrix) { float wr, wb; int black, white, max; GetConstants(iMatrix, wr, wb, black, white, max); float mat[3][3] = { 1.0f, 0.0f, (1.0f - wr) / 0.5f, 1.0f, -wb * (1.0f - wb) / 0.5f / (1 - wb - wr), -wr * (1 - wr) / 0.5f / (1 - wb - wr), 1.0f, (1.0f - wb) / 0.5f, 0.0f, }; for (int i = 0; i < 3; i++) { for (int j = 0; j < 3; j++) { mat[i][j] = (float)(1.0 * max / (white - black) * mat[i][j]); } } cudaMemcpyToSymbol(matYuv2Rgb, mat, sizeof(mat)); } void SetMatRgb2Yuv(int iMatrix) { float wr, wb; int black, white, max; GetConstants(iMatrix, wr, wb, black, white, max); float mat[3][3] = { wr, 1.0f - wb - wr, wb, -0.5f * wr / (1.0f - wb), -0.5f * (1 - wb - wr) / (1.0f - wb), 0.5f, 0.5f, -0.5f * (1.0f - wb - wr) / (1.0f - wr), -0.5f * wb / (1.0f - wr), }; for (int i = 0; i < 3; i++) { for (int j = 0; j < 3; j++) { mat[i][j] = (float)(1.0 * (white - black) / max * mat[i][j]); } } cudaMemcpyToSymbol(matRgb2Yuv, mat, sizeof(mat)); } template<class T> __device__ static T Clamp(T x, T lower, T upper) { return x < lower ? lower : (x > upper ? 
upper : x); } template<class Rgb, class YuvUnit> __device__ inline Rgb YuvToRgbForPixel(YuvUnit y, YuvUnit u, YuvUnit v) { const int low = 1 << (sizeof(YuvUnit) * 8 - 4), mid = 1 << (sizeof(YuvUnit) * 8 - 1); float fy = (int)y - low, fu = (int)u - mid, fv = (int)v - mid; const float maxf = (1 << sizeof(YuvUnit) * 8) - 1.0f; YuvUnit r = (YuvUnit)Clamp(matYuv2Rgb[0][0] * fy + matYuv2Rgb[0][1] * fu + matYuv2Rgb[0][2] * fv, 0.0f, maxf), g = (YuvUnit)Clamp(matYuv2Rgb[1][0] * fy + matYuv2Rgb[1][1] * fu + matYuv2Rgb[1][2] * fv, 0.0f, maxf), b = (YuvUnit)Clamp(matYuv2Rgb[2][0] * fy + matYuv2Rgb[2][1] * fu + matYuv2Rgb[2][2] * fv, 0.0f, maxf); Rgb rgb{}; const int nShift = abs((int)sizeof(YuvUnit) - (int)sizeof(rgb.c.r)) * 8; if (sizeof(YuvUnit) >= sizeof(rgb.c.r)) { rgb.c.r = r >> nShift; rgb.c.g = g >> nShift; rgb.c.b = b >> nShift; } else { rgb.c.r = r << nShift; rgb.c.g = g << nShift; rgb.c.b = b << nShift; } return rgb; } template<class YuvUnitx2, class Rgb, class RgbIntx2> __global__ static void YuvToRgbKernel(uint8_t *pYuv, int nYuvPitch, uint8_t *pRgb, int nRgbPitch, int nWidth, int nHeight) { int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2; int y = (threadIdx.y + blockIdx.y * blockDim.y) * 2; if (x + 1 >= nWidth || y + 1 >= nHeight) { return; } uint8_t *pSrc = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch; uint8_t *pDst = pRgb + x * sizeof(Rgb) + y * nRgbPitch; YuvUnitx2 l0 = *(YuvUnitx2 *)pSrc; YuvUnitx2 l1 = *(YuvUnitx2 *)(pSrc + nYuvPitch); YuvUnitx2 ch = *(YuvUnitx2 *)(pSrc + (nHeight - y / 2) * nYuvPitch); *(RgbIntx2 *)pDst = RgbIntx2 { YuvToRgbForPixel<Rgb>(l0.x, ch.x, ch.y).d, YuvToRgbForPixel<Rgb>(l0.y, ch.x, ch.y).d, }; *(RgbIntx2 *)(pDst + nRgbPitch) = RgbIntx2 { YuvToRgbForPixel<Rgb>(l1.x, ch.x, ch.y).d, YuvToRgbForPixel<Rgb>(l1.y, ch.x, ch.y).d, }; } template<class YuvUnitx2, class Rgb, class RgbIntx2> __global__ static void Yuv444ToRgbKernel(uint8_t *pYuv, int nYuvPitch, uint8_t *pRgb, int nRgbPitch, int nWidth, int nHeight) { int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2; int y = (threadIdx.y + blockIdx.y * blockDim.y); if (x + 1 >= nWidth || y >= nHeight) { return; } uint8_t *pSrc = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch; uint8_t *pDst = pRgb + x * sizeof(Rgb) + y * nRgbPitch; YuvUnitx2 l0 = *(YuvUnitx2 *)pSrc; YuvUnitx2 ch1 = *(YuvUnitx2 *)(pSrc + (nHeight * nYuvPitch)); YuvUnitx2 ch2 = *(YuvUnitx2 *)(pSrc + (2 * nHeight * nYuvPitch)); *(RgbIntx2 *)pDst = RgbIntx2{ YuvToRgbForPixel<Rgb>(l0.x, ch1.x, ch2.x).d, YuvToRgbForPixel<Rgb>(l0.y, ch1.y, ch2.y).d, }; } template<class YuvUnitx2, class Rgb, class RgbUnitx2> __global__ static void YuvToRgbPlanarKernel(uint8_t *pYuv, int nYuvPitch, uint8_t *pRgbp, int nRgbpPitch, int nWidth, int nHeight) { int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2; int y = (threadIdx.y + blockIdx.y * blockDim.y) * 2; if (x + 1 >= nWidth || y + 1 >= nHeight) { return; } uint8_t *pSrc = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch; YuvUnitx2 l0 = *(YuvUnitx2 *)pSrc; YuvUnitx2 l1 = *(YuvUnitx2 *)(pSrc + nYuvPitch); YuvUnitx2 ch = *(YuvUnitx2 *)(pSrc + (nHeight - y / 2) * nYuvPitch); Rgb rgb0 = YuvToRgbForPixel<Rgb>(l0.x, ch.x, ch.y), rgb1 = YuvToRgbForPixel<Rgb>(l0.y, ch.x, ch.y), rgb2 = YuvToRgbForPixel<Rgb>(l1.x, ch.x, ch.y), rgb3 = YuvToRgbForPixel<Rgb>(l1.y, ch.x, ch.y); uint8_t *pDst = pRgbp + x * sizeof(RgbUnitx2) / 2 + y * nRgbpPitch; *(RgbUnitx2 *)pDst = RgbUnitx2 {rgb0.v.x, rgb1.v.x}; *(RgbUnitx2 *)(pDst + nRgbpPitch) = RgbUnitx2 {rgb2.v.x, rgb3.v.x}; pDst += nRgbpPitch * nHeight; *(RgbUnitx2 *)pDst = RgbUnitx2 
{rgb0.v.y, rgb1.v.y}; *(RgbUnitx2 *)(pDst + nRgbpPitch) = RgbUnitx2 {rgb2.v.y, rgb3.v.y}; pDst += nRgbpPitch * nHeight; *(RgbUnitx2 *)pDst = RgbUnitx2 {rgb0.v.z, rgb1.v.z}; *(RgbUnitx2 *)(pDst + nRgbpPitch) = RgbUnitx2 {rgb2.v.z, rgb3.v.z}; } template<class YuvUnitx2, class Rgb, class RgbUnitx2> __global__ static void Yuv444ToRgbPlanarKernel(uint8_t *pYuv, int nYuvPitch, uint8_t *pRgbp, int nRgbpPitch, int nWidth, int nHeight) { int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2; int y = (threadIdx.y + blockIdx.y * blockDim.y); if (x + 1 >= nWidth || y >= nHeight) { return; } uint8_t *pSrc = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch; YuvUnitx2 l0 = *(YuvUnitx2 *)pSrc; YuvUnitx2 ch1 = *(YuvUnitx2 *)(pSrc + (nHeight * nYuvPitch)); YuvUnitx2 ch2 = *(YuvUnitx2 *)(pSrc + (2 * nHeight * nYuvPitch)); Rgb rgb0 = YuvToRgbForPixel<Rgb>(l0.x, ch1.x, ch2.x), rgb1 = YuvToRgbForPixel<Rgb>(l0.y, ch1.y, ch2.y); uint8_t *pDst = pRgbp + x * sizeof(RgbUnitx2) / 2 + y * nRgbpPitch; *(RgbUnitx2 *)pDst = RgbUnitx2{ rgb0.v.x, rgb1.v.x }; pDst += nRgbpPitch * nHeight; *(RgbUnitx2 *)pDst = RgbUnitx2{ rgb0.v.y, rgb1.v.y }; pDst += nRgbpPitch * nHeight; *(RgbUnitx2 *)pDst = RgbUnitx2{ rgb0.v.z, rgb1.v.z }; } template <class COLOR32> void Nv12ToColor32(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) { SetMatYuv2Rgb(iMatrix); YuvToRgbKernel<uchar2, COLOR32, uint2> <<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2), dim3(32, 2)>>> (dpNv12, nNv12Pitch, dpBgra, nBgraPitch, nWidth, nHeight); } template <class COLOR64> void Nv12ToColor64(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) { SetMatYuv2Rgb(iMatrix); YuvToRgbKernel<uchar2, COLOR64, ulonglong2> <<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2), dim3(32, 2)>>> (dpNv12, nNv12Pitch, dpBgra, nBgraPitch, nWidth, nHeight); } template <class COLOR32> void YUV444ToColor32(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) { SetMatYuv2Rgb(iMatrix); Yuv444ToRgbKernel<uchar2, COLOR32, uint2> <<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2), dim3(32, 2) >>> (dpYUV444, nPitch, dpBgra, nBgraPitch, nWidth, nHeight); } template <class COLOR64> void YUV444ToColor64(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) { SetMatYuv2Rgb(iMatrix); Yuv444ToRgbKernel<uchar2, COLOR64, ulonglong2> <<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2), dim3(32, 2) >>> (dpYUV444, nPitch, dpBgra, nBgraPitch, nWidth, nHeight); } template <class COLOR32> void P016ToColor32(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) { SetMatYuv2Rgb(iMatrix); YuvToRgbKernel<ushort2, COLOR32, uint2> <<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2), dim3(32, 2)>>> (dpP016, nP016Pitch, dpBgra, nBgraPitch, nWidth, nHeight); } template <class COLOR64> void P016ToColor64(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) { SetMatYuv2Rgb(iMatrix); YuvToRgbKernel<ushort2, COLOR64, ulonglong2> <<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2), dim3(32, 2)>>> (dpP016, nP016Pitch, dpBgra, nBgraPitch, nWidth, nHeight); } template <class COLOR32> void YUV444P16ToColor32(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) { SetMatYuv2Rgb(iMatrix); Yuv444ToRgbKernel<ushort2, COLOR32, uint2> <<<dim3((nWidth + 63) / 32 / 2, (nHeight + 
3) / 2), dim3(32, 2) >>> (dpYUV444, nPitch, dpBgra, nBgraPitch, nWidth, nHeight); } template <class COLOR64> void YUV444P16ToColor64(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) { SetMatYuv2Rgb(iMatrix); Yuv444ToRgbKernel<ushort2, COLOR64, ulonglong2> <<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2), dim3(32, 2) >>> (dpYUV444, nPitch, dpBgra, nBgraPitch, nWidth, nHeight); } template <class COLOR32> void Nv12ToColorPlanar(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix) { SetMatYuv2Rgb(iMatrix); YuvToRgbPlanarKernel<uchar2, COLOR32, uchar2> <<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2), dim3(32, 2)>>> (dpNv12, nNv12Pitch, dpBgrp, nBgrpPitch, nWidth, nHeight); } template <class COLOR32> void P016ToColorPlanar(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix) { SetMatYuv2Rgb(iMatrix); YuvToRgbPlanarKernel<ushort2, COLOR32, uchar2> <<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2), dim3(32, 2)>>> (dpP016, nP016Pitch, dpBgrp, nBgrpPitch, nWidth, nHeight); } template <class COLOR32> void YUV444ToColorPlanar(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix) { SetMatYuv2Rgb(iMatrix); Yuv444ToRgbPlanarKernel<uchar2, COLOR32, uchar2> <<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2), dim3(32, 2) >>> (dpYUV444, nPitch, dpBgrp, nBgrpPitch, nWidth, nHeight); } template <class COLOR32> void YUV444P16ToColorPlanar(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix) { SetMatYuv2Rgb(iMatrix); Yuv444ToRgbPlanarKernel<ushort2, COLOR32, uchar2> << <dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2), dim3(32, 2) >> > (dpYUV444, nPitch, dpBgrp, nBgrpPitch, nWidth, nHeight); } // Explicit Instantiation template void Nv12ToColor32<BGRA32>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix); template void Nv12ToColor32<RGBA32>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix); template void Nv12ToColor64<BGRA64>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix); template void Nv12ToColor64<RGBA64>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix); template void YUV444ToColor32<BGRA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix); template void YUV444ToColor32<RGBA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix); template void YUV444ToColor64<BGRA64>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix); template void YUV444ToColor64<RGBA64>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix); template void P016ToColor32<BGRA32>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix); template void P016ToColor32<RGBA32>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix); template void P016ToColor64<BGRA64>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix); template void P016ToColor64<RGBA64>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, 
int nHeight, int iMatrix); template void YUV444P16ToColor32<BGRA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix); template void YUV444P16ToColor32<RGBA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix); template void YUV444P16ToColor64<BGRA64>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix); template void YUV444P16ToColor64<RGBA64>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix); template void Nv12ToColorPlanar<BGRA32>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix); template void Nv12ToColorPlanar<RGBA32>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix); template void P016ToColorPlanar<BGRA32>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix); template void P016ToColorPlanar<RGBA32>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix); template void YUV444ToColorPlanar<BGRA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix); template void YUV444ToColorPlanar<RGBA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix); template void YUV444P16ToColorPlanar<BGRA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix); template void YUV444P16ToColorPlanar<RGBA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix); template<class YuvUnit, class RgbUnit> __device__ inline YuvUnit RgbToY(RgbUnit r, RgbUnit g, RgbUnit b) { const YuvUnit low = 1 << (sizeof(YuvUnit) * 8 - 4); return matRgb2Yuv[0][0] * r + matRgb2Yuv[0][1] * g + matRgb2Yuv[0][2] * b + low; } template<class YuvUnit, class RgbUnit> __device__ inline YuvUnit RgbToU(RgbUnit r, RgbUnit g, RgbUnit b) { const YuvUnit mid = 1 << (sizeof(YuvUnit) * 8 - 1); return matRgb2Yuv[1][0] * r + matRgb2Yuv[1][1] * g + matRgb2Yuv[1][2] * b + mid; } template<class YuvUnit, class RgbUnit> __device__ inline YuvUnit RgbToV(RgbUnit r, RgbUnit g, RgbUnit b) { const YuvUnit mid = 1 << (sizeof(YuvUnit) * 8 - 1); return matRgb2Yuv[2][0] * r + matRgb2Yuv[2][1] * g + matRgb2Yuv[2][2] * b + mid; } template<class YuvUnitx2, class Rgb, class RgbIntx2> __global__ static void RgbToYuvKernel(uint8_t *pRgb, int nRgbPitch, uint8_t *pYuv, int nYuvPitch, int nWidth, int nHeight) { int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2; int y = (threadIdx.y + blockIdx.y * blockDim.y) * 2; if (x + 1 >= nWidth || y + 1 >= nHeight) { return; } uint8_t *pSrc = pRgb + x * sizeof(Rgb) + y * nRgbPitch; RgbIntx2 int2a = *(RgbIntx2 *)pSrc; RgbIntx2 int2b = *(RgbIntx2 *)(pSrc + nRgbPitch); Rgb rgb[4] = {int2a.x, int2a.y, int2b.x, int2b.y}; decltype(Rgb::c.r) r = (rgb[0].c.r + rgb[1].c.r + rgb[2].c.r + rgb[3].c.r) / 4, g = (rgb[0].c.g + rgb[1].c.g + rgb[2].c.g + rgb[3].c.g) / 4, b = (rgb[0].c.b + rgb[1].c.b + rgb[2].c.b + rgb[3].c.b) / 4; uint8_t *pDst = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch; *(YuvUnitx2 *)pDst = YuvUnitx2 { RgbToY<decltype(YuvUnitx2::x)>(rgb[0].c.r, rgb[0].c.g, rgb[0].c.b), RgbToY<decltype(YuvUnitx2::x)>(rgb[1].c.r, rgb[1].c.g, rgb[1].c.b), }; *(YuvUnitx2 *)(pDst + nYuvPitch) = YuvUnitx2 { RgbToY<decltype(YuvUnitx2::x)>(rgb[2].c.r, 
rgb[2].c.g, rgb[2].c.b), RgbToY<decltype(YuvUnitx2::x)>(rgb[3].c.r, rgb[3].c.g, rgb[3].c.b), }; *(YuvUnitx2 *)(pDst + (nHeight - y / 2) * nYuvPitch) = YuvUnitx2 { RgbToU<decltype(YuvUnitx2::x)>(r, g, b), RgbToV<decltype(YuvUnitx2::x)>(r, g, b), }; } void Bgra64ToP016(uint8_t *dpBgra, int nBgraPitch, uint8_t *dpP016, int nP016Pitch, int nWidth, int nHeight, int iMatrix) { SetMatRgb2Yuv(iMatrix); RgbToYuvKernel<ushort2, BGRA64, ulonglong2> <<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2), dim3(32, 2)>>> (dpBgra, nBgraPitch, dpP016, nP016Pitch, nWidth, nHeight); } __global__ static void BGRA32ToBgr24Kernel(const uint8_t* dpBgra, uint8_t* dpBgr, int nWidth, int nHeight, int nStride) { const int x = (blockIdx.x * blockDim.x) + threadIdx.x; const int y = (blockIdx.y * blockDim.y) + threadIdx.y; if(x < nWidth && y < nHeight) { const uint8_t *pSrc = dpBgra + (y * nWidth + x) * 4; uint8_t *pDst = dpBgr + y * nStride + x * 3; pDst[0] = pSrc[0]; pDst[1] = pSrc[1]; pDst[2] = pSrc[2]; } } inline int DivUp(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); } void BGRA32ToBgr24(const uint8_t* dpBgra, uint8_t* dpBgr, int nWidth, int nHeight, int nStride) { const dim3 blockDim(32, 8, 1); const dim3 gridDim(DivUp(nWidth, blockDim.x), DivUp(nHeight, blockDim.y), 1); BGRA32ToBgr24Kernel<<<gridDim, blockDim>>>(dpBgra, dpBgr, nWidth, nHeight, nStride); }
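A short host-side sketch of calling the NV12 path defined above. It assumes ColorSpace.h declares the Nv12ToColor32 template, the BGRA32 pixel type, and ColorSpaceStandard_BT709 (all referenced in this file), and that the NV12 frame already sits on the device with tightly packed pitches; the wrapper and buffer names are placeholders, not part of the sample.

#include <cstdint>
#include <cuda_runtime.h>
#include "ColorSpace.h"

// Convert one device-resident NV12 frame (nWidth*nHeight luma bytes followed by
// an interleaved UV plane of nWidth*nHeight/2 bytes) into packed 4-byte BGRA.
void ConvertNv12FrameToBgra(uint8_t* dpNv12, uint8_t* dpBgra, int nWidth, int nHeight) {
  Nv12ToColor32<BGRA32>(dpNv12, nWidth,          // luma pitch == width (packed)
                        dpBgra, 4 * nWidth,      // 4 bytes per BGRA pixel
                        nWidth, nHeight, ColorSpaceStandard_BT709);
  cudaDeviceSynchronize();                       // the kernel launch is asynchronous
}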
0d12f35fa68a4073639e1154772a87df21b82f44.hip
// !!! This is a file automatically generated by hipify!!! #include <sys/time.h> #include <hip/hip_runtime.h> #include <stdio.h> #define HANDLE_ERROR( err ) ( HandleError( err, __FILE__, __LINE__ ) ) static void HandleError( hipError_t err, const char *file, int line ) { if (err != hipSuccess) { printf( "Error: %s in %s at line %d\n", hipGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } // time stamp function in seconds double getTimeStamp() { struct timeval tv ; gettimeofday( &tv, NULL ) ; return (double) tv.tv_usec/1000000 + tv.tv_sec ; } // host side matrix addition void h_addmat(float *A, float *B, float *C, int nx, int ny){ for (int i =0;i<nx;i++){ for(int j=0;j<ny;j++){ C[i*ny+j] = A[i*ny+j]+B[i*ny+j]; } } return; } // device-side matrix addition __global__ void f_addmat( float *A, float *B, float *C, int nx, int ny ){ int ix = threadIdx.x + blockIdx.x*(blockDim.x) ; int iy = threadIdx.y + blockIdx.y*(blockDim.y) ; //printf("In add\n"); if( (ix<nx) && (iy<ny) ){ int idx = iy*nx + ix; C[idx] = A[idx] + B[idx] ; //printf("Thread %d %d\n",ix,iy); } } void initData(float *M, int x, int y, int flag ){ if(flag) { //printf("A\n"); for (int i=0;i<x;i++){ for (int j=0;j<y;j++){ M[i*y+j] = (float)(i+j)/3.0; } } } else { //printf("B\n"); for (int i=0;i<x;i++){ for (int j=0;j<y;j++){ M[i*y+j] = (float)3.14*(i+j) ; } } } } int main( int argc, char *argv[] ) { if (argc!=3){ printf("Error: Invalid number of arguments.\n"); exit(1); } int nx = atoi( argv[1] ) ; // should check validity int ny = atoi( argv[2] ) ; // should check validity if(nx <=0 || ny <=0){ printf("Error: Dimension lessThanOrEqualto Zero.\n"); exit(1); } if(ny>nx) { nx=nx^ny; ny=nx^ny; nx=nx^ny; } int noElems = (nx)*(ny) ; int bytes = noElems * sizeof(float) ; // GPU and CPU memory Allocations float *d_A, *d_B, *d_C ; HANDLE_ERROR(hipMalloc( (float **) &d_A, bytes )) ; HANDLE_ERROR(hipMalloc( (float **) &d_B, bytes )) ; HANDLE_ERROR(hipMalloc( (float **) &d_C, bytes )) ; float *h_hC = (float *) malloc( bytes ) ; // host result float *h_Ap, *h_Bp, *h_dCp; HANDLE_ERROR(hipHostMalloc( (float **) &h_Ap, bytes )) ; HANDLE_ERROR(hipHostMalloc( (float **) &h_Bp, bytes )) ; HANDLE_ERROR(hipHostMalloc( (float **) &h_dCp, bytes )) ; // init matrices with random data initData(h_Ap,nx,ny,1); initData(h_Bp,nx,ny,0); double timeStampA = getTimeStamp() ; //transfer data to dev HANDLE_ERROR (hipMemcpy( d_A, h_Ap, bytes, hipMemcpyHostToDevice )) ; HANDLE_ERROR (hipMemcpy( d_B, h_Bp, bytes, hipMemcpyHostToDevice )) ; double timeStampB = getTimeStamp() ; // invoke Kernel dim3 block( 1024, 1) ; // you will want to configure this dim3 grid( (nx+block.x-1)/block.x, (ny+block.y-1)/block.y) ; //printf("reached add %d %d %d %d %lu %d %d \n",(nx+block.x-1)/block.x, (ny+block.y-1)/block.y, nx, ny, sizeof(float), noElems, bytes); hipLaunchKernelGGL(( f_addmat), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nx, ny ) ; hipError_t err = hipGetLastError(); if (err != hipSuccess) printf("Error: %s\n", hipGetErrorString(err)); HANDLE_ERROR(hipDeviceSynchronize()) ; double timeStampC = getTimeStamp() ; //copy data back HANDLE_ERROR(hipMemcpy(h_dCp, d_C, bytes, hipMemcpyDeviceToHost)); double timeStampD = getTimeStamp() ; // free GPU resources hipFree( d_A ) ; hipFree( d_B ) ; hipFree( d_C ) ; // CPU Matrix add h_addmat( h_Ap, h_Bp, h_hC, nx, ny ) ; // Check results int flag = 0; for(int i=0;i<(nx);i++){ for(int j=0;j<(ny);j++){ if(h_hC[i*(ny)+j] != h_dCp[i*(ny)+j]) flag++; } } if (flag == 0){ printf("%.6f %.6f %.6f 
%.6f\n",(timeStampD-timeStampA),(timeStampB-timeStampA),(timeStampC-timeStampB),(timeStampD-timeStampC)); } //free other resourses hipHostFree(h_Ap); hipHostFree(h_Bp); hipHostFree(h_dCp); free(h_hC); hipDeviceReset() ; }
0d12f35fa68a4073639e1154772a87df21b82f44.cu
#include <sys/time.h> #include <cuda.h> #include <stdio.h> #define HANDLE_ERROR( err ) ( HandleError( err, __FILE__, __LINE__ ) ) static void HandleError( cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf( "Error: %s in %s at line %d\n", cudaGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } // time stamp function in seconds double getTimeStamp() { struct timeval tv ; gettimeofday( &tv, NULL ) ; return (double) tv.tv_usec/1000000 + tv.tv_sec ; } // host side matrix addition void h_addmat(float *A, float *B, float *C, int nx, int ny){ for (int i =0;i<nx;i++){ for(int j=0;j<ny;j++){ C[i*ny+j] = A[i*ny+j]+B[i*ny+j]; } } return; } // device-side matrix addition __global__ void f_addmat( float *A, float *B, float *C, int nx, int ny ){ int ix = threadIdx.x + blockIdx.x*(blockDim.x) ; int iy = threadIdx.y + blockIdx.y*(blockDim.y) ; //printf("In add\n"); if( (ix<nx) && (iy<ny) ){ int idx = iy*nx + ix; C[idx] = A[idx] + B[idx] ; //printf("Thread %d %d\n",ix,iy); } } void initData(float *M, int x, int y, int flag ){ if(flag) { //printf("A\n"); for (int i=0;i<x;i++){ for (int j=0;j<y;j++){ M[i*y+j] = (float)(i+j)/3.0; } } } else { //printf("B\n"); for (int i=0;i<x;i++){ for (int j=0;j<y;j++){ M[i*y+j] = (float)3.14*(i+j) ; } } } } int main( int argc, char *argv[] ) { if (argc!=3){ printf("Error: Invalid number of arguments.\n"); exit(1); } int nx = atoi( argv[1] ) ; // should check validity int ny = atoi( argv[2] ) ; // should check validity if(nx <=0 || ny <=0){ printf("Error: Dimension lessThanOrEqualto Zero.\n"); exit(1); } if(ny>nx) { nx=nx^ny; ny=nx^ny; nx=nx^ny; } int noElems = (nx)*(ny) ; int bytes = noElems * sizeof(float) ; // GPU and CPU memory Allocations float *d_A, *d_B, *d_C ; HANDLE_ERROR(cudaMalloc( (float **) &d_A, bytes )) ; HANDLE_ERROR(cudaMalloc( (float **) &d_B, bytes )) ; HANDLE_ERROR(cudaMalloc( (float **) &d_C, bytes )) ; float *h_hC = (float *) malloc( bytes ) ; // host result float *h_Ap, *h_Bp, *h_dCp; HANDLE_ERROR(cudaMallocHost( (float **) &h_Ap, bytes )) ; HANDLE_ERROR(cudaMallocHost( (float **) &h_Bp, bytes )) ; HANDLE_ERROR(cudaMallocHost( (float **) &h_dCp, bytes )) ; // init matrices with random data initData(h_Ap,nx,ny,1); initData(h_Bp,nx,ny,0); double timeStampA = getTimeStamp() ; //transfer data to dev HANDLE_ERROR (cudaMemcpy( d_A, h_Ap, bytes, cudaMemcpyHostToDevice )) ; HANDLE_ERROR (cudaMemcpy( d_B, h_Bp, bytes, cudaMemcpyHostToDevice )) ; double timeStampB = getTimeStamp() ; // invoke Kernel dim3 block( 1024, 1) ; // you will want to configure this dim3 grid( (nx+block.x-1)/block.x, (ny+block.y-1)/block.y) ; //printf("reached add %d %d %d %d %lu %d %d \n",(nx+block.x-1)/block.x, (ny+block.y-1)/block.y, nx, ny, sizeof(float), noElems, bytes); f_addmat<<<grid, block>>>( d_A, d_B, d_C, nx, ny ) ; cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(err)); HANDLE_ERROR(cudaDeviceSynchronize()) ; double timeStampC = getTimeStamp() ; //copy data back HANDLE_ERROR(cudaMemcpy(h_dCp, d_C, bytes, cudaMemcpyDeviceToHost)); double timeStampD = getTimeStamp() ; // free GPU resources cudaFree( d_A ) ; cudaFree( d_B ) ; cudaFree( d_C ) ; // CPU Matrix add h_addmat( h_Ap, h_Bp, h_hC, nx, ny ) ; // Check results int flag = 0; for(int i=0;i<(nx);i++){ for(int j=0;j<(ny);j++){ if(h_hC[i*(ny)+j] != h_dCp[i*(ny)+j]) flag++; } } if (flag == 0){ printf("%.6f %.6f %.6f %.6f\n",(timeStampD-timeStampA),(timeStampB-timeStampA),(timeStampC-timeStampB),(timeStampD-timeStampC)); } //free other 
resourses cudaFreeHost(h_Ap); cudaFreeHost(h_Bp); cudaFreeHost(h_dCp); free(h_hC); cudaDeviceReset() ; }
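The launch configuration in main above uses dim3 block(1024, 1), which the source itself flags with "you will want to configure this"; with block.y == 1 the grid needs one block row per matrix row. A common alternative, sketched here for the same f_addmat kernel, is a 32x32 block with a ceiling-division grid; the helper names below are illustrative.

#include <cuda_runtime.h>

__global__ void f_addmat(float *A, float *B, float *C, int nx, int ny);  // kernel from the file above

static inline int DivUp(int a, int b) { return (a + b - 1) / b; }

// Still 1024 threads per block, but arranged 32x32 so grid.y is ceil(ny/32)
// instead of ny, which stays well inside the 65535 grid.y limit for tall matrices.
void LaunchAddMat(float *d_A, float *d_B, float *d_C, int nx, int ny) {
  dim3 block(32, 32);
  dim3 grid(DivUp(nx, (int)block.x), DivUp(ny, (int)block.y));
  f_addmat<<<grid, block>>>(d_A, d_B, d_C, nx, ny);
}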
a1c8780fa995364f15526d0667037d1b39502688.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #ifndef __HIPCC__ #define __HIPCC__ #endif #include <hip/hip_runtime.h> #include <hip/device_functions.h> #include <hip/hip_runtime_api.h> #include<time.h> #include <stdio.h> #define nano 1000000L __global__ void dijkstras(int *a, int *b, int *n) { int i; int d[10], p[10], v[10];// d este costul fiecarei cai ; p :Stochează calea luată; v Stochează nodurile deja vizitate int k, u, s, check = 0; __shared__ int temp[20]; for (i = 0; i < (*n)*(*n); i++) { temp[i] = a[i]; } check = check + 1; __syncthreads(); s = threadIdx.x; for (i = 0; i<(*n); i++) { d[i] = temp[s*(*n) + i]; if (d[i] != 999) p[i] = 1; else p[i] = 0; v[i] = 0; } p[s] = 0; v[s] = 1; for (i = 0; i<((*n) - 1); i++) { int i1, j1, min = 0; for (i1 = 0; i1<(*n); i1++) { if (v[i1] == 0) { min = i1; break; } } for (j1 = min + 1; j1<(*n); j1++) { if ((v[j1] == 0) && (d[j1]<d[min])) min = j1; } k = min; v[k] = 1; for (u = 0; u<(*n); u++) { if ((v[u] == 0) && (temp[k*(*n) + u] != 999)) { if (d[u]>d[k] + temp[k*(*n) + u]) { d[u] = d[k] + temp[k*(*n) + u]; p[u] = k; } } } //storing output int count = 0; for (i = (s*(*n)); i< (s + 1) * (*n); i++) { b[i] = d[count]; count++; } } *n = check; } void main() { int *a, *b, *n; int *d_a, *d_b, *d_n; int i, j, c; int check = 0; printf("Introduceți numărul de noduri.... : "); n = (int*)malloc(sizeof(int)); scanf("%d", n); int size = (*n) * (*n) * sizeof(int); //allocating device memory hipMalloc((void **)&d_a, size); hipMalloc((void **)&d_b, size); hipMalloc((void **)&d_n, sizeof(int)); a = (int*)malloc(size); b = (int*)malloc(size); check = check + 1; for (i = 0; i<(*n); i++) for (j = 0; j <= i; j++) if (i == j) a[(i*(*n) + j)] = 0; else a[(i*(*n) + j)] = a[(j*(*n) + i)] = 999; printf("\nMatricea initiala: \n"); for (i = 0; i<(*n); i++) { for (j = 0; j<(*n); j++) { printf("%d ", a[i*(*n) + j]); } printf("\n"); } while (1) { printf("\n Introduceti nodul sursa, nodul destinatie si costul \n"); scanf("%d %d %d", &i, &j, &c); if (i == -1) break; a[(i*(*n) + j)] = a[(j*(*n) + i)] = c; } printf("\n \n"); for (i = 0; i<(*n); i++) { for (j = 0; j<(*n); j++) { printf("%d ", a[i*(*n) + j]); } printf("\n"); } check = check + 1; // copying input matrix to device hipMemcpy(d_a, a, size, hipMemcpyHostToDevice); hipMemcpy(d_n, n, sizeof(int), hipMemcpyHostToDevice); check++; int N = *n; hipLaunchKernelGGL(( dijkstras) , dim3(1), dim3(N) , 0, 0, d_a, d_b, d_n); check++; // copying result from device to host hipMemcpy(b, d_b, size, hipMemcpyDeviceToHost); hipMemcpy(n, d_n, sizeof(int), hipMemcpyDeviceToHost); check++; // printing result printf("Cele mai scurte căi sunt...."); for (i = 0; i<(N); i++) { for (j = 0; j<(N); j++) { if (i != j) printf("\n Costul căii de la %d la %d = %d\n", i, j, b[i*(N)+j]); } printf("\n\n"); } free(a); free(b); free(n); hipFree(d_a); hipFree(d_b); hipFree(d_n); }
a1c8780fa995364f15526d0667037d1b39502688.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #ifndef __CUDACC__ #define __CUDACC__ #endif #include <cuda.h> #include <device_functions.h> #include <cuda_runtime_api.h> #include<time.h> #include <stdio.h> #define nano 1000000L __global__ void dijkstras(int *a, int *b, int *n) { int i; int d[10], p[10], v[10];// d este costul fiecarei cai ; p :Stochează calea luată; v Stochează nodurile deja vizitate int k, u, s, check = 0; __shared__ int temp[20]; for (i = 0; i < (*n)*(*n); i++) { temp[i] = a[i]; } check = check + 1; __syncthreads(); s = threadIdx.x; for (i = 0; i<(*n); i++) { d[i] = temp[s*(*n) + i]; if (d[i] != 999) p[i] = 1; else p[i] = 0; v[i] = 0; } p[s] = 0; v[s] = 1; for (i = 0; i<((*n) - 1); i++) { int i1, j1, min = 0; for (i1 = 0; i1<(*n); i1++) { if (v[i1] == 0) { min = i1; break; } } for (j1 = min + 1; j1<(*n); j1++) { if ((v[j1] == 0) && (d[j1]<d[min])) min = j1; } k = min; v[k] = 1; for (u = 0; u<(*n); u++) { if ((v[u] == 0) && (temp[k*(*n) + u] != 999)) { if (d[u]>d[k] + temp[k*(*n) + u]) { d[u] = d[k] + temp[k*(*n) + u]; p[u] = k; } } } //storing output int count = 0; for (i = (s*(*n)); i< (s + 1) * (*n); i++) { b[i] = d[count]; count++; } } *n = check; } void main() { int *a, *b, *n; int *d_a, *d_b, *d_n; int i, j, c; int check = 0; printf("Introduceți numărul de noduri.... : "); n = (int*)malloc(sizeof(int)); scanf("%d", n); int size = (*n) * (*n) * sizeof(int); //allocating device memory cudaMalloc((void **)&d_a, size); cudaMalloc((void **)&d_b, size); cudaMalloc((void **)&d_n, sizeof(int)); a = (int*)malloc(size); b = (int*)malloc(size); check = check + 1; for (i = 0; i<(*n); i++) for (j = 0; j <= i; j++) if (i == j) a[(i*(*n) + j)] = 0; else a[(i*(*n) + j)] = a[(j*(*n) + i)] = 999; printf("\nMatricea initiala: \n"); for (i = 0; i<(*n); i++) { for (j = 0; j<(*n); j++) { printf("%d ", a[i*(*n) + j]); } printf("\n"); } while (1) { printf("\n Introduceti nodul sursa, nodul destinatie si costul \n"); scanf("%d %d %d", &i, &j, &c); if (i == -1) break; a[(i*(*n) + j)] = a[(j*(*n) + i)] = c; } printf("\n \n"); for (i = 0; i<(*n); i++) { for (j = 0; j<(*n); j++) { printf("%d ", a[i*(*n) + j]); } printf("\n"); } check = check + 1; // copying input matrix to device cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice); cudaMemcpy(d_n, n, sizeof(int), cudaMemcpyHostToDevice); check++; int N = *n; dijkstras <<< 1, N >>>(d_a, d_b, d_n); check++; // copying result from device to host cudaMemcpy(b, d_b, size, cudaMemcpyDeviceToHost); cudaMemcpy(n, d_n, sizeof(int), cudaMemcpyDeviceToHost); check++; // printing result printf("Cele mai scurte căi sunt...."); for (i = 0; i<(N); i++) { for (j = 0; j<(N); j++) { if (i != j) printf("\n Costul căii de la %d la %d = %d\n", i, j, b[i*(N)+j]); } printf("\n\n"); } free(a); free(b); free(n); cudaFree(d_a); cudaFree(d_b); cudaFree(d_n); }
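One caveat about the kernel above (both the HIP and CUDA versions): the whole n x n adjacency matrix is staged into a fixed __shared__ int temp[20], so any graph with more than 4 nodes (n*n > 20) writes past the shared buffer, and the per-thread arrays d, p, v cap n at 10. A hedged sketch of the usual remedy for the shared buffer is dynamic shared memory sized at launch; the kernel body is abbreviated to the staging step, and the rest would follow the original dijkstras().

// Staging the adjacency matrix through dynamically sized shared memory.
__global__ void dijkstras_dyn(const int *a, int *b, const int *n) {
    extern __shared__ int temp[];                 // size supplied at launch time
    int total = (*n) * (*n);
    for (int i = threadIdx.x; i < total; i += blockDim.x)
        temp[i] = a[i];                           // cooperative copy, one pass over the matrix
    __syncthreads();
    // ... per-source relaxation loop as in dijkstras() above ...
}

// Launch with the shared-memory size as the third configuration argument:
//   dijkstras_dyn<<<1, N, N * N * sizeof(int)>>>(d_a, d_b, d_n);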
24701e647dd0bdec3fd4a375e022814dd61065db.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hip/hip_runtime_api.h" #include <inttypes.h> #include "binomial.h" extern "C" __global__ void finalPut_kernel(double* output, double* uPow, double* dPow, int32_t n, double u, double d, double strike, double S0) { for (int32_t i = blockIdx.x * blockDim.x + threadIdx.x; i <= n; i += blockDim.x * gridDim.x) { double uPow_i = pow(u, (double)i); double dPow_i = pow(d, (double)(n-i)); uPow[i] = uPow_i; dPow[i] = pow(d, (double)(i)); output[i] = max(strike - S0 * uPow_i * dPow_i, 0.0); } } extern "C" __global__ void prevPut_kernel(double* last, double* uPow, double* dPow, int32_t i, double* output, double qUR, double qDR, double S0, double strike, double u, double d) { for (int32_t j = blockIdx.x * blockDim.x + threadIdx.x; j <= i; j += blockDim.x * gridDim.x) { double e = qUR * last[j+1] + qDR * last[j]; output[j] = max(strike - S0 * uPow[j] * dPow[i-j], e); } } void finalPut(double* output, double* uPow, double* dPow, int32_t n, double u, double d, double strike, double S0) { dim3 gdims; dim3 tdims; gdims.x = 128; gdims.y = 1; gdims.z = 1; tdims.x = 480; tdims.y = 1; tdims.z = 1; hipLaunchKernelGGL(( finalPut_kernel), dim3(gdims), dim3(tdims), 0, 0, output, uPow, dPow, n, u, d, strike, S0); } void prevPut(double* last, double* uPow, double* dPow, int32_t i, double* output, double qUR, double qDR, double S0, double strike, double u, double d) { dim3 gdims; dim3 tdims; gdims.x = 128; gdims.y = 1; gdims.z = 1; tdims.x = 480; tdims.y = 1; tdims.z = 1; hipLaunchKernelGGL(( prevPut_kernel), dim3(gdims), dim3(tdims), 0, 0, last, uPow, dPow, i, output, qUR, qDR, S0, strike, u, d); }
24701e647dd0bdec3fd4a375e022814dd61065db.cu
#include "cuda.h" #include "cuda_runtime_api.h" #include <inttypes.h> #include "binomial.h" extern "C" __global__ void finalPut_kernel(double* output, double* uPow, double* dPow, int32_t n, double u, double d, double strike, double S0) { for (int32_t i = blockIdx.x * blockDim.x + threadIdx.x; i <= n; i += blockDim.x * gridDim.x) { double uPow_i = pow(u, (double)i); double dPow_i = pow(d, (double)(n-i)); uPow[i] = uPow_i; dPow[i] = pow(d, (double)(i)); output[i] = max(strike - S0 * uPow_i * dPow_i, 0.0); } } extern "C" __global__ void prevPut_kernel(double* last, double* uPow, double* dPow, int32_t i, double* output, double qUR, double qDR, double S0, double strike, double u, double d) { for (int32_t j = blockIdx.x * blockDim.x + threadIdx.x; j <= i; j += blockDim.x * gridDim.x) { double e = qUR * last[j+1] + qDR * last[j]; output[j] = max(strike - S0 * uPow[j] * dPow[i-j], e); } } void finalPut(double* output, double* uPow, double* dPow, int32_t n, double u, double d, double strike, double S0) { dim3 gdims; dim3 tdims; gdims.x = 128; gdims.y = 1; gdims.z = 1; tdims.x = 480; tdims.y = 1; tdims.z = 1; finalPut_kernel<<<gdims, tdims>>>(output, uPow, dPow, n, u, d, strike, S0); } void prevPut(double* last, double* uPow, double* dPow, int32_t i, double* output, double qUR, double qDR, double S0, double strike, double u, double d) { dim3 gdims; dim3 tdims; gdims.x = 128; gdims.y = 1; gdims.z = 1; tdims.x = 480; tdims.y = 1; tdims.z = 1; prevPut_kernel<<<gdims, tdims>>>(last, uPow, dPow, i, output, qUR, qDR, S0, strike, u, d); }
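In the file above, finalPut fills the option values at expiry and prevPut rolls them back one time step, so the host is expected to call prevPut n times while ping-ponging two buffers. A hedged driver sketch follows; the risk-neutral parameters (u, d, qUR = q/R, qDR = (1-q)/R), the buffer names, and the assumption that binomial.h declares finalPut/prevPut are editorial, not taken from the header.

#include <cmath>
#include <cstdint>
#include <utility>
#include <cuda_runtime.h>
#include "binomial.h"   // assumed to declare finalPut() and prevPut() as defined above

// Price an American put on an n-step CRR binomial tree with the two kernels above.
double BinomialAmericanPut(int32_t n, double S0, double strike,
                           double r, double sigma, double T) {
  double dt = T / n;
  double u = std::exp(sigma * std::sqrt(dt)), d = 1.0 / u;
  double R = std::exp(r * dt);
  double q = (R - d) / (u - d);                    // risk-neutral up probability
  double qUR = q / R, qDR = (1.0 - q) / R;

  double *dVal0, *dVal1, *dUPow, *dDPow;
  size_t bytes = (n + 1) * sizeof(double);
  cudaMalloc((void **)&dVal0, bytes); cudaMalloc((void **)&dVal1, bytes);
  cudaMalloc((void **)&dUPow, bytes); cudaMalloc((void **)&dDPow, bytes);

  finalPut(dVal0, dUPow, dDPow, n, u, d, strike, S0);      // payoff at expiry
  for (int32_t i = n - 1; i >= 0; --i) {                   // roll back to t = 0
    prevPut(dVal0, dUPow, dDPow, i, dVal1, qUR, qDR, S0, strike, u, d);
    std::swap(dVal0, dVal1);
  }

  double price;
  cudaMemcpy(&price, dVal0, sizeof(double), cudaMemcpyDeviceToHost);
  cudaFree(dVal0); cudaFree(dVal1); cudaFree(dUPow); cudaFree(dDPow);
  return price;
}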
70d5525fb10df7246ca3c3f23a54400cec96ac54.hip
// !!! This is a file automatically generated by hipify!!! /** * @file cudasafe.cu * @author Álvaro Sánchez González <[email protected]> * @date Mon Jul 23 2012 * * Copyright (c) 2012 Álvaro Sánchez González * * @brief Implementación de la cabecera cudasafe.h. Este fichero se compila con el compilador de CUDA. * */ #include "cudasafe.h" bool CudaSafe(hipError_t error, char* message){ if(error!=hipSuccess) { fprintf(stderr,"ERROR: %s : %s\n",message,hipGetErrorString( error )); fflush(stderr); return false; } return true; } bool CudaSafeKernel(char* message){ hipError_t error = hipGetLastError(); if ( hipSuccess != error ){ fprintf(stderr, "ERROR: %s : %s\n", message , hipGetErrorString( error ) ); fflush(stderr); return false; } return true; } bool CudaSafeFFT(hipfftResult error, char* message){ if(error!=HIPFFT_SUCCESS) { fprintf(stderr,"ERROR: %s : %s\n",message,cuFFTGetErrorString( error )); fflush(stderr); return false; } return true; } char * cuFFTGetErrorString(hipfftResult error){ char * errorS; errorS=(char *)malloc(30 *sizeof(char)); switch(error){ case HIPFFT_SUCCESS: strcpy(errorS,"HIPFFT_SUCCESS"); break; case HIPFFT_INVALID_PLAN: strcpy(errorS,"HIPFFT_INVALID_PLAN"); break; case HIPFFT_ALLOC_FAILED: strcpy(errorS,"HIPFFT_ALLOC_FAILED"); break; case HIPFFT_INVALID_TYPE: strcpy(errorS,"HIPFFT_INVALID_TYPE"); break; case HIPFFT_INVALID_VALUE: strcpy(errorS,"HIPFFT_INVALID_VALUE"); break; case HIPFFT_INTERNAL_ERROR: strcpy(errorS,"HIPFFT_INTERNAL_ERROR"); break; case HIPFFT_EXEC_FAILED: strcpy(errorS,"HIPFFT_EXEC_FAILED"); break; case HIPFFT_SETUP_FAILED: strcpy(errorS,"HIPFFT_SETUP_FAILED"); break; case HIPFFT_INVALID_SIZE: strcpy(errorS,"HIPFFT_INVALID_SIZE"); break; default: strcpy(errorS,"UNKNOWN"); } return errorS; }
70d5525fb10df7246ca3c3f23a54400cec96ac54.cu
/** * @file cudasafe.cu * @author Álvaro Sánchez González <[email protected]> * @date Mon Jul 23 2012 * * Copyright (c) 2012 Álvaro Sánchez González * * @brief Implementación de la cabecera cudasafe.h. Este fichero se compila con el compilador de CUDA. * */ #include "cudasafe.h" bool CudaSafe(cudaError_t error, char* message){ if(error!=cudaSuccess) { fprintf(stderr,"ERROR: %s : %s\n",message,cudaGetErrorString( error )); fflush(stderr); return false; } return true; } bool CudaSafeKernel(char* message){ cudaError error = cudaGetLastError(); if ( cudaSuccess != error ){ fprintf(stderr, "ERROR: %s : %s\n", message , cudaGetErrorString( error ) ); fflush(stderr); return false; } return true; } bool CudaSafeFFT(cufftResult error, char* message){ if(error!=CUFFT_SUCCESS) { fprintf(stderr,"ERROR: %s : %s\n",message,cuFFTGetErrorString( error )); fflush(stderr); return false; } return true; } char * cuFFTGetErrorString(cufftResult error){ char * errorS; errorS=(char *)malloc(30 *sizeof(char)); switch(error){ case CUFFT_SUCCESS: strcpy(errorS,"CUFFT_SUCCESS"); break; case CUFFT_INVALID_PLAN: strcpy(errorS,"CUFFT_INVALID_PLAN"); break; case CUFFT_ALLOC_FAILED: strcpy(errorS,"CUFFT_ALLOC_FAILED"); break; case CUFFT_INVALID_TYPE: strcpy(errorS,"CUFFT_INVALID_TYPE"); break; case CUFFT_INVALID_VALUE: strcpy(errorS,"CUFFT_INVALID_VALUE"); break; case CUFFT_INTERNAL_ERROR: strcpy(errorS,"CUFFT_INTERNAL_ERROR"); break; case CUFFT_EXEC_FAILED: strcpy(errorS,"CUFFT_EXEC_FAILED"); break; case CUFFT_SETUP_FAILED: strcpy(errorS,"CUFFT_SETUP_FAILED"); break; case CUFFT_INVALID_SIZE: strcpy(errorS,"CUFFT_INVALID_SIZE"); break; default: strcpy(errorS,"UNKNOWN"); } return errorS; }
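A brief usage sketch for the three wrappers defined above; the buffer size, kernel, and FFT plan are placeholders. The wrappers take char*, so string-literal messages are passed through a cast here to keep C++ compilers quiet.

#include <cuda_runtime.h>
#include <cufft.h>
#include "cudasafe.h"

__global__ void dummyKernel(float *p) { p[threadIdx.x] = 0.0f; }

int main() {
  float *d_buf = nullptr;
  if (!CudaSafe(cudaMalloc((void **)&d_buf, 256 * sizeof(float)), (char *)"cudaMalloc d_buf"))
    return 1;

  dummyKernel<<<1, 256>>>(d_buf);
  if (!CudaSafeKernel((char *)"dummyKernel launch"))     // wraps cudaGetLastError()
    return 1;

  cufftHandle plan;
  if (!CudaSafeFFT(cufftPlan1d(&plan, 256, CUFFT_C2C, 1), (char *)"cufftPlan1d"))
    return 1;

  cufftDestroy(plan);
  cudaFree(d_buf);
  return 0;
}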
0c6d0ddb215f936f0d38b513a9dbbe18b460e9b8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/TensorUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/WrapDimUtils.h> #include <THH/THHTensorMathReduce.cuh> #include <THH/THHTensorSort.cuh> #include <THH/THHThrustAllocator.cuh> #include <c10/macros/Macros.h> #include <ATen/AccumulateType.h> #include <ATen/hip/NumericLimits.cuh> #include <type_traits> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/MemoryAccess.cuh> #include <ATen/native/hip/PersistentSoftmax.cuh> namespace at { namespace native { namespace { constexpr int ALIGN_BYTES = 16; template<typename T, typename AccumT, typename OutT> struct LogSoftMaxForwardEpilogue { __device__ __forceinline__ LogSoftMaxForwardEpilogue(AccumT max_input, AccumT sum) : max_input(max_input), logsum(::log(sum)) {} __device__ __forceinline__ OutT operator()(T input) const { return static_cast<OutT>(input - max_input - logsum); } const AccumT max_input; const AccumT logsum; }; template<typename T, typename AccumT, typename OutT> struct LogSoftMaxBackwardEpilogue { __device__ __forceinline__ LogSoftMaxBackwardEpilogue(AccumT sum) : sum(sum) {} __device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const { return static_cast<T>(gradOutput - ::exp(static_cast<AccumT>(output)) * sum); } const AccumT sum; }; template<typename T, typename AccumT, typename OutT> struct SoftMaxForwardEpilogue { __device__ __forceinline__ SoftMaxForwardEpilogue(AccumT max_input, AccumT sum) : max_input(max_input) , sum(sum) {} __device__ __forceinline__ OutT operator()(T input) const { return static_cast<OutT>(::exp(input - max_input) / sum); } const AccumT max_input; const AccumT sum; }; template<typename T, typename AccumT, typename OutT> struct SoftMaxBackwardEpilogue { __device__ __forceinline__ SoftMaxBackwardEpilogue(AccumT sum) : sum(sum) {} // XXX: gradOutput that we get here is really gradOutput * output // Look for cmul in SoftMax_updateGradInput __device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const { return static_cast<T>(gradOutput - output * sum); } const AccumT sum; }; //////////////////////////////////////////////////////////////////////////////// // Spatial kernel (fast with large inner_size and small dim_size) //////////////////////////////////////////////////////////////////////////////// // Let's assume that our input has been flattened to have only three dimension: // outer x dim x inner // The spatial algorithm tries to parallelize along all of them. // Within a 2d block threadIdx.y parallelizes over dim slices, and threads that // share it will speed up reductions over dim (along axis x). // The 2d grid is used to parallelize inner dimension over y axis and outer over x. 
inline dim3 SpatialSoftMax_getGridSize( dim3 block, uint32_t max_active_blocks, uint64_t outer_size, uint64_t dim_size, uint64_t inner_size) { // First, tile as many blocks as we can over the y axis uint32_t inner_blocks = (inner_size + block.y - 1) / block.y; if (inner_blocks > max_active_blocks) inner_blocks = max_active_blocks; // Fill the x axis with as many blocks as we can fit (a little more is ok too) uint32_t outer_blocks = (max_active_blocks + inner_blocks - 1) / inner_blocks; if (outer_blocks > outer_size) outer_blocks = outer_size; return dim3(outer_blocks, inner_blocks); } const int max_threads = 1024; inline dim3 SpatialSoftMax_getBlockSize( uint64_t outer_size, uint64_t dim_size, uint64_t inner_size) { uint32_t inner_threads = inner_size; inner_threads = ::min(inner_threads, static_cast<uint32_t>(max_threads)); uint32_t dim_threads = 1; if (inner_threads <= 64 && dim_size >= 64) { while (inner_threads * dim_threads <= max_threads && dim_threads <= dim_size) dim_threads *= 2; dim_threads /= 2; } return dim3(dim_threads, inner_threads); } template<typename accscalar_t, typename Kernel> void SpatialSoftMax_getLaunchSizes( Kernel k, uint64_t outer_size, uint64_t dim_size, uint64_t inner_size, dim3& grid, dim3& block, uint32_t& smem_size) { block = SpatialSoftMax_getBlockSize(outer_size, dim_size, inner_size); uint32_t block_threads = block.x * block.y; smem_size = block.x == 1 ? 0 : block_threads * sizeof(accscalar_t); int max_active_blocks; #ifdef __HIP_PLATFORM_HCC__ // XXX HIP function signature is not compatible yet. uint32_t max_blocks; hipOccupancyMaxActiveBlocksPerMultiprocessor(&max_blocks, k, block_threads, smem_size); max_active_blocks = max_blocks; #else hipOccupancyMaxActiveBlocksPerMultiprocessor(&max_active_blocks, k, block_threads, smem_size); #endif max_active_blocks *= at::cuda::getCurrentDeviceProperties()->multiProcessorCount; grid = SpatialSoftMax_getGridSize(block, max_active_blocks, outer_size, dim_size, inner_size); } inline dim3 SoftMax_getBlockSize(int ILP, uint64_t dim_size) { uint64_t block_size = 1; uint64_t max_block_size = ::min(dim_size / ILP, static_cast<uint64_t>(max_threads)); // In the vectorized case we want to trade off allowing more of the buffers to be accessed // in a vectorized way against wanting a larger block size to get better utilisation. // In general with ILP you can have (ILP-1)/ILP of the buffer accessed vectorised, at the risk // of having a very small block size. We choose to keep >= 1/2 of the buffer vectorised while // allowing a larger block size. if (ILP > 1) { max_block_size /= 2; } while (block_size < (max_block_size)) block_size *= 2; // Launch at least a single warp - the kernel assumes that. block_size = ::max(block_size, static_cast<uint64_t>(C10_WARP_SIZE)); return dim3(block_size); } template<typename T> struct Add { __device__ __forceinline__ T operator()(T a, T b) const { return a + b; } }; template<typename T> struct Max { __device__ __forceinline__ T operator()(T a, T b) const { return a < b ? b : a; } }; // Note that it's not a complete block-wide reduction. // Only threads that share threadIdx.y reduce values. 
template<typename T, template<typename> class ReduceOp> __forceinline__ __device__ T spatialBlockReduceX(T *shared, T val) { ReduceOp<T> r; shared += threadIdx.y * blockDim.x; __syncthreads(); shared[threadIdx.x] = val; // NOTE: loop starts with __syncthreads() int offset = blockDim.x / 2; while (offset > 0) { __syncthreads(); if (threadIdx.x < offset) shared[threadIdx.x] = r(shared[threadIdx.x], shared[threadIdx.x + offset]); offset /= 2; } __syncthreads(); return shared[0]; } template <typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue> __global__ void cunn_SpatialSoftMaxForward( outscalar_t *output, scalar_t *input, uint32_t outer_size, uint32_t dim_size, uint32_t inner_size) { extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); const uint32_t outer_stride = inner_size * dim_size; const uint32_t dim_stride = inner_size; for (uint32_t outer_index = blockIdx.x; outer_index < outer_size; outer_index += gridDim.x) { const uint32_t outer_offset = outer_index * outer_stride; for (uint32_t inner_index = blockIdx.y * blockDim.y + threadIdx.y; inner_index < inner_size; inner_index += blockDim.y * gridDim.y) { const uint32_t data_offset = outer_offset + inner_index; //////////////////////////////////////////////////////////// // These two blocks are really equivalent, but specializing on // blockDim.x == 1 makes the kernel faster when it's unused. // I didn't want to thread an extra template parameter, and nvcc // seems to be smart enough to hoist the if outside of the loops. //////////////////////////////////////////////////////////// if (blockDim.x > 1) { accscalar_t max_input = at::numeric_limits<accscalar_t>::lowest(); for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) { const accscalar_t value = static_cast<accscalar_t>(input[data_offset + d * dim_stride]); max_input = Max<accscalar_t>()(max_input, value); } max_input = spatialBlockReduceX<accscalar_t, Max>(sdata,max_input); accscalar_t sum = 0; for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) sum += ::exp(static_cast<accscalar_t>(input[data_offset + d * dim_stride]) - max_input); sum = spatialBlockReduceX<accscalar_t, Add>(sdata, sum); Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_input, sum); for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) output[data_offset + d * dim_stride] = epilogue(input[data_offset + d * dim_stride]); } else { accscalar_t max_input = at::numeric_limits<accscalar_t>::lowest(); for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) { const accscalar_t value = static_cast<accscalar_t>(input[data_offset + d * dim_stride]); max_input = Max<accscalar_t>()(max_input, value); } accscalar_t sum = 0; for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) sum += ::exp(static_cast<accscalar_t>(input[data_offset + d * dim_stride]) - max_input); Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_input, sum); for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) output[data_offset + d * dim_stride] = epilogue(input[data_offset + d * dim_stride]); } } } } template <typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue> __global__ void cunn_SpatialSoftMaxBackward( scalar_t *gradInput, outscalar_t *output, outscalar_t *gradOutput, uint32_t outer_size, uint32_t dim_size, uint32_t inner_size) { extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); const 
uint32_t outer_stride = inner_size * dim_size; const uint32_t dim_stride = inner_size; for (uint32_t outer_index = blockIdx.x; outer_index < outer_size; outer_index += gridDim.x) { const uint32_t outer_offset = outer_index * outer_stride; for (uint32_t inner_index = blockIdx.y * blockDim.y + threadIdx.y; inner_index < inner_size; inner_index += blockDim.y * gridDim.y) { const uint32_t data_offset = outer_offset + inner_index; // See the comment in forward kernel if (blockDim.x > 1) { accscalar_t sum = 0; for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) sum += gradOutput[data_offset + d * dim_stride]; sum = spatialBlockReduceX<accscalar_t, Add>(sdata, sum); Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum); for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) { gradInput[data_offset + d * dim_stride] = epilogue(gradOutput[data_offset + d * dim_stride], output[data_offset + d * dim_stride]); } } else { accscalar_t sum = 0; for (uint32_t d = 0; d < dim_size; d++) sum += gradOutput[data_offset + d * dim_stride]; Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum); for (uint32_t d = 0; d < dim_size; d++) { gradInput[data_offset + d * dim_stride] = epilogue(gradOutput[data_offset + d * dim_stride], output[data_offset + d * dim_stride]); } } } } } //////////////////////////////////////////////////////////////////////////////// // Regular kernel (fast when dim_size is large; requires inner_size == 1) //////////////////////////////////////////////////////////////////////////////// template <typename T, typename AccumT> struct MaxFloat { __device__ __forceinline__ AccumT operator()(AccumT max, T v) const { return ::max(max, (AccumT)v); } }; template<typename T, typename AccumT> struct AddFloat { __device__ __forceinline__ AccumT operator()(AccumT sum, T v) const { return sum + v; } }; template<typename T, typename AccumT> struct SumExpFloat { __device__ __forceinline__ SumExpFloat(AccumT v) : max_k(v) {} __device__ __forceinline__ AccumT operator()(AccumT sum, T v) const { return sum + ::exp(v - max_k); } const AccumT max_k; }; template <template<typename> class Reduction, typename AccumT> __device__ __forceinline__ AccumT blockReduce(AccumT* smem, AccumT val, const Reduction<AccumT>& r, AccumT defaultVal) { // To avoid RaW races from chaining blockReduce calls together, we need a sync here __syncthreads(); smem[threadIdx.x] = val; __syncthreads(); AccumT warpVal = defaultVal; // First warp will perform per-warp reductions for the remaining warps uint32_t mask = (((uint64_t)1) << (blockDim.x / C10_WARP_SIZE)) - 1; if (threadIdx.x < C10_WARP_SIZE) { int lane = threadIdx.x % C10_WARP_SIZE; if (lane < blockDim.x / C10_WARP_SIZE) { #pragma unroll for (int i = 0; i < C10_WARP_SIZE; ++i) { warpVal = r(warpVal, smem[lane * C10_WARP_SIZE + i]); } #ifndef __HIP_PLATFORM_HCC__ __syncwarp(mask); #endif smem[lane] = warpVal; } } __syncthreads(); // First thread will perform a reduction of the above per-warp reductions AccumT blockVal = defaultVal; if (threadIdx.x == 0) { for (int i = 0; i < blockDim.x / C10_WARP_SIZE; ++i) { blockVal = r(blockVal, smem[i]); } smem[0] = blockVal; } // Sync and broadcast __syncthreads(); return smem[0]; } template <template<typename, typename> class Reduction, int ILP, typename T, typename AccumT> __device__ __forceinline__ AccumT ilpReduce(int shift, T* data, int size, const Reduction<T, AccumT>& r, AccumT defaultVal) { using LoadT = at::native::memory::aligned_vector<T, ILP>; AccumT threadVal = defaultVal; int offset = threadIdx.x; // shift 
and do 1 if(shift > 0){ data -= shift; size += shift; if(threadIdx.x >= shift){ threadVal = r(threadVal, data[offset]); } size -= blockDim.x; data += blockDim.x; } int last = size % (ILP * blockDim.x); T v[ILP]; LoadT* value = reinterpret_cast<LoadT*>(&v); for (; offset * ILP < (size - last); offset += blockDim.x) { *value = reinterpret_cast<LoadT*>(data)[offset]; #pragma unroll for (int j = 0; j < ILP; ++j) { threadVal = r(threadVal, v[j]); } } offset = size - last + threadIdx.x; // Epilogue for (; offset < size; offset += blockDim.x) threadVal = r(threadVal, data[offset]); return threadVal; } /** * This will apply the Epilogue with vectorized reads & writes when input & output have the same shift */ template <int ILP, typename scalar_t, typename accum_t, typename outscalar_t, template<typename, typename, typename> class Epilogue> __device__ __forceinline__ void WriteFpropResultsVectorized( int size, const int shift, scalar_t *input, outscalar_t *output, Epilogue<scalar_t, accum_t, outscalar_t> epilogue) { using LoadT = at::native::memory::aligned_vector<scalar_t, ILP>; using StoreT = at::native::memory::aligned_vector<outscalar_t, ILP>; int offset = threadIdx.x; // if unaligned, do one value / thread and move on, guaranteeing aligned reads/writes later if (shift > 0) { input -= shift; output -= shift; size += shift; if (threadIdx.x >= shift) { output[offset] = epilogue(input[offset]); } size -= blockDim.x; input += blockDim.x; output += blockDim.x; } const int last = size % (ILP * blockDim.x); scalar_t in_v[ILP]; LoadT* in_value = reinterpret_cast<LoadT*>(&in_v); outscalar_t out_v[ILP]; StoreT* out_value = reinterpret_cast<StoreT*>(&out_v); for (; offset * ILP < (size - last); offset += blockDim.x) { *in_value = reinterpret_cast<LoadT*>(input)[offset]; #pragma unroll for (int j = 0; j < ILP; ++j) { out_v[j] = epilogue(in_v[j]); } reinterpret_cast<StoreT*>(output)[offset] = *out_value; } offset = size - last + threadIdx.x; // handle the tail for (; offset < size; offset += blockDim.x) { output[offset] = epilogue(input[offset]); } } template <int ILP, typename scalar_t, typename accum_t, typename outscalar_t, template<typename, typename, typename> class Epilogue> __device__ __forceinline__ void WriteBpropResultsVectorized( int size, const int shift, scalar_t *gradInput, outscalar_t *output, outscalar_t *gradOutput, Epilogue<scalar_t, accum_t, outscalar_t> epilogue) { using gradInputT = at::native::memory::aligned_vector<scalar_t, ILP>; using outputT = at::native::memory::aligned_vector<outscalar_t, ILP>; int offset = threadIdx.x; // if unaligned, do one value / thread and move on, guaranteeing aligned reads/writes later if (shift > 0) { gradInput -= shift; output -= shift; gradOutput -= shift; size += shift; if (threadIdx.x >= shift) { gradInput[offset] = epilogue(gradOutput[offset], output[offset]); } size -= blockDim.x; gradInput += blockDim.x; output += blockDim.x; gradOutput += blockDim.x; } const int last = size % (ILP * blockDim.x); scalar_t dX[ILP]; gradInputT *dX_v = reinterpret_cast<gradInputT*>(&dX); outscalar_t Y[ILP]; outputT *Y_v = reinterpret_cast<outputT*>(&Y); outscalar_t dY[ILP]; outputT *dY_v = reinterpret_cast<outputT*>(&dY); for (; offset * ILP < (size - last); offset += blockDim.x) { *Y_v = reinterpret_cast<outputT*>(output)[offset]; *dY_v = reinterpret_cast<outputT*>(gradOutput)[offset]; #pragma unroll for (int j = 0; j < ILP; ++j) { dX[j] = epilogue(dY[j], Y[j]); } reinterpret_cast<gradInputT*>(gradInput)[offset] = *dX_v; } offset = size - last + threadIdx.x; for (; 
offset < size; offset += blockDim.x) { gradInput[offset] = epilogue(gradOutput[offset], output[offset]); } } /** * This will apply the Epilogue with non-vectrorized reads & writes for the general case */ template <int ILP, typename scalar_t, typename accum_t, typename outscalar_t, template<typename, typename, typename> class Epilogue> __device__ __forceinline__ void WriteFpropResults( int classes, scalar_t *input, outscalar_t *output, Epilogue<scalar_t, accum_t, outscalar_t> epilogue) { int offset = threadIdx.x; int last = classes % (ILP * blockDim.x); // Main bulk of loop with ILP for (; offset < classes - last; offset += blockDim.x * ILP) { scalar_t tmp[ILP]; #pragma unroll for (int j = 0; j < ILP; ++j) { tmp[j] = input[offset + j * blockDim.x]; } #pragma unroll for (int j = 0; j < ILP; ++j) { output[offset + j * blockDim.x] = epilogue(tmp[j]); } } // Remainder - no ILP for (; offset < classes; offset += blockDim.x) { output[offset] = epilogue(input[offset]); } } template <int ILP, typename scalar_t, typename accum_t, typename outscalar_t, template<typename, typename, typename> class Epilogue> __device__ __forceinline__ void WriteBpropResults( int classes, scalar_t *gradInput, outscalar_t *output, outscalar_t *gradOutput, Epilogue<scalar_t, accum_t, outscalar_t> epilogue) { int offset = threadIdx.x; int last = classes % (ILP * blockDim.x); for (; offset < classes - last; offset += blockDim.x * ILP) { outscalar_t tmpOutput[ILP]; outscalar_t tmpGradOutput[ILP]; #pragma unroll for (int j = 0; j < ILP; ++j) { tmpOutput[j] = output[offset + j * blockDim.x]; tmpGradOutput[j] = gradOutput[offset + j * blockDim.x]; } #pragma unroll for (int j = 0; j < ILP; ++j) { gradInput[offset + j * blockDim.x] = epilogue(tmpGradOutput[j], tmpOutput[j]); } } // Remainder - no ILP for (; offset < classes; offset += blockDim.x) { gradInput[offset] = epilogue(gradOutput[offset], output[offset]); } } template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template <typename, typename, typename> class Epilogue> __global__ void cunn_SoftMaxForward(outscalar_t *output, scalar_t *input, int classes) { extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); using LoadT = at::native::memory::aligned_vector<scalar_t, ILP>; using StoreT = at::native::memory::aligned_vector<outscalar_t, ILP>; // forward pointers to batch[blockIdx.x] // each block handles a sample in the mini-batch input += blockIdx.x * classes; output += blockIdx.x * classes; const int shift = ((uint64_t)input) % ALIGN_BYTES / sizeof(scalar_t); const int output_shift = ((uint64_t)output) % ALIGN_BYTES / sizeof(outscalar_t); // find the max accscalar_t threadMax = ilpReduce<MaxFloat, ILP, scalar_t, accscalar_t>( shift, input, classes, MaxFloat<scalar_t, accscalar_t>(), -at::numeric_limits<accscalar_t>::max()); accscalar_t max_k = blockReduce<Max, accscalar_t>( sdata, threadMax, Max<accscalar_t>(), -at::numeric_limits<accscalar_t>::max()); // reduce all values accscalar_t threadExp = ilpReduce<SumExpFloat, ILP, scalar_t, accscalar_t>( shift, input, classes, SumExpFloat<scalar_t, accscalar_t>(max_k), static_cast<accscalar_t>(0)); accscalar_t sumAll = blockReduce<Add, accscalar_t>( sdata, threadExp, Add<accscalar_t>(), static_cast<accscalar_t>(0)); Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_k, sumAll); if (shift == output_shift) { WriteFpropResultsVectorized<ILP, scalar_t, accscalar_t, outscalar_t, Epilogue>(classes, shift, input, output, epilogue); } else { WriteFpropResults<ILP, 
scalar_t, accscalar_t, outscalar_t, Epilogue>(classes, input, output, epilogue); } } template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue> __global__ void cunn_SoftMaxBackward(scalar_t *gradInput, outscalar_t *output, outscalar_t *gradOutput, int classes) { using LoadT = at::native::memory::aligned_vector<scalar_t, ILP>; using StoreT = at::native::memory::aligned_vector<outscalar_t, ILP>; extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); gradInput += blockIdx.x * classes; output += blockIdx.x * classes; gradOutput += blockIdx.x * classes; const int shift = ((uint64_t)gradInput) % ALIGN_BYTES / sizeof(scalar_t); const int output_shift = ((uint64_t)output) % ALIGN_BYTES / sizeof(outscalar_t); const int grad_output_shift = ((uint64_t)gradOutput) % ALIGN_BYTES / sizeof(outscalar_t); accscalar_t threadSum = ilpReduce<AddFloat, ILP, outscalar_t, accscalar_t>( shift, gradOutput, classes, AddFloat<outscalar_t, accscalar_t>(), accscalar_t(0)); accscalar_t sum_k = blockReduce<Add, accscalar_t>( sdata, threadSum, Add<accscalar_t>(), accscalar_t(0)); Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum_k); if (shift == output_shift && shift == grad_output_shift) { WriteBpropResultsVectorized<ILP, scalar_t, accscalar_t, outscalar_t, Epilogue>(classes, shift, gradInput, output, gradOutput, epilogue); } else { WriteBpropResults<ILP, scalar_t, accscalar_t, outscalar_t, Epilogue>(classes, gradInput, output, gradOutput, epilogue); } } template<template<typename, typename, typename> class Epilogue, bool is_log_softmax> Tensor host_softmax(const Tensor & input_, const int64_t dim_, const bool half_to_float){ if (half_to_float) AT_ASSERTM(input_.scalar_type() == ScalarType::Half,"conversion is supported for Half type only"); auto input = input_.contiguous(); Tensor output = half_to_float ? at::empty_like(input, input.options().dtype(ScalarType::Float), LEGACY_CONTIGUOUS_MEMORY_FORMAT) : at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); static_assert(std::is_same<acc_type<at::Half, true>, float>::value, "accscalar_t for half should be float"); if (input.dim() == 0) input = input.view(1); int64_t dim = maybe_wrap_dim(dim_, input.dim()); TORCH_CHECK(dim >=0 && dim < input.dim(), "dim must be non-negative and less than input dimensions"); int64_t outer_size = 1; int64_t dim_size = input.size(dim); if (input.numel() > 0) { int64_t inner_size = 1; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); for (int64_t i = 0; i < dim; ++i) outer_size *= input.size(i); for (int64_t i = dim + 1; i < input.dim(); ++i) inner_size *= input.size(i); // This kernel spawns a block per each element in the batch. 
// XXX: it assumes that inner_size == 1 if (inner_size == 1) { dim3 grid(outer_size); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "host_softmax", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "host_softmax", [&] { using accscalar_t = acc_type<scalar_t, true>; if (!half_to_float) { if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) { dispatch_softmax_forward<scalar_t, scalar_t, accscalar_t, is_log_softmax>( output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), dim_size, dim_size, outer_size); } else { constexpr int ILP = sizeof(float4) / sizeof(scalar_t); dim3 block = SoftMax_getBlockSize(ILP, dim_size); hipLaunchKernelGGL(( cunn_SoftMaxForward<ILP, scalar_t, accscalar_t, scalar_t, Epilogue>) , dim3(grid), dim3(block), block.x * sizeof(accscalar_t), stream, output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), dim_size ); } } else { if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) { dispatch_softmax_forward<scalar_t, accscalar_t, accscalar_t, is_log_softmax>( output.data_ptr<accscalar_t>(), input.data_ptr<scalar_t>(), dim_size, dim_size, outer_size); } else { constexpr int ILP = sizeof(float4) / sizeof(accscalar_t); dim3 block = SoftMax_getBlockSize(ILP, dim_size); hipLaunchKernelGGL(( cunn_SoftMaxForward<ILP, scalar_t, accscalar_t, accscalar_t, Epilogue>) , dim3(grid), dim3(block), block.x * sizeof(accscalar_t), stream, output.data_ptr<accscalar_t>(), input.data_ptr<scalar_t>(), dim_size ); } } }); }); // This kernel runs in a 2D grid, where each application along y dimension has a fixed // outer_size, and runs in parallel over inner_size. Dimension x is parallel over outer_size. // Reductions over dim are done in a single-threaded manner. } else { uint32_t smem_size; dim3 grid, block; AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "host_softmax", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "host_softmax", [&] { using accscalar_t = acc_type<scalar_t, true>; if (!half_to_float) { SpatialSoftMax_getLaunchSizes<accscalar_t>( &cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, scalar_t, Epilogue>, outer_size, dim_size, inner_size, grid, block, smem_size); hipLaunchKernelGGL(( cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, scalar_t, Epilogue>) , dim3(grid), dim3(block), smem_size, stream, output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), outer_size, dim_size, inner_size ); } else { SpatialSoftMax_getLaunchSizes<accscalar_t>( &cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, accscalar_t, Epilogue>, outer_size, dim_size, inner_size, grid, block, smem_size); hipLaunchKernelGGL(( cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, accscalar_t, Epilogue>) , dim3(grid), dim3(block), smem_size, stream, output.data_ptr<accscalar_t>(), input.data_ptr<scalar_t>(), outer_size, dim_size, inner_size ); } }); }); } AT_CUDA_CHECK(hipGetLastError()); } return output; } template<template<typename, typename, typename> class Epilogue, bool is_log_softmax> Tensor host_softmax_backward(const Tensor &grad_, const Tensor &output_, int64_t dim_, bool half_to_float){ int64_t dim = maybe_wrap_dim(dim_, grad_.dim()); Tensor gI = half_to_float ? 
at::empty_like(grad_, grad_.options().dtype(ScalarType::Half), LEGACY_CONTIGUOUS_MEMORY_FORMAT) : at::empty_like(grad_, LEGACY_CONTIGUOUS_MEMORY_FORMAT); if (grad_.numel() == 0) { return gI; } auto grad = grad_.contiguous(); static_assert(std::is_same<acc_type<at::Half, true>, float>::value, "accscalar_t for half should be float"); if (grad.dim() == 0) grad = grad.view(1); TORCH_CHECK(dim >=0 && dim < grad.dim(), "dim must be non-negative and less than input dimensions"); auto output = output_.contiguous(); if (output.dim() == 0) output = output.view(1); int64_t outer_size = 1; int64_t dim_size = output.size(dim); int64_t inner_size = 1; for (int64_t i = 0; i < dim; ++i) outer_size *= output.size(i); for (int64_t i = dim + 1; i < output.dim(); ++i) inner_size *= output.size(i); // See descriptions of kernels above. hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); if (inner_size == 1) { dim3 grid(outer_size); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, gI.scalar_type(), "host_softmax_backward", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "host_softmax_backward", [&] { using accscalar_t = acc_type<scalar_t, true>; if (!half_to_float) { if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) { dispatch_softmax_backward<scalar_t, scalar_t, accscalar_t, is_log_softmax>( gI.data_ptr<scalar_t>(), grad.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), dim_size, dim_size, outer_size); } else { constexpr int ILP = sizeof(float4) / sizeof(scalar_t); dim3 block = SoftMax_getBlockSize(ILP, dim_size); hipLaunchKernelGGL(( cunn_SoftMaxBackward<ILP, scalar_t, accscalar_t, scalar_t, Epilogue>) , dim3(grid), dim3(block), block.x * sizeof(accscalar_t), stream, gI.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), grad.data_ptr<scalar_t>(), dim_size ); } } else { if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) { dispatch_softmax_backward<accscalar_t, scalar_t, accscalar_t, is_log_softmax>( gI.data_ptr<scalar_t>(), grad.data_ptr<accscalar_t>(), output.data_ptr<accscalar_t>(), dim_size, dim_size, outer_size); } else { constexpr int ILP = sizeof(float4) / sizeof(accscalar_t); dim3 block = SoftMax_getBlockSize(ILP, dim_size); hipLaunchKernelGGL(( cunn_SoftMaxBackward<ILP, scalar_t, accscalar_t, accscalar_t, Epilogue>) , dim3(grid), dim3(block), block.x * sizeof(accscalar_t), stream, gI.data_ptr<scalar_t>(), output.data_ptr<accscalar_t>(), grad.data_ptr<accscalar_t>(), dim_size ); } } }); }); } else { uint32_t smem_size; dim3 grid, block; AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, gI.scalar_type(), "host_softmax_backward", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "host_softmax_backward", [&] { using accscalar_t = acc_type<scalar_t, true>; if (!half_to_float) { SpatialSoftMax_getLaunchSizes<accscalar_t>( &cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, scalar_t, Epilogue>, outer_size, dim_size, inner_size, grid, block, smem_size); hipLaunchKernelGGL(( cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, scalar_t, Epilogue>) , dim3(grid), dim3(block), smem_size, stream, gI.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), grad.data_ptr<scalar_t>(), outer_size, dim_size, inner_size ); } else { SpatialSoftMax_getLaunchSizes<accscalar_t>( &cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, accscalar_t, Epilogue>, outer_size, dim_size, inner_size, grid, block, smem_size); hipLaunchKernelGGL(( cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, accscalar_t, Epilogue>) , dim3(grid), dim3(block), smem_size, 
stream, gI.data_ptr<scalar_t>(), output.data_ptr<accscalar_t>(), grad.data_ptr<accscalar_t>(), outer_size, dim_size, inner_size ); } }); }); } AT_CUDA_CHECK(hipGetLastError()); return gI; } } Tensor log_softmax_cuda(const Tensor &input, const int64_t dim, const bool half_to_float){ return host_softmax<LogSoftMaxForwardEpilogue,true>(input, dim, half_to_float); } Tensor log_softmax_backward_cuda(const Tensor &grad, const Tensor &output, int64_t dim, const Tensor &input){ bool half_to_float = grad.scalar_type() != input.scalar_type(); if (half_to_float) { AT_ASSERTM((grad.scalar_type() == ScalarType::Float && input.scalar_type() == ScalarType::Half), "expected input and grad types to match, or input to be at::Half and grad to be at::Float"); } return host_softmax_backward<LogSoftMaxBackwardEpilogue,true>(grad, output, dim, half_to_float); } Tensor softmax_cuda(const Tensor &input, const int64_t dim, const bool half_to_float){ return host_softmax<SoftMaxForwardEpilogue,false>(input, dim, half_to_float); } Tensor softmax_backward_cuda(const Tensor &grad, const Tensor &output, int64_t dim, const Tensor &input){ bool half_to_float = grad.scalar_type() != input.scalar_type(); if (half_to_float) { AT_ASSERTM((grad.scalar_type() == ScalarType::Float && input.scalar_type() == ScalarType::Half), "expected input and grad types to match, or input to be at::Half and grad to be at::Float"); } Tensor tmp = grad * output; return host_softmax_backward<SoftMaxBackwardEpilogue,false>(tmp, output, dim, half_to_float); } } }
0c6d0ddb215f936f0d38b513a9dbbe18b460e9b8.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/TensorUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/WrapDimUtils.h> #include <THC/THCTensorMathReduce.cuh> #include <THC/THCTensorSort.cuh> #include <THC/THCThrustAllocator.cuh> #include <c10/macros/Macros.h> #include <ATen/AccumulateType.h> #include <ATen/cuda/NumericLimits.cuh> #include <type_traits> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/MemoryAccess.cuh> #include <ATen/native/cuda/PersistentSoftmax.cuh> namespace at { namespace native { namespace { constexpr int ALIGN_BYTES = 16; template<typename T, typename AccumT, typename OutT> struct LogSoftMaxForwardEpilogue { __device__ __forceinline__ LogSoftMaxForwardEpilogue(AccumT max_input, AccumT sum) : max_input(max_input), logsum(std::log(sum)) {} __device__ __forceinline__ OutT operator()(T input) const { return static_cast<OutT>(input - max_input - logsum); } const AccumT max_input; const AccumT logsum; }; template<typename T, typename AccumT, typename OutT> struct LogSoftMaxBackwardEpilogue { __device__ __forceinline__ LogSoftMaxBackwardEpilogue(AccumT sum) : sum(sum) {} __device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const { return static_cast<T>(gradOutput - std::exp(static_cast<AccumT>(output)) * sum); } const AccumT sum; }; template<typename T, typename AccumT, typename OutT> struct SoftMaxForwardEpilogue { __device__ __forceinline__ SoftMaxForwardEpilogue(AccumT max_input, AccumT sum) : max_input(max_input) , sum(sum) {} __device__ __forceinline__ OutT operator()(T input) const { return static_cast<OutT>(std::exp(input - max_input) / sum); } const AccumT max_input; const AccumT sum; }; template<typename T, typename AccumT, typename OutT> struct SoftMaxBackwardEpilogue { __device__ __forceinline__ SoftMaxBackwardEpilogue(AccumT sum) : sum(sum) {} // XXX: gradOutput that we get here is really gradOutput * output // Look for cmul in SoftMax_updateGradInput __device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const { return static_cast<T>(gradOutput - output * sum); } const AccumT sum; }; //////////////////////////////////////////////////////////////////////////////// // Spatial kernel (fast with large inner_size and small dim_size) //////////////////////////////////////////////////////////////////////////////// // Let's assume that our input has been flattened to have only three dimension: // outer x dim x inner // The spatial algorithm tries to parallelize along all of them. // Within a 2d block threadIdx.y parallelizes over dim slices, and threads that // share it will speed up reductions over dim (along axis x). // The 2d grid is used to parallelize inner dimension over y axis and outer over x. 
inline dim3 SpatialSoftMax_getGridSize( dim3 block, uint32_t max_active_blocks, uint64_t outer_size, uint64_t dim_size, uint64_t inner_size) { // First, tile as many blocks as we can over the y axis uint32_t inner_blocks = (inner_size + block.y - 1) / block.y; if (inner_blocks > max_active_blocks) inner_blocks = max_active_blocks; // Fill the x axis with as many blocks as we can fit (a little more is ok too) uint32_t outer_blocks = (max_active_blocks + inner_blocks - 1) / inner_blocks; if (outer_blocks > outer_size) outer_blocks = outer_size; return dim3(outer_blocks, inner_blocks); } const int max_threads = 1024; inline dim3 SpatialSoftMax_getBlockSize( uint64_t outer_size, uint64_t dim_size, uint64_t inner_size) { uint32_t inner_threads = inner_size; inner_threads = std::min(inner_threads, static_cast<uint32_t>(max_threads)); uint32_t dim_threads = 1; if (inner_threads <= 64 && dim_size >= 64) { while (inner_threads * dim_threads <= max_threads && dim_threads <= dim_size) dim_threads *= 2; dim_threads /= 2; } return dim3(dim_threads, inner_threads); } template<typename accscalar_t, typename Kernel> void SpatialSoftMax_getLaunchSizes( Kernel k, uint64_t outer_size, uint64_t dim_size, uint64_t inner_size, dim3& grid, dim3& block, uint32_t& smem_size) { block = SpatialSoftMax_getBlockSize(outer_size, dim_size, inner_size); uint32_t block_threads = block.x * block.y; smem_size = block.x == 1 ? 0 : block_threads * sizeof(accscalar_t); int max_active_blocks; #ifdef __HIP_PLATFORM_HCC__ // XXX HIP function signature is not compatible yet. uint32_t max_blocks; cudaOccupancyMaxActiveBlocksPerMultiprocessor(&max_blocks, k, block_threads, smem_size); max_active_blocks = max_blocks; #else cudaOccupancyMaxActiveBlocksPerMultiprocessor(&max_active_blocks, k, block_threads, smem_size); #endif max_active_blocks *= at::cuda::getCurrentDeviceProperties()->multiProcessorCount; grid = SpatialSoftMax_getGridSize(block, max_active_blocks, outer_size, dim_size, inner_size); } inline dim3 SoftMax_getBlockSize(int ILP, uint64_t dim_size) { uint64_t block_size = 1; uint64_t max_block_size = std::min(dim_size / ILP, static_cast<uint64_t>(max_threads)); // In the vectorized case we want to trade off allowing more of the buffers to be accessed // in a vectorized way against wanting a larger block size to get better utilisation. // In general with ILP you can have (ILP-1)/ILP of the buffer accessed vectorised, at the risk // of having a very small block size. We choose to keep >= 1/2 of the buffer vectorised while // allowing a larger block size. if (ILP > 1) { max_block_size /= 2; } while (block_size < (max_block_size)) block_size *= 2; // Launch at least a single warp - the kernel assumes that. block_size = std::max(block_size, static_cast<uint64_t>(C10_WARP_SIZE)); return dim3(block_size); } template<typename T> struct Add { __device__ __forceinline__ T operator()(T a, T b) const { return a + b; } }; template<typename T> struct Max { __device__ __forceinline__ T operator()(T a, T b) const { return a < b ? b : a; } }; // Note that it's not a complete block-wide reduction. // Only threads that share threadIdx.y reduce values. 
template<typename T, template<typename> class ReduceOp> __forceinline__ __device__ T spatialBlockReduceX(T *shared, T val) { ReduceOp<T> r; shared += threadIdx.y * blockDim.x; __syncthreads(); shared[threadIdx.x] = val; // NOTE: loop starts with __syncthreads() int offset = blockDim.x / 2; while (offset > 0) { __syncthreads(); if (threadIdx.x < offset) shared[threadIdx.x] = r(shared[threadIdx.x], shared[threadIdx.x + offset]); offset /= 2; } __syncthreads(); return shared[0]; } template <typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue> __global__ void cunn_SpatialSoftMaxForward( outscalar_t *output, scalar_t *input, uint32_t outer_size, uint32_t dim_size, uint32_t inner_size) { extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); const uint32_t outer_stride = inner_size * dim_size; const uint32_t dim_stride = inner_size; for (uint32_t outer_index = blockIdx.x; outer_index < outer_size; outer_index += gridDim.x) { const uint32_t outer_offset = outer_index * outer_stride; for (uint32_t inner_index = blockIdx.y * blockDim.y + threadIdx.y; inner_index < inner_size; inner_index += blockDim.y * gridDim.y) { const uint32_t data_offset = outer_offset + inner_index; //////////////////////////////////////////////////////////// // These two blocks are really equivalent, but specializing on // blockDim.x == 1 makes the kernel faster when it's unused. // I didn't want to thread an extra template parameter, and nvcc // seems to be smart enough to hoist the if outside of the loops. //////////////////////////////////////////////////////////// if (blockDim.x > 1) { accscalar_t max_input = at::numeric_limits<accscalar_t>::lowest(); for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) { const accscalar_t value = static_cast<accscalar_t>(input[data_offset + d * dim_stride]); max_input = Max<accscalar_t>()(max_input, value); } max_input = spatialBlockReduceX<accscalar_t, Max>(sdata,max_input); accscalar_t sum = 0; for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) sum += std::exp(static_cast<accscalar_t>(input[data_offset + d * dim_stride]) - max_input); sum = spatialBlockReduceX<accscalar_t, Add>(sdata, sum); Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_input, sum); for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) output[data_offset + d * dim_stride] = epilogue(input[data_offset + d * dim_stride]); } else { accscalar_t max_input = at::numeric_limits<accscalar_t>::lowest(); for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) { const accscalar_t value = static_cast<accscalar_t>(input[data_offset + d * dim_stride]); max_input = Max<accscalar_t>()(max_input, value); } accscalar_t sum = 0; for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) sum += std::exp(static_cast<accscalar_t>(input[data_offset + d * dim_stride]) - max_input); Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_input, sum); for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) output[data_offset + d * dim_stride] = epilogue(input[data_offset + d * dim_stride]); } } } } template <typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue> __global__ void cunn_SpatialSoftMaxBackward( scalar_t *gradInput, outscalar_t *output, outscalar_t *gradOutput, uint32_t outer_size, uint32_t dim_size, uint32_t inner_size) { extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); 
const uint32_t outer_stride = inner_size * dim_size; const uint32_t dim_stride = inner_size; for (uint32_t outer_index = blockIdx.x; outer_index < outer_size; outer_index += gridDim.x) { const uint32_t outer_offset = outer_index * outer_stride; for (uint32_t inner_index = blockIdx.y * blockDim.y + threadIdx.y; inner_index < inner_size; inner_index += blockDim.y * gridDim.y) { const uint32_t data_offset = outer_offset + inner_index; // See the comment in forward kernel if (blockDim.x > 1) { accscalar_t sum = 0; for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) sum += gradOutput[data_offset + d * dim_stride]; sum = spatialBlockReduceX<accscalar_t, Add>(sdata, sum); Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum); for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) { gradInput[data_offset + d * dim_stride] = epilogue(gradOutput[data_offset + d * dim_stride], output[data_offset + d * dim_stride]); } } else { accscalar_t sum = 0; for (uint32_t d = 0; d < dim_size; d++) sum += gradOutput[data_offset + d * dim_stride]; Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum); for (uint32_t d = 0; d < dim_size; d++) { gradInput[data_offset + d * dim_stride] = epilogue(gradOutput[data_offset + d * dim_stride], output[data_offset + d * dim_stride]); } } } } } //////////////////////////////////////////////////////////////////////////////// // Regular kernel (fast when dim_size is large; requires inner_size == 1) //////////////////////////////////////////////////////////////////////////////// template <typename T, typename AccumT> struct MaxFloat { __device__ __forceinline__ AccumT operator()(AccumT max, T v) const { return ::max(max, (AccumT)v); } }; template<typename T, typename AccumT> struct AddFloat { __device__ __forceinline__ AccumT operator()(AccumT sum, T v) const { return sum + v; } }; template<typename T, typename AccumT> struct SumExpFloat { __device__ __forceinline__ SumExpFloat(AccumT v) : max_k(v) {} __device__ __forceinline__ AccumT operator()(AccumT sum, T v) const { return sum + std::exp(v - max_k); } const AccumT max_k; }; template <template<typename> class Reduction, typename AccumT> __device__ __forceinline__ AccumT blockReduce(AccumT* smem, AccumT val, const Reduction<AccumT>& r, AccumT defaultVal) { // To avoid RaW races from chaining blockReduce calls together, we need a sync here __syncthreads(); smem[threadIdx.x] = val; __syncthreads(); AccumT warpVal = defaultVal; // First warp will perform per-warp reductions for the remaining warps uint32_t mask = (((uint64_t)1) << (blockDim.x / C10_WARP_SIZE)) - 1; if (threadIdx.x < C10_WARP_SIZE) { int lane = threadIdx.x % C10_WARP_SIZE; if (lane < blockDim.x / C10_WARP_SIZE) { #pragma unroll for (int i = 0; i < C10_WARP_SIZE; ++i) { warpVal = r(warpVal, smem[lane * C10_WARP_SIZE + i]); } #ifndef __HIP_PLATFORM_HCC__ __syncwarp(mask); #endif smem[lane] = warpVal; } } __syncthreads(); // First thread will perform a reduction of the above per-warp reductions AccumT blockVal = defaultVal; if (threadIdx.x == 0) { for (int i = 0; i < blockDim.x / C10_WARP_SIZE; ++i) { blockVal = r(blockVal, smem[i]); } smem[0] = blockVal; } // Sync and broadcast __syncthreads(); return smem[0]; } template <template<typename, typename> class Reduction, int ILP, typename T, typename AccumT> __device__ __forceinline__ AccumT ilpReduce(int shift, T* data, int size, const Reduction<T, AccumT>& r, AccumT defaultVal) { using LoadT = at::native::memory::aligned_vector<T, ILP>; AccumT threadVal = defaultVal; int offset = threadIdx.x; 
// shift and do 1 if(shift > 0){ data -= shift; size += shift; if(threadIdx.x >= shift){ threadVal = r(threadVal, data[offset]); } size -= blockDim.x; data += blockDim.x; } int last = size % (ILP * blockDim.x); T v[ILP]; LoadT* value = reinterpret_cast<LoadT*>(&v); for (; offset * ILP < (size - last); offset += blockDim.x) { *value = reinterpret_cast<LoadT*>(data)[offset]; #pragma unroll for (int j = 0; j < ILP; ++j) { threadVal = r(threadVal, v[j]); } } offset = size - last + threadIdx.x; // Epilogue for (; offset < size; offset += blockDim.x) threadVal = r(threadVal, data[offset]); return threadVal; } /** * This will apply the Epilogue with vectorized reads & writes when input & output have the same shift */ template <int ILP, typename scalar_t, typename accum_t, typename outscalar_t, template<typename, typename, typename> class Epilogue> __device__ __forceinline__ void WriteFpropResultsVectorized( int size, const int shift, scalar_t *input, outscalar_t *output, Epilogue<scalar_t, accum_t, outscalar_t> epilogue) { using LoadT = at::native::memory::aligned_vector<scalar_t, ILP>; using StoreT = at::native::memory::aligned_vector<outscalar_t, ILP>; int offset = threadIdx.x; // if unaligned, do one value / thread and move on, guaranteeing aligned reads/writes later if (shift > 0) { input -= shift; output -= shift; size += shift; if (threadIdx.x >= shift) { output[offset] = epilogue(input[offset]); } size -= blockDim.x; input += blockDim.x; output += blockDim.x; } const int last = size % (ILP * blockDim.x); scalar_t in_v[ILP]; LoadT* in_value = reinterpret_cast<LoadT*>(&in_v); outscalar_t out_v[ILP]; StoreT* out_value = reinterpret_cast<StoreT*>(&out_v); for (; offset * ILP < (size - last); offset += blockDim.x) { *in_value = reinterpret_cast<LoadT*>(input)[offset]; #pragma unroll for (int j = 0; j < ILP; ++j) { out_v[j] = epilogue(in_v[j]); } reinterpret_cast<StoreT*>(output)[offset] = *out_value; } offset = size - last + threadIdx.x; // handle the tail for (; offset < size; offset += blockDim.x) { output[offset] = epilogue(input[offset]); } } template <int ILP, typename scalar_t, typename accum_t, typename outscalar_t, template<typename, typename, typename> class Epilogue> __device__ __forceinline__ void WriteBpropResultsVectorized( int size, const int shift, scalar_t *gradInput, outscalar_t *output, outscalar_t *gradOutput, Epilogue<scalar_t, accum_t, outscalar_t> epilogue) { using gradInputT = at::native::memory::aligned_vector<scalar_t, ILP>; using outputT = at::native::memory::aligned_vector<outscalar_t, ILP>; int offset = threadIdx.x; // if unaligned, do one value / thread and move on, guaranteeing aligned reads/writes later if (shift > 0) { gradInput -= shift; output -= shift; gradOutput -= shift; size += shift; if (threadIdx.x >= shift) { gradInput[offset] = epilogue(gradOutput[offset], output[offset]); } size -= blockDim.x; gradInput += blockDim.x; output += blockDim.x; gradOutput += blockDim.x; } const int last = size % (ILP * blockDim.x); scalar_t dX[ILP]; gradInputT *dX_v = reinterpret_cast<gradInputT*>(&dX); outscalar_t Y[ILP]; outputT *Y_v = reinterpret_cast<outputT*>(&Y); outscalar_t dY[ILP]; outputT *dY_v = reinterpret_cast<outputT*>(&dY); for (; offset * ILP < (size - last); offset += blockDim.x) { *Y_v = reinterpret_cast<outputT*>(output)[offset]; *dY_v = reinterpret_cast<outputT*>(gradOutput)[offset]; #pragma unroll for (int j = 0; j < ILP; ++j) { dX[j] = epilogue(dY[j], Y[j]); } reinterpret_cast<gradInputT*>(gradInput)[offset] = *dX_v; } offset = size - last + 
threadIdx.x; for (; offset < size; offset += blockDim.x) { gradInput[offset] = epilogue(gradOutput[offset], output[offset]); } } /** * This will apply the Epilogue with non-vectrorized reads & writes for the general case */ template <int ILP, typename scalar_t, typename accum_t, typename outscalar_t, template<typename, typename, typename> class Epilogue> __device__ __forceinline__ void WriteFpropResults( int classes, scalar_t *input, outscalar_t *output, Epilogue<scalar_t, accum_t, outscalar_t> epilogue) { int offset = threadIdx.x; int last = classes % (ILP * blockDim.x); // Main bulk of loop with ILP for (; offset < classes - last; offset += blockDim.x * ILP) { scalar_t tmp[ILP]; #pragma unroll for (int j = 0; j < ILP; ++j) { tmp[j] = input[offset + j * blockDim.x]; } #pragma unroll for (int j = 0; j < ILP; ++j) { output[offset + j * blockDim.x] = epilogue(tmp[j]); } } // Remainder - no ILP for (; offset < classes; offset += blockDim.x) { output[offset] = epilogue(input[offset]); } } template <int ILP, typename scalar_t, typename accum_t, typename outscalar_t, template<typename, typename, typename> class Epilogue> __device__ __forceinline__ void WriteBpropResults( int classes, scalar_t *gradInput, outscalar_t *output, outscalar_t *gradOutput, Epilogue<scalar_t, accum_t, outscalar_t> epilogue) { int offset = threadIdx.x; int last = classes % (ILP * blockDim.x); for (; offset < classes - last; offset += blockDim.x * ILP) { outscalar_t tmpOutput[ILP]; outscalar_t tmpGradOutput[ILP]; #pragma unroll for (int j = 0; j < ILP; ++j) { tmpOutput[j] = output[offset + j * blockDim.x]; tmpGradOutput[j] = gradOutput[offset + j * blockDim.x]; } #pragma unroll for (int j = 0; j < ILP; ++j) { gradInput[offset + j * blockDim.x] = epilogue(tmpGradOutput[j], tmpOutput[j]); } } // Remainder - no ILP for (; offset < classes; offset += blockDim.x) { gradInput[offset] = epilogue(gradOutput[offset], output[offset]); } } template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template <typename, typename, typename> class Epilogue> __global__ void cunn_SoftMaxForward(outscalar_t *output, scalar_t *input, int classes) { extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); using LoadT = at::native::memory::aligned_vector<scalar_t, ILP>; using StoreT = at::native::memory::aligned_vector<outscalar_t, ILP>; // forward pointers to batch[blockIdx.x] // each block handles a sample in the mini-batch input += blockIdx.x * classes; output += blockIdx.x * classes; const int shift = ((uint64_t)input) % ALIGN_BYTES / sizeof(scalar_t); const int output_shift = ((uint64_t)output) % ALIGN_BYTES / sizeof(outscalar_t); // find the max accscalar_t threadMax = ilpReduce<MaxFloat, ILP, scalar_t, accscalar_t>( shift, input, classes, MaxFloat<scalar_t, accscalar_t>(), -at::numeric_limits<accscalar_t>::max()); accscalar_t max_k = blockReduce<Max, accscalar_t>( sdata, threadMax, Max<accscalar_t>(), -at::numeric_limits<accscalar_t>::max()); // reduce all values accscalar_t threadExp = ilpReduce<SumExpFloat, ILP, scalar_t, accscalar_t>( shift, input, classes, SumExpFloat<scalar_t, accscalar_t>(max_k), static_cast<accscalar_t>(0)); accscalar_t sumAll = blockReduce<Add, accscalar_t>( sdata, threadExp, Add<accscalar_t>(), static_cast<accscalar_t>(0)); Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_k, sumAll); if (shift == output_shift) { WriteFpropResultsVectorized<ILP, scalar_t, accscalar_t, outscalar_t, Epilogue>(classes, shift, input, output, epilogue); } else { 
WriteFpropResults<ILP, scalar_t, accscalar_t, outscalar_t, Epilogue>(classes, input, output, epilogue); } } template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue> __global__ void cunn_SoftMaxBackward(scalar_t *gradInput, outscalar_t *output, outscalar_t *gradOutput, int classes) { using LoadT = at::native::memory::aligned_vector<scalar_t, ILP>; using StoreT = at::native::memory::aligned_vector<outscalar_t, ILP>; extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); gradInput += blockIdx.x * classes; output += blockIdx.x * classes; gradOutput += blockIdx.x * classes; const int shift = ((uint64_t)gradInput) % ALIGN_BYTES / sizeof(scalar_t); const int output_shift = ((uint64_t)output) % ALIGN_BYTES / sizeof(outscalar_t); const int grad_output_shift = ((uint64_t)gradOutput) % ALIGN_BYTES / sizeof(outscalar_t); accscalar_t threadSum = ilpReduce<AddFloat, ILP, outscalar_t, accscalar_t>( shift, gradOutput, classes, AddFloat<outscalar_t, accscalar_t>(), accscalar_t(0)); accscalar_t sum_k = blockReduce<Add, accscalar_t>( sdata, threadSum, Add<accscalar_t>(), accscalar_t(0)); Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum_k); if (shift == output_shift && shift == grad_output_shift) { WriteBpropResultsVectorized<ILP, scalar_t, accscalar_t, outscalar_t, Epilogue>(classes, shift, gradInput, output, gradOutput, epilogue); } else { WriteBpropResults<ILP, scalar_t, accscalar_t, outscalar_t, Epilogue>(classes, gradInput, output, gradOutput, epilogue); } } template<template<typename, typename, typename> class Epilogue, bool is_log_softmax> Tensor host_softmax(const Tensor & input_, const int64_t dim_, const bool half_to_float){ if (half_to_float) AT_ASSERTM(input_.scalar_type() == ScalarType::Half,"conversion is supported for Half type only"); auto input = input_.contiguous(); Tensor output = half_to_float ? at::empty_like(input, input.options().dtype(ScalarType::Float), LEGACY_CONTIGUOUS_MEMORY_FORMAT) : at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); static_assert(std::is_same<acc_type<at::Half, true>, float>::value, "accscalar_t for half should be float"); if (input.dim() == 0) input = input.view(1); int64_t dim = maybe_wrap_dim(dim_, input.dim()); TORCH_CHECK(dim >=0 && dim < input.dim(), "dim must be non-negative and less than input dimensions"); int64_t outer_size = 1; int64_t dim_size = input.size(dim); if (input.numel() > 0) { int64_t inner_size = 1; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); for (int64_t i = 0; i < dim; ++i) outer_size *= input.size(i); for (int64_t i = dim + 1; i < input.dim(); ++i) inner_size *= input.size(i); // This kernel spawns a block per each element in the batch. 
// XXX: it assumes that inner_size == 1 if (inner_size == 1) { dim3 grid(outer_size); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "host_softmax", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "host_softmax", [&] { using accscalar_t = acc_type<scalar_t, true>; if (!half_to_float) { if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) { dispatch_softmax_forward<scalar_t, scalar_t, accscalar_t, is_log_softmax>( output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), dim_size, dim_size, outer_size); } else { constexpr int ILP = sizeof(float4) / sizeof(scalar_t); dim3 block = SoftMax_getBlockSize(ILP, dim_size); cunn_SoftMaxForward<ILP, scalar_t, accscalar_t, scalar_t, Epilogue> <<<grid, block, block.x * sizeof(accscalar_t), stream>>>( output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), dim_size ); } } else { if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) { dispatch_softmax_forward<scalar_t, accscalar_t, accscalar_t, is_log_softmax>( output.data_ptr<accscalar_t>(), input.data_ptr<scalar_t>(), dim_size, dim_size, outer_size); } else { constexpr int ILP = sizeof(float4) / sizeof(accscalar_t); dim3 block = SoftMax_getBlockSize(ILP, dim_size); cunn_SoftMaxForward<ILP, scalar_t, accscalar_t, accscalar_t, Epilogue> <<<grid, block, block.x * sizeof(accscalar_t), stream>>>( output.data_ptr<accscalar_t>(), input.data_ptr<scalar_t>(), dim_size ); } } }); }); // This kernel runs in a 2D grid, where each application along y dimension has a fixed // outer_size, and runs in parallel over inner_size. Dimension x is parallel over outer_size. // Reductions over dim are done in a single-threaded manner. } else { uint32_t smem_size; dim3 grid, block; AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "host_softmax", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "host_softmax", [&] { using accscalar_t = acc_type<scalar_t, true>; if (!half_to_float) { SpatialSoftMax_getLaunchSizes<accscalar_t>( &cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, scalar_t, Epilogue>, outer_size, dim_size, inner_size, grid, block, smem_size); cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, scalar_t, Epilogue> <<<grid, block, smem_size, stream>>>( output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), outer_size, dim_size, inner_size ); } else { SpatialSoftMax_getLaunchSizes<accscalar_t>( &cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, accscalar_t, Epilogue>, outer_size, dim_size, inner_size, grid, block, smem_size); cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, accscalar_t, Epilogue> <<<grid, block, smem_size, stream>>>( output.data_ptr<accscalar_t>(), input.data_ptr<scalar_t>(), outer_size, dim_size, inner_size ); } }); }); } AT_CUDA_CHECK(cudaGetLastError()); } return output; } template<template<typename, typename, typename> class Epilogue, bool is_log_softmax> Tensor host_softmax_backward(const Tensor &grad_, const Tensor &output_, int64_t dim_, bool half_to_float){ int64_t dim = maybe_wrap_dim(dim_, grad_.dim()); Tensor gI = half_to_float ? 
at::empty_like(grad_, grad_.options().dtype(ScalarType::Half), LEGACY_CONTIGUOUS_MEMORY_FORMAT) : at::empty_like(grad_, LEGACY_CONTIGUOUS_MEMORY_FORMAT); if (grad_.numel() == 0) { return gI; } auto grad = grad_.contiguous(); static_assert(std::is_same<acc_type<at::Half, true>, float>::value, "accscalar_t for half should be float"); if (grad.dim() == 0) grad = grad.view(1); TORCH_CHECK(dim >=0 && dim < grad.dim(), "dim must be non-negative and less than input dimensions"); auto output = output_.contiguous(); if (output.dim() == 0) output = output.view(1); int64_t outer_size = 1; int64_t dim_size = output.size(dim); int64_t inner_size = 1; for (int64_t i = 0; i < dim; ++i) outer_size *= output.size(i); for (int64_t i = dim + 1; i < output.dim(); ++i) inner_size *= output.size(i); // See descriptions of kernels above. cudaStream_t stream = at::cuda::getCurrentCUDAStream(); if (inner_size == 1) { dim3 grid(outer_size); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, gI.scalar_type(), "host_softmax_backward", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "host_softmax_backward", [&] { using accscalar_t = acc_type<scalar_t, true>; if (!half_to_float) { if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) { dispatch_softmax_backward<scalar_t, scalar_t, accscalar_t, is_log_softmax>( gI.data_ptr<scalar_t>(), grad.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), dim_size, dim_size, outer_size); } else { constexpr int ILP = sizeof(float4) / sizeof(scalar_t); dim3 block = SoftMax_getBlockSize(ILP, dim_size); cunn_SoftMaxBackward<ILP, scalar_t, accscalar_t, scalar_t, Epilogue> <<<grid, block, block.x * sizeof(accscalar_t), stream>>>( gI.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), grad.data_ptr<scalar_t>(), dim_size ); } } else { if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) { dispatch_softmax_backward<accscalar_t, scalar_t, accscalar_t, is_log_softmax>( gI.data_ptr<scalar_t>(), grad.data_ptr<accscalar_t>(), output.data_ptr<accscalar_t>(), dim_size, dim_size, outer_size); } else { constexpr int ILP = sizeof(float4) / sizeof(accscalar_t); dim3 block = SoftMax_getBlockSize(ILP, dim_size); cunn_SoftMaxBackward<ILP, scalar_t, accscalar_t, accscalar_t, Epilogue> <<<grid, block, block.x * sizeof(accscalar_t), stream>>>( gI.data_ptr<scalar_t>(), output.data_ptr<accscalar_t>(), grad.data_ptr<accscalar_t>(), dim_size ); } } }); }); } else { uint32_t smem_size; dim3 grid, block; AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, gI.scalar_type(), "host_softmax_backward", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "host_softmax_backward", [&] { using accscalar_t = acc_type<scalar_t, true>; if (!half_to_float) { SpatialSoftMax_getLaunchSizes<accscalar_t>( &cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, scalar_t, Epilogue>, outer_size, dim_size, inner_size, grid, block, smem_size); cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, scalar_t, Epilogue> <<<grid, block, smem_size, stream>>>( gI.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), grad.data_ptr<scalar_t>(), outer_size, dim_size, inner_size ); } else { SpatialSoftMax_getLaunchSizes<accscalar_t>( &cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, accscalar_t, Epilogue>, outer_size, dim_size, inner_size, grid, block, smem_size); cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, accscalar_t, Epilogue> <<<grid, block, smem_size, stream>>>( gI.data_ptr<scalar_t>(), output.data_ptr<accscalar_t>(), grad.data_ptr<accscalar_t>(), outer_size, dim_size, inner_size ); } }); 
}); } AT_CUDA_CHECK(cudaGetLastError()); return gI; } } Tensor log_softmax_cuda(const Tensor &input, const int64_t dim, const bool half_to_float){ return host_softmax<LogSoftMaxForwardEpilogue,true>(input, dim, half_to_float); } Tensor log_softmax_backward_cuda(const Tensor &grad, const Tensor &output, int64_t dim, const Tensor &input){ bool half_to_float = grad.scalar_type() != input.scalar_type(); if (half_to_float) { AT_ASSERTM((grad.scalar_type() == ScalarType::Float && input.scalar_type() == ScalarType::Half), "expected input and grad types to match, or input to be at::Half and grad to be at::Float"); } return host_softmax_backward<LogSoftMaxBackwardEpilogue,true>(grad, output, dim, half_to_float); } Tensor softmax_cuda(const Tensor &input, const int64_t dim, const bool half_to_float){ return host_softmax<SoftMaxForwardEpilogue,false>(input, dim, half_to_float); } Tensor softmax_backward_cuda(const Tensor &grad, const Tensor &output, int64_t dim, const Tensor &input){ bool half_to_float = grad.scalar_type() != input.scalar_type(); if (half_to_float) { AT_ASSERTM((grad.scalar_type() == ScalarType::Float && input.scalar_type() == ScalarType::Half), "expected input and grad types to match, or input to be at::Half and grad to be at::Float"); } Tensor tmp = grad * output; return host_softmax_backward<SoftMaxBackwardEpilogue,false>(tmp, output, dim, half_to_float); } } }
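The backward dispatch above collapses the tensor shape into outer_size x dim_size x inner_size around the softmax dimension; as a reference, a minimal host-side C++ sketch of that decomposition (struct and function names are hypothetical, not part of the file):

#include <cstdint>
#include <vector>

// Mirrors the outer/dim/inner split computed before the kernel launches above.
struct SoftmaxShape { int64_t outer_size; int64_t dim_size; int64_t inner_size; };

SoftmaxShape split_around_dim(const std::vector<int64_t>& sizes, int64_t dim) {
  SoftmaxShape s{1, sizes[dim], 1};
  for (int64_t i = 0; i < dim; ++i) s.outer_size *= sizes[i];                                  // dims before the softmax dim
  for (int64_t i = dim + 1; i < static_cast<int64_t>(sizes.size()); ++i) s.inner_size *= sizes[i]; // dims after it
  return s;
}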
bb2ce0ea441ca65d94d0cfc3ac49edffb87d0da3.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "add_hip.cuh"

__global__ void add(int a, int b, int *c) // kernel function; runs on the GPU
{
    *c = a + b;
}

int add(int a, int b)
{
    int c;
    int *dev_c;
    hipMalloc((void**)&dev_c, sizeof(int)); // allocate GPU memory: the first argument receives the address of the new allocation, the second is its size in bytes
    hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, a, b, dev_c); // launch the kernel; dim3(1), dim3(1) (the original <<<1,1>>>) starts 1 thread block containing 1 thread
    hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost); // copy the result from the GPU to the host,
    // i.e. copy sizeof(int) bytes from the region pointed to by dev_c to the region pointed to by &c
    hipFree(dev_c); // free the memory allocated by hipMalloc
    return c;
}

int TEST::ADD()
{
    int c;
    int *dev_c;
    hipMalloc((void**)&dev_c, sizeof(int)); // allocate GPU memory: the first argument receives the address of the new allocation, the second is its size in bytes
    hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, a, b, dev_c); // launch the kernel; dim3(1), dim3(1) (the original <<<1,1>>>) starts 1 thread block containing 1 thread
    hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost); // copy the result from the GPU to the host,
    // i.e. copy sizeof(int) bytes from the region pointed to by dev_c to the region pointed to by &c
    hipFree(dev_c); // free the memory allocated by hipMalloc
    return c;
}
bb2ce0ea441ca65d94d0cfc3ac49edffb87d0da3.cu
#include "add.cuh" __global__ void add(int a, int b, int *c)//kernel函数,在gpu上运行。 { *c = a + b; } int add(int a,int b) { int c; int *dev_c; cudaMalloc((void**)&dev_c, sizeof(int));//分配gpu的内存,第一个参数指向新分配内存的地址,第二个参数是分配内存的大小。 add<<<1,1>>>(a, b, dev_c);//调用kernel函数,<<<1,1>>>指gpu启动1个线程块,每个线程块中有1个线程。 cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);//将gpu上的数据复制到主机上, //即从dev_c指向的存储区域中将sizeof(int)个字节复制到&c指向的存储区域。 cudaFree(dev_c);//释放cudaMalloc分配的内存。 return c; } int TEST::ADD() { int c; int *dev_c; cudaMalloc((void**)&dev_c, sizeof(int));//分配gpu的内存,第一个参数指向新分配内存的地址,第二个参数是分配内存的大小。 add<<<1,1>>>(a, b, dev_c);//调用kernel函数,<<<1,1>>>指gpu启动1个线程块,每个线程块中有1个线程。 cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);//将gpu上的数据复制到主机上, //即从dev_c指向的存储区域中将sizeof(int)个字节复制到&c指向的存储区域。 cudaFree(dev_c);//释放cudaMalloc分配的内存。 return c; }
a70070a52a9396195c6c9677276411e91584abdc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <chrono> using namespace std; void checkError(hipError_t e) { if (e != hipSuccess) { std::cerr << "CUDA error: " << int(e) << " : " << hipGetErrorString(e) << '\n'; abort(); } } // code that will run on the GPU, but can call it from the CPU __global__ void add(int n, double* x, double const* y) { int index = threadIdx.x; int stride = blockDim.x; // blockDim is the number of threads in a block for (int i = index; i < n; i += stride) { x[i] = x[i] + y[i]; } } int main() { int N = 1<<20; // pow(2,20) = 1,048,576 // allocate memory on the host double* x = new double[N]; double* y = new double[N]; // initialize arrays for (int i = 0; i < N; i++) { x[i] = 1.0; y[i] = 2.0; } // allocate memory on the device double* xDevice; double* yDevice; checkError(hipMalloc(&xDevice, N*sizeof(double))); checkError(hipMalloc(&yDevice, N*sizeof(double))); // copy memory from host to device checkError(hipMemcpy(xDevice, x, N*sizeof(double), hipMemcpyHostToDevice)); checkError(hipMemcpy(yDevice, y, N*sizeof(double), hipMemcpyHostToDevice)); auto t1 = std::chrono::high_resolution_clock::now(); // Invoke the CUDA kernel with add<<<NumberOfBlocks, NumberOfThreadsPerBlock>>> hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, N, xDevice, yDevice); // asynchronous checkError(hipDeviceSynchronize()); auto t2 = std::chrono::high_resolution_clock::now(); // copy memory from device back to host checkError(hipMemcpy(x, xDevice, N*sizeof(double), hipMemcpyDeviceToHost)); // check the result for (int i = 0; i < N; ++i) { if (x[i] != 3.0) { std::cerr << "error at array index " << i << " value " << x[i] << " expected 3.0\n"; std::abort(); } } auto duration = std::chrono::duration_cast<std::chrono::microseconds>(t2-t1).count(); std::cout << "Time = " << duration << " us\n"; // clean up checkError(hipFree(xDevice)); checkError(hipFree(yDevice)); delete[] x; delete[] y; }
a70070a52a9396195c6c9677276411e91584abdc.cu
#include <iostream> #include <chrono> using namespace std; void checkError(cudaError_t e) { if (e != cudaSuccess) { std::cerr << "CUDA error: " << int(e) << " : " << cudaGetErrorString(e) << '\n'; abort(); } } // code that will run on the GPU, but can call it from the CPU __global__ void add(int n, double* x, double const* y) { int index = threadIdx.x; int stride = blockDim.x; // blockDim is the number of threads in a block for (int i = index; i < n; i += stride) { x[i] = x[i] + y[i]; } } int main() { int N = 1<<20; // pow(2,20) = 1,048,576 // allocate memory on the host double* x = new double[N]; double* y = new double[N]; // initialize arrays for (int i = 0; i < N; i++) { x[i] = 1.0; y[i] = 2.0; } // allocate memory on the device double* xDevice; double* yDevice; checkError(cudaMalloc(&xDevice, N*sizeof(double))); checkError(cudaMalloc(&yDevice, N*sizeof(double))); // copy memory from host to device checkError(cudaMemcpy(xDevice, x, N*sizeof(double), cudaMemcpyHostToDevice)); checkError(cudaMemcpy(yDevice, y, N*sizeof(double), cudaMemcpyHostToDevice)); auto t1 = std::chrono::high_resolution_clock::now(); // Invoke the CUDA kernel with add<<<NumberOfBlocks, NumberOfThreadsPerBlock>>> add<<<1, 1>>>(N, xDevice, yDevice); // asynchronous checkError(cudaDeviceSynchronize()); auto t2 = std::chrono::high_resolution_clock::now(); // copy memory from device back to host checkError(cudaMemcpy(x, xDevice, N*sizeof(double), cudaMemcpyDeviceToHost)); // check the result for (int i = 0; i < N; ++i) { if (x[i] != 3.0) { std::cerr << "error at array index " << i << " value " << x[i] << " expected 3.0\n"; std::abort(); } } auto duration = std::chrono::duration_cast<std::chrono::microseconds>(t2-t1).count(); std::cout << "Time = " << duration << " us\n"; // clean up checkError(cudaFree(xDevice)); checkError(cudaFree(yDevice)); delete[] x; delete[] y; }
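Note that the add kernel above strides by blockDim.x only, so the <<<1, 1>>> launch makes a single thread walk all N elements. A sketch of a launch that uses the in-block parallelism the kernel already supports, reusing names defined in the file above (256 threads per block is an illustrative choice, not taken from the source):

// Uses the kernel exactly as written above: one block, many threads.
add<<<1, 256>>>(N, xDevice, yDevice);
checkError(cudaDeviceSynchronize());

// A multi-block launch would also need the kernel's indexing to include the block, e.g.
//   int index  = blockIdx.x * blockDim.x + threadIdx.x;
//   int stride = gridDim.x * blockDim.x;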
5a35f696d74df6842592c066c64d6cf2a463472d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <hip/hip_fp16.h> #include <stdio.h> #include <algorithm> #include <cmath> #include "amir_cuda_util/cuda_util.h" #include "grid_sample.h" //// the code copy from /// https://github.com/pytorch/pytorch/blob/ec683299ebabf297a3504c76248d37be830e4342/aten/src/ATen/native/cuda/GridSampler.cuh //// and /// https://github.com/pytorch/pytorch/blob/ec683299ebabf297a3504c76248d37be830e4342/aten/src/ATen/native/cuda/GridSampler.cu namespace amirstan { namespace plugin { using namespace amirstan::cuda; // Unnormalizes a coordinate from the -1 to +1 scale to its pixel index value, // where we view each pixel as an area between (idx - 0.5) and (idx + 0.5). // if align_corners: -1 and +1 get sent to the centers of the corner pixels // -1 --> 0 // +1 --> (size - 1) // scale_factor = (size - 1) / 2 // if not align_corners: -1 and +1 get sent to the image edges // -1 --> -0.5 // +1 --> (size - 1) + 0.5 == size - 0.5 // scale_factor = size / 2 template <typename scalar_t> static __forceinline__ __device__ scalar_t grid_sampler_unnormalize(scalar_t coord, int size, bool align_corners) { if (align_corners) { // unnormalize coord from [-1, 1] to [0, size - 1] return ((coord + 1.f) / 2) * (size - 1); } else { // unnormalize coord from [-1, 1] to [-0.5, size - 0.5] return ((coord + 1.f) * size - 1) / 2; } } // grid_sampler_unnormalize_set_grad works the same as grid_sampler_unnormalize // except that it also returns the `d output / d input` via pointer argument // `grad_in`. // This is useful in the backward pass of grid_sampler. template <typename scalar_t> static __forceinline__ __device__ scalar_t grid_sampler_unnormalize_set_grad( scalar_t coord, int size, bool align_corners, scalar_t *grad_in) { if (align_corners) { // unnormalize coord from [-1, 1] to [0, size - 1] *grad_in = static_cast<scalar_t>(size - 1) / 2; return ((coord + 1.f) / 2) * (size - 1); } else { // unnormalize coord from [-1, 1] to [-0.5, size - 0.5] *grad_in = static_cast<scalar_t>(size) / 2; return ((coord + 1.f) * size - 1) / 2; } } // Clips coordinates to between 0 and clip_limit - 1 template <typename scalar_t> static __forceinline__ __device__ scalar_t clip_coordinates(scalar_t in, int clip_limit) { return ::min(static_cast<scalar_t>(clip_limit - 1), ::max(in, static_cast<scalar_t>(0))); } // clip_coordinates_set_grad works similarly to clip_coordinates except that // it also returns the `d output / d input` via pointer argument `grad_in`. // This is useful in the backward pass of grid_sampler. template <typename scalar_t> static __forceinline__ __device__ scalar_t clip_coordinates_set_grad(scalar_t in, int clip_limit, scalar_t *grad_in) { // Note that it is important for the gradient calculation that borders // are considered out of bounds. if (in <= static_cast<scalar_t>(0)) { *grad_in = static_cast<scalar_t>(0); return static_cast<scalar_t>(0); } else { scalar_t max = static_cast<scalar_t>(clip_limit - 1); if (in >= max) { *grad_in = static_cast<scalar_t>(0); return max; } else { *grad_in = static_cast<scalar_t>(1); return in; } } } // Reflects coordinates until they fall between low and high (inclusive). // The bounds are passed as twice their value so that half-integer values // can be represented as ints. 
template <typename scalar_t> static __forceinline__ __device__ scalar_t reflect_coordinates(scalar_t in, int twice_low, int twice_high) { if (twice_low == twice_high) { return static_cast<scalar_t>(0); } scalar_t min = static_cast<scalar_t>(twice_low) / 2; scalar_t span = static_cast<scalar_t>(twice_high - twice_low) / 2; in = ::fabs(in - min); // `fmod` returns same sign as `in`, which is positive after the `fabs` above. scalar_t extra = ::fmod(in, span); int flips = static_cast<int>(::floor(in / span)); if (flips % 2 == 0) { return extra + min; } else { return span - extra + min; } } // reflect_coordinates_set_grad works similarly to reflect_coordinates except // that it also returns the `d output / d input` via pointer argument // `grad_in`. // This is useful in the backward pass of grid_sampler. template <typename scalar_t> static __forceinline__ __device__ scalar_t reflect_coordinates_set_grad( scalar_t in, int twice_low, int twice_high, scalar_t *grad_in) { if (twice_low == twice_high) { *grad_in = static_cast<scalar_t>(0); return static_cast<scalar_t>(0); } int grad_in_mult_; scalar_t min = static_cast<scalar_t>(twice_low) / 2; scalar_t span = static_cast<scalar_t>(twice_high - twice_low) / 2; in = in - min; if (in < static_cast<scalar_t>(0)) { grad_in_mult_ = -1; in = -in; } else { grad_in_mult_ = 1; } // `fmod` returns same sign as `in`, which is positive after the `if` above. scalar_t extra = ::fmod(in, span); int flips = static_cast<int>(::floor(in / span)); if (flips % 2 == 0) { *grad_in = static_cast<scalar_t>(grad_in_mult_); return extra + min; } else { *grad_in = static_cast<scalar_t>(-grad_in_mult_); return span - extra + min; } } template <typename scalar_t> static __forceinline__ __device__ scalar_t safe_downgrade_to_int_range(scalar_t x) { // -100.0 does not have special meaning. This is just to make sure // it's not within_bounds_2d or within_bounds_3d, and does not cause // undefined behavior. See #35506. if (x > INT_MAX - 1 || x < INT_MIN || !::isfinite(static_cast<double>(x))) return static_cast<scalar_t>(-100.0); return x; } // Computes the pixel source index value for a grid coordinate template <typename scalar_t> static __forceinline__ __device__ scalar_t grid_sampler_compute_source_index( scalar_t coord, int size, GridSamplerPadding padding_mode, bool align_corners) { coord = grid_sampler_unnormalize(coord, size, align_corners); if (padding_mode == GridSamplerPadding::Border) { // clip coordinates to image borders coord = clip_coordinates(coord, size); } else if (padding_mode == GridSamplerPadding::Reflection) { // reflect coordinates by image borders if (align_corners) { coord = reflect_coordinates(coord, 0, 2 * (size - 1)); } else { coord = reflect_coordinates(coord, -1, 2 * size - 1); } // clip coordinates to image borders coord = clip_coordinates(coord, size); } coord = safe_downgrade_to_int_range(coord); return coord; } // grid_sampler_compute_source_index_set_grad works similarly to // grid_sampler_compute_source_index except that it also returns the // `d output / d input` via pointer argument `grad_in`. // This is useful in the backward pass of grid_sampler. 
template <typename scalar_t> static __forceinline__ __device__ scalar_t grid_sampler_compute_source_index_set_grad(scalar_t coord, int size, GridSamplerPadding padding_mode, bool align_corners, scalar_t *grad_in) { scalar_t grad_clip, grad_refl; coord = grid_sampler_unnormalize_set_grad(coord, size, align_corners, grad_in); if (padding_mode == GridSamplerPadding::Border) { // clip coordinates to image borders coord = clip_coordinates_set_grad(coord, size, &grad_clip); *grad_in = (*grad_in) * grad_clip; } else if (padding_mode == GridSamplerPadding::Reflection) { // reflect coordinates by image borders if (align_corners) { coord = reflect_coordinates_set_grad(coord, 0, 2 * (size - 1), &grad_refl); } else { coord = reflect_coordinates_set_grad(coord, -1, 2 * size - 1, &grad_refl); } // clip coordinates to image borders coord = clip_coordinates_set_grad(coord, size, &grad_clip); *grad_in = (*grad_in) * grad_refl * grad_clip; } coord = safe_downgrade_to_int_range(coord); return coord; } static __forceinline__ __device__ bool within_bounds_2d(int h, int w, int H, int W) { return h >= 0 && h < H && w >= 0 && w < W; } static __forceinline__ __device__ bool within_bounds_3d(int d, int h, int w, int D, int H, int W) { return d >= 0 && d < D && h >= 0 && h < H && w >= 0 && w < W; } template <typename scalar_t> static __forceinline__ __device__ void safe_add_2d(scalar_t *data, int h, int w, int sH, int sW, int H, int W, scalar_t delta) { if (within_bounds_2d(h, w, H, W)) { atomicAdd(data + h * sH + w * sW, delta); } } template <typename scalar_t> static __forceinline__ __device__ void safe_add_3d(scalar_t *data, int d, int h, int w, int sD, int sH, int sW, int D, int H, int W, scalar_t delta) { if (within_bounds_3d(d, h, w, D, H, W)) { atomicAdd(data + d * sD + h * sH + w * sW, delta); } } using amirstan::cuda::TensorSize; using amirstan::cuda::TensorStride; template <typename scalar_t> __global__ void grid_sampler_2d_kernel( const int nthreads, const scalar_t *input, const scalar_t *grid, scalar_t *output, TensorSize input_size, TensorSize gride_size, TensorStride input_stride, TensorStride grid_stride, TensorStride output_stride, const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, bool align_corners) { int C = input_size.size[1]; int inp_H = input_size.size[2]; int inp_W = input_size.size[3]; int out_H = gride_size.size[1]; int out_W = gride_size.size[2]; int inp_sN = input_stride.size[0]; int inp_sC = input_stride.size[1]; int inp_sH = input_stride.size[2]; int inp_sW = input_stride.size[3]; int grid_sN = grid_stride.size[0]; int grid_sH = grid_stride.size[1]; int grid_sW = grid_stride.size[2]; int grid_sCoor = grid_stride.size[3]; int out_sN = output_stride.size[0]; int out_sC = output_stride.size[1]; int out_sH = output_stride.size[2]; int out_sW = output_stride.size[3]; CUDA_KERNEL_LOOP(index, nthreads) { const int w = index % out_W; const int h = (index / out_W) % out_H; const int n = index / (out_H * out_W); const int grid_offset = n * grid_sN + h * grid_sH + w * grid_sW; // get the corresponding input x, y co-ordinates from grid scalar_t ix = grid[grid_offset]; scalar_t iy = grid[grid_offset + grid_sCoor]; ix = grid_sampler_compute_source_index(ix, inp_W, padding_mode, align_corners); iy = grid_sampler_compute_source_index(iy, inp_H, padding_mode, align_corners); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // get NE, NW, SE, SW pixel values from (x, y) int ix_nw = static_cast<int>(::floor(ix)); int iy_nw = 
static_cast<int>(::floor(iy)); int ix_ne = ix_nw + 1; int iy_ne = iy_nw; int ix_sw = ix_nw; int iy_sw = iy_nw + 1; int ix_se = ix_nw + 1; int iy_se = iy_nw + 1; // get surfaces to each neighbor: scalar_t nw = (ix_se - ix) * (iy_se - iy); scalar_t ne = (ix - ix_sw) * (iy_sw - iy); scalar_t sw = (ix_ne - ix) * (iy - iy_ne); scalar_t se = (ix - ix_nw) * (iy - iy_nw); // calculate bilinear weighted pixel value and set output pixel auto inp_ptr_NC = input + n * inp_sN; auto out_ptr_NCHW = output + n * out_sN + h * out_sH + w * out_sW; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) { *out_ptr_NCHW = static_cast<scalar_t>(0); if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW] * nw; } if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW] * ne; } if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW] * sw; } if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW] * se; } } } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { int ix_nearest = static_cast<int>(::round(ix)); int iy_nearest = static_cast<int>(::round(iy)); // assign nearest neighor pixel value to output pixel auto inp_ptr_NC = input + n * inp_sN; auto out_ptr_NCHW = output + n * out_sN + h * out_sH + w * out_sW; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) { if (within_bounds_2d(iy_nearest, ix_nearest, inp_H, inp_W)) { *out_ptr_NCHW = inp_ptr_NC[iy_nearest * inp_sH + ix_nearest * inp_sW]; } else { *out_ptr_NCHW = static_cast<scalar_t>(0); } } } } } template <typename scalar_t> __global__ void grid_sampler_3d_kernel( const int nthreads, const scalar_t *input, const scalar_t *grid, scalar_t *output, TensorSize input_size, TensorSize gride_size, TensorStride input_stride, TensorStride grid_stride, TensorStride output_stride, const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, bool align_corners) { int C = input_size.size[1]; int inp_D = input_size.size[2]; int inp_H = input_size.size[3]; int inp_W = input_size.size[4]; int out_D = gride_size.size[1]; int out_H = gride_size.size[2]; int out_W = gride_size.size[3]; int inp_sN = input_stride.size[0]; int inp_sC = input_stride.size[1]; int inp_sD = input_stride.size[2]; int inp_sH = input_stride.size[3]; int inp_sW = input_stride.size[4]; int grid_sN = grid_stride.size[0]; int grid_sD = grid_stride.size[1]; int grid_sH = grid_stride.size[2]; int grid_sW = grid_stride.size[3]; int grid_sCoor = grid_stride.size[4]; int out_sN = output_stride.size[0]; int out_sC = output_stride.size[1]; int out_sD = output_stride.size[2]; int out_sH = output_stride.size[3]; int out_sW = output_stride.size[4]; CUDA_KERNEL_LOOP(index, nthreads) { const int w = index % out_W; const int h = (index / out_W) % out_H; const int d = (index / (out_H * out_W)) % out_D; const int n = index / (out_D * out_H * out_W); const int grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW; // get the corresponding input x, y, z co-ordinates from grid scalar_t ix = grid[grid_offset]; scalar_t iy = grid[grid_offset + grid_sCoor]; scalar_t iz = grid[grid_offset + 2 * grid_sCoor]; ix = grid_sampler_compute_source_index(ix, inp_W, padding_mode, align_corners); iy = grid_sampler_compute_source_index(iy, inp_H, padding_mode, align_corners); iz = grid_sampler_compute_source_index(iz, 
inp_D, padding_mode, align_corners); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // get corner pixel values from (x, y, z) // for 4d, we used north-east-south-west // for 5d, we add top-bottom int ix_tnw = static_cast<int>(::floor(ix)); int iy_tnw = static_cast<int>(::floor(iy)); int iz_tnw = static_cast<int>(::floor(iz)); int ix_tne = ix_tnw + 1; int iy_tne = iy_tnw; int iz_tne = iz_tnw; int ix_tsw = ix_tnw; int iy_tsw = iy_tnw + 1; int iz_tsw = iz_tnw; int ix_tse = ix_tnw + 1; int iy_tse = iy_tnw + 1; int iz_tse = iz_tnw; int ix_bnw = ix_tnw; int iy_bnw = iy_tnw; int iz_bnw = iz_tnw + 1; int ix_bne = ix_tnw + 1; int iy_bne = iy_tnw; int iz_bne = iz_tnw + 1; int ix_bsw = ix_tnw; int iy_bsw = iy_tnw + 1; int iz_bsw = iz_tnw + 1; int ix_bse = ix_tnw + 1; int iy_bse = iy_tnw + 1; int iz_bse = iz_tnw + 1; // get surfaces to each neighbor: scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz); scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz); scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz); scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz); scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse); scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw); scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne); scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw); auto inp_ptr_NC = input + n * inp_sN; auto out_ptr_NCDHW = output + n * out_sN + d * out_sD + h * out_sH + w * out_sW; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) { // (c, iz_tnw, iy_tnw, ix_tnw) * tnw + (c, iz_tne, iy_tne, ix_tne) * // tne // + (c, iz_tsw, iy_tsw, ix_tsw) * tsw + (c, iz_tse, iy_tse, ix_tse) * // tse // + (c, iz_bnw, iy_bnw, ix_bnw) * bnw + (c, iz_bne, iy_bne, ix_bne) * // bne // + (c, iz_bsw, iy_bsw, ix_bsw) * bsw + (c, iz_bse, iy_bse, ix_bse) * // bse *out_ptr_NCDHW = static_cast<scalar_t>(0); if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW] * tnw; } if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW] * tne; } if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW] * tsw; } if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW] * tse; } if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW] * bnw; } if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW] * bne; } if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW] * bsw; } if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW] * bse; } } } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { int ix_nearest = static_cast<int>(::round(ix)); int iy_nearest = static_cast<int>(::round(iy)); int iz_nearest = static_cast<int>(::round(iz)); // assign nearest neighor pixel value to output pixel auto inp_ptr_NC = input + n * inp_sN; auto out_ptr_NCDHW = output + n * out_sN + d * out_sD 
+ h * out_sH + w * out_sW; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) { if (within_bounds_3d(iz_nearest, iy_nearest, ix_nearest, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW = inp_ptr_NC[iz_nearest * inp_sD + iy_nearest * inp_sH + ix_nearest * inp_sW]; } else { *out_ptr_NCDHW = static_cast<scalar_t>(0); } } } } } void create_size_stride(const int *dims, int nb_dims, TensorSize &size, TensorStride &stride) { memcpy(&size.size[0], dims, sizeof(int) * nb_dims); stride.size[nb_dims - 1] = 1; for (int i = nb_dims - 2; i >= 0; --i) { stride.size[i] = stride.size[i + 1] * size.size[i + 1]; } } template <typename T> void grid_sample(T *output, const T *input, const T *grid, int *output_dims, int *input_dims, int *grid_dims, int nb_dims, GridSamplerInterpolation interp, GridSamplerPadding padding, bool align_corners, hipStream_t stream) { TensorSize ts_input_size; TensorStride input_stride; create_size_stride(input_dims, nb_dims, ts_input_size, input_stride); TensorSize ts_output_size; TensorStride output_stride; create_size_stride(output_dims, nb_dims, ts_output_size, output_stride); TensorSize ts_grid_size; TensorStride grid_stride; create_size_stride(grid_dims, nb_dims, ts_grid_size, grid_stride); int count = ts_input_size.size[0]; for (int i = 1; i < nb_dims - 1; ++i) { count *= ts_grid_size.size[i]; } if (nb_dims == 4) { hipLaunchKernelGGL(( grid_sampler_2d_kernel<T>) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, stream, count, input, grid, output, ts_input_size, ts_grid_size, input_stride, grid_stride, output_stride, interp, padding, align_corners); } else if (nb_dims == 5) { hipLaunchKernelGGL(( grid_sampler_3d_kernel<T>) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, stream, count, input, grid, output, ts_input_size, ts_grid_size, input_stride, grid_stride, output_stride, interp, padding, align_corners); } else { printf("input and grid dims should be 4 or 5\n"); } } template void grid_sample<float>(float *output, const float *input, const float *grid, int *output_dims, int *input_dims, int *grid_dims, int nb_dims, GridSamplerInterpolation interp, GridSamplerPadding padding, bool align_corners, hipStream_t stream); } // namespace plugin } // namespace amirstan
5a35f696d74df6842592c066c64d6cf2a463472d.cu
#include <cuda_fp16.h> #include <stdio.h> #include <algorithm> #include <cmath> #include "amir_cuda_util/cuda_util.h" #include "grid_sample.h" //// the code copy from /// https://github.com/pytorch/pytorch/blob/ec683299ebabf297a3504c76248d37be830e4342/aten/src/ATen/native/cuda/GridSampler.cuh //// and /// https://github.com/pytorch/pytorch/blob/ec683299ebabf297a3504c76248d37be830e4342/aten/src/ATen/native/cuda/GridSampler.cu namespace amirstan { namespace plugin { using namespace amirstan::cuda; // Unnormalizes a coordinate from the -1 to +1 scale to its pixel index value, // where we view each pixel as an area between (idx - 0.5) and (idx + 0.5). // if align_corners: -1 and +1 get sent to the centers of the corner pixels // -1 --> 0 // +1 --> (size - 1) // scale_factor = (size - 1) / 2 // if not align_corners: -1 and +1 get sent to the image edges // -1 --> -0.5 // +1 --> (size - 1) + 0.5 == size - 0.5 // scale_factor = size / 2 template <typename scalar_t> static __forceinline__ __device__ scalar_t grid_sampler_unnormalize(scalar_t coord, int size, bool align_corners) { if (align_corners) { // unnormalize coord from [-1, 1] to [0, size - 1] return ((coord + 1.f) / 2) * (size - 1); } else { // unnormalize coord from [-1, 1] to [-0.5, size - 0.5] return ((coord + 1.f) * size - 1) / 2; } } // grid_sampler_unnormalize_set_grad works the same as grid_sampler_unnormalize // except that it also returns the `d output / d input` via pointer argument // `grad_in`. // This is useful in the backward pass of grid_sampler. template <typename scalar_t> static __forceinline__ __device__ scalar_t grid_sampler_unnormalize_set_grad( scalar_t coord, int size, bool align_corners, scalar_t *grad_in) { if (align_corners) { // unnormalize coord from [-1, 1] to [0, size - 1] *grad_in = static_cast<scalar_t>(size - 1) / 2; return ((coord + 1.f) / 2) * (size - 1); } else { // unnormalize coord from [-1, 1] to [-0.5, size - 0.5] *grad_in = static_cast<scalar_t>(size) / 2; return ((coord + 1.f) * size - 1) / 2; } } // Clips coordinates to between 0 and clip_limit - 1 template <typename scalar_t> static __forceinline__ __device__ scalar_t clip_coordinates(scalar_t in, int clip_limit) { return ::min(static_cast<scalar_t>(clip_limit - 1), ::max(in, static_cast<scalar_t>(0))); } // clip_coordinates_set_grad works similarly to clip_coordinates except that // it also returns the `d output / d input` via pointer argument `grad_in`. // This is useful in the backward pass of grid_sampler. template <typename scalar_t> static __forceinline__ __device__ scalar_t clip_coordinates_set_grad(scalar_t in, int clip_limit, scalar_t *grad_in) { // Note that it is important for the gradient calculation that borders // are considered out of bounds. if (in <= static_cast<scalar_t>(0)) { *grad_in = static_cast<scalar_t>(0); return static_cast<scalar_t>(0); } else { scalar_t max = static_cast<scalar_t>(clip_limit - 1); if (in >= max) { *grad_in = static_cast<scalar_t>(0); return max; } else { *grad_in = static_cast<scalar_t>(1); return in; } } } // Reflects coordinates until they fall between low and high (inclusive). // The bounds are passed as twice their value so that half-integer values // can be represented as ints. 
template <typename scalar_t> static __forceinline__ __device__ scalar_t reflect_coordinates(scalar_t in, int twice_low, int twice_high) { if (twice_low == twice_high) { return static_cast<scalar_t>(0); } scalar_t min = static_cast<scalar_t>(twice_low) / 2; scalar_t span = static_cast<scalar_t>(twice_high - twice_low) / 2; in = ::fabs(in - min); // `fmod` returns same sign as `in`, which is positive after the `fabs` above. scalar_t extra = ::fmod(in, span); int flips = static_cast<int>(::floor(in / span)); if (flips % 2 == 0) { return extra + min; } else { return span - extra + min; } } // reflect_coordinates_set_grad works similarly to reflect_coordinates except // that it also returns the `d output / d input` via pointer argument // `grad_in`. // This is useful in the backward pass of grid_sampler. template <typename scalar_t> static __forceinline__ __device__ scalar_t reflect_coordinates_set_grad( scalar_t in, int twice_low, int twice_high, scalar_t *grad_in) { if (twice_low == twice_high) { *grad_in = static_cast<scalar_t>(0); return static_cast<scalar_t>(0); } int grad_in_mult_; scalar_t min = static_cast<scalar_t>(twice_low) / 2; scalar_t span = static_cast<scalar_t>(twice_high - twice_low) / 2; in = in - min; if (in < static_cast<scalar_t>(0)) { grad_in_mult_ = -1; in = -in; } else { grad_in_mult_ = 1; } // `fmod` returns same sign as `in`, which is positive after the `if` above. scalar_t extra = ::fmod(in, span); int flips = static_cast<int>(::floor(in / span)); if (flips % 2 == 0) { *grad_in = static_cast<scalar_t>(grad_in_mult_); return extra + min; } else { *grad_in = static_cast<scalar_t>(-grad_in_mult_); return span - extra + min; } } template <typename scalar_t> static __forceinline__ __device__ scalar_t safe_downgrade_to_int_range(scalar_t x) { // -100.0 does not have special meaning. This is just to make sure // it's not within_bounds_2d or within_bounds_3d, and does not cause // undefined behavior. See #35506. if (x > INT_MAX - 1 || x < INT_MIN || !::isfinite(static_cast<double>(x))) return static_cast<scalar_t>(-100.0); return x; } // Computes the pixel source index value for a grid coordinate template <typename scalar_t> static __forceinline__ __device__ scalar_t grid_sampler_compute_source_index( scalar_t coord, int size, GridSamplerPadding padding_mode, bool align_corners) { coord = grid_sampler_unnormalize(coord, size, align_corners); if (padding_mode == GridSamplerPadding::Border) { // clip coordinates to image borders coord = clip_coordinates(coord, size); } else if (padding_mode == GridSamplerPadding::Reflection) { // reflect coordinates by image borders if (align_corners) { coord = reflect_coordinates(coord, 0, 2 * (size - 1)); } else { coord = reflect_coordinates(coord, -1, 2 * size - 1); } // clip coordinates to image borders coord = clip_coordinates(coord, size); } coord = safe_downgrade_to_int_range(coord); return coord; } // grid_sampler_compute_source_index_set_grad works similarly to // grid_sampler_compute_source_index except that it also returns the // `d output / d input` via pointer argument `grad_in`. // This is useful in the backward pass of grid_sampler. 
template <typename scalar_t> static __forceinline__ __device__ scalar_t grid_sampler_compute_source_index_set_grad(scalar_t coord, int size, GridSamplerPadding padding_mode, bool align_corners, scalar_t *grad_in) { scalar_t grad_clip, grad_refl; coord = grid_sampler_unnormalize_set_grad(coord, size, align_corners, grad_in); if (padding_mode == GridSamplerPadding::Border) { // clip coordinates to image borders coord = clip_coordinates_set_grad(coord, size, &grad_clip); *grad_in = (*grad_in) * grad_clip; } else if (padding_mode == GridSamplerPadding::Reflection) { // reflect coordinates by image borders if (align_corners) { coord = reflect_coordinates_set_grad(coord, 0, 2 * (size - 1), &grad_refl); } else { coord = reflect_coordinates_set_grad(coord, -1, 2 * size - 1, &grad_refl); } // clip coordinates to image borders coord = clip_coordinates_set_grad(coord, size, &grad_clip); *grad_in = (*grad_in) * grad_refl * grad_clip; } coord = safe_downgrade_to_int_range(coord); return coord; } static __forceinline__ __device__ bool within_bounds_2d(int h, int w, int H, int W) { return h >= 0 && h < H && w >= 0 && w < W; } static __forceinline__ __device__ bool within_bounds_3d(int d, int h, int w, int D, int H, int W) { return d >= 0 && d < D && h >= 0 && h < H && w >= 0 && w < W; } template <typename scalar_t> static __forceinline__ __device__ void safe_add_2d(scalar_t *data, int h, int w, int sH, int sW, int H, int W, scalar_t delta) { if (within_bounds_2d(h, w, H, W)) { atomicAdd(data + h * sH + w * sW, delta); } } template <typename scalar_t> static __forceinline__ __device__ void safe_add_3d(scalar_t *data, int d, int h, int w, int sD, int sH, int sW, int D, int H, int W, scalar_t delta) { if (within_bounds_3d(d, h, w, D, H, W)) { atomicAdd(data + d * sD + h * sH + w * sW, delta); } } using amirstan::cuda::TensorSize; using amirstan::cuda::TensorStride; template <typename scalar_t> __global__ void grid_sampler_2d_kernel( const int nthreads, const scalar_t *input, const scalar_t *grid, scalar_t *output, TensorSize input_size, TensorSize gride_size, TensorStride input_stride, TensorStride grid_stride, TensorStride output_stride, const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, bool align_corners) { int C = input_size.size[1]; int inp_H = input_size.size[2]; int inp_W = input_size.size[3]; int out_H = gride_size.size[1]; int out_W = gride_size.size[2]; int inp_sN = input_stride.size[0]; int inp_sC = input_stride.size[1]; int inp_sH = input_stride.size[2]; int inp_sW = input_stride.size[3]; int grid_sN = grid_stride.size[0]; int grid_sH = grid_stride.size[1]; int grid_sW = grid_stride.size[2]; int grid_sCoor = grid_stride.size[3]; int out_sN = output_stride.size[0]; int out_sC = output_stride.size[1]; int out_sH = output_stride.size[2]; int out_sW = output_stride.size[3]; CUDA_KERNEL_LOOP(index, nthreads) { const int w = index % out_W; const int h = (index / out_W) % out_H; const int n = index / (out_H * out_W); const int grid_offset = n * grid_sN + h * grid_sH + w * grid_sW; // get the corresponding input x, y co-ordinates from grid scalar_t ix = grid[grid_offset]; scalar_t iy = grid[grid_offset + grid_sCoor]; ix = grid_sampler_compute_source_index(ix, inp_W, padding_mode, align_corners); iy = grid_sampler_compute_source_index(iy, inp_H, padding_mode, align_corners); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // get NE, NW, SE, SW pixel values from (x, y) int ix_nw = static_cast<int>(::floor(ix)); int iy_nw = 
static_cast<int>(::floor(iy)); int ix_ne = ix_nw + 1; int iy_ne = iy_nw; int ix_sw = ix_nw; int iy_sw = iy_nw + 1; int ix_se = ix_nw + 1; int iy_se = iy_nw + 1; // get surfaces to each neighbor: scalar_t nw = (ix_se - ix) * (iy_se - iy); scalar_t ne = (ix - ix_sw) * (iy_sw - iy); scalar_t sw = (ix_ne - ix) * (iy - iy_ne); scalar_t se = (ix - ix_nw) * (iy - iy_nw); // calculate bilinear weighted pixel value and set output pixel auto inp_ptr_NC = input + n * inp_sN; auto out_ptr_NCHW = output + n * out_sN + h * out_sH + w * out_sW; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) { *out_ptr_NCHW = static_cast<scalar_t>(0); if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW] * nw; } if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW] * ne; } if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW] * sw; } if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW] * se; } } } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { int ix_nearest = static_cast<int>(::round(ix)); int iy_nearest = static_cast<int>(::round(iy)); // assign nearest neighor pixel value to output pixel auto inp_ptr_NC = input + n * inp_sN; auto out_ptr_NCHW = output + n * out_sN + h * out_sH + w * out_sW; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) { if (within_bounds_2d(iy_nearest, ix_nearest, inp_H, inp_W)) { *out_ptr_NCHW = inp_ptr_NC[iy_nearest * inp_sH + ix_nearest * inp_sW]; } else { *out_ptr_NCHW = static_cast<scalar_t>(0); } } } } } template <typename scalar_t> __global__ void grid_sampler_3d_kernel( const int nthreads, const scalar_t *input, const scalar_t *grid, scalar_t *output, TensorSize input_size, TensorSize gride_size, TensorStride input_stride, TensorStride grid_stride, TensorStride output_stride, const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, bool align_corners) { int C = input_size.size[1]; int inp_D = input_size.size[2]; int inp_H = input_size.size[3]; int inp_W = input_size.size[4]; int out_D = gride_size.size[1]; int out_H = gride_size.size[2]; int out_W = gride_size.size[3]; int inp_sN = input_stride.size[0]; int inp_sC = input_stride.size[1]; int inp_sD = input_stride.size[2]; int inp_sH = input_stride.size[3]; int inp_sW = input_stride.size[4]; int grid_sN = grid_stride.size[0]; int grid_sD = grid_stride.size[1]; int grid_sH = grid_stride.size[2]; int grid_sW = grid_stride.size[3]; int grid_sCoor = grid_stride.size[4]; int out_sN = output_stride.size[0]; int out_sC = output_stride.size[1]; int out_sD = output_stride.size[2]; int out_sH = output_stride.size[3]; int out_sW = output_stride.size[4]; CUDA_KERNEL_LOOP(index, nthreads) { const int w = index % out_W; const int h = (index / out_W) % out_H; const int d = (index / (out_H * out_W)) % out_D; const int n = index / (out_D * out_H * out_W); const int grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW; // get the corresponding input x, y, z co-ordinates from grid scalar_t ix = grid[grid_offset]; scalar_t iy = grid[grid_offset + grid_sCoor]; scalar_t iz = grid[grid_offset + 2 * grid_sCoor]; ix = grid_sampler_compute_source_index(ix, inp_W, padding_mode, align_corners); iy = grid_sampler_compute_source_index(iy, inp_H, padding_mode, align_corners); iz = grid_sampler_compute_source_index(iz, 
inp_D, padding_mode, align_corners); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // get corner pixel values from (x, y, z) // for 4d, we used north-east-south-west // for 5d, we add top-bottom int ix_tnw = static_cast<int>(::floor(ix)); int iy_tnw = static_cast<int>(::floor(iy)); int iz_tnw = static_cast<int>(::floor(iz)); int ix_tne = ix_tnw + 1; int iy_tne = iy_tnw; int iz_tne = iz_tnw; int ix_tsw = ix_tnw; int iy_tsw = iy_tnw + 1; int iz_tsw = iz_tnw; int ix_tse = ix_tnw + 1; int iy_tse = iy_tnw + 1; int iz_tse = iz_tnw; int ix_bnw = ix_tnw; int iy_bnw = iy_tnw; int iz_bnw = iz_tnw + 1; int ix_bne = ix_tnw + 1; int iy_bne = iy_tnw; int iz_bne = iz_tnw + 1; int ix_bsw = ix_tnw; int iy_bsw = iy_tnw + 1; int iz_bsw = iz_tnw + 1; int ix_bse = ix_tnw + 1; int iy_bse = iy_tnw + 1; int iz_bse = iz_tnw + 1; // get surfaces to each neighbor: scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz); scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz); scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz); scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz); scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse); scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw); scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne); scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw); auto inp_ptr_NC = input + n * inp_sN; auto out_ptr_NCDHW = output + n * out_sN + d * out_sD + h * out_sH + w * out_sW; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) { // (c, iz_tnw, iy_tnw, ix_tnw) * tnw + (c, iz_tne, iy_tne, ix_tne) * // tne // + (c, iz_tsw, iy_tsw, ix_tsw) * tsw + (c, iz_tse, iy_tse, ix_tse) * // tse // + (c, iz_bnw, iy_bnw, ix_bnw) * bnw + (c, iz_bne, iy_bne, ix_bne) * // bne // + (c, iz_bsw, iy_bsw, ix_bsw) * bsw + (c, iz_bse, iy_bse, ix_bse) * // bse *out_ptr_NCDHW = static_cast<scalar_t>(0); if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW] * tnw; } if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW] * tne; } if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW] * tsw; } if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW] * tse; } if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW] * bnw; } if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW] * bne; } if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW] * bsw; } if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW] * bse; } } } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { int ix_nearest = static_cast<int>(::round(ix)); int iy_nearest = static_cast<int>(::round(iy)); int iz_nearest = static_cast<int>(::round(iz)); // assign nearest neighor pixel value to output pixel auto inp_ptr_NC = input + n * inp_sN; auto out_ptr_NCDHW = output + n * out_sN + d * out_sD 
+ h * out_sH + w * out_sW; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) { if (within_bounds_3d(iz_nearest, iy_nearest, ix_nearest, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW = inp_ptr_NC[iz_nearest * inp_sD + iy_nearest * inp_sH + ix_nearest * inp_sW]; } else { *out_ptr_NCDHW = static_cast<scalar_t>(0); } } } } } void create_size_stride(const int *dims, int nb_dims, TensorSize &size, TensorStride &stride) { memcpy(&size.size[0], dims, sizeof(int) * nb_dims); stride.size[nb_dims - 1] = 1; for (int i = nb_dims - 2; i >= 0; --i) { stride.size[i] = stride.size[i + 1] * size.size[i + 1]; } } template <typename T> void grid_sample(T *output, const T *input, const T *grid, int *output_dims, int *input_dims, int *grid_dims, int nb_dims, GridSamplerInterpolation interp, GridSamplerPadding padding, bool align_corners, cudaStream_t stream) { TensorSize ts_input_size; TensorStride input_stride; create_size_stride(input_dims, nb_dims, ts_input_size, input_stride); TensorSize ts_output_size; TensorStride output_stride; create_size_stride(output_dims, nb_dims, ts_output_size, output_stride); TensorSize ts_grid_size; TensorStride grid_stride; create_size_stride(grid_dims, nb_dims, ts_grid_size, grid_stride); int count = ts_input_size.size[0]; for (int i = 1; i < nb_dims - 1; ++i) { count *= ts_grid_size.size[i]; } if (nb_dims == 4) { grid_sampler_2d_kernel<T> <<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream>>>( count, input, grid, output, ts_input_size, ts_grid_size, input_stride, grid_stride, output_stride, interp, padding, align_corners); } else if (nb_dims == 5) { grid_sampler_3d_kernel<T> <<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream>>>( count, input, grid, output, ts_input_size, ts_grid_size, input_stride, grid_stride, output_stride, interp, padding, align_corners); } else { printf("input and grid dims should be 4 or 5\n"); } } template void grid_sample<float>(float *output, const float *input, const float *grid, int *output_dims, int *input_dims, int *grid_dims, int nb_dims, GridSamplerInterpolation interp, GridSamplerPadding padding, bool align_corners, cudaStream_t stream); } // namespace plugin } // namespace amirstan
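For reference, a hypothetical host-side call to the grid_sample entry point defined above; only the function signature and enum spellings are taken from the file, while the buffer names, shapes, and stream are illustrative assumptions:

// d_out, d_in, d_grid are assumed to be device buffers sized to match the dims
// below, and `stream` an existing cudaStream_t.
int input_dims[4]  = {1, 3, 32, 32};   // NCHW input
int grid_dims[4]   = {1, 64, 64, 2};   // N x outH x outW x 2 sampling grid
int output_dims[4] = {1, 3, 64, 64};   // NCHW output
amirstan::plugin::grid_sample<float>(
    d_out, d_in, d_grid,
    output_dims, input_dims, grid_dims, /*nb_dims=*/4,
    GridSamplerInterpolation::Bilinear,   // enum spellings as used inside the file
    GridSamplerPadding::Border,
    /*align_corners=*/false, stream);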
11d868a296be3314a40e6c063698a963307ef202.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void prime( int *a, int *b, int *c ) { int tid = (blockIdx.x*blockDim.x) + threadIdx.x; // this thread handles the data at its thread id if (tid < vector_size){ c[tid] = a[tid] + b[tid]; // add vectors together } }
11d868a296be3314a40e6c063698a963307ef202.cu
#include "includes.h" __global__ void prime( int *a, int *b, int *c ) { int tid = (blockIdx.x*blockDim.x) + threadIdx.x; // this thread handles the data at its thread id if (tid < vector_size){ c[tid] = a[tid] + b[tid]; // add vectors together } }
ee430137baf5fc6e9e7dac30ec7f2067f584e80f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Intro to GPU Programming/ / Module 7 Assignment // // #include <stdlib.h> #include <stdio.h> #include <unistd.h> #include <stdint.h> #include <assert.h> #include <time.h> #include <math.h> static const uint32_t DEFAULT_NUM_BLOCKS = 2048; .... static const int DEFAULT_SEED = -1; static float MIN_VAL = -10000.0; // Print usage information static void usage(){ printf("Usage: ./assignment7 [-t <num_threads>] [-b <num_blocks>] [-s ] [-i ] [-p] [-h]\n"); printf("\t-t: Specify the number of threads. <num_threads> must be greater than 0. Optional (default %u)\n", DEFAULT_NUM_THREADS); printf("\t-b: Specify the number of blocks. <num_blocks> must be greater than 0. Optional (default %u)\n", DEFAULT_NUM_BLOCKS); printf("\t-s: Specify a seed for the random number generator. must be greater or equal to zero. Optional (default is random)\n"); printf("\t-i: Specify the number of kernel iterations for benchmarking. Optional (default is %u)\n", DEFAULT_NUM_ITERATIONS); .... } // Structure that holds program arguments specifying number of threads/blocks // to use. typedef struct { uint32_t num_threads; uint32_t num_blocks; int seed; .... } Arguments; static void check_arg(const int value, const int c){ if (value <= 0) { printf("Error: invalid value (%d) for arg (%c). Must be positive\n", value, c); exit(-1); } } // Parse the command line arguments using getopt and return an Argument structure // GetOpt requies the POSIX C Library static Arguments parse_arguments(const int argc, char ** argv){ // Argument format string for getopt static const char * _ARG_STR = "ht:b:s:i:"; // Initialize arguments to their default values Arguments args; args.num_threads = DEFAULT_NUM_THREADS; args.num_blocks = DEFAULT_NUM_BLOCKS; args.seed = DEFAULT_SEED; args.iterations = DEFAULT_NUM_ITERATIONS; // Parse any command line options int c; int value; while ((c = getopt(argc, argv, _ARG_STR)) != -1) { switch (c) { case 't': value = atoi(optarg); ... break; case 'b': // Normal argument value = atoi(optarg); check_arg(value, c); ... break; case 'h': // 'help': print usage, then exit // note the fall through usage(); default: exit(-1); } } return args; } static float rand_float(){ const float scale = rand() / (float) RAND_MAX; return MIN_VAL + scale * (MAX_VAL - MIN_VAL); } static float * alloc_vector(const uint32_t len, const int pinned){ void * vec; if (pinned) { .... } else { vec = malloc(len * sizeof(float)); } return (float *) vec; } static void free_vector(float * vec, int pinned){ if (pinned) { hipHostFree(vec); } else { free(vec); } } // Initialize two random vector of floats in the range (-100, 100) of length len. // If seed > 0, use it to seed the random number generator. static void init_random_vectors(float ** a, float ** result, const int seed, const uint32_t len, const int pinned){ if (seed >= 0) { srand(seed); } float * vecA = alloc_vector(len, pinned); assert(vecA != NULL); float * vecResult = alloc_vector(len, pinned); assert(vecResult != NULL); for (uint32_t i = 0; i < len; i++) { ... 
} *a = vecA; *result = vecResult; } // calculate square root of each element using fast-inverse square root method __global__ void fast_sqrt(const float * a, float * output){ const unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x; float in = a[idx]; int32_t i; float x2, y; const float threehalves = 1.5f; x2 = in * 0.5; y = in; i = *((int32_t *) &in); i = 0x5f3759df - (i >> 1); y = *((float *) &i); y = y * (threehalves - (x2 * y * y)); y = y * (threehalves - (x2 * y * y)); output[idx] = 1.0 / y; } // Make sure results match within some error (1%) static void check_result(const float * const a, const float * const result, const uint32_t len){ for (uint32_t i = 0; i < len; i++) { const float cpu_result = sqrt(a[i]); const float epsilon = fabs(cpu_result * .01); if (fabs(cpu_result - result[i]) < 0.0001 ) { // special case when results are very, very small continue; } if (fabs(cpu_result - result[i]) > epsilon) { printf("Error: CPU and GPU results do not agree at index %u (cpu = %f, gpu = %f)\n", i, cpu_result, result[i]); return; } } } // Allocate cuda memory, generate the random vectors, and run the cuda kernel static void run_cuda(Arguments args){ float elapsed_s = 0.0; float milliseconds = 0.0; hipStream_t streams[NUM_STREAMS]; hipEvent_t start, stop; // Create events hipEventCreate(&start); hipEventCreate(&stop); // Create streams for (uint8_t snum = 0; snum < NUM_STREAMS; snum++) { hipStreamCreate(streams + snum); } for (uint32_t i = 0; i < args.iterations; i++) { // Allocate Host memory float * a = NULL; float * result; init_random_vectors(&a, &result, args.seed, NUM_STREAMS * args.num_threads, args.pinned); // Allocate GPU Memory float * gpu_a = NULL; float * gpu_result = NULL; uint32_t array_len_bytes = NUM_STREAMS * args.num_threads * sizeof(float); hipMalloc((void **) &gpu_a, array_len_bytes); hipMalloc((void **) &gpu_result, array_len_bytes); // Start execution time measurement hipEventRecord(start); const uint32_t threads_per_block = args.num_threads / args.num_blocks; if (args.depth_first_exeuction) { for (uint8_t snum = 0; snum < NUM_STREAMS; snum++) { ... } } else { for (uint8_t snum = 0; snum < NUM_STREAMS; snum++) { ... } for (uint8_t snum = 0; snum < NUM_STREAMS; snum++) { const int offset = snum * args.num_threads; hipLaunchKernelGGL(( fast_sqrt), dim3(args.num_blocks), dim3(threads_per_block), 0, streams[snum], gpu_a + offset, gpu_result + offset); } for (uint8_t snum = 0; snum < NUM_STREAMS; snum++) { ... } } hipEventRecord(stop); // Calculate execution time hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); ... printf("\nRan %u iterations\n", args.iterations); printf("Average execution time of %f seconds while using %s-first execution\n", avg_elapsed, args.depth_first_exeuction? "depth" : "breadth"); hipEventDestroy(start); hipEventDestroy(stop); for (uint8_t snum = 0; snum < NUM_STREAMS; snum++) { hipStreamDestroy(streams[snum]); } } int main(int argc, char ** argv) { Arguments args = parse_arguments(argc, argv); printf("Num Threads: %u, Num Blocks: %u\n", args.num_threads, args.num_blocks); // always use pinned memory for streams ... run_cuda(args); return EXIT_SUCCESS; }
ee430137baf5fc6e9e7dac30ec7f2067f584e80f.cu
// Intro to GPU Programming/ / Module 7 Assignment // // #include <stdlib.h> #include <stdio.h> #include <unistd.h> #include <stdint.h> #include <assert.h> #include <time.h> #include <math.h> static const uint32_t DEFAULT_NUM_BLOCKS = 2048; .... static const int DEFAULT_SEED = -1; static float MIN_VAL = -10000.0; // Print usage information static void usage(){ printf("Usage: ./assignment7 [-t <num_threads>] [-b <num_blocks>] [-s ] [-i ] [-p] [-h]\n"); printf("\t-t: Specify the number of threads. <num_threads> must be greater than 0. Optional (default %u)\n", DEFAULT_NUM_THREADS); printf("\t-b: Specify the number of blocks. <num_blocks> must be greater than 0. Optional (default %u)\n", DEFAULT_NUM_BLOCKS); printf("\t-s: Specify a seed for the random number generator. must be greater or equal to zero. Optional (default is random)\n"); printf("\t-i: Specify the number of kernel iterations for benchmarking. Optional (default is %u)\n", DEFAULT_NUM_ITERATIONS); .... } // Structure that holds program arguments specifying number of threads/blocks // to use. typedef struct { uint32_t num_threads; uint32_t num_blocks; int seed; .... } Arguments; static void check_arg(const int value, const int c){ if (value <= 0) { printf("Error: invalid value (%d) for arg (%c). Must be positive\n", value, c); exit(-1); } } // Parse the command line arguments using getopt and return an Argument structure // GetOpt requies the POSIX C Library static Arguments parse_arguments(const int argc, char ** argv){ // Argument format string for getopt static const char * _ARG_STR = "ht:b:s:i:"; // Initialize arguments to their default values Arguments args; args.num_threads = DEFAULT_NUM_THREADS; args.num_blocks = DEFAULT_NUM_BLOCKS; args.seed = DEFAULT_SEED; args.iterations = DEFAULT_NUM_ITERATIONS; // Parse any command line options int c; int value; while ((c = getopt(argc, argv, _ARG_STR)) != -1) { switch (c) { case 't': value = atoi(optarg); ... break; case 'b': // Normal argument value = atoi(optarg); check_arg(value, c); ... break; case 'h': // 'help': print usage, then exit // note the fall through usage(); default: exit(-1); } } return args; } static float rand_float(){ const float scale = rand() / (float) RAND_MAX; return MIN_VAL + scale * (MAX_VAL - MIN_VAL); } static float * alloc_vector(const uint32_t len, const int pinned){ void * vec; if (pinned) { .... } else { vec = malloc(len * sizeof(float)); } return (float *) vec; } static void free_vector(float * vec, int pinned){ if (pinned) { cudaFreeHost(vec); } else { free(vec); } } // Initialize two random vector of floats in the range (-100, 100) of length len. // If seed > 0, use it to seed the random number generator. static void init_random_vectors(float ** a, float ** result, const int seed, const uint32_t len, const int pinned){ if (seed >= 0) { srand(seed); } float * vecA = alloc_vector(len, pinned); assert(vecA != NULL); float * vecResult = alloc_vector(len, pinned); assert(vecResult != NULL); for (uint32_t i = 0; i < len; i++) { ... 
} *a = vecA; *result = vecResult; } // calculate square root of each element using fast-inverse square root method __global__ void fast_sqrt(const float * a, float * output){ const unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x; float in = a[idx]; int32_t i; float x2, y; const float threehalves = 1.5f; x2 = in * 0.5; y = in; i = *((int32_t *) &in); i = 0x5f3759df - (i >> 1); y = *((float *) &i); y = y * (threehalves - (x2 * y * y)); y = y * (threehalves - (x2 * y * y)); output[idx] = 1.0 / y; } // Make sure results match within some error (1%) static void check_result(const float * const a, const float * const result, const uint32_t len){ for (uint32_t i = 0; i < len; i++) { const float cpu_result = sqrt(a[i]); const float epsilon = fabs(cpu_result * .01); if (fabs(cpu_result - result[i]) < 0.0001 ) { // special case when results are very, very small continue; } if (fabs(cpu_result - result[i]) > epsilon) { printf("Error: CPU and GPU results do not agree at index %u (cpu = %f, gpu = %f)\n", i, cpu_result, result[i]); return; } } } // Allocate cuda memory, generate the random vectors, and run the cuda kernel static void run_cuda(Arguments args){ float elapsed_s = 0.0; float milliseconds = 0.0; cudaStream_t streams[NUM_STREAMS]; cudaEvent_t start, stop; // Create events cudaEventCreate(&start); cudaEventCreate(&stop); // Create streams for (uint8_t snum = 0; snum < NUM_STREAMS; snum++) { cudaStreamCreate(streams + snum); } for (uint32_t i = 0; i < args.iterations; i++) { // Allocate Host memory float * a = NULL; float * result; init_random_vectors(&a, &result, args.seed, NUM_STREAMS * args.num_threads, args.pinned); // Allocate GPU Memory float * gpu_a = NULL; float * gpu_result = NULL; uint32_t array_len_bytes = NUM_STREAMS * args.num_threads * sizeof(float); cudaMalloc((void **) &gpu_a, array_len_bytes); cudaMalloc((void **) &gpu_result, array_len_bytes); // Start execution time measurement cudaEventRecord(start); const uint32_t threads_per_block = args.num_threads / args.num_blocks; if (args.depth_first_exeuction) { for (uint8_t snum = 0; snum < NUM_STREAMS; snum++) { ... } } else { for (uint8_t snum = 0; snum < NUM_STREAMS; snum++) { ... } for (uint8_t snum = 0; snum < NUM_STREAMS; snum++) { const int offset = snum * args.num_threads; fast_sqrt<<<args.num_blocks, threads_per_block, 0, streams[snum]>>>(gpu_a + offset, gpu_result + offset); } for (uint8_t snum = 0; snum < NUM_STREAMS; snum++) { ... } } cudaEventRecord(stop); // Calculate execution time cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); ... printf("\nRan %u iterations\n", args.iterations); printf("Average execution time of %f seconds while using %s-first execution\n", avg_elapsed, args.depth_first_exeuction? "depth" : "breadth"); cudaEventDestroy(start); cudaEventDestroy(stop); for (uint8_t snum = 0; snum < NUM_STREAMS; snum++) { cudaStreamDestroy(streams[snum]); } } int main(int argc, char ** argv) { Arguments args = parse_arguments(argc, argv); printf("Num Threads: %u, Num Blocks: %u\n", args.num_threads, args.num_blocks); // always use pinned memory for streams ... run_cuda(args); return EXIT_SUCCESS; }
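The fast_sqrt kernel above reinterprets float bits by casting through pointers. A sketch of the same two-iteration computation using CUDA's __float_as_int / __int_as_float device intrinsics, which express the bit reinterpretation directly (helper name is hypothetical):

__device__ float fast_rsqrt(float x)
{
    const float x2 = 0.5f * x;
    int i = __float_as_int(x);            // reinterpret the float's bit pattern as an int
    i = 0x5f3759df - (i >> 1);            // magic-constant initial guess
    float y = __int_as_float(i);
    y = y * (1.5f - x2 * y * y);          // first Newton-Raphson refinement
    y = y * (1.5f - x2 * y * y);          // second refinement, matching the kernel above
    return y;                             // y approximates 1/sqrt(x); the kernel stores 1.0f / y, i.e. ~sqrt(x)
}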
fa7a206ae9131daa471b6eb6fffdd9815c226b0b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <quda_internal.h> #include <quda_matrix.h> #include <tune_quda.h> #include <gauge_field.h> #include <gauge_field_order.h> #include <launch_kernel.cuh> #include <comm_quda.h> #include <unitarization_links.h> #include <pgauge_monte.h> #include <random_quda.h> #include <hipcub/hipcub.hpp> #include <index_helper.cuh> #ifndef PI #define PI 3.1415926535897932384626433832795 // pi #endif #ifndef PII #define PII 6.2831853071795864769252867665590 // 2 * pi #endif namespace quda { #ifdef GPU_GAUGE_ALG template <typename Gauge> struct InitGaugeColdArg { int threads; // number of active threads required int X[4]; // grid dimensions Gauge dataOr; InitGaugeColdArg(const Gauge &dataOr, const cudaGaugeField &data) : dataOr(dataOr) { for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir]; threads = X[0] * X[1] * X[2] * X[3]; } }; template<typename Float, typename Gauge, int NCOLORS> __global__ void compute_InitGauge_ColdStart(InitGaugeColdArg<Gauge> arg){ int idx = threadIdx.x + blockIdx.x * blockDim.x; if ( idx >= arg.threads ) return; int parity = 0; if ( idx >= arg.threads / 2 ) { parity = 1; idx -= arg.threads / 2; } Matrix<complex<Float>,NCOLORS> U; setIdentity(&U); for ( int d = 0; d < 4; d++ ) arg.dataOr.save((Float*)(U.data),idx, d, parity); } template<typename Float, typename Gauge, int NCOLORS> class InitGaugeCold : Tunable { InitGaugeColdArg<Gauge> arg; mutable char aux_string[128]; // used as a label in the autotuner private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } //bool tuneSharedBytes() const { return false; } // Don't tune shared memory bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. unsigned int minThreads() const { return arg.threads; } public: InitGaugeCold(InitGaugeColdArg<Gauge> &arg) : arg(arg) { } ~InitGaugeCold () { } void apply(const hipStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); compute_InitGauge_ColdStart<Float, Gauge, NCOLORS><< < tp.grid,tp.block >> > (arg); //hipDeviceSynchronize(); } TuneKey tuneKey() const { std::stringstream vol; vol << arg.X[0] << "x"; vol << arg.X[1] << "x"; vol << arg.X[2] << "x"; vol << arg.X[3]; sprintf(aux_string,"threads=%d,prec=%lu", arg.threads, sizeof(Float)); return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string); } long long flops() const { return 0; } // Only correct if there is no link reconstruction, no cub reduction accounted also long long bytes() const { return 0; } //no accounting the reduction!!!! 
}; template<typename Float, int NCOLORS, typename Gauge> void InitGaugeField( Gauge dataOr, cudaGaugeField& data) { InitGaugeColdArg<Gauge> initarg(dataOr, data); InitGaugeCold<Float, Gauge, NCOLORS> init(initarg); init.apply(0); checkCudaError(); } template<typename Float> void InitGaugeField( cudaGaugeField& data) { if ( data.isNative() ) { if ( data.Reconstruct() == QUDA_RECONSTRUCT_NO ) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type Gauge; InitGaugeField<Float, 3>(Gauge(data), data); } else if ( data.Reconstruct() == QUDA_RECONSTRUCT_12 ) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type Gauge; InitGaugeField<Float, 3>(Gauge(data), data); } else if ( data.Reconstruct() == QUDA_RECONSTRUCT_8 ) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type Gauge; InitGaugeField<Float, 3>(Gauge(data), data); } else { errorQuda("Reconstruction type %d of gauge field not supported", data.Reconstruct()); } } else { errorQuda("Invalid Gauge Order\n"); } } /** @brief Perform a cold start to the gauge field, identity SU(3) matrix, also fills the ghost links in multi-GPU case (no need to exchange data) * * @param[in,out] data Gauge field */ void InitGaugeField( cudaGaugeField& data) { if ( data.Precision() == QUDA_SINGLE_PRECISION ) { InitGaugeField<float> (data); } else if ( data.Precision() == QUDA_DOUBLE_PRECISION ) { InitGaugeField<double>(data); } else { errorQuda("Precision %d not supported", data.Precision()); } } template <typename Gauge> struct InitGaugeHotArg { int threads; // number of active threads required int X[4]; // grid dimensions RNG rngstate; #ifdef MULTI_GPU int border[4]; #endif Gauge dataOr; InitGaugeHotArg(const Gauge &dataOr, const cudaGaugeField &data, RNG &rngstate) : dataOr(dataOr), rngstate(rngstate) { #ifdef MULTI_GPU for ( int dir = 0; dir < 4; ++dir ) { border[dir] = data.R()[dir]; X[dir] = data.X()[dir] - border[dir] * 2; } #else for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir]; #endif //the optimal number of RNG states in rngstate array must be equal to half the lattice volume //this number is the same used in heatbath... 
threads = X[0] * X[1] * X[2] * X[3] >> 1; } }; template <typename Float> __host__ __device__ static inline void reunit_link( Matrix<complex<Float>,3> &U ){ complex<Float> t2((Float)0.0, (Float)0.0); Float t1 = 0.0; //first normalize first row //sum of squares of row #pragma unroll for ( int c = 0; c < 3; c++ ) t1 += norm(U(0,c)); t1 = (Float)1.0 / sqrt(t1); //14 //used to normalize row #pragma unroll for ( int c = 0; c < 3; c++ ) U(0,c) *= t1; //6 #pragma unroll for ( int c = 0; c < 3; c++ ) t2 += conj(U(0,c)) * U(1,c); //24 #pragma unroll for ( int c = 0; c < 3; c++ ) U(1,c) -= t2 * U(0,c); //24 //normalize second row //sum of squares of row t1 = 0.0; #pragma unroll for ( int c = 0; c < 3; c++ ) t1 += norm(U(1,c)); t1 = (Float)1.0 / sqrt(t1); //14 //used to normalize row #pragma unroll for ( int c = 0; c < 3; c++ ) U(1, c) *= t1; //6 //Reconstruct lat row U(2,0) = conj(U(0,1) * U(1,2) - U(0,2) * U(1,1)); U(2,1) = conj(U(0,2) * U(1,0) - U(0,0) * U(1,2)); U(2,2) = conj(U(0,0) * U(1,1) - U(0,1) * U(1,0)); //42 //T=130 } /** @brief Generate the four random real elements of the SU(2) matrix @param localstate CURAND rng state @return four real numbers of the SU(2) matrix */ template <class T> __device__ static inline Matrix<T,2> randomSU2(cuRNGState& localState){ Matrix<T,2> a; T aabs, ctheta, stheta, phi; a(0,0) = Random<T>(localState, (T)-1.0, (T)1.0); aabs = sqrt( 1.0 - a(0,0) * a(0,0)); ctheta = Random<T>(localState, (T)-1.0, (T)1.0); phi = PII * Random<T>(localState); stheta = ( hiprand(&localState) & 1 ? 1 : -1 ) * sqrt( (T)1.0 - ctheta * ctheta ); a(0,1) = aabs * stheta * cos( phi ); a(1,0) = aabs * stheta * sin( phi ); a(1,1) = aabs * ctheta; return a; } /** @brief Update the SU(Nc) link with the new SU(2) matrix, link <- u * link @param u SU(2) matrix represented by four real numbers @param link SU(Nc) matrix @param id indices */ template <class T, int NCOLORS> __host__ __device__ static inline void mul_block_sun( Matrix<T,2> u, Matrix<complex<T>,NCOLORS> &link, int2 id ){ for ( int j = 0; j < NCOLORS; j++ ) { complex<T> tmp = complex<T>( u(0,0), u(1,1) ) * link(id.x, j) + complex<T>( u(1,0), u(0,1) ) * link(id.y, j); link(id.y, j) = complex<T>(-u(1,0), u(0,1) ) * link(id.x, j) + complex<T>( u(0,0),-u(1,1) ) * link(id.y, j); link(id.x, j) = tmp; } } /** @brief Calculate the SU(2) index block in the SU(Nc) matrix @param block number to calculate the index's, the total number of blocks is NCOLORS * ( NCOLORS - 1) / 2. @return Returns two index's in int2 type, accessed by .x and .y. 
*/ template<int NCOLORS> __host__ __device__ static inline int2 IndexBlock(int block){ int2 id; int i1; int found = 0; int del_i = 0; int index = -1; while ( del_i < (NCOLORS - 1) && found == 0 ) { del_i++; for ( i1 = 0; i1 < (NCOLORS - del_i); i1++ ) { index++; if ( index == block ) { found = 1; break; } } } id.y = i1 + del_i; id.x = i1; return id; } /** @brief Generate a SU(Nc) random matrix @param localstate CURAND rng state @return SU(Nc) matrix */ template <class Float, int NCOLORS> __device__ inline Matrix<complex<Float>,NCOLORS> randomize( cuRNGState& localState ){ Matrix<complex<Float>,NCOLORS> U; for ( int i = 0; i < NCOLORS; i++ ) for ( int j = 0; j < NCOLORS; j++ ) U(i,j) = complex<Float>( (Float)(Random<Float>(localState) - 0.5), (Float)(Random<Float>(localState) - 0.5) ); reunit_link<Float>(U); return U; /*setIdentity(&U); for( int block = 0; block < NCOLORS * ( NCOLORS - 1) / 2; block++ ) { Matrix<Float,2> rr = randomSU2<Float>(localState); int2 id = IndexBlock<NCOLORS>( block ); mul_block_sun<Float, NCOLORS>(rr, U, id); //U = block_su2_to_su3<Float>( U, a00, a01, a10, a11, block ); } return U;*/ } template<typename Float, typename Gauge, int NCOLORS> __global__ void compute_InitGauge_HotStart(InitGaugeHotArg<Gauge> arg){ int idx = threadIdx.x + blockIdx.x * blockDim.x; if ( idx >= arg.threads ) return; #ifdef MULTI_GPU int X[4], x[4]; for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr]; for ( int dr = 0; dr < 4; ++dr ) X[dr] += 2 * arg.border[dr]; int id = idx; cuRNGState localState = arg.rngstate.State()[ id ]; #else cuRNGState localState = arg.rngstate.State()[ idx ]; #endif for ( int parity = 0; parity < 2; parity++ ) { #ifdef MULTI_GPU getCoords(x, id, arg.X, parity); for ( int dr = 0; dr < 4; ++dr ) x[dr] += arg.border[dr]; idx = linkIndex(x,X); #endif for ( int d = 0; d < 4; d++ ) { Matrix<complex<Float>,NCOLORS> U; U = randomize<Float, NCOLORS>(localState); arg.dataOr.save((Float*)(U.data),idx, d, parity); } } #ifdef MULTI_GPU arg.rngstate.State()[ id ] = localState; #else arg.rngstate.State()[ idx ] = localState; #endif } template<typename Float, typename Gauge, int NCOLORS> class InitGaugeHot : Tunable { InitGaugeHotArg<Gauge> arg; mutable char aux_string[128]; // used as a label in the autotuner private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } bool tuneSharedBytes() const { return false; } // Don't tune shared memory bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. unsigned int minThreads() const { return arg.threads; } public: InitGaugeHot(InitGaugeHotArg<Gauge> &arg) : arg(arg) { } ~InitGaugeHot () { } void apply(const hipStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); compute_InitGauge_HotStart<Float, Gauge, NCOLORS><< < tp.grid,tp.block >> > (arg); //hipDeviceSynchronize(); } TuneKey tuneKey() const { std::stringstream vol; vol << arg.X[0] << "x"; vol << arg.X[1] << "x"; vol << arg.X[2] << "x"; vol << arg.X[3]; sprintf(aux_string,"threads=%d,prec=%lud", arg.threads, sizeof(Float)); return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string); } void preTune(){ arg.rngstate.backup(); } void postTune(){ arg.rngstate.restore(); } long long flops() const { return 0; } // Only correct if there is no link reconstruction, no cub reduction accounted also long long bytes() const { return 0; } //no accounting the reduction!!!! 
}; template<typename Float, int NCOLORS, typename Gauge> void InitGaugeField( Gauge dataOr, cudaGaugeField& data, RNG &rngstate) { InitGaugeHotArg<Gauge> initarg(dataOr, data, rngstate); InitGaugeHot<Float, Gauge, NCOLORS> init(initarg); init.apply(0); checkCudaError(); qudaDeviceSynchronize(); data.exchangeExtendedGhost(data.R(),false); } template<typename Float> void InitGaugeField( cudaGaugeField& data, RNG &rngstate) { if ( data.isNative() ) { if ( data.Reconstruct() == QUDA_RECONSTRUCT_NO ) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type Gauge; InitGaugeField<Float, 3>(Gauge(data), data, rngstate); } else if ( data.Reconstruct() == QUDA_RECONSTRUCT_12 ) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type Gauge; InitGaugeField<Float, 3>(Gauge(data), data, rngstate); } else if ( data.Reconstruct() == QUDA_RECONSTRUCT_8 ) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type Gauge; InitGaugeField<Float, 3>(Gauge(data), data, rngstate); } else { errorQuda("Reconstruction type %d of gauge field not supported", data.Reconstruct()); } } else { errorQuda("Invalid Gauge Order\n"); } } #endif // GPU_GAUGE_ALG /** @brief Perform a hot start to the gauge field, random SU(3) matrix, followed by reunitarization, also exchange borders links in multi-GPU case. * * @param[in,out] data Gauge field * @param[in,out] rngstate state of the CURAND random number generator */ void InitGaugeField( cudaGaugeField& data, RNG &rngstate) { #ifdef GPU_GAUGE_ALG if ( data.Precision() == QUDA_SINGLE_PRECISION ) { InitGaugeField<float> (data, rngstate); } else if ( data.Precision() == QUDA_DOUBLE_PRECISION ) { InitGaugeField<double>(data, rngstate); } else { errorQuda("Precision %d not supported", data.Precision()); } #else errorQuda("Pure gauge code has not been built"); #endif } }
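// Editor's note: IndexBlock<NCOLORS>() in the file above enumerates the SU(2) subgroup
// blocks of an SU(Nc) matrix by walking the strictly upper-triangular index pairs. The
// host-only sketch below re-derives that mapping for NCOLORS = 3 so the enumeration can
// be checked in isolation; it is an illustrative re-implementation, not QUDA code, and
// the names index_block/pair2 are local to this sketch.
#include <cstdio>

struct pair2 { int x, y; };

// Same loop structure as the device-side IndexBlock<NCOLORS>() above.
static pair2 index_block(int block, int ncolors) {
  pair2 id{0, 0};
  int i1 = 0, found = 0, del_i = 0, index = -1;
  while (del_i < (ncolors - 1) && !found) {
    del_i++;
    for (i1 = 0; i1 < (ncolors - del_i); i1++) {
      index++;
      if (index == block) { found = 1; break; }
    }
  }
  id.x = i1;
  id.y = i1 + del_i;
  return id;
}

int main() {
  const int nc = 3;  // SU(3): NCOLORS * (NCOLORS - 1) / 2 = 3 subgroup blocks
  for (int b = 0; b < nc * (nc - 1) / 2; b++) {
    pair2 id = index_block(b, nc);
    // Expected output: block 0 -> (0,1), block 1 -> (1,2), block 2 -> (0,2)
    printf("block %d -> (%d,%d)\n", b, id.x, id.y);
  }
  return 0;
}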
fa7a206ae9131daa471b6eb6fffdd9815c226b0b.cu
#include <quda_internal.h> #include <quda_matrix.h> #include <tune_quda.h> #include <gauge_field.h> #include <gauge_field_order.h> #include <launch_kernel.cuh> #include <comm_quda.h> #include <unitarization_links.h> #include <pgauge_monte.h> #include <random_quda.h> #include <cub/cub.cuh> #include <index_helper.cuh> #ifndef PI #define PI 3.1415926535897932384626433832795 // pi #endif #ifndef PII #define PII 6.2831853071795864769252867665590 // 2 * pi #endif namespace quda { #ifdef GPU_GAUGE_ALG template <typename Gauge> struct InitGaugeColdArg { int threads; // number of active threads required int X[4]; // grid dimensions Gauge dataOr; InitGaugeColdArg(const Gauge &dataOr, const cudaGaugeField &data) : dataOr(dataOr) { for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir]; threads = X[0] * X[1] * X[2] * X[3]; } }; template<typename Float, typename Gauge, int NCOLORS> __global__ void compute_InitGauge_ColdStart(InitGaugeColdArg<Gauge> arg){ int idx = threadIdx.x + blockIdx.x * blockDim.x; if ( idx >= arg.threads ) return; int parity = 0; if ( idx >= arg.threads / 2 ) { parity = 1; idx -= arg.threads / 2; } Matrix<complex<Float>,NCOLORS> U; setIdentity(&U); for ( int d = 0; d < 4; d++ ) arg.dataOr.save((Float*)(U.data),idx, d, parity); } template<typename Float, typename Gauge, int NCOLORS> class InitGaugeCold : Tunable { InitGaugeColdArg<Gauge> arg; mutable char aux_string[128]; // used as a label in the autotuner private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } //bool tuneSharedBytes() const { return false; } // Don't tune shared memory bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. unsigned int minThreads() const { return arg.threads; } public: InitGaugeCold(InitGaugeColdArg<Gauge> &arg) : arg(arg) { } ~InitGaugeCold () { } void apply(const cudaStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); compute_InitGauge_ColdStart<Float, Gauge, NCOLORS><< < tp.grid,tp.block >> > (arg); //cudaDeviceSynchronize(); } TuneKey tuneKey() const { std::stringstream vol; vol << arg.X[0] << "x"; vol << arg.X[1] << "x"; vol << arg.X[2] << "x"; vol << arg.X[3]; sprintf(aux_string,"threads=%d,prec=%lu", arg.threads, sizeof(Float)); return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string); } long long flops() const { return 0; } // Only correct if there is no link reconstruction, no cub reduction accounted also long long bytes() const { return 0; } //no accounting the reduction!!!! 
}; template<typename Float, int NCOLORS, typename Gauge> void InitGaugeField( Gauge dataOr, cudaGaugeField& data) { InitGaugeColdArg<Gauge> initarg(dataOr, data); InitGaugeCold<Float, Gauge, NCOLORS> init(initarg); init.apply(0); checkCudaError(); } template<typename Float> void InitGaugeField( cudaGaugeField& data) { if ( data.isNative() ) { if ( data.Reconstruct() == QUDA_RECONSTRUCT_NO ) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type Gauge; InitGaugeField<Float, 3>(Gauge(data), data); } else if ( data.Reconstruct() == QUDA_RECONSTRUCT_12 ) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type Gauge; InitGaugeField<Float, 3>(Gauge(data), data); } else if ( data.Reconstruct() == QUDA_RECONSTRUCT_8 ) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type Gauge; InitGaugeField<Float, 3>(Gauge(data), data); } else { errorQuda("Reconstruction type %d of gauge field not supported", data.Reconstruct()); } } else { errorQuda("Invalid Gauge Order\n"); } } /** @brief Perform a cold start to the gauge field, identity SU(3) matrix, also fills the ghost links in multi-GPU case (no need to exchange data) * * @param[in,out] data Gauge field */ void InitGaugeField( cudaGaugeField& data) { if ( data.Precision() == QUDA_SINGLE_PRECISION ) { InitGaugeField<float> (data); } else if ( data.Precision() == QUDA_DOUBLE_PRECISION ) { InitGaugeField<double>(data); } else { errorQuda("Precision %d not supported", data.Precision()); } } template <typename Gauge> struct InitGaugeHotArg { int threads; // number of active threads required int X[4]; // grid dimensions RNG rngstate; #ifdef MULTI_GPU int border[4]; #endif Gauge dataOr; InitGaugeHotArg(const Gauge &dataOr, const cudaGaugeField &data, RNG &rngstate) : dataOr(dataOr), rngstate(rngstate) { #ifdef MULTI_GPU for ( int dir = 0; dir < 4; ++dir ) { border[dir] = data.R()[dir]; X[dir] = data.X()[dir] - border[dir] * 2; } #else for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir]; #endif //the optimal number of RNG states in rngstate array must be equal to half the lattice volume //this number is the same used in heatbath... 
threads = X[0] * X[1] * X[2] * X[3] >> 1; } }; template <typename Float> __host__ __device__ static inline void reunit_link( Matrix<complex<Float>,3> &U ){ complex<Float> t2((Float)0.0, (Float)0.0); Float t1 = 0.0; //first normalize first row //sum of squares of row #pragma unroll for ( int c = 0; c < 3; c++ ) t1 += norm(U(0,c)); t1 = (Float)1.0 / sqrt(t1); //14 //used to normalize row #pragma unroll for ( int c = 0; c < 3; c++ ) U(0,c) *= t1; //6 #pragma unroll for ( int c = 0; c < 3; c++ ) t2 += conj(U(0,c)) * U(1,c); //24 #pragma unroll for ( int c = 0; c < 3; c++ ) U(1,c) -= t2 * U(0,c); //24 //normalize second row //sum of squares of row t1 = 0.0; #pragma unroll for ( int c = 0; c < 3; c++ ) t1 += norm(U(1,c)); t1 = (Float)1.0 / sqrt(t1); //14 //used to normalize row #pragma unroll for ( int c = 0; c < 3; c++ ) U(1, c) *= t1; //6 //Reconstruct lat row U(2,0) = conj(U(0,1) * U(1,2) - U(0,2) * U(1,1)); U(2,1) = conj(U(0,2) * U(1,0) - U(0,0) * U(1,2)); U(2,2) = conj(U(0,0) * U(1,1) - U(0,1) * U(1,0)); //42 //T=130 } /** @brief Generate the four random real elements of the SU(2) matrix @param localstate CURAND rng state @return four real numbers of the SU(2) matrix */ template <class T> __device__ static inline Matrix<T,2> randomSU2(cuRNGState& localState){ Matrix<T,2> a; T aabs, ctheta, stheta, phi; a(0,0) = Random<T>(localState, (T)-1.0, (T)1.0); aabs = sqrt( 1.0 - a(0,0) * a(0,0)); ctheta = Random<T>(localState, (T)-1.0, (T)1.0); phi = PII * Random<T>(localState); stheta = ( curand(&localState) & 1 ? 1 : -1 ) * sqrt( (T)1.0 - ctheta * ctheta ); a(0,1) = aabs * stheta * cos( phi ); a(1,0) = aabs * stheta * sin( phi ); a(1,1) = aabs * ctheta; return a; } /** @brief Update the SU(Nc) link with the new SU(2) matrix, link <- u * link @param u SU(2) matrix represented by four real numbers @param link SU(Nc) matrix @param id indices */ template <class T, int NCOLORS> __host__ __device__ static inline void mul_block_sun( Matrix<T,2> u, Matrix<complex<T>,NCOLORS> &link, int2 id ){ for ( int j = 0; j < NCOLORS; j++ ) { complex<T> tmp = complex<T>( u(0,0), u(1,1) ) * link(id.x, j) + complex<T>( u(1,0), u(0,1) ) * link(id.y, j); link(id.y, j) = complex<T>(-u(1,0), u(0,1) ) * link(id.x, j) + complex<T>( u(0,0),-u(1,1) ) * link(id.y, j); link(id.x, j) = tmp; } } /** @brief Calculate the SU(2) index block in the SU(Nc) matrix @param block number to calculate the index's, the total number of blocks is NCOLORS * ( NCOLORS - 1) / 2. @return Returns two index's in int2 type, accessed by .x and .y. 
*/ template<int NCOLORS> __host__ __device__ static inline int2 IndexBlock(int block){ int2 id; int i1; int found = 0; int del_i = 0; int index = -1; while ( del_i < (NCOLORS - 1) && found == 0 ) { del_i++; for ( i1 = 0; i1 < (NCOLORS - del_i); i1++ ) { index++; if ( index == block ) { found = 1; break; } } } id.y = i1 + del_i; id.x = i1; return id; } /** @brief Generate a SU(Nc) random matrix @param localstate CURAND rng state @return SU(Nc) matrix */ template <class Float, int NCOLORS> __device__ inline Matrix<complex<Float>,NCOLORS> randomize( cuRNGState& localState ){ Matrix<complex<Float>,NCOLORS> U; for ( int i = 0; i < NCOLORS; i++ ) for ( int j = 0; j < NCOLORS; j++ ) U(i,j) = complex<Float>( (Float)(Random<Float>(localState) - 0.5), (Float)(Random<Float>(localState) - 0.5) ); reunit_link<Float>(U); return U; /*setIdentity(&U); for( int block = 0; block < NCOLORS * ( NCOLORS - 1) / 2; block++ ) { Matrix<Float,2> rr = randomSU2<Float>(localState); int2 id = IndexBlock<NCOLORS>( block ); mul_block_sun<Float, NCOLORS>(rr, U, id); //U = block_su2_to_su3<Float>( U, a00, a01, a10, a11, block ); } return U;*/ } template<typename Float, typename Gauge, int NCOLORS> __global__ void compute_InitGauge_HotStart(InitGaugeHotArg<Gauge> arg){ int idx = threadIdx.x + blockIdx.x * blockDim.x; if ( idx >= arg.threads ) return; #ifdef MULTI_GPU int X[4], x[4]; for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr]; for ( int dr = 0; dr < 4; ++dr ) X[dr] += 2 * arg.border[dr]; int id = idx; cuRNGState localState = arg.rngstate.State()[ id ]; #else cuRNGState localState = arg.rngstate.State()[ idx ]; #endif for ( int parity = 0; parity < 2; parity++ ) { #ifdef MULTI_GPU getCoords(x, id, arg.X, parity); for ( int dr = 0; dr < 4; ++dr ) x[dr] += arg.border[dr]; idx = linkIndex(x,X); #endif for ( int d = 0; d < 4; d++ ) { Matrix<complex<Float>,NCOLORS> U; U = randomize<Float, NCOLORS>(localState); arg.dataOr.save((Float*)(U.data),idx, d, parity); } } #ifdef MULTI_GPU arg.rngstate.State()[ id ] = localState; #else arg.rngstate.State()[ idx ] = localState; #endif } template<typename Float, typename Gauge, int NCOLORS> class InitGaugeHot : Tunable { InitGaugeHotArg<Gauge> arg; mutable char aux_string[128]; // used as a label in the autotuner private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } bool tuneSharedBytes() const { return false; } // Don't tune shared memory bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. unsigned int minThreads() const { return arg.threads; } public: InitGaugeHot(InitGaugeHotArg<Gauge> &arg) : arg(arg) { } ~InitGaugeHot () { } void apply(const cudaStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); compute_InitGauge_HotStart<Float, Gauge, NCOLORS><< < tp.grid,tp.block >> > (arg); //cudaDeviceSynchronize(); } TuneKey tuneKey() const { std::stringstream vol; vol << arg.X[0] << "x"; vol << arg.X[1] << "x"; vol << arg.X[2] << "x"; vol << arg.X[3]; sprintf(aux_string,"threads=%d,prec=%lud", arg.threads, sizeof(Float)); return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string); } void preTune(){ arg.rngstate.backup(); } void postTune(){ arg.rngstate.restore(); } long long flops() const { return 0; } // Only correct if there is no link reconstruction, no cub reduction accounted also long long bytes() const { return 0; } //no accounting the reduction!!!! 
}; template<typename Float, int NCOLORS, typename Gauge> void InitGaugeField( Gauge dataOr, cudaGaugeField& data, RNG &rngstate) { InitGaugeHotArg<Gauge> initarg(dataOr, data, rngstate); InitGaugeHot<Float, Gauge, NCOLORS> init(initarg); init.apply(0); checkCudaError(); qudaDeviceSynchronize(); data.exchangeExtendedGhost(data.R(),false); } template<typename Float> void InitGaugeField( cudaGaugeField& data, RNG &rngstate) { if ( data.isNative() ) { if ( data.Reconstruct() == QUDA_RECONSTRUCT_NO ) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type Gauge; InitGaugeField<Float, 3>(Gauge(data), data, rngstate); } else if ( data.Reconstruct() == QUDA_RECONSTRUCT_12 ) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type Gauge; InitGaugeField<Float, 3>(Gauge(data), data, rngstate); } else if ( data.Reconstruct() == QUDA_RECONSTRUCT_8 ) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type Gauge; InitGaugeField<Float, 3>(Gauge(data), data, rngstate); } else { errorQuda("Reconstruction type %d of gauge field not supported", data.Reconstruct()); } } else { errorQuda("Invalid Gauge Order\n"); } } #endif // GPU_GAUGE_ALG /** @brief Perform a hot start to the gauge field, random SU(3) matrix, followed by reunitarization, also exchange borders links in multi-GPU case. * * @param[in,out] data Gauge field * @param[in,out] rngstate state of the CURAND random number generator */ void InitGaugeField( cudaGaugeField& data, RNG &rngstate) { #ifdef GPU_GAUGE_ALG if ( data.Precision() == QUDA_SINGLE_PRECISION ) { InitGaugeField<float> (data, rngstate); } else if ( data.Precision() == QUDA_DOUBLE_PRECISION ) { InitGaugeField<double>(data, rngstate); } else { errorQuda("Precision %d not supported", data.Precision()); } #else errorQuda("Pure gauge code has not been built"); #endif } }
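// Editor's note: the .hip file and the .cu file above differ only in the platform API
// spellings that hipify rewrites (hip/hip_runtime.h added, hipcub/hipcub.hpp vs.
// cub/cub.cuh, hipStream_t vs. cudaStream_t, hiprand() vs. curand(),
// hipDeviceSynchronize vs. cudaDeviceSynchronize). The header sketch below is a
// hypothetical portability shim showing how such pairs are commonly hidden behind a
// single set of names; it is not part of QUDA and the gpu* aliases are invented here
// purely for illustration.
#ifndef GPU_PORTABILITY_H
#define GPU_PORTABILITY_H

#if defined(__HIP_PLATFORM_AMD__) || defined(__HIP_PLATFORM_NVIDIA__)
  // HIP build: names match what hipify emitted in the .hip file above.
  #include <hip/hip_runtime.h>
  using gpuStream_t = hipStream_t;
  using gpuError_t  = hipError_t;
  #define gpuSuccess            hipSuccess
  #define gpuDeviceSynchronize  hipDeviceSynchronize
  #define gpuStreamCreate       hipStreamCreate
  #define gpuStreamDestroy      hipStreamDestroy
#else
  // CUDA build: names match the original .cu file above.
  #include <cuda_runtime.h>
  using gpuStream_t = cudaStream_t;
  using gpuError_t  = cudaError_t;
  #define gpuSuccess            cudaSuccess
  #define gpuDeviceSynchronize  cudaDeviceSynchronize
  #define gpuStreamCreate       cudaStreamCreate
  #define gpuStreamDestroy      cudaStreamDestroy
#endif

#endif // GPU_PORTABILITY_H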
1a9b54235000e9f02a5d5c085a2e36abd44d900d.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @file writer_impl.cu * @brief cuDF-IO ORC writer class implementation */ #include "writer_impl.hpp" #include <io/utilities/column_utils.cuh> #include <cudf/null_mask.hpp> #include <cudf/strings/strings_column_view.hpp> #include <cudf/utilities/span.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_buffer.hpp> #include <rmm/device_uvector.hpp> #include <algorithm> #include <cstring> #include <numeric> #include <utility> namespace cudf { namespace io { namespace detail { namespace orc { using namespace cudf::io::orc; using namespace cudf::io; struct row_group_index_info { int32_t pos = -1; // Position int32_t blk_pos = -1; // Block Position int32_t comp_pos = -1; // Compressed Position int32_t comp_size = -1; // Compressed size }; namespace { /** * @brief Helper for pinned host memory */ template <typename T> using pinned_buffer = std::unique_ptr<T, decltype(&hipHostFree)>; /** * @brief Function that translates GDF compression to ORC compression */ orc::CompressionKind to_orc_compression(compression_type compression) { switch (compression) { case compression_type::AUTO: case compression_type::SNAPPY: return orc::CompressionKind::SNAPPY; case compression_type::NONE: return orc::CompressionKind::NONE; default: CUDF_EXPECTS(false, "Unsupported compression type"); return orc::CompressionKind::NONE; } } /** * @brief Function that translates GDF dtype to ORC datatype */ constexpr orc::TypeKind to_orc_type(cudf::type_id id) { switch (id) { case cudf::type_id::INT8: return TypeKind::BYTE; case cudf::type_id::INT16: return TypeKind::SHORT; case cudf::type_id::INT32: return TypeKind::INT; case cudf::type_id::INT64: return TypeKind::LONG; case cudf::type_id::FLOAT32: return TypeKind::FLOAT; case cudf::type_id::FLOAT64: return TypeKind::DOUBLE; case cudf::type_id::BOOL8: return TypeKind::BOOLEAN; case cudf::type_id::TIMESTAMP_DAYS: return TypeKind::DATE; case cudf::type_id::TIMESTAMP_SECONDS: case cudf::type_id::TIMESTAMP_MICROSECONDS: case cudf::type_id::TIMESTAMP_MILLISECONDS: case cudf::type_id::TIMESTAMP_NANOSECONDS: return TypeKind::TIMESTAMP; case cudf::type_id::STRING: return TypeKind::STRING; default: return TypeKind::INVALID_TYPE_KIND; } } /** * @brief Function that translates time unit to nanoscale multiple */ template <typename T> constexpr T to_clockscale(cudf::type_id timestamp_id) { switch (timestamp_id) { case cudf::type_id::TIMESTAMP_SECONDS: return 9; case cudf::type_id::TIMESTAMP_MILLISECONDS: return 6; case cudf::type_id::TIMESTAMP_MICROSECONDS: return 3; case cudf::type_id::TIMESTAMP_NANOSECONDS: default: return 0; } } } // namespace /** * @brief Helper class that adds ORC-specific column info */ class orc_column_view { public: /** * @brief Constructor that extracts out the string position + length pairs * for building dictionaries for string columns */ explicit orc_column_view(size_t index, size_t str_id, column_view const 
&col, const table_metadata *metadata, rmm::cuda_stream_view stream) : _index(index), _str_id(str_id), _is_string_type(col.type().id() == type_id::STRING), _type_width(_is_string_type ? 0 : cudf::size_of(col.type())), _data_count(col.size()), _null_count(col.null_count()), _nulls(col.null_mask()), _clockscale(to_clockscale<uint8_t>(col.type().id())), _type_kind(to_orc_type(col.type().id())) { // Generating default name if name isn't present in metadata if (metadata && _index < metadata->column_names.size()) { _name = metadata->column_names[_index]; } else { _name = "_col" + std::to_string(_index); } } auto is_string() const noexcept { return _is_string_type; } void set_dict_stride(size_t stride) noexcept { dict_stride = stride; } auto get_dict_stride() const noexcept { return dict_stride; } /** * @brief Function that associates an existing dictionary chunk allocation */ void attach_dict_chunk(gpu::DictionaryChunk *host_dict, gpu::DictionaryChunk *dev_dict) { dict = host_dict; d_dict = dev_dict; } auto host_dict_chunk(size_t rowgroup) const { assert(_is_string_type); return &dict[rowgroup * dict_stride + _str_id]; } auto device_dict_chunk() const { return d_dict; } /** * @brief Function that associates an existing stripe dictionary allocation */ void attach_stripe_dict(gpu::StripeDictionary *host_stripe_dict, gpu::StripeDictionary *dev_stripe_dict) { stripe_dict = host_stripe_dict; d_stripe_dict = dev_stripe_dict; } auto host_stripe_dict(size_t stripe) const { assert(_is_string_type); return &stripe_dict[stripe * dict_stride + _str_id]; } auto device_stripe_dict() const { return d_stripe_dict; } // Index in the table auto index() const noexcept { return _index; } // Id in the ORC file auto id() const noexcept { return _index + 1; } size_t type_width() const noexcept { return _type_width; } size_t data_count() const noexcept { return _data_count; } size_t null_count() const noexcept { return _null_count; } bool nullable() const noexcept { return (_nulls != nullptr); } uint32_t const *nulls() const noexcept { return _nulls; } uint8_t clockscale() const noexcept { return _clockscale; } void set_orc_encoding(ColumnEncodingKind e) { _encoding_kind = e; } auto orc_kind() const noexcept { return _type_kind; } auto orc_encoding() const noexcept { return _encoding_kind; } auto orc_name() const noexcept { return _name; } private: // Identifier within set of columns and string columns, respectively uint32_t _index = 0; uint32_t _str_id = 0; bool _is_string_type = false; size_t _type_width = 0; size_t _data_count = 0; size_t _null_count = 0; uint32_t const *_nulls = nullptr; uint8_t _clockscale = 0; // ORC-related members std::string _name{}; TypeKind _type_kind; ColumnEncodingKind _encoding_kind; // String dictionary-related members size_t dict_stride = 0; gpu::DictionaryChunk const *dict = nullptr; gpu::StripeDictionary const *stripe_dict = nullptr; gpu::DictionaryChunk *d_dict = nullptr; gpu::StripeDictionary *d_stripe_dict = nullptr; }; std::vector<stripe_rowgroups> writer::impl::gather_stripe_info( host_span<orc_column_view const> columns, size_t num_rowgroups) { auto const is_any_column_string = std::any_of(columns.begin(), columns.end(), [](auto const &col) { return col.is_string(); }); // Apply rows per stripe limit to limit string dictionaries size_t const max_stripe_rows = is_any_column_string ? 
1000000 : 5000000; std::vector<stripe_rowgroups> infos; for (size_t rowgroup = 0, stripe_start = 0, stripe_size = 0; rowgroup < num_rowgroups; ++rowgroup) { auto const rowgroup_size = std::accumulate(columns.begin(), columns.end(), 0ul, [&](size_t total_size, auto const &col) { if (col.is_string()) { const auto dt = col.host_dict_chunk(rowgroup); return total_size + row_index_stride_ + dt->string_char_count; } else { return total_size + col.type_width() * row_index_stride_; } }); if ((rowgroup > stripe_start) && (stripe_size + rowgroup_size > max_stripe_size_ || (rowgroup + 1 - stripe_start) * row_index_stride_ > max_stripe_rows)) { infos.emplace_back(infos.size(), stripe_start, rowgroup - stripe_start); stripe_start = rowgroup; stripe_size = 0; } stripe_size += rowgroup_size; if (rowgroup + 1 == num_rowgroups) { infos.emplace_back(infos.size(), stripe_start, num_rowgroups - stripe_start); } } return infos; } void writer::impl::init_dictionaries(const table_device_view &view, orc_column_view *columns, std::vector<int> const &str_col_ids, device_span<size_type> d_str_col_ids, uint32_t *dict_data, uint32_t *dict_index, hostdevice_vector<gpu::DictionaryChunk> *dict) { const size_t num_rowgroups = dict->size() / str_col_ids.size(); // Setup per-rowgroup dictionary indexes for each dictionary-aware column for (size_t i = 0; i < str_col_ids.size(); ++i) { auto &str_column = columns[str_col_ids[i]]; str_column.set_dict_stride(str_col_ids.size()); str_column.attach_dict_chunk(dict->host_ptr(), dict->device_ptr()); } gpu::InitDictionaryIndices(view, dict->device_ptr(), dict_data, dict_index, row_index_stride_, d_str_col_ids.data(), d_str_col_ids.size(), num_rowgroups, stream); dict->device_to_host(stream, true); } void writer::impl::build_dictionaries(orc_column_view *columns, std::vector<int> const &str_col_ids, host_span<stripe_rowgroups const> stripe_bounds, hostdevice_vector<gpu::DictionaryChunk> const &dict, uint32_t *dict_index, hostdevice_vector<gpu::StripeDictionary> &stripe_dict) { const auto num_rowgroups = dict.size() / str_col_ids.size(); for (size_t col_idx = 0; col_idx < str_col_ids.size(); ++col_idx) { auto &str_column = columns[str_col_ids[col_idx]]; str_column.attach_stripe_dict(stripe_dict.host_ptr(), stripe_dict.device_ptr()); for (auto const &stripe : stripe_bounds) { auto &sd = stripe_dict[stripe.id * str_col_ids.size() + col_idx]; sd.dict_data = str_column.host_dict_chunk(stripe.first)->dict_data; sd.dict_index = dict_index + col_idx * str_column.data_count(); // Indexed by abs row sd.column_id = str_col_ids[col_idx]; sd.start_chunk = stripe.first; sd.num_chunks = stripe.size; sd.dict_char_count = 0; sd.num_strings = std::accumulate(stripe.cbegin(), stripe.cend(), 0, [&](auto dt_str_cnt, auto rg_idx) { const auto &dt = dict[rg_idx * str_col_ids.size() + col_idx]; return dt_str_cnt + dt.num_dict_strings; }); sd.leaf_column = dict[col_idx].leaf_column; } if (enable_dictionary_) { struct string_column_cost { size_t direct = 0; size_t dictionary = 0; }; auto const col_cost = std::accumulate(stripe_bounds.front().cbegin(), stripe_bounds.back().cend(), string_column_cost{}, [&](auto cost, auto rg_idx) -> string_column_cost { const auto &dt = dict[rg_idx * str_col_ids.size() + col_idx]; return {cost.direct + dt.string_char_count, cost.dictionary + dt.dict_char_count + dt.num_dict_strings}; }); // Disable dictionary if it does not reduce the output size if (col_cost.dictionary >= col_cost.direct) { for (auto const &stripe : stripe_bounds) { stripe_dict[stripe.id * str_col_ids.size() + 
col_idx].dict_data = nullptr; } } } } stripe_dict.host_to_device(stream); gpu::BuildStripeDictionaries(stripe_dict.device_ptr(), stripe_dict.host_ptr(), dict.device_ptr(), stripe_bounds.size(), num_rowgroups, str_col_ids.size(), stream); stripe_dict.device_to_host(stream, true); } orc_streams writer::impl::create_streams(host_span<orc_column_view> columns, host_span<stripe_rowgroups const> stripe_bounds) { // 'column 0' row index stream std::vector<Stream> streams{{ROW_INDEX, 0}}; // TODO: Separate index and data streams? // First n + 1 streams are row index streams streams.reserve(columns.size() + 1); std::transform(columns.begin(), columns.end(), std::back_inserter(streams), [](auto const &col) { return Stream{ROW_INDEX, col.id()}; }); std::vector<int32_t> ids(columns.size() * gpu::CI_NUM_STREAMS, -1); for (auto &column : columns) { TypeKind kind = column.orc_kind(); StreamKind data_kind = DATA; StreamKind data2_kind = LENGTH; ColumnEncodingKind encoding_kind = DIRECT; int64_t present_stream_size = 0; int64_t data_stream_size = 0; int64_t data2_stream_size = 0; int64_t dict_stream_size = 0; auto const is_nullable = [&]() { if (single_write_mode) { return column.nullable(); } else { return (column.index() < user_metadata_with_nullability.column_nullable.size()) ? user_metadata_with_nullability.column_nullable[column.index()] : true; } }(); if (is_nullable) { present_stream_size = ((row_index_stride_ + 7) >> 3); present_stream_size += (present_stream_size + 0x7f) >> 7; } switch (kind) { case TypeKind::BOOLEAN: data_stream_size = div_rowgroups_by<int64_t>(1024) * (128 + 1); encoding_kind = DIRECT; break; case TypeKind::BYTE: data_stream_size = div_rowgroups_by<int64_t>(128) * (128 + 1); encoding_kind = DIRECT; break; case TypeKind::SHORT: data_stream_size = div_rowgroups_by<int64_t>(512) * (512 * 2 + 2); encoding_kind = DIRECT_V2; break; case TypeKind::FLOAT: // Pass through if no nulls (no RLE encoding for floating point) data_stream_size = (column.null_count() != 0) ? div_rowgroups_by<int64_t>(512) * (512 * 4 + 2) : INT64_C(-1); encoding_kind = DIRECT; break; case TypeKind::INT: case TypeKind::DATE: data_stream_size = div_rowgroups_by<int64_t>(512) * (512 * 4 + 2); encoding_kind = DIRECT_V2; break; case TypeKind::DOUBLE: // Pass through if no nulls (no RLE encoding for floating point) data_stream_size = (column.null_count() != 0) ? 
div_rowgroups_by<int64_t>(512) * (512 * 8 + 2) : INT64_C(-1); encoding_kind = DIRECT; break; case TypeKind::LONG: data_stream_size = div_rowgroups_by<int64_t>(512) * (512 * 8 + 2); encoding_kind = DIRECT_V2; break; case TypeKind::STRING: { bool enable_dict = enable_dictionary_; size_t dict_data_size = 0; size_t dict_strings = 0; size_t dict_lengths_div512 = 0; for (auto const &stripe : stripe_bounds) { const auto sd = column.host_stripe_dict(stripe.id); enable_dict = (enable_dict && sd->dict_data != nullptr); if (enable_dict) { dict_strings += sd->num_strings; dict_lengths_div512 += (sd->num_strings + 0x1ff) >> 9; dict_data_size += sd->dict_char_count; } } auto const direct_data_size = std::accumulate(stripe_bounds.front().cbegin(), stripe_bounds.back().cend(), size_t{0}, [&](auto data_size, auto rg_idx) { return data_size + column.host_dict_chunk(rg_idx)->string_char_count; }); if (enable_dict) { uint32_t dict_bits = 0; for (dict_bits = 1; dict_bits < 32; dict_bits <<= 1) { if (dict_strings <= (1ull << dict_bits)) break; } const auto valid_count = column.data_count() - column.null_count(); dict_data_size += (dict_bits * valid_count + 7) >> 3; } // Decide between direct or dictionary encoding if (enable_dict && dict_data_size < direct_data_size) { data_stream_size = div_rowgroups_by<int64_t>(512) * (512 * 4 + 2); data2_stream_size = dict_lengths_div512 * (512 * 4 + 2); dict_stream_size = std::max<size_t>(dict_data_size, 1); encoding_kind = DICTIONARY_V2; } else { data_stream_size = std::max<size_t>(direct_data_size, 1); data2_stream_size = div_rowgroups_by<int64_t>(512) * (512 * 4 + 2); encoding_kind = DIRECT_V2; } break; } case TypeKind::TIMESTAMP: data_stream_size = ((row_index_stride_ + 0x1ff) >> 9) * (512 * 4 + 2); data2_stream_size = data_stream_size; data2_kind = SECONDARY; encoding_kind = DIRECT_V2; break; default: CUDF_FAIL("Unsupported ORC type kind"); } // Initialize the column's metadata (this is the only reason columns is in/out param) column.set_orc_encoding(encoding_kind); // Initialize the column's data stream(s) const auto base = column.index() * gpu::CI_NUM_STREAMS; if (present_stream_size != 0) { auto len = static_cast<uint64_t>(present_stream_size); ids[base + gpu::CI_PRESENT] = streams.size(); streams.push_back(orc::Stream{PRESENT, column.id(), len}); } if (data_stream_size != 0) { auto len = static_cast<uint64_t>(std::max<int64_t>(data_stream_size, 0)); ids[base + gpu::CI_DATA] = streams.size(); streams.push_back(orc::Stream{data_kind, column.id(), len}); } if (data2_stream_size != 0) { auto len = static_cast<uint64_t>(std::max<int64_t>(data2_stream_size, 0)); ids[base + gpu::CI_DATA2] = streams.size(); streams.push_back(orc::Stream{data2_kind, column.id(), len}); } if (dict_stream_size != 0) { auto len = static_cast<uint64_t>(dict_stream_size); ids[base + gpu::CI_DICTIONARY] = streams.size(); streams.push_back(orc::Stream{DICTIONARY_DATA, column.id(), len}); } } return {std::move(streams), std::move(ids)}; } orc_streams::orc_stream_offsets orc_streams::compute_offsets( host_span<orc_column_view const> columns, size_t num_rowgroups) const { std::vector<size_t> strm_offsets(streams.size()); size_t str_data_size = 0; size_t rle_data_size = 0; for (size_t i = 0; i < streams.size(); ++i) { const auto &stream = streams[i]; auto const is_str_data = [&]() { // First stream is an index stream if (!stream.column_index().has_value()) return false; auto const &column = columns[stream.column_index().value()]; if (column.orc_kind() != TypeKind::STRING) return false; // Dictionary 
encoded string column dictionary characters or // directly encoded string column characters return ((stream.kind == DICTIONARY_DATA && column.orc_encoding() == DICTIONARY_V2) || (stream.kind == DATA && column.orc_encoding() == DIRECT_V2)); }(); if (is_str_data) { strm_offsets[i] = str_data_size; str_data_size += stream.length; } else { strm_offsets[i] = rle_data_size; rle_data_size += (stream.length * num_rowgroups + 7) & ~7; } } str_data_size = (str_data_size + 7) & ~7; return {std::move(strm_offsets), str_data_size, rle_data_size}; } struct segmented_valid_cnt_input { bitmask_type const *mask; std::vector<size_type> indices; }; encoded_data writer::impl::encode_columns(const table_device_view &view, host_span<orc_column_view const> columns, std::vector<int> const &str_col_ids, rmm::device_uvector<uint32_t> &&dict_data, rmm::device_uvector<uint32_t> &&dict_index, host_span<stripe_rowgroups const> stripe_bounds, orc_streams const &streams) { auto const num_columns = columns.size(); auto const num_rowgroups = stripes_size(stripe_bounds); hostdevice_2dvector<gpu::EncChunk> chunks(num_columns, num_rowgroups, stream); hostdevice_2dvector<gpu::encoder_chunk_streams> chunk_streams(num_columns, num_rowgroups, stream); auto const stream_offsets = streams.compute_offsets(columns, num_rowgroups); rmm::device_uvector<uint8_t> encoded_data(stream_offsets.data_size(), stream); // Initialize column chunks' descriptions std::map<size_type, segmented_valid_cnt_input> validity_check_inputs; for (auto const &column : columns) { for (auto const &stripe : stripe_bounds) { for (auto rg_idx_it = stripe.cbegin(); rg_idx_it < stripe.cend(); ++rg_idx_it) { auto const rg_idx = *rg_idx_it; auto &ck = chunks[column.index()][rg_idx]; ck.start_row = (rg_idx * row_index_stride_); ck.num_rows = std::min<uint32_t>(row_index_stride_, column.data_count() - ck.start_row); ck.encoding_kind = column.orc_encoding(); ck.type_kind = column.orc_kind(); if (ck.type_kind == TypeKind::STRING) { ck.dict_index = (ck.encoding_kind == DICTIONARY_V2) ? column.host_stripe_dict(stripe.id)->dict_index : nullptr; ck.dtype_len = 1; } else { ck.dtype_len = column.type_width(); } ck.scale = column.clockscale(); // Only need to check row groups that end within the stripe } } } auto validity_check_indices = [&](size_t col_idx) { std::vector<size_type> indices; for (auto const &stripe : stripe_bounds) { for (auto rg_idx_it = stripe.cbegin(); rg_idx_it < stripe.cend() - 1; ++rg_idx_it) { auto const &chunk = chunks[col_idx][*rg_idx_it]; indices.push_back(chunk.start_row); indices.push_back(chunk.start_row + chunk.num_rows); } } return indices; }; for (auto const &column : columns) { if (column.orc_kind() == TypeKind::BOOLEAN && column.nullable()) { validity_check_inputs[column.index()] = {column.nulls(), validity_check_indices(column.index())}; } } for (auto &cnt_in : validity_check_inputs) { auto const valid_counts = segmented_count_set_bits(cnt_in.second.mask, cnt_in.second.indices); CUDF_EXPECTS( std::none_of(valid_counts.cbegin(), valid_counts.cend(), [](auto valid_count) { return valid_count % 8; }), "There's currently a bug in encoding boolean columns. Suggested workaround is to convert " "to int8 type." 
" Please see https://github.com/rapidsai/cudf/issues/6763 for more information."); } for (size_t col_idx = 0; col_idx < num_columns; col_idx++) { auto const &column = columns[col_idx]; auto col_streams = chunk_streams[col_idx]; for (auto const &stripe : stripe_bounds) { for (auto rg_idx_it = stripe.cbegin(); rg_idx_it < stripe.cend(); ++rg_idx_it) { auto const rg_idx = *rg_idx_it; auto const &ck = chunks[col_idx][rg_idx]; auto &strm = col_streams[rg_idx]; for (int strm_type = 0; strm_type < gpu::CI_NUM_STREAMS; ++strm_type) { auto const strm_id = streams.id(col_idx * gpu::CI_NUM_STREAMS + strm_type); strm.ids[strm_type] = strm_id; if (strm_id >= 0) { if ((strm_type == gpu::CI_DICTIONARY) || (strm_type == gpu::CI_DATA2 && ck.encoding_kind == DICTIONARY_V2)) { if (rg_idx_it == stripe.cbegin()) { const int32_t dict_stride = column.get_dict_stride(); const auto stripe_dict = column.host_stripe_dict(stripe.id); strm.lengths[strm_type] = (strm_type == gpu::CI_DICTIONARY) ? stripe_dict->dict_char_count : (((stripe_dict->num_strings + 0x1ff) >> 9) * (512 * 4 + 2)); if (stripe.id == 0) { strm.data_ptrs[strm_type] = encoded_data.data() + stream_offsets.offsets[strm_id]; } else { auto const &strm_up = col_streams[stripe_dict[-dict_stride].start_chunk]; strm.data_ptrs[strm_type] = strm_up.data_ptrs[strm_type] + strm_up.lengths[strm_type]; } } else { strm.lengths[strm_type] = 0; strm.data_ptrs[strm_type] = col_streams[rg_idx - 1].data_ptrs[strm_type]; } } else if (strm_type == gpu::CI_DATA && ck.type_kind == TypeKind::STRING && ck.encoding_kind == DIRECT_V2) { strm.lengths[strm_type] = column.host_dict_chunk(rg_idx)->string_char_count; strm.data_ptrs[strm_type] = (rg_idx == 0) ? encoded_data.data() + stream_offsets.offsets[strm_id] : (col_streams[rg_idx - 1].data_ptrs[strm_type] + col_streams[rg_idx - 1].lengths[strm_type]); } else if (strm_type == gpu::CI_DATA && streams[strm_id].length == 0 && (ck.type_kind == DOUBLE || ck.type_kind == FLOAT)) { // Pass-through strm.lengths[strm_type] = ck.num_rows * ck.dtype_len; strm.data_ptrs[strm_type] = nullptr; } else { strm.lengths[strm_type] = streams[strm_id].length; strm.data_ptrs[strm_type] = encoded_data.data() + stream_offsets.str_data_size + stream_offsets.offsets[strm_id] + streams[strm_id].length * rg_idx; } } else { strm.lengths[strm_type] = 0; strm.data_ptrs[strm_type] = nullptr; } } } } } chunks.host_to_device(stream); chunk_streams.host_to_device(stream); gpu::set_chunk_columns(view, chunks, stream); if (!str_col_ids.empty()) { auto d_stripe_dict = columns[str_col_ids[0]].device_stripe_dict(); gpu::EncodeStripeDictionaries( d_stripe_dict, chunks, str_col_ids.size(), stripe_bounds.size(), chunk_streams, stream); } gpu::EncodeOrcColumnData(chunks, chunk_streams, stream); dict_data.release(); dict_index.release(); stream.synchronize(); return {std::move(encoded_data), std::move(chunk_streams)}; } std::vector<StripeInformation> writer::impl::gather_stripes( size_t num_rows, size_t num_index_streams, host_span<stripe_rowgroups const> stripe_bounds, hostdevice_2dvector<gpu::encoder_chunk_streams> *enc_streams, hostdevice_2dvector<gpu::StripeStream> *strm_desc) { std::vector<StripeInformation> stripes(stripe_bounds.size()); for (auto const &stripe : stripe_bounds) { for (size_t col_idx = 0; col_idx < enc_streams->size().first; col_idx++) { const auto &strm = (*enc_streams)[col_idx][stripe.first]; // Assign stream data of column data stream(s) for (int k = 0; k < gpu::CI_INDEX; k++) { const auto stream_id = strm.ids[k]; if (stream_id != -1) { auto *ss = 
&(*strm_desc)[stripe.id][stream_id - num_index_streams]; ss->stream_size = 0; ss->first_chunk_id = stripe.first; ss->num_chunks = stripe.size; ss->column_id = col_idx; ss->stream_type = k; } } } auto const stripe_group_end = *stripe.cend(); auto const stripe_end = ::min(stripe_group_end * row_index_stride_, num_rows); stripes[stripe.id].numberOfRows = stripe_end - stripe.first * row_index_stride_; } strm_desc->host_to_device(stream); gpu::CompactOrcDataStreams(*strm_desc, *enc_streams, stream); strm_desc->device_to_host(stream); enc_streams->device_to_host(stream, true); return stripes; } std::vector<std::vector<uint8_t>> writer::impl::gather_statistic_blobs( const table_device_view &table, host_span<orc_column_view const> columns, host_span<stripe_rowgroups const> stripe_bounds) { auto const num_rowgroups = stripes_size(stripe_bounds); size_t num_stat_blobs = (1 + stripe_bounds.size()) * columns.size(); size_t num_chunks = num_rowgroups * columns.size(); std::vector<std::vector<uint8_t>> stat_blobs(num_stat_blobs); hostdevice_vector<stats_column_desc> stat_desc(columns.size(), stream); hostdevice_vector<statistics_merge_group> stat_merge(num_stat_blobs, stream); rmm::device_uvector<statistics_chunk> stat_chunks(num_chunks + num_stat_blobs, stream); rmm::device_uvector<statistics_group> stat_groups(num_chunks, stream); for (auto const &column : columns) { stats_column_desc *desc = &stat_desc[column.index()]; switch (column.orc_kind()) { case TypeKind::BYTE: desc->stats_dtype = dtype_int8; break; case TypeKind::SHORT: desc->stats_dtype = dtype_int16; break; case TypeKind::INT: desc->stats_dtype = dtype_int32; break; case TypeKind::LONG: desc->stats_dtype = dtype_int64; break; case TypeKind::FLOAT: desc->stats_dtype = dtype_float32; break; case TypeKind::DOUBLE: desc->stats_dtype = dtype_float64; break; case TypeKind::BOOLEAN: desc->stats_dtype = dtype_bool; break; case TypeKind::DATE: desc->stats_dtype = dtype_int32; break; case TypeKind::TIMESTAMP: desc->stats_dtype = dtype_timestamp64; break; case TypeKind::STRING: desc->stats_dtype = dtype_string; break; default: desc->stats_dtype = dtype_none; break; } desc->num_rows = column.data_count(); desc->num_values = column.data_count(); if (desc->stats_dtype == dtype_timestamp64) { // Timestamp statistics are in milliseconds switch (column.clockscale()) { case 9: desc->ts_scale = 1000; break; case 6: desc->ts_scale = 0; break; case 3: desc->ts_scale = -1000; break; case 0: desc->ts_scale = -1000000; break; default: desc->ts_scale = 0; break; } } else { desc->ts_scale = 0; } for (auto const &stripe : stripe_bounds) { auto grp = &stat_merge[column.index() * stripe_bounds.size() + stripe.id]; grp->col = stat_desc.device_ptr(column.index()); grp->start_chunk = static_cast<uint32_t>(column.index() * num_rowgroups + stripe.first); grp->num_chunks = stripe.size; } statistics_merge_group *col_stats = &stat_merge[stripe_bounds.size() * columns.size() + column.index()]; col_stats->col = stat_desc.device_ptr(column.index()); col_stats->start_chunk = static_cast<uint32_t>(column.index() * stripe_bounds.size()); col_stats->num_chunks = static_cast<uint32_t>(stripe_bounds.size()); } stat_desc.host_to_device(stream); stat_merge.host_to_device(stream); rmm::device_uvector<column_device_view> leaf_column_views = create_leaf_column_device_views<stats_column_desc>(stat_desc, table, stream); gpu::orc_init_statistics_groups(stat_groups.data(), stat_desc.device_ptr(), columns.size(), num_rowgroups, row_index_stride_, stream); 
GatherColumnStatistics(stat_chunks.data(), stat_groups.data(), num_chunks, stream); MergeColumnStatistics(stat_chunks.data() + num_chunks, stat_chunks.data(), stat_merge.device_ptr(), stripe_bounds.size() * columns.size(), stream); MergeColumnStatistics(stat_chunks.data() + num_chunks + stripe_bounds.size() * columns.size(), stat_chunks.data() + num_chunks, stat_merge.device_ptr(stripe_bounds.size() * columns.size()), columns.size(), stream); gpu::orc_init_statistics_buffersize( stat_merge.device_ptr(), stat_chunks.data() + num_chunks, num_stat_blobs, stream); stat_merge.device_to_host(stream, true); hostdevice_vector<uint8_t> blobs( stat_merge[num_stat_blobs - 1].start_chunk + stat_merge[num_stat_blobs - 1].num_chunks, stream); gpu::orc_encode_statistics(blobs.device_ptr(), stat_merge.device_ptr(), stat_chunks.data() + num_chunks, num_stat_blobs, stream); stat_merge.device_to_host(stream); blobs.device_to_host(stream, true); for (size_t i = 0; i < num_stat_blobs; i++) { const uint8_t *stat_begin = blobs.host_ptr(stat_merge[i].start_chunk); const uint8_t *stat_end = stat_begin + stat_merge[i].num_chunks; stat_blobs[i].assign(stat_begin, stat_end); } return stat_blobs; } void writer::impl::write_index_stream(int32_t stripe_id, int32_t stream_id, host_span<orc_column_view const> columns, stripe_rowgroups const &rowgroups_range, host_2dspan<gpu::encoder_chunk_streams const> enc_streams, host_2dspan<gpu::StripeStream const> strm_desc, host_span<gpu_inflate_status_s const> comp_out, StripeInformation *stripe, orc_streams *streams, ProtobufWriter *pbw) { row_group_index_info present; row_group_index_info data; row_group_index_info data2; auto kind = TypeKind::STRUCT; auto const column_id = stream_id - 1; auto find_record = [=, &strm_desc](gpu::encoder_chunk_streams const &stream, gpu::StreamIndexType type) { row_group_index_info record; if (stream.ids[type] > 0) { record.pos = 0; if (compression_kind_ != NONE) { auto const &ss = strm_desc[stripe_id][stream.ids[type] - (columns.size() + 1)]; record.blk_pos = ss.first_block; record.comp_pos = 0; record.comp_size = ss.stream_size; } } return record; }; auto scan_record = [=, &comp_out](gpu::encoder_chunk_streams const &stream, gpu::StreamIndexType type, row_group_index_info &record) { if (record.pos >= 0) { record.pos += stream.lengths[type]; while ((record.pos >= 0) && (record.blk_pos >= 0) && (static_cast<size_t>(record.pos) >= compression_blocksize_) && (record.comp_pos + 3 + comp_out[record.blk_pos].bytes_written < static_cast<size_t>(record.comp_size))) { record.pos -= compression_blocksize_; record.comp_pos += 3 + comp_out[record.blk_pos].bytes_written; record.blk_pos += 1; } } }; // TBD: Not sure we need an empty index stream for column 0 if (stream_id != 0) { const auto &strm = enc_streams[column_id][0]; present = find_record(strm, gpu::CI_PRESENT); data = find_record(strm, gpu::CI_DATA); data2 = find_record(strm, gpu::CI_DATA2); // Change string dictionary to int from index point of view kind = columns[column_id].orc_kind(); if (kind == TypeKind::STRING && columns[column_id].orc_encoding() == DICTIONARY_V2) { kind = TypeKind::INT; } } buffer_.resize((compression_kind_ != NONE) ? 
3 : 0); // Add row index entries std::for_each(rowgroups_range.cbegin(), rowgroups_range.cend(), [&](auto rowgroup) { pbw->put_row_index_entry( present.comp_pos, present.pos, data.comp_pos, data.pos, data2.comp_pos, data2.pos, kind); if (stream_id != 0) { const auto &strm = enc_streams[column_id][rowgroup]; scan_record(strm, gpu::CI_PRESENT, present); scan_record(strm, gpu::CI_DATA, data); scan_record(strm, gpu::CI_DATA2, data2); } }); (*streams)[stream_id].length = buffer_.size(); if (compression_kind_ != NONE) { uint32_t uncomp_ix_len = (uint32_t)((*streams)[stream_id].length - 3) * 2 + 1; buffer_[0] = static_cast<uint8_t>(uncomp_ix_len >> 0); buffer_[1] = static_cast<uint8_t>(uncomp_ix_len >> 8); buffer_[2] = static_cast<uint8_t>(uncomp_ix_len >> 16); } out_sink_->host_write(buffer_.data(), buffer_.size()); stripe->indexLength += buffer_.size(); } void writer::impl::write_data_stream(gpu::StripeStream const &strm_desc, gpu::encoder_chunk_streams const &enc_stream, uint8_t const *compressed_data, uint8_t *stream_out, StripeInformation *stripe, orc_streams *streams) { const auto length = strm_desc.stream_size; (*streams)[enc_stream.ids[strm_desc.stream_type]].length = length; if (length == 0) { return; } const auto *stream_in = (compression_kind_ == NONE) ? enc_stream.data_ptrs[strm_desc.stream_type] : (compressed_data + strm_desc.bfr_offset); if (out_sink_->is_device_write_preferred(length)) { out_sink_->device_write(stream_in, length, stream); } else { CUDA_TRY( hipMemcpyAsync(stream_out, stream_in, length, hipMemcpyDeviceToHost, stream.value())); stream.synchronize(); out_sink_->host_write(stream_out, length); } stripe->dataLength += length; } void writer::impl::add_uncompressed_block_headers(std::vector<uint8_t> &v) { if (compression_kind_ != NONE) { size_t uncomp_len = v.size() - 3, pos = 0, block_len; while (uncomp_len > compression_blocksize_) { block_len = compression_blocksize_ * 2 + 1; v[pos + 0] = static_cast<uint8_t>(block_len >> 0); v[pos + 1] = static_cast<uint8_t>(block_len >> 8); v[pos + 2] = static_cast<uint8_t>(block_len >> 16); pos += 3 + compression_blocksize_; v.insert(v.begin() + pos, 3, 0); uncomp_len -= compression_blocksize_; } block_len = uncomp_len * 2 + 1; v[pos + 0] = static_cast<uint8_t>(block_len >> 0); v[pos + 1] = static_cast<uint8_t>(block_len >> 8); v[pos + 2] = static_cast<uint8_t>(block_len >> 16); } } writer::impl::impl(std::unique_ptr<data_sink> sink, orc_writer_options const &options, SingleWriteMode mode, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) : compression_kind_(to_orc_compression(options.get_compression())), enable_statistics_(options.enable_statistics()), out_sink_(std::move(sink)), single_write_mode(mode == SingleWriteMode::YES), user_metadata(options.get_metadata()), stream(stream), _mr(mr) { init_state(); } writer::impl::impl(std::unique_ptr<data_sink> sink, chunked_orc_writer_options const &options, SingleWriteMode mode, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) : compression_kind_(to_orc_compression(options.get_compression())), enable_statistics_(options.enable_statistics()), out_sink_(std::move(sink)), single_write_mode(mode == SingleWriteMode::YES), stream(stream), _mr(mr) { if (options.get_metadata() != nullptr) { user_metadata_with_nullability = *options.get_metadata(); user_metadata = &user_metadata_with_nullability; } init_state(); } writer::impl::~impl() { close(); } void writer::impl::init_state() { // Write file header out_sink_->host_write(MAGIC, std::strlen(MAGIC)); } 
rmm::device_uvector<size_type> get_string_column_ids(const table_device_view &view, rmm::cuda_stream_view stream) { rmm::device_uvector<size_type> string_column_ids(view.num_columns(), stream); auto iter = thrust::make_counting_iterator<size_type>(0); auto end_iter = thrust::copy_if(rmm::exec_policy(stream), iter, iter + view.num_columns(), string_column_ids.begin(), [view] __device__(size_type index) { return (view.column(index).type().id() == type_id::STRING); }); string_column_ids.resize(end_iter - string_column_ids.begin(), stream); return string_column_ids; } void writer::impl::write(table_view const &table) { CUDF_EXPECTS(not closed, "Data has already been flushed to out and closed"); auto const num_columns = table.num_columns(); auto const num_rows = table.num_rows(); if (user_metadata_with_nullability.column_nullable.size() > 0) { CUDF_EXPECTS( user_metadata_with_nullability.column_nullable.size() == static_cast<size_t>(num_columns), "When passing values in user_metadata_with_nullability, data for all columns must " "be specified"); } auto device_columns = table_device_view::create(table, stream); auto string_column_ids = get_string_column_ids(*device_columns, stream); // Wrapper around cudf columns to attach ORC-specific type info std::vector<orc_column_view> orc_columns; orc_columns.reserve(num_columns); // Mapping of string columns for quick look-up std::vector<int> str_col_ids; for (auto const &column : table) { auto const current_id = orc_columns.size(); auto const current_str_id = str_col_ids.size(); orc_columns.emplace_back(current_id, current_str_id, column, user_metadata, stream); if (orc_columns.back().is_string()) { str_col_ids.push_back(current_id); } } rmm::device_uvector<uint32_t> dict_data(str_col_ids.size() * num_rows, stream); rmm::device_uvector<uint32_t> dict_index(str_col_ids.size() * num_rows, stream); // Build per-column dictionary indices const auto num_rowgroups = div_by_rowgroups<size_t>(num_rows); const auto num_dict_chunks = num_rowgroups * str_col_ids.size(); hostdevice_vector<gpu::DictionaryChunk> dict(num_dict_chunks, stream); if (!str_col_ids.empty()) { init_dictionaries(*device_columns, orc_columns.data(), str_col_ids, string_column_ids, dict_data.data(), dict_index.data(), &dict); } // Decide stripe boundaries early on, based on uncompressed size auto const stripe_bounds = gather_stripe_info(orc_columns, num_rowgroups); // Build stripe-level dictionaries const auto num_stripe_dict = stripe_bounds.size() * str_col_ids.size(); hostdevice_vector<gpu::StripeDictionary> stripe_dict(num_stripe_dict, stream); if (!str_col_ids.empty()) { build_dictionaries( orc_columns.data(), str_col_ids, stripe_bounds, dict, dict_index.data(), stripe_dict); } auto streams = create_streams(orc_columns, stripe_bounds); auto enc_data = encode_columns(*device_columns, orc_columns, str_col_ids, std::move(dict_data), std::move(dict_index), stripe_bounds, streams); // Assemble individual disparate column chunks into contiguous data streams const auto num_index_streams = (num_columns + 1); const auto num_data_streams = streams.size() - num_index_streams; hostdevice_2dvector<gpu::StripeStream> strm_descs(stripe_bounds.size(), num_data_streams, stream); auto stripes = gather_stripes(num_rows, num_index_streams, stripe_bounds, &enc_data.streams, &strm_descs); // Gather column statistics std::vector<std::vector<uint8_t>> column_stats; if (enable_statistics_ && num_columns > 0 && num_rows > 0) { column_stats = gather_statistic_blobs(*device_columns, orc_columns, stripe_bounds); } // 
Allocate intermediate output stream buffer size_t compressed_bfr_size = 0; size_t num_compressed_blocks = 0; auto stream_output = [&]() { size_t max_stream_size = 0; bool all_device_write = true; for (size_t stripe_id = 0; stripe_id < stripe_bounds.size(); stripe_id++) { for (size_t i = 0; i < num_data_streams; i++) { // TODO range for (at least) gpu::StripeStream *ss = &strm_descs[stripe_id][i]; if (!out_sink_->is_device_write_preferred(ss->stream_size)) { all_device_write = false; } size_t stream_size = ss->stream_size; if (compression_kind_ != NONE) { ss->first_block = num_compressed_blocks; ss->bfr_offset = compressed_bfr_size; auto num_blocks = std::max<uint32_t>( (stream_size + compression_blocksize_ - 1) / compression_blocksize_, 1); stream_size += num_blocks * 3; num_compressed_blocks += num_blocks; compressed_bfr_size += stream_size; } max_stream_size = ::max(max_stream_size, stream_size); } } if (all_device_write) { return pinned_buffer<uint8_t>{nullptr, hipHostFree}; } else { return pinned_buffer<uint8_t>{[](size_t size) { uint8_t *ptr = nullptr; CUDA_TRY(hipHostMalloc(&ptr, size)); return ptr; }(max_stream_size), hipHostFree}; } }(); // Compress the data streams rmm::device_buffer compressed_data(compressed_bfr_size, stream); hostdevice_vector<gpu_inflate_status_s> comp_out(num_compressed_blocks, stream); hostdevice_vector<gpu_inflate_input_s> comp_in(num_compressed_blocks, stream); if (compression_kind_ != NONE) { strm_descs.host_to_device(stream); gpu::CompressOrcDataStreams(static_cast<uint8_t *>(compressed_data.data()), num_compressed_blocks, compression_kind_, compression_blocksize_, strm_descs, enc_data.streams, comp_in.device_ptr(), comp_out.device_ptr(), stream); strm_descs.device_to_host(stream); comp_out.device_to_host(stream, true); } ProtobufWriter pbw_(&buffer_); // Write stripes for (size_t stripe_id = 0; stripe_id < stripes.size(); ++stripe_id) { auto const &rowgroup_range = stripe_bounds[stripe_id]; auto &stripe = stripes[stripe_id]; stripe.offset = out_sink_->bytes_written(); // Column (skippable) index streams appear at the start of the stripe for (size_type stream_id = 0; stream_id <= num_columns; ++stream_id) { write_index_stream(stripe_id, stream_id, orc_columns, rowgroup_range, enc_data.streams, strm_descs, comp_out, &stripe, &streams, &pbw_); } // Column data consisting one or more separate streams for (auto const &strm_desc : strm_descs[stripe_id]) { write_data_stream(strm_desc, enc_data.streams[strm_desc.column_id][rowgroup_range.first], static_cast<uint8_t *>(compressed_data.data()), stream_output.get(), &stripe, &streams); } // Write stripefooter consisting of stream information StripeFooter sf; sf.streams = streams; sf.columns.resize(num_columns + 1); sf.columns[0].kind = DIRECT; for (size_t i = 1; i < sf.columns.size(); ++i) { sf.columns[i].kind = orc_columns[i - 1].orc_encoding(); sf.columns[i].dictionarySize = (sf.columns[i].kind == DICTIONARY_V2) ? orc_columns[i - 1].host_stripe_dict(stripe_id)->num_strings : 0; if (orc_columns[i - 1].orc_kind() == TIMESTAMP) { sf.writerTimezone = "UTC"; } } buffer_.resize((compression_kind_ != NONE) ? 
3 : 0); pbw_.write(sf); stripe.footerLength = buffer_.size(); if (compression_kind_ != NONE) { uint32_t uncomp_sf_len = (stripe.footerLength - 3) * 2 + 1; buffer_[0] = static_cast<uint8_t>(uncomp_sf_len >> 0); buffer_[1] = static_cast<uint8_t>(uncomp_sf_len >> 8); buffer_[2] = static_cast<uint8_t>(uncomp_sf_len >> 16); } out_sink_->host_write(buffer_.data(), buffer_.size()); } if (column_stats.size() != 0) { // File-level statistics // NOTE: Excluded from chunked write mode to avoid the need for merging stats across calls if (single_write_mode) { ff.statistics.resize(1 + num_columns); // First entry contains total number of rows buffer_.resize(0); pbw_.putb(1 * 8 + PB_TYPE_VARINT); pbw_.put_uint(num_rows); ff.statistics[0] = std::move(buffer_); for (int col_idx = 0; col_idx < num_columns; col_idx++) { size_t idx = stripes.size() * num_columns + col_idx; if (idx < column_stats.size()) { ff.statistics[1 + col_idx] = std::move(column_stats[idx]); } } } // Stripe-level statistics size_t first_stripe = md.stripeStats.size(); md.stripeStats.resize(first_stripe + stripes.size()); for (size_t stripe_id = 0; stripe_id < stripes.size(); stripe_id++) { md.stripeStats[first_stripe + stripe_id].colStats.resize(1 + num_columns); buffer_.resize(0); pbw_.putb(1 * 8 + PB_TYPE_VARINT); pbw_.put_uint(stripes[stripe_id].numberOfRows); md.stripeStats[first_stripe + stripe_id].colStats[0] = std::move(buffer_); for (int col_idx = 0; col_idx < num_columns; col_idx++) { size_t idx = stripes.size() * col_idx + stripe_id; if (idx < column_stats.size()) { md.stripeStats[first_stripe + stripe_id].colStats[1 + col_idx] = std::move(column_stats[idx]); } } } } if (ff.headerLength == 0) { // First call ff.headerLength = std::strlen(MAGIC); ff.rowIndexStride = row_index_stride_; ff.types.resize(1 + num_columns); ff.types[0].kind = STRUCT; ff.types[0].subtypes.resize(num_columns); ff.types[0].fieldNames.resize(num_columns); for (auto const &column : orc_columns) { ff.types[column.id()].kind = column.orc_kind(); ff.types[0].subtypes[column.index()] = column.id(); ff.types[0].fieldNames[column.index()] = column.orc_name(); } } else { // verify the user isn't passing mismatched tables CUDF_EXPECTS(ff.types.size() == 1 + orc_columns.size(), "Mismatch in table structure between multiple calls to write"); CUDF_EXPECTS(std::all_of(orc_columns.cbegin(), orc_columns.cend(), [&](auto const &col) { return ff.types[1 + col.index()].kind == col.orc_kind(); }), "Mismatch in column types between multiple calls to write"); } ff.stripes.insert(ff.stripes.end(), std::make_move_iterator(stripes.begin()), std::make_move_iterator(stripes.end())); ff.numberOfRows += num_rows; } void writer::impl::close() { if (closed) { return; } closed = true; ProtobufWriter pbw_(&buffer_); PostScript ps; ff.contentLength = out_sink_->bytes_written(); if (user_metadata) { for (auto it = user_metadata->user_data.begin(); it != user_metadata->user_data.end(); it++) { ff.metadata.push_back({it->first, it->second}); } } // Write statistics metadata if (md.stripeStats.size() != 0) { buffer_.resize((compression_kind_ != NONE) ? 3 : 0); pbw_.write(md); add_uncompressed_block_headers(buffer_); ps.metadataLength = buffer_.size(); out_sink_->host_write(buffer_.data(), buffer_.size()); } else { ps.metadataLength = 0; } buffer_.resize((compression_kind_ != NONE) ? 
3 : 0); pbw_.write(ff); add_uncompressed_block_headers(buffer_); // Write postscript metadata ps.footerLength = buffer_.size(); ps.compression = compression_kind_; ps.compressionBlockSize = compression_blocksize_; ps.version = {0, 12}; ps.magic = MAGIC; const auto ps_length = static_cast<uint8_t>(pbw_.write(ps)); buffer_.push_back(ps_length); out_sink_->host_write(buffer_.data(), buffer_.size()); out_sink_->flush(); } // Forward to implementation writer::writer(std::unique_ptr<data_sink> sink, orc_writer_options const &options, SingleWriteMode mode, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) : _impl(std::make_unique<impl>(std::move(sink), options, mode, stream, mr)) { } // Forward to implementation writer::writer(std::unique_ptr<data_sink> sink, chunked_orc_writer_options const &options, SingleWriteMode mode, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) : _impl(std::make_unique<impl>(std::move(sink), options, mode, stream, mr)) { } // Destructor within this translation unit writer::~writer() = default; // Forward to implementation void writer::write(table_view const &table) { _impl->write(table); } // Forward to implementation void writer::close() { _impl->close(); } } // namespace orc } // namespace detail } // namespace io } // namespace cudf
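// --- Illustrative aside: the pinned_buffer idiom used in the file above, in isolation ---
// The writer wraps hipHostMalloc/hipHostFree in a std::unique_ptr so the host staging
// buffer is released automatically. Minimal standalone sketch assuming only the HIP
// runtime; make_pinned is an illustrative name (the writer allocates inline with
// CUDA_TRY error handling instead).
#include <hip/hip_runtime.h>
#include <cstdio>
#include <memory>

template <typename T>
using pinned_buffer = std::unique_ptr<T, decltype(&hipHostFree)>;

static pinned_buffer<uint8_t> make_pinned(size_t size)
{
  uint8_t *ptr = nullptr;
  if (hipHostMalloc(reinterpret_cast<void **>(&ptr), size) != hipSuccess) {
    ptr = nullptr;  // caller must check for allocation failure
  }
  return pinned_buffer<uint8_t>{ptr, hipHostFree};
}

int main()
{
  auto staging = make_pinned(1 << 20);  // 1 MiB pinned staging area
  printf("pinned buffer %s\n", staging ? "allocated" : "failed");
  return 0;  // hipHostFree runs automatically via the unique_ptr deleter
}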
1a9b54235000e9f02a5d5c085a2e36abd44d900d.cu
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @file writer_impl.cu * @brief cuDF-IO ORC writer class implementation */ #include "writer_impl.hpp" #include <io/utilities/column_utils.cuh> #include <cudf/null_mask.hpp> #include <cudf/strings/strings_column_view.hpp> #include <cudf/utilities/span.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_buffer.hpp> #include <rmm/device_uvector.hpp> #include <algorithm> #include <cstring> #include <numeric> #include <utility> namespace cudf { namespace io { namespace detail { namespace orc { using namespace cudf::io::orc; using namespace cudf::io; struct row_group_index_info { int32_t pos = -1; // Position int32_t blk_pos = -1; // Block Position int32_t comp_pos = -1; // Compressed Position int32_t comp_size = -1; // Compressed size }; namespace { /** * @brief Helper for pinned host memory */ template <typename T> using pinned_buffer = std::unique_ptr<T, decltype(&cudaFreeHost)>; /** * @brief Function that translates GDF compression to ORC compression */ orc::CompressionKind to_orc_compression(compression_type compression) { switch (compression) { case compression_type::AUTO: case compression_type::SNAPPY: return orc::CompressionKind::SNAPPY; case compression_type::NONE: return orc::CompressionKind::NONE; default: CUDF_EXPECTS(false, "Unsupported compression type"); return orc::CompressionKind::NONE; } } /** * @brief Function that translates GDF dtype to ORC datatype */ constexpr orc::TypeKind to_orc_type(cudf::type_id id) { switch (id) { case cudf::type_id::INT8: return TypeKind::BYTE; case cudf::type_id::INT16: return TypeKind::SHORT; case cudf::type_id::INT32: return TypeKind::INT; case cudf::type_id::INT64: return TypeKind::LONG; case cudf::type_id::FLOAT32: return TypeKind::FLOAT; case cudf::type_id::FLOAT64: return TypeKind::DOUBLE; case cudf::type_id::BOOL8: return TypeKind::BOOLEAN; case cudf::type_id::TIMESTAMP_DAYS: return TypeKind::DATE; case cudf::type_id::TIMESTAMP_SECONDS: case cudf::type_id::TIMESTAMP_MICROSECONDS: case cudf::type_id::TIMESTAMP_MILLISECONDS: case cudf::type_id::TIMESTAMP_NANOSECONDS: return TypeKind::TIMESTAMP; case cudf::type_id::STRING: return TypeKind::STRING; default: return TypeKind::INVALID_TYPE_KIND; } } /** * @brief Function that translates time unit to nanoscale multiple */ template <typename T> constexpr T to_clockscale(cudf::type_id timestamp_id) { switch (timestamp_id) { case cudf::type_id::TIMESTAMP_SECONDS: return 9; case cudf::type_id::TIMESTAMP_MILLISECONDS: return 6; case cudf::type_id::TIMESTAMP_MICROSECONDS: return 3; case cudf::type_id::TIMESTAMP_NANOSECONDS: default: return 0; } } } // namespace /** * @brief Helper class that adds ORC-specific column info */ class orc_column_view { public: /** * @brief Constructor that extracts out the string position + length pairs * for building dictionaries for string columns */ explicit orc_column_view(size_t index, size_t str_id, column_view const &col, const table_metadata *metadata, 
rmm::cuda_stream_view stream) : _index(index), _str_id(str_id), _is_string_type(col.type().id() == type_id::STRING), _type_width(_is_string_type ? 0 : cudf::size_of(col.type())), _data_count(col.size()), _null_count(col.null_count()), _nulls(col.null_mask()), _clockscale(to_clockscale<uint8_t>(col.type().id())), _type_kind(to_orc_type(col.type().id())) { // Generating default name if name isn't present in metadata if (metadata && _index < metadata->column_names.size()) { _name = metadata->column_names[_index]; } else { _name = "_col" + std::to_string(_index); } } auto is_string() const noexcept { return _is_string_type; } void set_dict_stride(size_t stride) noexcept { dict_stride = stride; } auto get_dict_stride() const noexcept { return dict_stride; } /** * @brief Function that associates an existing dictionary chunk allocation */ void attach_dict_chunk(gpu::DictionaryChunk *host_dict, gpu::DictionaryChunk *dev_dict) { dict = host_dict; d_dict = dev_dict; } auto host_dict_chunk(size_t rowgroup) const { assert(_is_string_type); return &dict[rowgroup * dict_stride + _str_id]; } auto device_dict_chunk() const { return d_dict; } /** * @brief Function that associates an existing stripe dictionary allocation */ void attach_stripe_dict(gpu::StripeDictionary *host_stripe_dict, gpu::StripeDictionary *dev_stripe_dict) { stripe_dict = host_stripe_dict; d_stripe_dict = dev_stripe_dict; } auto host_stripe_dict(size_t stripe) const { assert(_is_string_type); return &stripe_dict[stripe * dict_stride + _str_id]; } auto device_stripe_dict() const { return d_stripe_dict; } // Index in the table auto index() const noexcept { return _index; } // Id in the ORC file auto id() const noexcept { return _index + 1; } size_t type_width() const noexcept { return _type_width; } size_t data_count() const noexcept { return _data_count; } size_t null_count() const noexcept { return _null_count; } bool nullable() const noexcept { return (_nulls != nullptr); } uint32_t const *nulls() const noexcept { return _nulls; } uint8_t clockscale() const noexcept { return _clockscale; } void set_orc_encoding(ColumnEncodingKind e) { _encoding_kind = e; } auto orc_kind() const noexcept { return _type_kind; } auto orc_encoding() const noexcept { return _encoding_kind; } auto orc_name() const noexcept { return _name; } private: // Identifier within set of columns and string columns, respectively uint32_t _index = 0; uint32_t _str_id = 0; bool _is_string_type = false; size_t _type_width = 0; size_t _data_count = 0; size_t _null_count = 0; uint32_t const *_nulls = nullptr; uint8_t _clockscale = 0; // ORC-related members std::string _name{}; TypeKind _type_kind; ColumnEncodingKind _encoding_kind; // String dictionary-related members size_t dict_stride = 0; gpu::DictionaryChunk const *dict = nullptr; gpu::StripeDictionary const *stripe_dict = nullptr; gpu::DictionaryChunk *d_dict = nullptr; gpu::StripeDictionary *d_stripe_dict = nullptr; }; std::vector<stripe_rowgroups> writer::impl::gather_stripe_info( host_span<orc_column_view const> columns, size_t num_rowgroups) { auto const is_any_column_string = std::any_of(columns.begin(), columns.end(), [](auto const &col) { return col.is_string(); }); // Apply rows per stripe limit to limit string dictionaries size_t const max_stripe_rows = is_any_column_string ? 
1000000 : 5000000; std::vector<stripe_rowgroups> infos; for (size_t rowgroup = 0, stripe_start = 0, stripe_size = 0; rowgroup < num_rowgroups; ++rowgroup) { auto const rowgroup_size = std::accumulate(columns.begin(), columns.end(), 0ul, [&](size_t total_size, auto const &col) { if (col.is_string()) { const auto dt = col.host_dict_chunk(rowgroup); return total_size + row_index_stride_ + dt->string_char_count; } else { return total_size + col.type_width() * row_index_stride_; } }); if ((rowgroup > stripe_start) && (stripe_size + rowgroup_size > max_stripe_size_ || (rowgroup + 1 - stripe_start) * row_index_stride_ > max_stripe_rows)) { infos.emplace_back(infos.size(), stripe_start, rowgroup - stripe_start); stripe_start = rowgroup; stripe_size = 0; } stripe_size += rowgroup_size; if (rowgroup + 1 == num_rowgroups) { infos.emplace_back(infos.size(), stripe_start, num_rowgroups - stripe_start); } } return infos; } void writer::impl::init_dictionaries(const table_device_view &view, orc_column_view *columns, std::vector<int> const &str_col_ids, device_span<size_type> d_str_col_ids, uint32_t *dict_data, uint32_t *dict_index, hostdevice_vector<gpu::DictionaryChunk> *dict) { const size_t num_rowgroups = dict->size() / str_col_ids.size(); // Setup per-rowgroup dictionary indexes for each dictionary-aware column for (size_t i = 0; i < str_col_ids.size(); ++i) { auto &str_column = columns[str_col_ids[i]]; str_column.set_dict_stride(str_col_ids.size()); str_column.attach_dict_chunk(dict->host_ptr(), dict->device_ptr()); } gpu::InitDictionaryIndices(view, dict->device_ptr(), dict_data, dict_index, row_index_stride_, d_str_col_ids.data(), d_str_col_ids.size(), num_rowgroups, stream); dict->device_to_host(stream, true); } void writer::impl::build_dictionaries(orc_column_view *columns, std::vector<int> const &str_col_ids, host_span<stripe_rowgroups const> stripe_bounds, hostdevice_vector<gpu::DictionaryChunk> const &dict, uint32_t *dict_index, hostdevice_vector<gpu::StripeDictionary> &stripe_dict) { const auto num_rowgroups = dict.size() / str_col_ids.size(); for (size_t col_idx = 0; col_idx < str_col_ids.size(); ++col_idx) { auto &str_column = columns[str_col_ids[col_idx]]; str_column.attach_stripe_dict(stripe_dict.host_ptr(), stripe_dict.device_ptr()); for (auto const &stripe : stripe_bounds) { auto &sd = stripe_dict[stripe.id * str_col_ids.size() + col_idx]; sd.dict_data = str_column.host_dict_chunk(stripe.first)->dict_data; sd.dict_index = dict_index + col_idx * str_column.data_count(); // Indexed by abs row sd.column_id = str_col_ids[col_idx]; sd.start_chunk = stripe.first; sd.num_chunks = stripe.size; sd.dict_char_count = 0; sd.num_strings = std::accumulate(stripe.cbegin(), stripe.cend(), 0, [&](auto dt_str_cnt, auto rg_idx) { const auto &dt = dict[rg_idx * str_col_ids.size() + col_idx]; return dt_str_cnt + dt.num_dict_strings; }); sd.leaf_column = dict[col_idx].leaf_column; } if (enable_dictionary_) { struct string_column_cost { size_t direct = 0; size_t dictionary = 0; }; auto const col_cost = std::accumulate(stripe_bounds.front().cbegin(), stripe_bounds.back().cend(), string_column_cost{}, [&](auto cost, auto rg_idx) -> string_column_cost { const auto &dt = dict[rg_idx * str_col_ids.size() + col_idx]; return {cost.direct + dt.string_char_count, cost.dictionary + dt.dict_char_count + dt.num_dict_strings}; }); // Disable dictionary if it does not reduce the output size if (col_cost.dictionary >= col_cost.direct) { for (auto const &stripe : stripe_bounds) { stripe_dict[stripe.id * str_col_ids.size() + 
col_idx].dict_data = nullptr; } } } } stripe_dict.host_to_device(stream); gpu::BuildStripeDictionaries(stripe_dict.device_ptr(), stripe_dict.host_ptr(), dict.device_ptr(), stripe_bounds.size(), num_rowgroups, str_col_ids.size(), stream); stripe_dict.device_to_host(stream, true); } orc_streams writer::impl::create_streams(host_span<orc_column_view> columns, host_span<stripe_rowgroups const> stripe_bounds) { // 'column 0' row index stream std::vector<Stream> streams{{ROW_INDEX, 0}}; // TODO: Separate index and data streams? // First n + 1 streams are row index streams streams.reserve(columns.size() + 1); std::transform(columns.begin(), columns.end(), std::back_inserter(streams), [](auto const &col) { return Stream{ROW_INDEX, col.id()}; }); std::vector<int32_t> ids(columns.size() * gpu::CI_NUM_STREAMS, -1); for (auto &column : columns) { TypeKind kind = column.orc_kind(); StreamKind data_kind = DATA; StreamKind data2_kind = LENGTH; ColumnEncodingKind encoding_kind = DIRECT; int64_t present_stream_size = 0; int64_t data_stream_size = 0; int64_t data2_stream_size = 0; int64_t dict_stream_size = 0; auto const is_nullable = [&]() { if (single_write_mode) { return column.nullable(); } else { return (column.index() < user_metadata_with_nullability.column_nullable.size()) ? user_metadata_with_nullability.column_nullable[column.index()] : true; } }(); if (is_nullable) { present_stream_size = ((row_index_stride_ + 7) >> 3); present_stream_size += (present_stream_size + 0x7f) >> 7; } switch (kind) { case TypeKind::BOOLEAN: data_stream_size = div_rowgroups_by<int64_t>(1024) * (128 + 1); encoding_kind = DIRECT; break; case TypeKind::BYTE: data_stream_size = div_rowgroups_by<int64_t>(128) * (128 + 1); encoding_kind = DIRECT; break; case TypeKind::SHORT: data_stream_size = div_rowgroups_by<int64_t>(512) * (512 * 2 + 2); encoding_kind = DIRECT_V2; break; case TypeKind::FLOAT: // Pass through if no nulls (no RLE encoding for floating point) data_stream_size = (column.null_count() != 0) ? div_rowgroups_by<int64_t>(512) * (512 * 4 + 2) : INT64_C(-1); encoding_kind = DIRECT; break; case TypeKind::INT: case TypeKind::DATE: data_stream_size = div_rowgroups_by<int64_t>(512) * (512 * 4 + 2); encoding_kind = DIRECT_V2; break; case TypeKind::DOUBLE: // Pass through if no nulls (no RLE encoding for floating point) data_stream_size = (column.null_count() != 0) ? 
div_rowgroups_by<int64_t>(512) * (512 * 8 + 2) : INT64_C(-1); encoding_kind = DIRECT; break; case TypeKind::LONG: data_stream_size = div_rowgroups_by<int64_t>(512) * (512 * 8 + 2); encoding_kind = DIRECT_V2; break; case TypeKind::STRING: { bool enable_dict = enable_dictionary_; size_t dict_data_size = 0; size_t dict_strings = 0; size_t dict_lengths_div512 = 0; for (auto const &stripe : stripe_bounds) { const auto sd = column.host_stripe_dict(stripe.id); enable_dict = (enable_dict && sd->dict_data != nullptr); if (enable_dict) { dict_strings += sd->num_strings; dict_lengths_div512 += (sd->num_strings + 0x1ff) >> 9; dict_data_size += sd->dict_char_count; } } auto const direct_data_size = std::accumulate(stripe_bounds.front().cbegin(), stripe_bounds.back().cend(), size_t{0}, [&](auto data_size, auto rg_idx) { return data_size + column.host_dict_chunk(rg_idx)->string_char_count; }); if (enable_dict) { uint32_t dict_bits = 0; for (dict_bits = 1; dict_bits < 32; dict_bits <<= 1) { if (dict_strings <= (1ull << dict_bits)) break; } const auto valid_count = column.data_count() - column.null_count(); dict_data_size += (dict_bits * valid_count + 7) >> 3; } // Decide between direct or dictionary encoding if (enable_dict && dict_data_size < direct_data_size) { data_stream_size = div_rowgroups_by<int64_t>(512) * (512 * 4 + 2); data2_stream_size = dict_lengths_div512 * (512 * 4 + 2); dict_stream_size = std::max<size_t>(dict_data_size, 1); encoding_kind = DICTIONARY_V2; } else { data_stream_size = std::max<size_t>(direct_data_size, 1); data2_stream_size = div_rowgroups_by<int64_t>(512) * (512 * 4 + 2); encoding_kind = DIRECT_V2; } break; } case TypeKind::TIMESTAMP: data_stream_size = ((row_index_stride_ + 0x1ff) >> 9) * (512 * 4 + 2); data2_stream_size = data_stream_size; data2_kind = SECONDARY; encoding_kind = DIRECT_V2; break; default: CUDF_FAIL("Unsupported ORC type kind"); } // Initialize the column's metadata (this is the only reason columns is in/out param) column.set_orc_encoding(encoding_kind); // Initialize the column's data stream(s) const auto base = column.index() * gpu::CI_NUM_STREAMS; if (present_stream_size != 0) { auto len = static_cast<uint64_t>(present_stream_size); ids[base + gpu::CI_PRESENT] = streams.size(); streams.push_back(orc::Stream{PRESENT, column.id(), len}); } if (data_stream_size != 0) { auto len = static_cast<uint64_t>(std::max<int64_t>(data_stream_size, 0)); ids[base + gpu::CI_DATA] = streams.size(); streams.push_back(orc::Stream{data_kind, column.id(), len}); } if (data2_stream_size != 0) { auto len = static_cast<uint64_t>(std::max<int64_t>(data2_stream_size, 0)); ids[base + gpu::CI_DATA2] = streams.size(); streams.push_back(orc::Stream{data2_kind, column.id(), len}); } if (dict_stream_size != 0) { auto len = static_cast<uint64_t>(dict_stream_size); ids[base + gpu::CI_DICTIONARY] = streams.size(); streams.push_back(orc::Stream{DICTIONARY_DATA, column.id(), len}); } } return {std::move(streams), std::move(ids)}; } orc_streams::orc_stream_offsets orc_streams::compute_offsets( host_span<orc_column_view const> columns, size_t num_rowgroups) const { std::vector<size_t> strm_offsets(streams.size()); size_t str_data_size = 0; size_t rle_data_size = 0; for (size_t i = 0; i < streams.size(); ++i) { const auto &stream = streams[i]; auto const is_str_data = [&]() { // First stream is an index stream if (!stream.column_index().has_value()) return false; auto const &column = columns[stream.column_index().value()]; if (column.orc_kind() != TypeKind::STRING) return false; // Dictionary 
encoded string column dictionary characters or // directly encoded string column characters return ((stream.kind == DICTIONARY_DATA && column.orc_encoding() == DICTIONARY_V2) || (stream.kind == DATA && column.orc_encoding() == DIRECT_V2)); }(); if (is_str_data) { strm_offsets[i] = str_data_size; str_data_size += stream.length; } else { strm_offsets[i] = rle_data_size; rle_data_size += (stream.length * num_rowgroups + 7) & ~7; } } str_data_size = (str_data_size + 7) & ~7; return {std::move(strm_offsets), str_data_size, rle_data_size}; } struct segmented_valid_cnt_input { bitmask_type const *mask; std::vector<size_type> indices; }; encoded_data writer::impl::encode_columns(const table_device_view &view, host_span<orc_column_view const> columns, std::vector<int> const &str_col_ids, rmm::device_uvector<uint32_t> &&dict_data, rmm::device_uvector<uint32_t> &&dict_index, host_span<stripe_rowgroups const> stripe_bounds, orc_streams const &streams) { auto const num_columns = columns.size(); auto const num_rowgroups = stripes_size(stripe_bounds); hostdevice_2dvector<gpu::EncChunk> chunks(num_columns, num_rowgroups, stream); hostdevice_2dvector<gpu::encoder_chunk_streams> chunk_streams(num_columns, num_rowgroups, stream); auto const stream_offsets = streams.compute_offsets(columns, num_rowgroups); rmm::device_uvector<uint8_t> encoded_data(stream_offsets.data_size(), stream); // Initialize column chunks' descriptions std::map<size_type, segmented_valid_cnt_input> validity_check_inputs; for (auto const &column : columns) { for (auto const &stripe : stripe_bounds) { for (auto rg_idx_it = stripe.cbegin(); rg_idx_it < stripe.cend(); ++rg_idx_it) { auto const rg_idx = *rg_idx_it; auto &ck = chunks[column.index()][rg_idx]; ck.start_row = (rg_idx * row_index_stride_); ck.num_rows = std::min<uint32_t>(row_index_stride_, column.data_count() - ck.start_row); ck.encoding_kind = column.orc_encoding(); ck.type_kind = column.orc_kind(); if (ck.type_kind == TypeKind::STRING) { ck.dict_index = (ck.encoding_kind == DICTIONARY_V2) ? column.host_stripe_dict(stripe.id)->dict_index : nullptr; ck.dtype_len = 1; } else { ck.dtype_len = column.type_width(); } ck.scale = column.clockscale(); // Only need to check row groups that end within the stripe } } } auto validity_check_indices = [&](size_t col_idx) { std::vector<size_type> indices; for (auto const &stripe : stripe_bounds) { for (auto rg_idx_it = stripe.cbegin(); rg_idx_it < stripe.cend() - 1; ++rg_idx_it) { auto const &chunk = chunks[col_idx][*rg_idx_it]; indices.push_back(chunk.start_row); indices.push_back(chunk.start_row + chunk.num_rows); } } return indices; }; for (auto const &column : columns) { if (column.orc_kind() == TypeKind::BOOLEAN && column.nullable()) { validity_check_inputs[column.index()] = {column.nulls(), validity_check_indices(column.index())}; } } for (auto &cnt_in : validity_check_inputs) { auto const valid_counts = segmented_count_set_bits(cnt_in.second.mask, cnt_in.second.indices); CUDF_EXPECTS( std::none_of(valid_counts.cbegin(), valid_counts.cend(), [](auto valid_count) { return valid_count % 8; }), "There's currently a bug in encoding boolean columns. Suggested workaround is to convert " "to int8 type." 
" Please see https://github.com/rapidsai/cudf/issues/6763 for more information."); } for (size_t col_idx = 0; col_idx < num_columns; col_idx++) { auto const &column = columns[col_idx]; auto col_streams = chunk_streams[col_idx]; for (auto const &stripe : stripe_bounds) { for (auto rg_idx_it = stripe.cbegin(); rg_idx_it < stripe.cend(); ++rg_idx_it) { auto const rg_idx = *rg_idx_it; auto const &ck = chunks[col_idx][rg_idx]; auto &strm = col_streams[rg_idx]; for (int strm_type = 0; strm_type < gpu::CI_NUM_STREAMS; ++strm_type) { auto const strm_id = streams.id(col_idx * gpu::CI_NUM_STREAMS + strm_type); strm.ids[strm_type] = strm_id; if (strm_id >= 0) { if ((strm_type == gpu::CI_DICTIONARY) || (strm_type == gpu::CI_DATA2 && ck.encoding_kind == DICTIONARY_V2)) { if (rg_idx_it == stripe.cbegin()) { const int32_t dict_stride = column.get_dict_stride(); const auto stripe_dict = column.host_stripe_dict(stripe.id); strm.lengths[strm_type] = (strm_type == gpu::CI_DICTIONARY) ? stripe_dict->dict_char_count : (((stripe_dict->num_strings + 0x1ff) >> 9) * (512 * 4 + 2)); if (stripe.id == 0) { strm.data_ptrs[strm_type] = encoded_data.data() + stream_offsets.offsets[strm_id]; } else { auto const &strm_up = col_streams[stripe_dict[-dict_stride].start_chunk]; strm.data_ptrs[strm_type] = strm_up.data_ptrs[strm_type] + strm_up.lengths[strm_type]; } } else { strm.lengths[strm_type] = 0; strm.data_ptrs[strm_type] = col_streams[rg_idx - 1].data_ptrs[strm_type]; } } else if (strm_type == gpu::CI_DATA && ck.type_kind == TypeKind::STRING && ck.encoding_kind == DIRECT_V2) { strm.lengths[strm_type] = column.host_dict_chunk(rg_idx)->string_char_count; strm.data_ptrs[strm_type] = (rg_idx == 0) ? encoded_data.data() + stream_offsets.offsets[strm_id] : (col_streams[rg_idx - 1].data_ptrs[strm_type] + col_streams[rg_idx - 1].lengths[strm_type]); } else if (strm_type == gpu::CI_DATA && streams[strm_id].length == 0 && (ck.type_kind == DOUBLE || ck.type_kind == FLOAT)) { // Pass-through strm.lengths[strm_type] = ck.num_rows * ck.dtype_len; strm.data_ptrs[strm_type] = nullptr; } else { strm.lengths[strm_type] = streams[strm_id].length; strm.data_ptrs[strm_type] = encoded_data.data() + stream_offsets.str_data_size + stream_offsets.offsets[strm_id] + streams[strm_id].length * rg_idx; } } else { strm.lengths[strm_type] = 0; strm.data_ptrs[strm_type] = nullptr; } } } } } chunks.host_to_device(stream); chunk_streams.host_to_device(stream); gpu::set_chunk_columns(view, chunks, stream); if (!str_col_ids.empty()) { auto d_stripe_dict = columns[str_col_ids[0]].device_stripe_dict(); gpu::EncodeStripeDictionaries( d_stripe_dict, chunks, str_col_ids.size(), stripe_bounds.size(), chunk_streams, stream); } gpu::EncodeOrcColumnData(chunks, chunk_streams, stream); dict_data.release(); dict_index.release(); stream.synchronize(); return {std::move(encoded_data), std::move(chunk_streams)}; } std::vector<StripeInformation> writer::impl::gather_stripes( size_t num_rows, size_t num_index_streams, host_span<stripe_rowgroups const> stripe_bounds, hostdevice_2dvector<gpu::encoder_chunk_streams> *enc_streams, hostdevice_2dvector<gpu::StripeStream> *strm_desc) { std::vector<StripeInformation> stripes(stripe_bounds.size()); for (auto const &stripe : stripe_bounds) { for (size_t col_idx = 0; col_idx < enc_streams->size().first; col_idx++) { const auto &strm = (*enc_streams)[col_idx][stripe.first]; // Assign stream data of column data stream(s) for (int k = 0; k < gpu::CI_INDEX; k++) { const auto stream_id = strm.ids[k]; if (stream_id != -1) { auto *ss = 
&(*strm_desc)[stripe.id][stream_id - num_index_streams]; ss->stream_size = 0; ss->first_chunk_id = stripe.first; ss->num_chunks = stripe.size; ss->column_id = col_idx; ss->stream_type = k; } } } auto const stripe_group_end = *stripe.cend(); auto const stripe_end = std::min(stripe_group_end * row_index_stride_, num_rows); stripes[stripe.id].numberOfRows = stripe_end - stripe.first * row_index_stride_; } strm_desc->host_to_device(stream); gpu::CompactOrcDataStreams(*strm_desc, *enc_streams, stream); strm_desc->device_to_host(stream); enc_streams->device_to_host(stream, true); return stripes; } std::vector<std::vector<uint8_t>> writer::impl::gather_statistic_blobs( const table_device_view &table, host_span<orc_column_view const> columns, host_span<stripe_rowgroups const> stripe_bounds) { auto const num_rowgroups = stripes_size(stripe_bounds); size_t num_stat_blobs = (1 + stripe_bounds.size()) * columns.size(); size_t num_chunks = num_rowgroups * columns.size(); std::vector<std::vector<uint8_t>> stat_blobs(num_stat_blobs); hostdevice_vector<stats_column_desc> stat_desc(columns.size(), stream); hostdevice_vector<statistics_merge_group> stat_merge(num_stat_blobs, stream); rmm::device_uvector<statistics_chunk> stat_chunks(num_chunks + num_stat_blobs, stream); rmm::device_uvector<statistics_group> stat_groups(num_chunks, stream); for (auto const &column : columns) { stats_column_desc *desc = &stat_desc[column.index()]; switch (column.orc_kind()) { case TypeKind::BYTE: desc->stats_dtype = dtype_int8; break; case TypeKind::SHORT: desc->stats_dtype = dtype_int16; break; case TypeKind::INT: desc->stats_dtype = dtype_int32; break; case TypeKind::LONG: desc->stats_dtype = dtype_int64; break; case TypeKind::FLOAT: desc->stats_dtype = dtype_float32; break; case TypeKind::DOUBLE: desc->stats_dtype = dtype_float64; break; case TypeKind::BOOLEAN: desc->stats_dtype = dtype_bool; break; case TypeKind::DATE: desc->stats_dtype = dtype_int32; break; case TypeKind::TIMESTAMP: desc->stats_dtype = dtype_timestamp64; break; case TypeKind::STRING: desc->stats_dtype = dtype_string; break; default: desc->stats_dtype = dtype_none; break; } desc->num_rows = column.data_count(); desc->num_values = column.data_count(); if (desc->stats_dtype == dtype_timestamp64) { // Timestamp statistics are in milliseconds switch (column.clockscale()) { case 9: desc->ts_scale = 1000; break; case 6: desc->ts_scale = 0; break; case 3: desc->ts_scale = -1000; break; case 0: desc->ts_scale = -1000000; break; default: desc->ts_scale = 0; break; } } else { desc->ts_scale = 0; } for (auto const &stripe : stripe_bounds) { auto grp = &stat_merge[column.index() * stripe_bounds.size() + stripe.id]; grp->col = stat_desc.device_ptr(column.index()); grp->start_chunk = static_cast<uint32_t>(column.index() * num_rowgroups + stripe.first); grp->num_chunks = stripe.size; } statistics_merge_group *col_stats = &stat_merge[stripe_bounds.size() * columns.size() + column.index()]; col_stats->col = stat_desc.device_ptr(column.index()); col_stats->start_chunk = static_cast<uint32_t>(column.index() * stripe_bounds.size()); col_stats->num_chunks = static_cast<uint32_t>(stripe_bounds.size()); } stat_desc.host_to_device(stream); stat_merge.host_to_device(stream); rmm::device_uvector<column_device_view> leaf_column_views = create_leaf_column_device_views<stats_column_desc>(stat_desc, table, stream); gpu::orc_init_statistics_groups(stat_groups.data(), stat_desc.device_ptr(), columns.size(), num_rowgroups, row_index_stride_, stream); 
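// Layout reminder for the statistics buffers set up above (descriptive only):
//   stat_chunks[0 .. num_chunks)                           per-rowgroup statistics, indexed as
//                                                          column.index() * num_rowgroups + rowgroup
//   stat_chunks[num_chunks .. num_chunks + stripes*cols)   per-stripe statistics (first merge below)
//   stat_chunks[num_chunks + stripes*cols .. end)          per-column, file-level statistics (second merge below)
// The stat_merge descriptors created above drive the two MergeColumnStatistics
// reductions: rowgroup -> stripe, then stripe -> column.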
GatherColumnStatistics(stat_chunks.data(), stat_groups.data(), num_chunks, stream); MergeColumnStatistics(stat_chunks.data() + num_chunks, stat_chunks.data(), stat_merge.device_ptr(), stripe_bounds.size() * columns.size(), stream); MergeColumnStatistics(stat_chunks.data() + num_chunks + stripe_bounds.size() * columns.size(), stat_chunks.data() + num_chunks, stat_merge.device_ptr(stripe_bounds.size() * columns.size()), columns.size(), stream); gpu::orc_init_statistics_buffersize( stat_merge.device_ptr(), stat_chunks.data() + num_chunks, num_stat_blobs, stream); stat_merge.device_to_host(stream, true); hostdevice_vector<uint8_t> blobs( stat_merge[num_stat_blobs - 1].start_chunk + stat_merge[num_stat_blobs - 1].num_chunks, stream); gpu::orc_encode_statistics(blobs.device_ptr(), stat_merge.device_ptr(), stat_chunks.data() + num_chunks, num_stat_blobs, stream); stat_merge.device_to_host(stream); blobs.device_to_host(stream, true); for (size_t i = 0; i < num_stat_blobs; i++) { const uint8_t *stat_begin = blobs.host_ptr(stat_merge[i].start_chunk); const uint8_t *stat_end = stat_begin + stat_merge[i].num_chunks; stat_blobs[i].assign(stat_begin, stat_end); } return stat_blobs; } void writer::impl::write_index_stream(int32_t stripe_id, int32_t stream_id, host_span<orc_column_view const> columns, stripe_rowgroups const &rowgroups_range, host_2dspan<gpu::encoder_chunk_streams const> enc_streams, host_2dspan<gpu::StripeStream const> strm_desc, host_span<gpu_inflate_status_s const> comp_out, StripeInformation *stripe, orc_streams *streams, ProtobufWriter *pbw) { row_group_index_info present; row_group_index_info data; row_group_index_info data2; auto kind = TypeKind::STRUCT; auto const column_id = stream_id - 1; auto find_record = [=, &strm_desc](gpu::encoder_chunk_streams const &stream, gpu::StreamIndexType type) { row_group_index_info record; if (stream.ids[type] > 0) { record.pos = 0; if (compression_kind_ != NONE) { auto const &ss = strm_desc[stripe_id][stream.ids[type] - (columns.size() + 1)]; record.blk_pos = ss.first_block; record.comp_pos = 0; record.comp_size = ss.stream_size; } } return record; }; auto scan_record = [=, &comp_out](gpu::encoder_chunk_streams const &stream, gpu::StreamIndexType type, row_group_index_info &record) { if (record.pos >= 0) { record.pos += stream.lengths[type]; while ((record.pos >= 0) && (record.blk_pos >= 0) && (static_cast<size_t>(record.pos) >= compression_blocksize_) && (record.comp_pos + 3 + comp_out[record.blk_pos].bytes_written < static_cast<size_t>(record.comp_size))) { record.pos -= compression_blocksize_; record.comp_pos += 3 + comp_out[record.blk_pos].bytes_written; record.blk_pos += 1; } } }; // TBD: Not sure we need an empty index stream for column 0 if (stream_id != 0) { const auto &strm = enc_streams[column_id][0]; present = find_record(strm, gpu::CI_PRESENT); data = find_record(strm, gpu::CI_DATA); data2 = find_record(strm, gpu::CI_DATA2); // Change string dictionary to int from index point of view kind = columns[column_id].orc_kind(); if (kind == TypeKind::STRING && columns[column_id].orc_encoding() == DICTIONARY_V2) { kind = TypeKind::INT; } } buffer_.resize((compression_kind_ != NONE) ? 
3 : 0); // Add row index entries std::for_each(rowgroups_range.cbegin(), rowgroups_range.cend(), [&](auto rowgroup) { pbw->put_row_index_entry( present.comp_pos, present.pos, data.comp_pos, data.pos, data2.comp_pos, data2.pos, kind); if (stream_id != 0) { const auto &strm = enc_streams[column_id][rowgroup]; scan_record(strm, gpu::CI_PRESENT, present); scan_record(strm, gpu::CI_DATA, data); scan_record(strm, gpu::CI_DATA2, data2); } }); (*streams)[stream_id].length = buffer_.size(); if (compression_kind_ != NONE) { uint32_t uncomp_ix_len = (uint32_t)((*streams)[stream_id].length - 3) * 2 + 1; buffer_[0] = static_cast<uint8_t>(uncomp_ix_len >> 0); buffer_[1] = static_cast<uint8_t>(uncomp_ix_len >> 8); buffer_[2] = static_cast<uint8_t>(uncomp_ix_len >> 16); } out_sink_->host_write(buffer_.data(), buffer_.size()); stripe->indexLength += buffer_.size(); } void writer::impl::write_data_stream(gpu::StripeStream const &strm_desc, gpu::encoder_chunk_streams const &enc_stream, uint8_t const *compressed_data, uint8_t *stream_out, StripeInformation *stripe, orc_streams *streams) { const auto length = strm_desc.stream_size; (*streams)[enc_stream.ids[strm_desc.stream_type]].length = length; if (length == 0) { return; } const auto *stream_in = (compression_kind_ == NONE) ? enc_stream.data_ptrs[strm_desc.stream_type] : (compressed_data + strm_desc.bfr_offset); if (out_sink_->is_device_write_preferred(length)) { out_sink_->device_write(stream_in, length, stream); } else { CUDA_TRY( cudaMemcpyAsync(stream_out, stream_in, length, cudaMemcpyDeviceToHost, stream.value())); stream.synchronize(); out_sink_->host_write(stream_out, length); } stripe->dataLength += length; } void writer::impl::add_uncompressed_block_headers(std::vector<uint8_t> &v) { if (compression_kind_ != NONE) { size_t uncomp_len = v.size() - 3, pos = 0, block_len; while (uncomp_len > compression_blocksize_) { block_len = compression_blocksize_ * 2 + 1; v[pos + 0] = static_cast<uint8_t>(block_len >> 0); v[pos + 1] = static_cast<uint8_t>(block_len >> 8); v[pos + 2] = static_cast<uint8_t>(block_len >> 16); pos += 3 + compression_blocksize_; v.insert(v.begin() + pos, 3, 0); uncomp_len -= compression_blocksize_; } block_len = uncomp_len * 2 + 1; v[pos + 0] = static_cast<uint8_t>(block_len >> 0); v[pos + 1] = static_cast<uint8_t>(block_len >> 8); v[pos + 2] = static_cast<uint8_t>(block_len >> 16); } } writer::impl::impl(std::unique_ptr<data_sink> sink, orc_writer_options const &options, SingleWriteMode mode, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) : compression_kind_(to_orc_compression(options.get_compression())), enable_statistics_(options.enable_statistics()), out_sink_(std::move(sink)), single_write_mode(mode == SingleWriteMode::YES), user_metadata(options.get_metadata()), stream(stream), _mr(mr) { init_state(); } writer::impl::impl(std::unique_ptr<data_sink> sink, chunked_orc_writer_options const &options, SingleWriteMode mode, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) : compression_kind_(to_orc_compression(options.get_compression())), enable_statistics_(options.enable_statistics()), out_sink_(std::move(sink)), single_write_mode(mode == SingleWriteMode::YES), stream(stream), _mr(mr) { if (options.get_metadata() != nullptr) { user_metadata_with_nullability = *options.get_metadata(); user_metadata = &user_metadata_with_nullability; } init_state(); } writer::impl::~impl() { close(); } void writer::impl::init_state() { // Write file header out_sink_->host_write(MAGIC, std::strlen(MAGIC)); } 
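// --- Illustrative aside (not used by the writer): staging a device stream through host memory ---
// Mirrors the fallback branch in write_data_stream above, where the sink cannot consume
// device memory directly. The function name is illustrative, error checking is omitted,
// and a pageable std::vector stands in for the pinned buffer the writer actually uses.
#include <cuda_runtime.h>
#include <cstdint>
#include <functional>
#include <vector>

static void stage_and_write_example(uint8_t const *d_stream,
                                    size_t length,
                                    cudaStream_t stream,
                                    std::function<void(void const *, size_t)> const &sink_write)
{
  std::vector<uint8_t> staging(length);  // writer uses pinned host memory here
  // Asynchronous D2H copy, then synchronize before handing the buffer to the sink.
  cudaMemcpyAsync(staging.data(), d_stream, length, cudaMemcpyDeviceToHost, stream);
  cudaStreamSynchronize(stream);
  sink_write(staging.data(), length);
}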
rmm::device_uvector<size_type> get_string_column_ids(const table_device_view &view, rmm::cuda_stream_view stream) { rmm::device_uvector<size_type> string_column_ids(view.num_columns(), stream); auto iter = thrust::make_counting_iterator<size_type>(0); auto end_iter = thrust::copy_if(rmm::exec_policy(stream), iter, iter + view.num_columns(), string_column_ids.begin(), [view] __device__(size_type index) { return (view.column(index).type().id() == type_id::STRING); }); string_column_ids.resize(end_iter - string_column_ids.begin(), stream); return string_column_ids; } void writer::impl::write(table_view const &table) { CUDF_EXPECTS(not closed, "Data has already been flushed to out and closed"); auto const num_columns = table.num_columns(); auto const num_rows = table.num_rows(); if (user_metadata_with_nullability.column_nullable.size() > 0) { CUDF_EXPECTS( user_metadata_with_nullability.column_nullable.size() == static_cast<size_t>(num_columns), "When passing values in user_metadata_with_nullability, data for all columns must " "be specified"); } auto device_columns = table_device_view::create(table, stream); auto string_column_ids = get_string_column_ids(*device_columns, stream); // Wrapper around cudf columns to attach ORC-specific type info std::vector<orc_column_view> orc_columns; orc_columns.reserve(num_columns); // Mapping of string columns for quick look-up std::vector<int> str_col_ids; for (auto const &column : table) { auto const current_id = orc_columns.size(); auto const current_str_id = str_col_ids.size(); orc_columns.emplace_back(current_id, current_str_id, column, user_metadata, stream); if (orc_columns.back().is_string()) { str_col_ids.push_back(current_id); } } rmm::device_uvector<uint32_t> dict_data(str_col_ids.size() * num_rows, stream); rmm::device_uvector<uint32_t> dict_index(str_col_ids.size() * num_rows, stream); // Build per-column dictionary indices const auto num_rowgroups = div_by_rowgroups<size_t>(num_rows); const auto num_dict_chunks = num_rowgroups * str_col_ids.size(); hostdevice_vector<gpu::DictionaryChunk> dict(num_dict_chunks, stream); if (!str_col_ids.empty()) { init_dictionaries(*device_columns, orc_columns.data(), str_col_ids, string_column_ids, dict_data.data(), dict_index.data(), &dict); } // Decide stripe boundaries early on, based on uncompressed size auto const stripe_bounds = gather_stripe_info(orc_columns, num_rowgroups); // Build stripe-level dictionaries const auto num_stripe_dict = stripe_bounds.size() * str_col_ids.size(); hostdevice_vector<gpu::StripeDictionary> stripe_dict(num_stripe_dict, stream); if (!str_col_ids.empty()) { build_dictionaries( orc_columns.data(), str_col_ids, stripe_bounds, dict, dict_index.data(), stripe_dict); } auto streams = create_streams(orc_columns, stripe_bounds); auto enc_data = encode_columns(*device_columns, orc_columns, str_col_ids, std::move(dict_data), std::move(dict_index), stripe_bounds, streams); // Assemble individual disparate column chunks into contiguous data streams const auto num_index_streams = (num_columns + 1); const auto num_data_streams = streams.size() - num_index_streams; hostdevice_2dvector<gpu::StripeStream> strm_descs(stripe_bounds.size(), num_data_streams, stream); auto stripes = gather_stripes(num_rows, num_index_streams, stripe_bounds, &enc_data.streams, &strm_descs); // Gather column statistics std::vector<std::vector<uint8_t>> column_stats; if (enable_statistics_ && num_columns > 0 && num_rows > 0) { column_stats = gather_statistic_blobs(*device_columns, orc_columns, stripe_bounds); } // 
Allocate intermediate output stream buffer size_t compressed_bfr_size = 0; size_t num_compressed_blocks = 0; auto stream_output = [&]() { size_t max_stream_size = 0; bool all_device_write = true; for (size_t stripe_id = 0; stripe_id < stripe_bounds.size(); stripe_id++) { for (size_t i = 0; i < num_data_streams; i++) { // TODO range for (at least) gpu::StripeStream *ss = &strm_descs[stripe_id][i]; if (!out_sink_->is_device_write_preferred(ss->stream_size)) { all_device_write = false; } size_t stream_size = ss->stream_size; if (compression_kind_ != NONE) { ss->first_block = num_compressed_blocks; ss->bfr_offset = compressed_bfr_size; auto num_blocks = std::max<uint32_t>( (stream_size + compression_blocksize_ - 1) / compression_blocksize_, 1); stream_size += num_blocks * 3; num_compressed_blocks += num_blocks; compressed_bfr_size += stream_size; } max_stream_size = std::max(max_stream_size, stream_size); } } if (all_device_write) { return pinned_buffer<uint8_t>{nullptr, cudaFreeHost}; } else { return pinned_buffer<uint8_t>{[](size_t size) { uint8_t *ptr = nullptr; CUDA_TRY(cudaMallocHost(&ptr, size)); return ptr; }(max_stream_size), cudaFreeHost}; } }(); // Compress the data streams rmm::device_buffer compressed_data(compressed_bfr_size, stream); hostdevice_vector<gpu_inflate_status_s> comp_out(num_compressed_blocks, stream); hostdevice_vector<gpu_inflate_input_s> comp_in(num_compressed_blocks, stream); if (compression_kind_ != NONE) { strm_descs.host_to_device(stream); gpu::CompressOrcDataStreams(static_cast<uint8_t *>(compressed_data.data()), num_compressed_blocks, compression_kind_, compression_blocksize_, strm_descs, enc_data.streams, comp_in.device_ptr(), comp_out.device_ptr(), stream); strm_descs.device_to_host(stream); comp_out.device_to_host(stream, true); } ProtobufWriter pbw_(&buffer_); // Write stripes for (size_t stripe_id = 0; stripe_id < stripes.size(); ++stripe_id) { auto const &rowgroup_range = stripe_bounds[stripe_id]; auto &stripe = stripes[stripe_id]; stripe.offset = out_sink_->bytes_written(); // Column (skippable) index streams appear at the start of the stripe for (size_type stream_id = 0; stream_id <= num_columns; ++stream_id) { write_index_stream(stripe_id, stream_id, orc_columns, rowgroup_range, enc_data.streams, strm_descs, comp_out, &stripe, &streams, &pbw_); } // Column data consisting one or more separate streams for (auto const &strm_desc : strm_descs[stripe_id]) { write_data_stream(strm_desc, enc_data.streams[strm_desc.column_id][rowgroup_range.first], static_cast<uint8_t *>(compressed_data.data()), stream_output.get(), &stripe, &streams); } // Write stripefooter consisting of stream information StripeFooter sf; sf.streams = streams; sf.columns.resize(num_columns + 1); sf.columns[0].kind = DIRECT; for (size_t i = 1; i < sf.columns.size(); ++i) { sf.columns[i].kind = orc_columns[i - 1].orc_encoding(); sf.columns[i].dictionarySize = (sf.columns[i].kind == DICTIONARY_V2) ? orc_columns[i - 1].host_stripe_dict(stripe_id)->num_strings : 0; if (orc_columns[i - 1].orc_kind() == TIMESTAMP) { sf.writerTimezone = "UTC"; } } buffer_.resize((compression_kind_ != NONE) ? 
3 : 0); pbw_.write(sf); stripe.footerLength = buffer_.size(); if (compression_kind_ != NONE) { uint32_t uncomp_sf_len = (stripe.footerLength - 3) * 2 + 1; buffer_[0] = static_cast<uint8_t>(uncomp_sf_len >> 0); buffer_[1] = static_cast<uint8_t>(uncomp_sf_len >> 8); buffer_[2] = static_cast<uint8_t>(uncomp_sf_len >> 16); } out_sink_->host_write(buffer_.data(), buffer_.size()); } if (column_stats.size() != 0) { // File-level statistics // NOTE: Excluded from chunked write mode to avoid the need for merging stats across calls if (single_write_mode) { ff.statistics.resize(1 + num_columns); // First entry contains total number of rows buffer_.resize(0); pbw_.putb(1 * 8 + PB_TYPE_VARINT); pbw_.put_uint(num_rows); ff.statistics[0] = std::move(buffer_); for (int col_idx = 0; col_idx < num_columns; col_idx++) { size_t idx = stripes.size() * num_columns + col_idx; if (idx < column_stats.size()) { ff.statistics[1 + col_idx] = std::move(column_stats[idx]); } } } // Stripe-level statistics size_t first_stripe = md.stripeStats.size(); md.stripeStats.resize(first_stripe + stripes.size()); for (size_t stripe_id = 0; stripe_id < stripes.size(); stripe_id++) { md.stripeStats[first_stripe + stripe_id].colStats.resize(1 + num_columns); buffer_.resize(0); pbw_.putb(1 * 8 + PB_TYPE_VARINT); pbw_.put_uint(stripes[stripe_id].numberOfRows); md.stripeStats[first_stripe + stripe_id].colStats[0] = std::move(buffer_); for (int col_idx = 0; col_idx < num_columns; col_idx++) { size_t idx = stripes.size() * col_idx + stripe_id; if (idx < column_stats.size()) { md.stripeStats[first_stripe + stripe_id].colStats[1 + col_idx] = std::move(column_stats[idx]); } } } } if (ff.headerLength == 0) { // First call ff.headerLength = std::strlen(MAGIC); ff.rowIndexStride = row_index_stride_; ff.types.resize(1 + num_columns); ff.types[0].kind = STRUCT; ff.types[0].subtypes.resize(num_columns); ff.types[0].fieldNames.resize(num_columns); for (auto const &column : orc_columns) { ff.types[column.id()].kind = column.orc_kind(); ff.types[0].subtypes[column.index()] = column.id(); ff.types[0].fieldNames[column.index()] = column.orc_name(); } } else { // verify the user isn't passing mismatched tables CUDF_EXPECTS(ff.types.size() == 1 + orc_columns.size(), "Mismatch in table structure between multiple calls to write"); CUDF_EXPECTS(std::all_of(orc_columns.cbegin(), orc_columns.cend(), [&](auto const &col) { return ff.types[1 + col.index()].kind == col.orc_kind(); }), "Mismatch in column types between multiple calls to write"); } ff.stripes.insert(ff.stripes.end(), std::make_move_iterator(stripes.begin()), std::make_move_iterator(stripes.end())); ff.numberOfRows += num_rows; } void writer::impl::close() { if (closed) { return; } closed = true; ProtobufWriter pbw_(&buffer_); PostScript ps; ff.contentLength = out_sink_->bytes_written(); if (user_metadata) { for (auto it = user_metadata->user_data.begin(); it != user_metadata->user_data.end(); it++) { ff.metadata.push_back({it->first, it->second}); } } // Write statistics metadata if (md.stripeStats.size() != 0) { buffer_.resize((compression_kind_ != NONE) ? 3 : 0); pbw_.write(md); add_uncompressed_block_headers(buffer_); ps.metadataLength = buffer_.size(); out_sink_->host_write(buffer_.data(), buffer_.size()); } else { ps.metadataLength = 0; } buffer_.resize((compression_kind_ != NONE) ? 
3 : 0); pbw_.write(ff); add_uncompressed_block_headers(buffer_); // Write postscript metadata ps.footerLength = buffer_.size(); ps.compression = compression_kind_; ps.compressionBlockSize = compression_blocksize_; ps.version = {0, 12}; ps.magic = MAGIC; const auto ps_length = static_cast<uint8_t>(pbw_.write(ps)); buffer_.push_back(ps_length); out_sink_->host_write(buffer_.data(), buffer_.size()); out_sink_->flush(); } // Forward to implementation writer::writer(std::unique_ptr<data_sink> sink, orc_writer_options const &options, SingleWriteMode mode, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) : _impl(std::make_unique<impl>(std::move(sink), options, mode, stream, mr)) { } // Forward to implementation writer::writer(std::unique_ptr<data_sink> sink, chunked_orc_writer_options const &options, SingleWriteMode mode, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) : _impl(std::make_unique<impl>(std::move(sink), options, mode, stream, mr)) { } // Destructor within this translation unit writer::~writer() = default; // Forward to implementation void writer::write(table_view const &table) { _impl->write(table); } // Forward to implementation void writer::close() { _impl->close(); } } // namespace orc } // namespace detail } // namespace io } // namespace cudf
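// --- Illustrative aside: how a reader locates the PostScript written by close() above ---
// close() emits the protobuf-encoded FileFooter, then the PostScript, then one byte
// holding the PostScript length (buffer_.push_back(ps_length)). The sketch below is a
// hypothetical reader-side counterpart; the path and function name are illustrative and
// protobuf decoding is out of scope.
#include <cstdint>
#include <fstream>
#include <vector>

std::vector<uint8_t> read_postscript(char const *path)
{
  std::ifstream file(path, std::ios::binary | std::ios::ate);
  auto const file_size = static_cast<size_t>(file.tellg());

  // Last byte of the file is the PostScript length.
  uint8_t ps_length = 0;
  file.seekg(file_size - 1);
  file.read(reinterpret_cast<char *>(&ps_length), 1);

  // The PostScript itself immediately precedes that byte.
  std::vector<uint8_t> postscript(ps_length);
  file.seekg(file_size - 1 - ps_length);
  file.read(reinterpret_cast<char *>(postscript.data()), ps_length);
  return postscript;  // protobuf-encoded bytes; decoding not shown
}

int main()
{
  auto const ps = read_postscript("example.orc");  // path is illustrative
  return ps.empty() ? 1 : 0;
}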
97c2d61929ae843b6c2d89a2d256c5e030db4591.hip
// !!! This is a file automatically generated by hipify!!! /* Author: Cao Thanh Tung Filename: pba3DHost.cu Copyright (c) 2010, School of Computing, National University of Singapore. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of the National University of Singapore nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission from the National University of Singapore. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <hip/device_functions.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <fstream> using namespace std; #include "pba3D.h" #include "Geometry.h" #include "CudaWrapper.h" // Parameters for CUDA kernel executions #define BLOCKX 32 #define BLOCKY 4 #define BLOCKXY 16 #define PBA_INFINITY 0x3ff /****** Global Variables *******/ int **pbaTextures; int pbaMemSize; int pbaCurrentBuffer; int pbaTexSize; texture<int> pbaTexColor; texture<int> pbaTexLinks; texture<short> pbaTexPointer; /********* Kernels ********/ #include "pba3DKernel.h" /////////////////////////////////////////////////////////////////////////// // // Initialize necessary memory for 3D Voronoi Diagram computation // - textureSize: The size of the Discrete Voronoi Diagram (width = height) // /////////////////////////////////////////////////////////////////////////// void pba3DInitialization(int fboSize) { pbaTexSize = fboSize; pbaTextures = (int **) malloc(2 * sizeof(int *)); pbaMemSize = pbaTexSize * pbaTexSize * pbaTexSize * sizeof(int); // Allocate 2 textures //hipMalloc((void **) &pbaTextures[0], pbaMemSize); //hipMalloc((void **) &pbaTextures[1], pbaMemSize); } /////////////////////////////////////////////////////////////////////////// // // Deallocate all allocated memory // /////////////////////////////////////////////////////////////////////////// void pba3DDeinitialization() { free(pbaTextures); return; } // Copy input to GPU void pba3DInitializeInput(int *input, int *output) { //hipMemcpy(pbaTextures[0], input, pbaMemSize, hipMemcpyHostToDevice); pbaTextures[0] = input; pbaTextures[1] = output; // Set Current Source Buffer pbaCurrentBuffer = 0; } // In-place transpose a cubic texture. // Transposition are performed on each XY plane. // Point coordinates are also swapped. 
void pba3DTransposeXY(int *texture) { dim3 block(BLOCKXY, BLOCKXY); dim3 grid((pbaTexSize / BLOCKXY) * pbaTexSize, pbaTexSize / BLOCKXY); hipLaunchKernelGGL(( kernelTransposeXY), dim3(grid), dim3(block) , 0, 0, texture, pbaTexSize); CudaCheckError(); } // Phase 1 of PBA. m1 must divides texture size // Sweeping are done along the Z axiz. void pba3DColorZAxis(int m1) { dim3 block = dim3(BLOCKX, BLOCKY); dim3 grid = dim3((pbaTexSize / block.x) * m1, pbaTexSize / block.y); CudaSafeCall( hipBindTexture(0, pbaTexColor, pbaTextures[pbaCurrentBuffer]) ); hipLaunchKernelGGL(( kernelFloodZ), dim3(grid), dim3(block) , 0, 0, pbaTextures[1 - pbaCurrentBuffer], pbaTexSize, pbaTexSize / block.x, pbaTexSize / m1); CudaCheckError(); pbaCurrentBuffer = 1 - pbaCurrentBuffer; if (m1 > 1) { // Passing information between bands CudaSafeCall( hipBindTexture(0, pbaTexColor, pbaTextures[pbaCurrentBuffer]) ); hipLaunchKernelGGL(( kernelPropagateInterband), dim3(grid), dim3(block) , 0, 0, pbaTextures[1 - pbaCurrentBuffer], pbaTexSize, pbaTexSize / block.x, pbaTexSize / m1); CudaCheckError(); CudaSafeCall( hipBindTexture(0, pbaTexLinks, pbaTextures[1 - pbaCurrentBuffer]) ); hipLaunchKernelGGL(( kernelUpdateVertical), dim3(grid), dim3(block) , 0, 0, pbaTextures[pbaCurrentBuffer], pbaTexSize, pbaTexSize / block.x, pbaTexSize / m1); CudaCheckError(); } } // Phase 2 of PBA. m2 must divides texture size. // This method work along the Y axis void pba3DComputeProximatePointsYAxis(int m2) { int iStack = 1 - pbaCurrentBuffer; int iForward = pbaCurrentBuffer; dim3 block = dim3(BLOCKX, BLOCKY); dim3 grid = dim3((pbaTexSize / block.x) * m2, pbaTexSize / block.y); // Compute proximate points locally in each band CudaSafeCall( hipBindTexture(0, pbaTexColor, pbaTextures[pbaCurrentBuffer]) ); hipLaunchKernelGGL(( kernelMaurerAxis), dim3(grid), dim3(block) , 0, 0, pbaTextures[iStack], pbaTexSize, pbaTexSize / block.x, pbaTexSize / m2); CudaCheckError(); // Construct forward pointers CudaSafeCall( hipBindTexture(0, pbaTexLinks, pbaTextures[iStack]) ); hipLaunchKernelGGL(( kernelCreateForwardPointers), dim3(grid), dim3(block) , 0, 0, (short *) pbaTextures[iForward], pbaTexSize, pbaTexSize / block.x, pbaTexSize / m2); CudaCheckError(); CudaSafeCall( hipBindTexture(0, pbaTexPointer, pbaTextures[iForward], pbaTexSize * pbaTexSize * pbaTexSize * sizeof( short ) ) ); // Repeatly merging two bands into one for (int noBand = m2; noBand > 1; noBand /= 2) { grid = dim3((pbaTexSize / block.x) * (noBand / 2), pbaTexSize / block.y); hipLaunchKernelGGL(( kernelMergeBands), dim3(grid), dim3(block) , 0, 0, pbaTextures[iStack], (short *) pbaTextures[iForward], pbaTexSize, pbaTexSize / block.x, pbaTexSize / noBand); CudaCheckError(); } CudaSafeCall( hipUnbindTexture(pbaTexLinks) ); CudaSafeCall( hipUnbindTexture(pbaTexColor) ); CudaSafeCall( hipUnbindTexture(pbaTexPointer) ); } // Phase 3 of PBA. 
m3 must divides texture size // This method color along the Y axis void pba3DColorYAxis(int m3) { dim3 block = dim3(BLOCKX, m3); dim3 grid = dim3(pbaTexSize / block.x, pbaTexSize); CudaSafeCall( hipBindTexture(0, pbaTexColor, pbaTextures[1 - pbaCurrentBuffer] ) ); hipLaunchKernelGGL(( kernelColorAxis), dim3(grid), dim3(block) , 0, 0, pbaTextures[pbaCurrentBuffer], pbaTexSize); CudaCheckError(); CudaSafeCall( hipUnbindTexture(pbaTexColor) ); return; } void pba3DCompute(int m1, int m2, int m3) { /************* Compute Z axis *************/ // --> (X, Y, Z) pba3DColorZAxis(m1); /************* Compute Y axis *************/ // --> (X, Y, Z) pba3DComputeProximatePointsYAxis(m2); pba3DColorYAxis(m3); // --> (Y, X, Z) pba3DTransposeXY(pbaTextures[pbaCurrentBuffer]); /************** Compute X axis *************/ // Compute X axis pba3DComputeProximatePointsYAxis(m2); pba3DColorYAxis(m3); // --> (X, Y, Z) pba3DTransposeXY(pbaTextures[pbaCurrentBuffer]); } // Compute 3D Voronoi diagram // Input: a 3D texture. Each pixel is an integer encoding 3 coordinates. // For each site at (x, y, z), the pixel at coordinate (x, y, z) should contain // the encoded coordinate (x, y, z). Pixels that are not sites should contain // the integer MARKER. Use ENCODE (and DECODE) macro to encode (and decode). // See original paper for the effect of the three parameters: // phase1Band, phase2Band, phase3Band // Parameters must divide textureSize // Note: input texture will be released after this. void pba3DVoronoiDiagram(int *dInput, int **dOutput, int phase1Band, int phase2Band, int phase3Band) { // Initialization pba3DInitializeInput(dInput, *dOutput); // Compute the 3D Voronoi Diagram pba3DCompute(phase1Band, phase2Band, phase3Band); // Pass back the result *dOutput = pbaTextures[pbaCurrentBuffer]; return; } // A function to draw points onto GPU texture void setPointsInGrid( Point3DVec& pointDVec, int *dInputVoronoi ) { const int BlocksPerGrid = 64; const int ThreadsPerBlock = 256; CudaSafeCall( hipMemset( dInputVoronoi, MARKER, pbaMemSize ) ); hipLaunchKernelGGL(( kerSetPointsInGrid), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, thrust::raw_pointer_cast( &pointDVec[0] ), ( int ) pointDVec.size(), dInputVoronoi, pbaTexSize ); CudaCheckError(); return; } // A function to draw point's IDs onto GPU texture void setPointIndicesInGrid( Point3DVec& pointDVec, int* dMapToID ) { const int BlocksPerGrid = 64; const int ThreadsPerBlock = 256; hipLaunchKernelGGL(( kerSetPointIndicesInGrid), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, thrust::raw_pointer_cast( &pointDVec[0] ), ( int ) pointDVec.size(), dMapToID, pbaTexSize ); CudaCheckError(); return; } void setIndexInGrid( int gridWidth, int* dPointIndexGrid, int* dGrid ) { const int BlocksPerGrid = 64; const int ThreadsPerBlock = 256; hipLaunchKernelGGL(( kerSetIndexInGrid), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, gridWidth, dPointIndexGrid, dGrid ); CudaCheckError(); // Free grid CudaSafeCall( hipFree( dPointIndexGrid ) ); return; }
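The host functions in this file form a small pipeline: initialize, rasterize the sites, run the three PBA phases, tear down. A hedged HIP-side sketch of that call order; the 64-voxel grid, the 1/1/2 band parameters, and the pre-allocated dInput/dOutput buffers (each of pbaMemSize bytes) are illustrative choices, not values taken from this file:

// Hedged usage sketch for the PBA host API above; error handling elided.
void computeVoronoiExample(Point3DVec &dPoints, int *dInput, int *dOutput)
{
    const int gridSize = 64;            // band parameters below must divide this
    pba3DInitialization(gridSize);      // sets pbaTexSize and pbaMemSize

    setPointsInGrid(dPoints, dInput);   // MARKER everywhere except at the sites

    // phase1Band / phase2Band / phase3Band as described in the comment
    // above pba3DVoronoiDiagram; 1, 1, 2 is one admissible combination.
    pba3DVoronoiDiagram(dInput, &dOutput, 1, 1, 2);

    // Each voxel of dOutput now holds the ENCODEd coordinates of its nearest site.
    pba3DDeinitialization();
}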
97c2d61929ae843b6c2d89a2d256c5e030db4591.cu
/* Author: Cao Thanh Tung Filename: pba3DHost.cu Copyright (c) 2010, School of Computing, National University of Singapore. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of the National University of Singapore nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission from the National University of Singapore. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <device_functions.h> #include <cuda_runtime.h> #include <stdio.h> #include <fstream> using namespace std; #include "pba3D.h" #include "Geometry.h" #include "CudaWrapper.h" // Parameters for CUDA kernel executions #define BLOCKX 32 #define BLOCKY 4 #define BLOCKXY 16 #define PBA_INFINITY 0x3ff /****** Global Variables *******/ int **pbaTextures; int pbaMemSize; int pbaCurrentBuffer; int pbaTexSize; texture<int> pbaTexColor; texture<int> pbaTexLinks; texture<short> pbaTexPointer; /********* Kernels ********/ #include "pba3DKernel.h" /////////////////////////////////////////////////////////////////////////// // // Initialize necessary memory for 3D Voronoi Diagram computation // - textureSize: The size of the Discrete Voronoi Diagram (width = height) // /////////////////////////////////////////////////////////////////////////// void pba3DInitialization(int fboSize) { pbaTexSize = fboSize; pbaTextures = (int **) malloc(2 * sizeof(int *)); pbaMemSize = pbaTexSize * pbaTexSize * pbaTexSize * sizeof(int); // Allocate 2 textures //cudaMalloc((void **) &pbaTextures[0], pbaMemSize); //cudaMalloc((void **) &pbaTextures[1], pbaMemSize); } /////////////////////////////////////////////////////////////////////////// // // Deallocate all allocated memory // /////////////////////////////////////////////////////////////////////////// void pba3DDeinitialization() { free(pbaTextures); return; } // Copy input to GPU void pba3DInitializeInput(int *input, int *output) { //cudaMemcpy(pbaTextures[0], input, pbaMemSize, cudaMemcpyHostToDevice); pbaTextures[0] = input; pbaTextures[1] = output; // Set Current Source Buffer pbaCurrentBuffer = 0; } // In-place transpose a cubic texture. // Transposition are performed on each XY plane. // Point coordinates are also swapped. 
void pba3DTransposeXY(int *texture) { dim3 block(BLOCKXY, BLOCKXY); dim3 grid((pbaTexSize / BLOCKXY) * pbaTexSize, pbaTexSize / BLOCKXY); kernelTransposeXY<<< grid, block >>>(texture, pbaTexSize); CudaCheckError(); } // Phase 1 of PBA. m1 must divides texture size // Sweeping are done along the Z axiz. void pba3DColorZAxis(int m1) { dim3 block = dim3(BLOCKX, BLOCKY); dim3 grid = dim3((pbaTexSize / block.x) * m1, pbaTexSize / block.y); CudaSafeCall( cudaBindTexture(0, pbaTexColor, pbaTextures[pbaCurrentBuffer]) ); kernelFloodZ<<< grid, block >>>(pbaTextures[1 - pbaCurrentBuffer], pbaTexSize, pbaTexSize / block.x, pbaTexSize / m1); CudaCheckError(); pbaCurrentBuffer = 1 - pbaCurrentBuffer; if (m1 > 1) { // Passing information between bands CudaSafeCall( cudaBindTexture(0, pbaTexColor, pbaTextures[pbaCurrentBuffer]) ); kernelPropagateInterband<<< grid, block >>>(pbaTextures[1 - pbaCurrentBuffer], pbaTexSize, pbaTexSize / block.x, pbaTexSize / m1); CudaCheckError(); CudaSafeCall( cudaBindTexture(0, pbaTexLinks, pbaTextures[1 - pbaCurrentBuffer]) ); kernelUpdateVertical<<< grid, block >>>(pbaTextures[pbaCurrentBuffer], pbaTexSize, pbaTexSize / block.x, pbaTexSize / m1); CudaCheckError(); } } // Phase 2 of PBA. m2 must divides texture size. // This method work along the Y axis void pba3DComputeProximatePointsYAxis(int m2) { int iStack = 1 - pbaCurrentBuffer; int iForward = pbaCurrentBuffer; dim3 block = dim3(BLOCKX, BLOCKY); dim3 grid = dim3((pbaTexSize / block.x) * m2, pbaTexSize / block.y); // Compute proximate points locally in each band CudaSafeCall( cudaBindTexture(0, pbaTexColor, pbaTextures[pbaCurrentBuffer]) ); kernelMaurerAxis<<< grid, block >>>(pbaTextures[iStack], pbaTexSize, pbaTexSize / block.x, pbaTexSize / m2); CudaCheckError(); // Construct forward pointers CudaSafeCall( cudaBindTexture(0, pbaTexLinks, pbaTextures[iStack]) ); kernelCreateForwardPointers<<< grid, block >>>((short *) pbaTextures[iForward], pbaTexSize, pbaTexSize / block.x, pbaTexSize / m2); CudaCheckError(); CudaSafeCall( cudaBindTexture(0, pbaTexPointer, pbaTextures[iForward], pbaTexSize * pbaTexSize * pbaTexSize * sizeof( short ) ) ); // Repeatly merging two bands into one for (int noBand = m2; noBand > 1; noBand /= 2) { grid = dim3((pbaTexSize / block.x) * (noBand / 2), pbaTexSize / block.y); kernelMergeBands<<< grid, block >>>(pbaTextures[iStack], (short *) pbaTextures[iForward], pbaTexSize, pbaTexSize / block.x, pbaTexSize / noBand); CudaCheckError(); } CudaSafeCall( cudaUnbindTexture(pbaTexLinks) ); CudaSafeCall( cudaUnbindTexture(pbaTexColor) ); CudaSafeCall( cudaUnbindTexture(pbaTexPointer) ); } // Phase 3 of PBA. 
m3 must divides texture size // This method color along the Y axis void pba3DColorYAxis(int m3) { dim3 block = dim3(BLOCKX, m3); dim3 grid = dim3(pbaTexSize / block.x, pbaTexSize); CudaSafeCall( cudaBindTexture(0, pbaTexColor, pbaTextures[1 - pbaCurrentBuffer] ) ); kernelColorAxis<<< grid, block >>>(pbaTextures[pbaCurrentBuffer], pbaTexSize); CudaCheckError(); CudaSafeCall( cudaUnbindTexture(pbaTexColor) ); return; } void pba3DCompute(int m1, int m2, int m3) { /************* Compute Z axis *************/ // --> (X, Y, Z) pba3DColorZAxis(m1); /************* Compute Y axis *************/ // --> (X, Y, Z) pba3DComputeProximatePointsYAxis(m2); pba3DColorYAxis(m3); // --> (Y, X, Z) pba3DTransposeXY(pbaTextures[pbaCurrentBuffer]); /************** Compute X axis *************/ // Compute X axis pba3DComputeProximatePointsYAxis(m2); pba3DColorYAxis(m3); // --> (X, Y, Z) pba3DTransposeXY(pbaTextures[pbaCurrentBuffer]); } // Compute 3D Voronoi diagram // Input: a 3D texture. Each pixel is an integer encoding 3 coordinates. // For each site at (x, y, z), the pixel at coordinate (x, y, z) should contain // the encoded coordinate (x, y, z). Pixels that are not sites should contain // the integer MARKER. Use ENCODE (and DECODE) macro to encode (and decode). // See original paper for the effect of the three parameters: // phase1Band, phase2Band, phase3Band // Parameters must divide textureSize // Note: input texture will be released after this. void pba3DVoronoiDiagram(int *dInput, int **dOutput, int phase1Band, int phase2Band, int phase3Band) { // Initialization pba3DInitializeInput(dInput, *dOutput); // Compute the 3D Voronoi Diagram pba3DCompute(phase1Band, phase2Band, phase3Band); // Pass back the result *dOutput = pbaTextures[pbaCurrentBuffer]; return; } // A function to draw points onto GPU texture void setPointsInGrid( Point3DVec& pointDVec, int *dInputVoronoi ) { const int BlocksPerGrid = 64; const int ThreadsPerBlock = 256; CudaSafeCall( cudaMemset( dInputVoronoi, MARKER, pbaMemSize ) ); kerSetPointsInGrid<<< BlocksPerGrid, ThreadsPerBlock >>>( thrust::raw_pointer_cast( &pointDVec[0] ), ( int ) pointDVec.size(), dInputVoronoi, pbaTexSize ); CudaCheckError(); return; } // A function to draw point's IDs onto GPU texture void setPointIndicesInGrid( Point3DVec& pointDVec, int* dMapToID ) { const int BlocksPerGrid = 64; const int ThreadsPerBlock = 256; kerSetPointIndicesInGrid<<< BlocksPerGrid, ThreadsPerBlock >>>( thrust::raw_pointer_cast( &pointDVec[0] ), ( int ) pointDVec.size(), dMapToID, pbaTexSize ); CudaCheckError(); return; } void setIndexInGrid( int gridWidth, int* dPointIndexGrid, int* dGrid ) { const int BlocksPerGrid = 64; const int ThreadsPerBlock = 256; kerSetIndexInGrid<<< BlocksPerGrid, ThreadsPerBlock >>>( gridWidth, dPointIndexGrid, dGrid ); CudaCheckError(); // Free grid CudaSafeCall( cudaFree( dPointIndexGrid ) ); return; }
7559fdc669f321acb84ce4ee6329458f6cbd1295.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Matrix multiplication: C = A * B. * Host code. * * This sample implements matrix multiplication as described in Chapter 3 * of the programming guide. * It has been written for clarity of exposition to illustrate various CUDA * programming principles, not with the goal of providing the most * performant generic kernel for matrix multiplication. * * See also: * V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra," * in Proc. 2008 ACM/IEEE Conf. on Superconducting (SC '08), * Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11. */ // System includes #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <math.h> // CUDA runtime #include <hip/hip_runtime.h> #include <device_launch_parameters.h> // Helper functions and utilities to work with CUDA //#include <helper_functions.h> /** * Matrix multiplication (CUDA Kernel) on the device: C = A * B * wA is A's width and wB is B's width */ template <int BLOCK_SIZE> __global__ void matrixMulCUDA(float* C, float* A, float* B, int wA, int wB) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; // Csub is used to store the element of the block sub-matrix // that is computed by the thread float Csub = 0; // Declaration of the shared memory arrays __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; float as; float bs; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix as = A[a + wA * ty + tx]; bs = B[b + wB * ty + tx]; As[ty][tx] = as; Bs[ty][tx] = bs; __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll for (int k = 0; k < BLOCK_SIZE; ++k) { Csub += As[ty][k] * Bs[k][tx]; } // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; C[c + wB * ty + tx] = Csub; } void constantInit(float* data, int size, float val) { for (int i = 0; i < size; ++i) { data[i] = val; } } /** * Run a simple test of matrix multiplication using CUDA */ int matrixMultiply(int argc, char** argv, int block_size, dim3& dimsA, dim3& dimsB) { // Allocate host memory for matrices A and B hipError_t error; unsigned int size_A = dimsA.x * dimsA.y; 
unsigned int mem_size_A = sizeof(float) * size_A; float* h_A; error = hipHostMalloc(&h_A, mem_size_A); if (error != hipSuccess) { printf("hipHostMalloc h_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } unsigned int size_B = dimsB.x * dimsB.y; unsigned int mem_size_B = sizeof(float) * size_B; float* h_B; error = hipHostMalloc(&h_B, mem_size_B); if (error != hipSuccess) { printf("hipHostMalloc h_B returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } // Initialize host memory const float valB = 0.01f; constantInit(h_A, size_A, 1.0f); constantInit(h_B, size_B, valB); // Allocate device memory float* d_A, * d_B, * d_C; // Allocate host matrix C dim3 dimsC(dimsB.x, dimsA.y, 1); unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float); float* h_C; error = hipHostMalloc(&h_C, mem_size_C); if (error != hipSuccess) { printf("hipHostMalloc h_C returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } if (h_C == NULL) { fprintf(stderr, "Failed to allocate host matrix C!\n"); exit(EXIT_FAILURE); } error = hipMalloc((void**)&d_A, mem_size_A); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = hipMalloc((void**)&d_B, mem_size_B); if (error != hipSuccess) { printf("hipMalloc d_B returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = hipMalloc((void**)&d_C, mem_size_C); if (error != hipSuccess) { printf("hipMalloc d_C returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } // copy host memory to device error = hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } // 3x4 4x3 // 4x4 // Setup execution parameters dim3 threads(block_size, block_size); dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y); // dim3 grid(dimsB.x (640) / threads.x (32), dimsA.y (320) / threads.y (32)); // dim3 grid(20, 10) // Create and start timer printf("Computing result using CUDA Kernel...\n"); // Performs warmup operation using matrixMul CUDA kernel if (block_size == 16) { matrixMulCUDA<16> << < grid, threads >> > (d_C, d_A, d_B, dimsA.x, dimsB.x); } else if (block_size == 32) { matrixMulCUDA<32> << < grid, threads >> > (d_C, d_A, d_B, dimsA.x, dimsB.x); } else { matrixMulCUDA<8> << < grid, threads >> > (d_C, d_A, d_B, dimsA.x, dimsB.x); } printf("done\n"); hipDeviceSynchronize(); // Allocate CUDA events that we'll use for timing hipEvent_t start; error = hipEventCreate(&start); if (error != hipSuccess) { fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } hipEvent_t stop; error = hipEventCreate(&stop); if (error != hipSuccess) { fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Record the start event error = hipEventRecord(start, NULL); if (error != hipSuccess) { fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Execute the kernel int nIter = 1; for (int j = 0; j < nIter; j++) { if (block_size == 16) { matrixMulCUDA<16> << < grid, threads >> > (d_C, d_A, d_B, dimsA.x, dimsB.x); } else if (block_size 
== 32) { matrixMulCUDA<32> << < grid, threads >> > (d_C, d_A, d_B, dimsA.x, dimsB.x); } else { matrixMulCUDA<8> << < grid, threads >> > (d_C, d_A, d_B, dimsA.x, dimsB.x); } } // Record the stop event error = hipEventRecord(stop, NULL); if (error != hipSuccess) { fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Wait for the stop event to complete error = hipEventSynchronize(stop); if (error != hipSuccess) { fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } float msecTotal = 0.0f; error = hipEventElapsedTime(&msecTotal, start, stop); if (error != hipSuccess) { fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Compute and print the performance float msecPerMatrixMul = msecTotal / nIter; double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x; double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f); printf( "Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n", gigaFlops, msecPerMatrixMul, flopsPerMatrixMul, threads.x * threads.y); // Copy result from device to host error = hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost); if (error != hipSuccess) { printf("hipMemcpy (h_C,d_C) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } printf("Checking computed result for correctness: "); bool correct = true; for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++) { if (fabs(h_C[i] - (dimsA.x * valB)) > 1e-3) { printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > 1e-3\n", i, h_C[i], dimsA.x * valB); correct = false; } } printf("%s\n", correct ? "OK" : "FAIL"); // Clean up memory hipHostFree(h_A); hipHostFree(h_B); hipHostFree(h_C); hipFree(d_A); hipFree(d_B); hipFree(d_C); printf("\nNote: For peak performance, please refer to the matrixMulCUBLAS example.\n"); hipDeviceReset(); if (correct) { return EXIT_SUCCESS; } else { return EXIT_FAILURE; } } /** * Program main */ int main(int argc, char** argv) { printf("[Matrix Multiply Using CUDA] - Starting...\n"); // By default, we use device 0, otherwise we override the device ID based on what is provided at the command line int devID = 0; hipError_t error; hipDeviceProp_t deviceProp; error = hipGetDevice(&devID); if (error != hipSuccess) { printf("hipGetDevice returned error code %d, line(%d)\n", error, __LINE__); } error = hipGetDeviceProperties(&deviceProp, devID); if (deviceProp.computeMode == hipComputeModeProhibited) { fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n"); exit(EXIT_SUCCESS); } if (error != hipSuccess) { printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); } else { printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); } // Use a larger block size for Fermi and above //int block_size = (deviceProp.major < 2) ? 16 : 32; int block_size = 16; // Default matrix size 320, 320 dim3 dimsA(5 * 2 * block_size, 5 * 2 * block_size, 1); dim3 dimsB(5 * 2 * block_size, 5 * 2 * block_size, 1); if (dimsA.x != dimsB.y) { printf("Error: outer matrix dimensions must be equal. 
(%d != %d)\n", dimsA.x, dimsB.y); exit(EXIT_FAILURE); } printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y); int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB); exit(matrix_result); }
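Nearly every runtime call in matrixMultiply() is followed by the same if (error != hipSuccess) / printf / exit block. A hedged sketch of a small macro that would fold that pattern away; HIP_CHECK is my own name and is not part of this sample:

// Hedged sketch: abort with file/line and a readable message on any HIP error.
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>

#define HIP_CHECK(call)                                                     \
    do {                                                                    \
        hipError_t err_ = (call);                                           \
        if (err_ != hipSuccess) {                                           \
            fprintf(stderr, "HIP error %s at %s:%d\n",                      \
                    hipGetErrorString(err_), __FILE__, __LINE__);           \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

// Usage, mirroring the allocations and copies in matrixMultiply():
//   HIP_CHECK(hipHostMalloc(&h_A, mem_size_A));
//   HIP_CHECK(hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice));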
7559fdc669f321acb84ce4ee6329458f6cbd1295.cu
/** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Matrix multiplication: C = A * B. * Host code. * * This sample implements matrix multiplication as described in Chapter 3 * of the programming guide. * It has been written for clarity of exposition to illustrate various CUDA * programming principles, not with the goal of providing the most * performant generic kernel for matrix multiplication. * * See also: * V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra," * in Proc. 2008 ACM/IEEE Conf. on Superconducting (SC '08), * Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11. */ // System includes #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <math.h> // CUDA runtime #include <cuda_runtime.h> #include <device_launch_parameters.h> // Helper functions and utilities to work with CUDA //#include <helper_functions.h> /** * Matrix multiplication (CUDA Kernel) on the device: C = A * B * wA is A's width and wB is B's width */ template <int BLOCK_SIZE> __global__ void matrixMulCUDA(float* C, float* A, float* B, int wA, int wB) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; // Csub is used to store the element of the block sub-matrix // that is computed by the thread float Csub = 0; // Declaration of the shared memory arrays __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; float as; float bs; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix as = A[a + wA * ty + tx]; bs = B[b + wB * ty + tx]; As[ty][tx] = as; Bs[ty][tx] = bs; __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll for (int k = 0; k < BLOCK_SIZE; ++k) { Csub += As[ty][k] * Bs[k][tx]; } // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; C[c + wB * ty + tx] = Csub; } void constantInit(float* data, int size, float val) { for (int i = 0; i < size; ++i) { data[i] = val; } } /** * Run a simple test of matrix multiplication using CUDA */ int matrixMultiply(int argc, char** argv, int block_size, dim3& dimsA, dim3& dimsB) { // Allocate host memory for matrices A and B cudaError_t error; unsigned int size_A = dimsA.x * dimsA.y; unsigned int mem_size_A = sizeof(float) * size_A; float* h_A; 
error = cudaMallocHost(&h_A, mem_size_A); if (error != cudaSuccess) { printf("cudaMallocHost h_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } unsigned int size_B = dimsB.x * dimsB.y; unsigned int mem_size_B = sizeof(float) * size_B; float* h_B; error = cudaMallocHost(&h_B, mem_size_B); if (error != cudaSuccess) { printf("cudaMallocHost h_B returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } // Initialize host memory const float valB = 0.01f; constantInit(h_A, size_A, 1.0f); constantInit(h_B, size_B, valB); // Allocate device memory float* d_A, * d_B, * d_C; // Allocate host matrix C dim3 dimsC(dimsB.x, dimsA.y, 1); unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float); float* h_C; error = cudaMallocHost(&h_C, mem_size_C); if (error != cudaSuccess) { printf("cudaMallocHost h_C returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } if (h_C == NULL) { fprintf(stderr, "Failed to allocate host matrix C!\n"); exit(EXIT_FAILURE); } error = cudaMalloc((void**)&d_A, mem_size_A); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMalloc((void**)&d_B, mem_size_B); if (error != cudaSuccess) { printf("cudaMalloc d_B returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMalloc((void**)&d_C, mem_size_C); if (error != cudaSuccess) { printf("cudaMalloc d_C returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } // copy host memory to device error = cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } // 3x4 4x3 // 4x4 // Setup execution parameters dim3 threads(block_size, block_size); dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y); // dim3 grid(dimsB.x (640) / threads.x (32), dimsA.y (320) / threads.y (32)); // dim3 grid(20, 10) // Create and start timer printf("Computing result using CUDA Kernel...\n"); // Performs warmup operation using matrixMul CUDA kernel if (block_size == 16) { matrixMulCUDA<16> << < grid, threads >> > (d_C, d_A, d_B, dimsA.x, dimsB.x); } else if (block_size == 32) { matrixMulCUDA<32> << < grid, threads >> > (d_C, d_A, d_B, dimsA.x, dimsB.x); } else { matrixMulCUDA<8> << < grid, threads >> > (d_C, d_A, d_B, dimsA.x, dimsB.x); } printf("done\n"); cudaDeviceSynchronize(); // Allocate CUDA events that we'll use for timing cudaEvent_t start; error = cudaEventCreate(&start); if (error != cudaSuccess) { fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } cudaEvent_t stop; error = cudaEventCreate(&stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Record the start event error = cudaEventRecord(start, NULL); if (error != cudaSuccess) { fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Execute the kernel int nIter = 1; for (int j = 0; j < nIter; j++) { if (block_size == 16) { matrixMulCUDA<16> << < grid, threads >> > (d_C, d_A, d_B, dimsA.x, dimsB.x); } else if (block_size == 32) { 
matrixMulCUDA<32> << < grid, threads >> > (d_C, d_A, d_B, dimsA.x, dimsB.x); } else { matrixMulCUDA<8> << < grid, threads >> > (d_C, d_A, d_B, dimsA.x, dimsB.x); } } // Record the stop event error = cudaEventRecord(stop, NULL); if (error != cudaSuccess) { fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Wait for the stop event to complete error = cudaEventSynchronize(stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } float msecTotal = 0.0f; error = cudaEventElapsedTime(&msecTotal, start, stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Compute and print the performance float msecPerMatrixMul = msecTotal / nIter; double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x; double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f); printf( "Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n", gigaFlops, msecPerMatrixMul, flopsPerMatrixMul, threads.x * threads.y); // Copy result from device to host error = cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost); if (error != cudaSuccess) { printf("cudaMemcpy (h_C,d_C) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } printf("Checking computed result for correctness: "); bool correct = true; for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++) { if (fabs(h_C[i] - (dimsA.x * valB)) > 1e-3) { printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > 1e-3\n", i, h_C[i], dimsA.x * valB); correct = false; } } printf("%s\n", correct ? "OK" : "FAIL"); // Clean up memory cudaFreeHost(h_A); cudaFreeHost(h_B); cudaFreeHost(h_C); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); printf("\nNote: For peak performance, please refer to the matrixMulCUBLAS example.\n"); cudaDeviceReset(); if (correct) { return EXIT_SUCCESS; } else { return EXIT_FAILURE; } } /** * Program main */ int main(int argc, char** argv) { printf("[Matrix Multiply Using CUDA] - Starting...\n"); // By default, we use device 0, otherwise we override the device ID based on what is provided at the command line int devID = 0; cudaError_t error; cudaDeviceProp deviceProp; error = cudaGetDevice(&devID); if (error != cudaSuccess) { printf("cudaGetDevice returned error code %d, line(%d)\n", error, __LINE__); } error = cudaGetDeviceProperties(&deviceProp, devID); if (deviceProp.computeMode == cudaComputeModeProhibited) { fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n"); exit(EXIT_SUCCESS); } if (error != cudaSuccess) { printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); } else { printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); } // Use a larger block size for Fermi and above //int block_size = (deviceProp.major < 2) ? 16 : 32; int block_size = 16; // Default matrix size 320, 320 dim3 dimsA(5 * 2 * block_size, 5 * 2 * block_size, 1); dim3 dimsB(5 * 2 * block_size, 5 * 2 * block_size, 1); if (dimsA.x != dimsB.y) { printf("Error: outer matrix dimensions must be equal. 
(%d != %d)\n", dimsA.x, dimsB.y); exit(EXIT_FAILURE); } printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y); int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB); exit(matrix_result); }
9ee559e6b89dd8ee0e7627a39664fe37a0f641b4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2013-2018 Anton Kozhevnikov, Thomas Schulthess // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that // the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the // following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions // and the following disclaimer in the documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A // PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /** \file spline.cu * * \brief CUDA kernels to perform operations on splines. */ #include "gpu/acc_common.hpp" #include "gpu/acc_runtime.hpp" __global__ void spline_inner_product_gpu_kernel_v3(int num_points__, int const* idx_ri__, double const* x__, double const* dx__, double const* f__, double const* g__, double* result__) { int nb = num_blocks(num_points__, blockDim.x); int idx_f = idx_ri__[array2D_offset(0, blockIdx.x, 2)]; int idx_g = idx_ri__[array2D_offset(1, blockIdx.x, 2)]; ACC_DYNAMIC_SHARED( char, sdata_ptr) double* sdata = (double*)&sdata_ptr[0]; int a_offs_f = array3D_offset(0, 0, idx_f, num_points__, 4); int b_offs_f = array3D_offset(0, 1, idx_f, num_points__, 4); int c_offs_f = array3D_offset(0, 2, idx_f, num_points__, 4); int d_offs_f = array3D_offset(0, 3, idx_f, num_points__, 4); int a_offs_g = array3D_offset(0, 0, idx_g, num_points__, 4); int b_offs_g = array3D_offset(0, 1, idx_g, num_points__, 4); int c_offs_g = array3D_offset(0, 2, idx_g, num_points__, 4); int d_offs_g = array3D_offset(0, 3, idx_g, num_points__, 4); sdata[threadIdx.x] = 0; for (int ib = 0; ib < nb; ib++) { int i = ib * blockDim.x + threadIdx.x; if (i < num_points__ - 1) { double xi = x__[i]; double dxi = dx__[i]; double a1 = f__[a_offs_f + i]; double b1 = f__[b_offs_f + i]; double c1 = f__[c_offs_f + i]; double d1 = f__[d_offs_f + i]; double a2 = g__[a_offs_g + i]; double b2 = g__[b_offs_g + i]; double c2 = g__[c_offs_g + i]; double d2 = g__[d_offs_g + i]; double k0 = a1 * a2; double k1 = d1 * b2 + c1 * c2 + b1 * d2; double k2 = d1 * a2 + c1 * b2 + b1 * c2 + a1 * d2; double k3 = c1 * a2 + b1 * b2 + a1 * c2; double k4 = d1 * c2 + c1 * d2; double k5 = b1 * a2 + a1 * b2; double k6 = d1 * d2; // 25 flop in total //double v1 = dxi * k6 * (1.0 / 9.0); //double r = (k4 + 2.0 * k6 * xi) * 0.125; //double v2 = dxi * (r + v1); //double v3 = dxi * ((k1 + xi * (2.0 * k4 + k6 * xi)) * (1.0 / 7.0) + v2); //double v4 = dxi * ((k2 + xi * (2.0 * k1 + k4 * xi)) * (1.0 / 6.0) + v3); //double v5 = dxi * ((k3 + xi * (2.0 * k2 + k1 * xi)) * 0.2 + v4); //double v6 = dxi * ((k5 + xi * (2.0 * k3 + 
k2 * xi)) * 0.25 + v5); //double v7 = dxi * ((k0 + xi * (2.0 * k5 + k3 * xi)) / 3.0 + v6); //double v8 = dxi * ((xi * (2.0 * k0 + xi * k5)) * 0.5 + v7); double v = dxi * k6 * 0.11111111111111111111; double r1 = k4 * 0.125 + k6 * xi * 0.25; v = dxi * (r1 + v); double r2 = (k1 + xi * (2.0 * k4 + k6 * xi)) * 0.14285714285714285714; v = dxi * (r2 + v); double r3 = (k2 + xi * (2.0 * k1 + k4 * xi)) * 0.16666666666666666667; v = dxi * (r3 + v); double r4 = (k3 + xi * (2.0 * k2 + k1 * xi)) * 0.2; v = dxi * (r4 + v); double r5 = (k5 + xi * (2.0 * k3 + k2 * xi)) * 0.25; v = dxi * (r5 + v); double r6 = (k0 + xi * (2.0 * k5 + k3 * xi)) * 0.33333333333333333333; v = dxi * (r6 + v); double r7 = (xi * (2.0 * k0 + xi * k5)) * 0.5; v = dxi * (r7 + v); sdata[threadIdx.x] += dxi * (k0 * xi * xi + v); } } __syncthreads(); for (int s = 1; s < blockDim.x; s *= 2) { if (threadIdx.x % (2 * s) == 0) sdata[threadIdx.x] += sdata[threadIdx.x + s]; __syncthreads(); } result__[blockIdx.x] = sdata[0]; } extern "C" void spline_inner_product_gpu_v3(int const* idx_ri__, int num_ri__, int num_points__, double const* x__, double const* dx__, double const* f__, double const* g__, double* result__) { dim3 grid_t(64); dim3 grid_b(num_ri__); accLaunchKernel((spline_inner_product_gpu_kernel_v3), dim3(grid_b), dim3(grid_t), grid_t.x * sizeof(double), 0, num_points__, idx_ri__, x__, dx__, f__, g__, result__ ); }
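spline_inner_product_gpu_v3 launches one block per (f, g) index pair and reduces each pair's integral in shared memory. A hedged host-side sketch of a call; the argument names and the coefficient layout noted in the comments are my reading of the array3D_offset indexing above, not something stated in this file:

// Hedged driver sketch; all pointers are device arrays allocated elsewhere.
//   d_idx_ri : 2 * num_ri ints, each column one (f-spline, g-spline) index pair
//   d_x, d_dx: radial points and interval widths, num_points doubles each
//   d_f, d_g : packed spline coefficients, assumed 4 * num_points doubles per
//              spline, laid out a[0..n), b[0..n), c[0..n), d[0..n)
//   d_result : num_ri doubles, one reduced inner product per pair
void run_inner_products(int num_ri, int num_points, int const *d_idx_ri,
                        double const *d_x, double const *d_dx,
                        double const *d_f, double const *d_g, double *d_result)
{
    spline_inner_product_gpu_v3(d_idx_ri, num_ri, num_points,
                                d_x, d_dx, d_f, d_g, d_result);
}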
9ee559e6b89dd8ee0e7627a39664fe37a0f641b4.cu
// Copyright (c) 2013-2018 Anton Kozhevnikov, Thomas Schulthess // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that // the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the // following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions // and the following disclaimer in the documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A // PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /** \file spline.cu * * \brief CUDA kernels to perform operations on splines. */ #include "gpu/acc_common.hpp" #include "gpu/acc_runtime.hpp" __global__ void spline_inner_product_gpu_kernel_v3(int num_points__, int const* idx_ri__, double const* x__, double const* dx__, double const* f__, double const* g__, double* result__) { int nb = num_blocks(num_points__, blockDim.x); int idx_f = idx_ri__[array2D_offset(0, blockIdx.x, 2)]; int idx_g = idx_ri__[array2D_offset(1, blockIdx.x, 2)]; ACC_DYNAMIC_SHARED( char, sdata_ptr) double* sdata = (double*)&sdata_ptr[0]; int a_offs_f = array3D_offset(0, 0, idx_f, num_points__, 4); int b_offs_f = array3D_offset(0, 1, idx_f, num_points__, 4); int c_offs_f = array3D_offset(0, 2, idx_f, num_points__, 4); int d_offs_f = array3D_offset(0, 3, idx_f, num_points__, 4); int a_offs_g = array3D_offset(0, 0, idx_g, num_points__, 4); int b_offs_g = array3D_offset(0, 1, idx_g, num_points__, 4); int c_offs_g = array3D_offset(0, 2, idx_g, num_points__, 4); int d_offs_g = array3D_offset(0, 3, idx_g, num_points__, 4); sdata[threadIdx.x] = 0; for (int ib = 0; ib < nb; ib++) { int i = ib * blockDim.x + threadIdx.x; if (i < num_points__ - 1) { double xi = x__[i]; double dxi = dx__[i]; double a1 = f__[a_offs_f + i]; double b1 = f__[b_offs_f + i]; double c1 = f__[c_offs_f + i]; double d1 = f__[d_offs_f + i]; double a2 = g__[a_offs_g + i]; double b2 = g__[b_offs_g + i]; double c2 = g__[c_offs_g + i]; double d2 = g__[d_offs_g + i]; double k0 = a1 * a2; double k1 = d1 * b2 + c1 * c2 + b1 * d2; double k2 = d1 * a2 + c1 * b2 + b1 * c2 + a1 * d2; double k3 = c1 * a2 + b1 * b2 + a1 * c2; double k4 = d1 * c2 + c1 * d2; double k5 = b1 * a2 + a1 * b2; double k6 = d1 * d2; // 25 flop in total //double v1 = dxi * k6 * (1.0 / 9.0); //double r = (k4 + 2.0 * k6 * xi) * 0.125; //double v2 = dxi * (r + v1); //double v3 = dxi * ((k1 + xi * (2.0 * k4 + k6 * xi)) * (1.0 / 7.0) + v2); //double v4 = dxi * ((k2 + xi * (2.0 * k1 + k4 * xi)) * (1.0 / 6.0) + v3); //double v5 = dxi * ((k3 + xi * (2.0 * k2 + k1 * xi)) * 0.2 + v4); //double v6 = dxi * ((k5 + xi * (2.0 * k3 + k2 * xi)) * 0.25 + v5); //double v7 = dxi * ((k0 + xi * (2.0 * k5 + k3 * xi)) / 3.0 + 
v6); //double v8 = dxi * ((xi * (2.0 * k0 + xi * k5)) * 0.5 + v7); double v = dxi * k6 * 0.11111111111111111111; double r1 = k4 * 0.125 + k6 * xi * 0.25; v = dxi * (r1 + v); double r2 = (k1 + xi * (2.0 * k4 + k6 * xi)) * 0.14285714285714285714; v = dxi * (r2 + v); double r3 = (k2 + xi * (2.0 * k1 + k4 * xi)) * 0.16666666666666666667; v = dxi * (r3 + v); double r4 = (k3 + xi * (2.0 * k2 + k1 * xi)) * 0.2; v = dxi * (r4 + v); double r5 = (k5 + xi * (2.0 * k3 + k2 * xi)) * 0.25; v = dxi * (r5 + v); double r6 = (k0 + xi * (2.0 * k5 + k3 * xi)) * 0.33333333333333333333; v = dxi * (r6 + v); double r7 = (xi * (2.0 * k0 + xi * k5)) * 0.5; v = dxi * (r7 + v); sdata[threadIdx.x] += dxi * (k0 * xi * xi + v); } } __syncthreads(); for (int s = 1; s < blockDim.x; s *= 2) { if (threadIdx.x % (2 * s) == 0) sdata[threadIdx.x] += sdata[threadIdx.x + s]; __syncthreads(); } result__[blockIdx.x] = sdata[0]; } extern "C" void spline_inner_product_gpu_v3(int const* idx_ri__, int num_ri__, int num_points__, double const* x__, double const* dx__, double const* f__, double const* g__, double* result__) { dim3 grid_t(64); dim3 grid_b(num_ri__); accLaunchKernel((spline_inner_product_gpu_kernel_v3), dim3(grid_b), dim3(grid_t), grid_t.x * sizeof(double), 0, num_points__, idx_ri__, x__, dx__, f__, g__, result__ ); }
79f73fafb5065e92d847c40ebddd5b170248a976.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

using namespace std;

// Write `value` into data[idx]; only the first thread of each block performs the store.
__global__ void setValue(float *data, int idx, float value)
{
    if (threadIdx.x == 0)
    {
        data[idx] = value;
    }
}
79f73fafb5065e92d847c40ebddd5b170248a976.cu
#include "includes.h" using namespace std; __global__ void setValue(float *data, int idx, float value) { if(threadIdx.x == 0) { data[idx] = value; } }
06d1417a102049c481fd616187a91022454bd9c8.hip
// !!! This is a file automatically generated by hipify!!! #include <gtest/gtest.h> #include <thrust/sort.h> #include "BatchManager.cuh" #include "Compact.cuh" #include "Copy.cuh" #include "CudaContext.cuh" #include "CudaContextManager.cuh" #include "DevGraph.cuh" #include "DevPlan.cuh" #include "DeviceArray.cuh" #include "GPUFilter.cuh" #include "GPUUtil.cuh" #include "Intersect.cuh" #include "LoadBalance.cuh" #include "SegmentReduce.cuh" #include "TestGPUCommon.cuh" #include "ThrustContext.cuh" #include "Transform.cuh" using namespace mgpu; // global helper function template <typename T> T GetValue(T *val, hipMemcpyKind kind = hipMemcpyDeviceToHost) { assert(kind == hipMemcpyDeviceToHost); T ret; CUDA_ERROR(hipMemcpy(&ret, val, sizeof(T), kind)); return ret; } template <typename T> void SetValue(T *from, T *to, hipMemcpyKind kind) { CUDA_ERROR(hipMemcpy(from, to, sizeof(T), kind)); } template <typename T> T *AllocDevPtr() { T *ret = NULL; CUDA_ERROR(hipMalloc(&ret, sizeof(T))); return ret; } template <typename T> T *AllocDevPtr(T *h_ptr) { T *d_ptr = AllocDevPtr<T>(); SetValue<T>(h_ptr, d_ptr, hipMemcpyHostToDevice); return d_ptr; } template <typename T> void FreePointer(T *val) { CUDA_ERROR(hipFree(val)); } // =============================================== // TEST(BasicUtils, NewDeviceArray) { const size_t d_partition_id = 0; CUDA_ERROR(hipSetDevice(d_partition_id)); CudaContext *context = CudaContextManager::GetCudaContextManager()->GetCudaContext( d_partition_id); DeviceArray<uintV> *arr = NULL; ReAllocate(arr, 10, context); ASSERT_EQ(arr->GetSize(), 10); delete arr; arr = NULL; } // ================================================= TEST(ParallelOperations, Compact) { const size_t d_partition_id = 0; CUDA_ERROR(hipSetDevice(d_partition_id)); CudaContext *context = CudaContextManager::GetCudaContextManager()->GetCudaContext( d_partition_id); const int N = 100; std::vector<int> h_input(N); bool *h_bitmaps = new bool[N]; for (int i = 0; i < N; ++i) { h_input[i] = i; h_bitmaps[i] = i % 2; } std::vector<int> h_output; for (int i = 0; i < N; ++i) { if (i % 2) { h_output.push_back(i); } } DeviceArray<int> *d_input = new DeviceArray<int>(N, context); HToD(d_input->GetArray(), h_input.data(), N); DeviceArray<bool> *d_bitmaps = new DeviceArray<bool>(N, context); HToD(d_bitmaps->GetArray(), h_bitmaps, N); DeviceArray<int> *d_output = NULL; int output_count = 0; GpuUtils::Compact::Compact(d_input, N, d_bitmaps->GetArray(), d_output, output_count, context); std::vector<int> actual_output(output_count); DToH(actual_output.data(), d_output->GetArray(), output_count); ASSERT_EQ(output_count, h_output.size()); for (int i = 0; i < output_count; ++i) { ASSERT_EQ(actual_output[i], h_output[i]); } delete d_input; d_input = NULL; delete d_bitmaps; d_bitmaps = NULL; delete d_output; d_output = NULL; delete[] h_bitmaps; h_bitmaps = NULL; } TEST(ParallelOperations, Gather) { const size_t d_partition_id = 0; CUDA_ERROR(hipSetDevice(d_partition_id)); CudaContext *context = CudaContextManager::GetCudaContextManager()->GetCudaContext( d_partition_id); const int kInputCount = 20; const int kIndicesCount = 10; std::vector<int> h_input(kInputCount); std::vector<int> h_indices(kIndicesCount); for (int i = 0; i < kInputCount; ++i) { h_input.push_back(i); if (i < kIndicesCount) { h_indices.push_back(i); } } std::vector<int> exp_output(h_indices); DeviceArray<int> *d_indices = new DeviceArray<int>(kIndicesCount, context); DeviceArray<int> *d_input = new DeviceArray<int>(kInputCount, context); DeviceArray<int> *d_output 
= new DeviceArray<int>(kIndicesCount, context); HToD(d_indices->GetArray(), h_indices.data(), kIndicesCount); HToD(d_input->GetArray(), h_input.data(), kInputCount); GpuUtils::Copy::Gather(d_indices->GetArray(), kIndicesCount, d_input->GetArray(), d_output->GetArray(), context); std::vector<int> actual_output(kIndicesCount); DToH(actual_output.data(), d_output->GetArray(), kIndicesCount); for (int i = 0; i < kIndicesCount; ++i) { ASSERT_EQ(actual_output[i], exp_output[i]); } } static void LoadBalanceTransformTestWrapper() { const size_t d_partition_id = 0; CUDA_ERROR(hipSetDevice(d_partition_id)); CudaContext *context = CudaContextManager::GetCudaContextManager()->GetCudaContext( d_partition_id); TrackPartitionedGraph *cpu_graph = Generate6Clique(); DevGraph *dev_graph = new DevGraph(cpu_graph, context); size_t vertex_count = cpu_graph->GetVertexCount(); size_t edge_count = cpu_graph->GetEdgeCount(); DeviceArray<uintV> *output = new DeviceArray<uintV>(edge_count, context); uintV *output_array = output->GetArray(); uintE *row_ptrs_array = dev_graph->GetRowPtrs()->GetArray(); uintV *cols_array = dev_graph->GetCols()->GetArray(); auto f = [=] DEVICE(int index, int seg, int rank) { output_array[index] = cols_array[row_ptrs_array[seg] + rank]; }; GpuUtils::LoadBalance::LBSTransform(f, edge_count, row_ptrs_array, vertex_count, context); std::vector<uintV> actual_output(edge_count); DToH(actual_output.data(), output_array, edge_count); for (size_t i = 0; i < edge_count; ++i) { ASSERT_EQ(actual_output[i], cpu_graph->GetCols()[i]); } delete output; output = NULL; delete cpu_graph; cpu_graph = NULL; delete dev_graph; dev_graph = NULL; } TEST(ParallelOperations, LoadBalanceTransform) { LoadBalanceTransformTestWrapper(); } TEST(ParallelOperations, LoadBalanceSearch) { const size_t d_partition_id = 0; CUDA_ERROR(hipSetDevice(d_partition_id)); CudaContext *context = CudaContextManager::GetCudaContextManager()->GetCudaContext( d_partition_id); const int segments_num = 5; int total = 0; std::vector<int> h_segments(segments_num + 1); for (int i = 0; i < segments_num; ++i) { h_segments[i] = total; total += i + 1; } h_segments[segments_num] = total; std::vector<int> exp_output(total); for (int seg_id = 0; seg_id < segments_num; ++seg_id) { for (size_t j = h_segments[seg_id]; j < h_segments[seg_id + 1]; ++j) { exp_output[j] = seg_id; } } DeviceArray<int> *d_segments = new DeviceArray<int>(segments_num + 1, context); DeviceArray<int> *output = new DeviceArray<int>(total, context); HToD(d_segments->GetArray(), h_segments.data(), segments_num + 1); GpuUtils::LoadBalance::LoadBalanceSearch( total, d_segments->GetArray(), segments_num, output->GetArray(), context); Verify(output, exp_output.data(), total); delete d_segments; d_segments = NULL; delete output; output = NULL; } TEST(ParallelOperations, Scan) { const size_t d_partition_id = 0; CUDA_ERROR(hipSetDevice(d_partition_id)); CudaContext *context = CudaContextManager::GetCudaContextManager()->GetCudaContext( d_partition_id); const int N = 10; int sum = 0; std::vector<int> h_input(N); std::vector<int> h_output(N + 1); for (int i = 0; i < N; ++i) { h_input[i] = i; h_output[i] = sum; sum += i; } h_output[N] = sum; DeviceArray<int> *d_input = new DeviceArray<int>(N, context); HToD(d_input->GetArray(), h_input.data(), N); DeviceArray<int> *d_output = new DeviceArray<int>(N + 1, context); GpuUtils::Scan::ExclusiveSum(d_input->GetArray(), N, d_output->GetArray(), d_output->GetArray() + N, context); std::vector<int> actual_output(N + 1); DToH(actual_output.data(), 
d_output->GetArray(), N + 1); for (int i = 0; i <= N; ++i) { ASSERT_EQ(actual_output[i], h_output[i]); } delete d_input; d_input = NULL; delete d_output; d_output = NULL; } static void SegmentReduceWrapper() { const size_t d_partition_id = 0; CUDA_ERROR(hipSetDevice(d_partition_id)); CudaContext *context = CudaContextManager::GetCudaContextManager()->GetCudaContext( d_partition_id); const int kWidth = 3; const int kSegmentsNum = 100; const int N = kWidth * kSegmentsNum; std::vector<int> h_segments(kSegmentsNum + 1); int prefix_sum = 0; for (int i = 0; i <= kSegmentsNum; ++i) { h_segments[i] = prefix_sum; prefix_sum += kWidth; } std::vector<int> h_values(N); for (int i = 0; i < kSegmentsNum; ++i) { for (int j = h_segments[i]; j < h_segments[i + 1]; ++j) { h_values[j] = i; } } DeviceArray<int> *d_values = new DeviceArray<int>(N, context); HToD(d_values->GetArray(), h_values.data(), N); int *d_values_array = d_values->GetArray(); auto f = [=] DEVICE(int index) { return d_values_array[index]; }; std::vector<int> exp_output(kSegmentsNum); for (int i = 0; i < kSegmentsNum; ++i) { exp_output[i] = kWidth * i; } DeviceArray<int> *d_segments = new DeviceArray<int>(kSegmentsNum + 1, context); HToD(d_segments->GetArray(), h_segments.data(), kSegmentsNum + 1); DeviceArray<int> *d_output = new DeviceArray<int>(kSegmentsNum, context); GpuUtils::SegReduce::TransformSegReduce(f, N, d_segments->GetArray(), kSegmentsNum, d_output->GetArray(), 0, context); std::vector<int> actual_output(kSegmentsNum); DToH(actual_output.data(), d_output->GetArray(), kSegmentsNum); for (int i = 0; i < kSegmentsNum; ++i) { ASSERT_EQ(actual_output[i], exp_output[i]); } delete d_values; d_values = NULL; delete d_output; d_output = NULL; delete d_segments; d_segments = NULL; } TEST(ParallelOperations, SegmentReduce) { SegmentReduceWrapper(); } TEST(ParallelOperations, Transform) { const size_t d_partition_id = 0; CUDA_ERROR(hipSetDevice(d_partition_id)); CudaContext *context = CudaContextManager::GetCudaContextManager()->GetCudaContext( d_partition_id); const size_t N = 10; std::vector<int> h_input(N, 0); DeviceArray<int> *d_input = new DeviceArray<int>(N, context); HToD(d_input->GetArray(), h_input.data(), N); GpuUtils::Transform::Apply<ADD>(d_input->GetArray(), N, 10, context); std::vector<int> actual_output(N); DToH(actual_output.data(), d_input->GetArray(), N); for (size_t i = 0; i < N; ++i) { ASSERT_EQ(actual_output[i], 10); } delete d_input; d_input = NULL; } static void TransformLambdaWrapper() { const size_t d_partition_id = 0; CUDA_ERROR(hipSetDevice(d_partition_id)); CudaContext *context = CudaContextManager::GetCudaContextManager()->GetCudaContext( d_partition_id); const size_t N = 10; std::vector<int> h_input(N); for (size_t i = 0; i < N; ++i) { h_input[i] = i; } DeviceArray<int> *d_input = new DeviceArray<int>(N, context); DeviceArray<int> *d_output = new DeviceArray<int>(N, context); HToD(d_input->GetArray(), h_input.data(), N); int *d_input_array = d_input->GetArray(); int *d_output_array = d_output->GetArray(); GpuUtils::Transform::Transform( [=] DEVICE(int index) { d_output_array[index] = d_input_array[index] + index; }, N, context); std::vector<int> actual_output(N); DToH(actual_output.data(), d_output->GetArray(), N); for (size_t i = 0; i < N; ++i) { ASSERT_EQ(actual_output[i], h_input[i] + i); } delete d_input; d_input = NULL; delete d_output; d_output = NULL; } TEST(ParallelOperations, TransformLambda) { TransformLambdaWrapper(); } // ==================================================== TEST(Tools, 
BatchManagerTestOneLevel) { size_t dev_id = 0; CUDA_ERROR(hipSetDevice(dev_id)); CudaContext *context = CudaContextManager::GetCudaContextManager()->GetCudaContext(dev_id); size_t batch_size = 4; BatchManager *batch_manager = new BatchManager(context, batch_size); size_t parent_count = 3; batch_manager->OrganizeBatch(parent_count, sizeof(int)); for (int i = 0; i < parent_count; ++i) { BatchSpec batch_spec = batch_manager->GetBatch(i); ASSERT_EQ(i, batch_spec.GetBatchLeftEnd()); ASSERT_EQ(i, batch_spec.GetBatchRightEnd()); } delete batch_manager; batch_manager = NULL; } // ================================================= TEST(BasicUtils, CacheCudaContextTest) { const size_t d_partition_id = 0; CUDA_ERROR(hipSetDevice(d_partition_id)); DeviceMemoryInfo *dev_mem = new DeviceMemoryInfo(d_partition_id, 1ULL * 1024 * 1024 * 1024 * 8); CacheCudaContext *context = new CacheCudaContext(dev_mem, 0); size_t before_size = context->GetDeviceMemoryInfo()->GetAvailableMemorySize(); size_t cache_size = 10000000; context->MallocCache(cache_size); context->SetMallocFromCache(true); const int N = 10000; { mem_t<int> arr(N, *context); CUDA_ERROR(hipMemset(arr.data(), 0, sizeof(int) * N)); } { int *d_arr = (int *)context->Malloc(N * sizeof(int)); CUDA_ERROR(hipMemset(d_arr, 0, sizeof(int) * N)); } context->SetMallocFromCache(false); context->FreeCache(); size_t after_size = context->GetDeviceMemoryInfo()->GetAvailableMemorySize(); ASSERT_EQ(before_size, after_size); delete context; context = NULL; delete dev_mem; dev_mem = NULL; } static void CnmemCudaContextTestWrapper(CudaContextType cuda_context_type) { CudaContextManager *manager = new CudaContextManager(1, cuda_context_type); const size_t d_partition_id = 0; CUDA_ERROR(hipSetDevice(d_partition_id)); CnmemCudaContext *context = static_cast<CnmemCudaContext *>(manager->GetCudaContext(d_partition_id)); size_t before_size = context->GetDeviceMemoryInfo()->GetAvailableMemorySize(); size_t large_base = 1024ULL * 1024 * 1024; size_t small_base = 1024; ///// test small allocation std::vector<void *> mem_ptrs; std::vector<size_t> mem_sizes; for (size_t i = 0; i < 10; ++i) { void *ptr = context->Malloc((i + 1) * small_base); mem_sizes.push_back((i + 1) * small_base); mem_ptrs.push_back(ptr); } for (size_t i = 0; i < 10; ++i) { if (i % 2 == 1) { context->Free(mem_ptrs[i], mem_sizes[i]); } } for (size_t i = 0; i < 10; ++i) { if (i % 2 == 0) { context->Free(mem_ptrs[i], mem_sizes[i]); } } // test large allocation mem_ptrs.resize(0); mem_sizes.resize(0); for (size_t i = 0; i < 10; ++i) { void *ptr = context->Malloc((i + 1) * large_base); mem_sizes.push_back((i + 1) * large_base); mem_ptrs.push_back(ptr); } for (size_t i = 0; i < 10; ++i) { if (i % 2 == 1) { context->Free(mem_ptrs[i], mem_sizes[i]); } } for (size_t i = 0; i < 10; ++i) { if (i % 2 == 0) { context->Free(mem_ptrs[i], mem_sizes[i]); } } size_t after_size = context->GetDeviceMemoryInfo()->GetAvailableMemorySize(); ASSERT_EQ(before_size, after_size); delete manager; manager = NULL; } TEST(BasicUtils, CnmemCudaContextTest) { CnmemCudaContextTestWrapper(CudaContextType::CNMEM); } TEST(BasicUtils, CnmemManagedCudaContextTest) { CnmemCudaContextTestWrapper(CudaContextType::CNMEM_MANAGED); } CudaContextManager *CudaContextManager::gCudaContextManager = NULL; int main(int argc, char **argv) { ::testing::InitGoogleTest(&argc, argv); CudaContextManager::CreateCudaContextManager(2, BASIC); return RUN_ALL_TESTS(); }
06d1417a102049c481fd616187a91022454bd9c8.cu
#include <gtest/gtest.h> #include <thrust/sort.h> #include "BatchManager.cuh" #include "Compact.cuh" #include "Copy.cuh" #include "CudaContext.cuh" #include "CudaContextManager.cuh" #include "DevGraph.cuh" #include "DevPlan.cuh" #include "DeviceArray.cuh" #include "GPUFilter.cuh" #include "GPUUtil.cuh" #include "Intersect.cuh" #include "LoadBalance.cuh" #include "SegmentReduce.cuh" #include "TestGPUCommon.cuh" #include "ThrustContext.cuh" #include "Transform.cuh" using namespace mgpu; // global helper function template <typename T> T GetValue(T *val, cudaMemcpyKind kind = cudaMemcpyDeviceToHost) { assert(kind == cudaMemcpyDeviceToHost); T ret; CUDA_ERROR(cudaMemcpy(&ret, val, sizeof(T), kind)); return ret; } template <typename T> void SetValue(T *from, T *to, cudaMemcpyKind kind) { CUDA_ERROR(cudaMemcpy(from, to, sizeof(T), kind)); } template <typename T> T *AllocDevPtr() { T *ret = NULL; CUDA_ERROR(cudaMalloc(&ret, sizeof(T))); return ret; } template <typename T> T *AllocDevPtr(T *h_ptr) { T *d_ptr = AllocDevPtr<T>(); SetValue<T>(h_ptr, d_ptr, cudaMemcpyHostToDevice); return d_ptr; } template <typename T> void FreePointer(T *val) { CUDA_ERROR(cudaFree(val)); } // =============================================== // TEST(BasicUtils, NewDeviceArray) { const size_t d_partition_id = 0; CUDA_ERROR(cudaSetDevice(d_partition_id)); CudaContext *context = CudaContextManager::GetCudaContextManager()->GetCudaContext( d_partition_id); DeviceArray<uintV> *arr = NULL; ReAllocate(arr, 10, context); ASSERT_EQ(arr->GetSize(), 10); delete arr; arr = NULL; } // ================================================= TEST(ParallelOperations, Compact) { const size_t d_partition_id = 0; CUDA_ERROR(cudaSetDevice(d_partition_id)); CudaContext *context = CudaContextManager::GetCudaContextManager()->GetCudaContext( d_partition_id); const int N = 100; std::vector<int> h_input(N); bool *h_bitmaps = new bool[N]; for (int i = 0; i < N; ++i) { h_input[i] = i; h_bitmaps[i] = i % 2; } std::vector<int> h_output; for (int i = 0; i < N; ++i) { if (i % 2) { h_output.push_back(i); } } DeviceArray<int> *d_input = new DeviceArray<int>(N, context); HToD(d_input->GetArray(), h_input.data(), N); DeviceArray<bool> *d_bitmaps = new DeviceArray<bool>(N, context); HToD(d_bitmaps->GetArray(), h_bitmaps, N); DeviceArray<int> *d_output = NULL; int output_count = 0; GpuUtils::Compact::Compact(d_input, N, d_bitmaps->GetArray(), d_output, output_count, context); std::vector<int> actual_output(output_count); DToH(actual_output.data(), d_output->GetArray(), output_count); ASSERT_EQ(output_count, h_output.size()); for (int i = 0; i < output_count; ++i) { ASSERT_EQ(actual_output[i], h_output[i]); } delete d_input; d_input = NULL; delete d_bitmaps; d_bitmaps = NULL; delete d_output; d_output = NULL; delete[] h_bitmaps; h_bitmaps = NULL; } TEST(ParallelOperations, Gather) { const size_t d_partition_id = 0; CUDA_ERROR(cudaSetDevice(d_partition_id)); CudaContext *context = CudaContextManager::GetCudaContextManager()->GetCudaContext( d_partition_id); const int kInputCount = 20; const int kIndicesCount = 10; std::vector<int> h_input(kInputCount); std::vector<int> h_indices(kIndicesCount); for (int i = 0; i < kInputCount; ++i) { h_input.push_back(i); if (i < kIndicesCount) { h_indices.push_back(i); } } std::vector<int> exp_output(h_indices); DeviceArray<int> *d_indices = new DeviceArray<int>(kIndicesCount, context); DeviceArray<int> *d_input = new DeviceArray<int>(kInputCount, context); DeviceArray<int> *d_output = new DeviceArray<int>(kIndicesCount, 
context); HToD(d_indices->GetArray(), h_indices.data(), kIndicesCount); HToD(d_input->GetArray(), h_input.data(), kInputCount); GpuUtils::Copy::Gather(d_indices->GetArray(), kIndicesCount, d_input->GetArray(), d_output->GetArray(), context); std::vector<int> actual_output(kIndicesCount); DToH(actual_output.data(), d_output->GetArray(), kIndicesCount); for (int i = 0; i < kIndicesCount; ++i) { ASSERT_EQ(actual_output[i], exp_output[i]); } } static void LoadBalanceTransformTestWrapper() { const size_t d_partition_id = 0; CUDA_ERROR(cudaSetDevice(d_partition_id)); CudaContext *context = CudaContextManager::GetCudaContextManager()->GetCudaContext( d_partition_id); TrackPartitionedGraph *cpu_graph = Generate6Clique(); DevGraph *dev_graph = new DevGraph(cpu_graph, context); size_t vertex_count = cpu_graph->GetVertexCount(); size_t edge_count = cpu_graph->GetEdgeCount(); DeviceArray<uintV> *output = new DeviceArray<uintV>(edge_count, context); uintV *output_array = output->GetArray(); uintE *row_ptrs_array = dev_graph->GetRowPtrs()->GetArray(); uintV *cols_array = dev_graph->GetCols()->GetArray(); auto f = [=] DEVICE(int index, int seg, int rank) { output_array[index] = cols_array[row_ptrs_array[seg] + rank]; }; GpuUtils::LoadBalance::LBSTransform(f, edge_count, row_ptrs_array, vertex_count, context); std::vector<uintV> actual_output(edge_count); DToH(actual_output.data(), output_array, edge_count); for (size_t i = 0; i < edge_count; ++i) { ASSERT_EQ(actual_output[i], cpu_graph->GetCols()[i]); } delete output; output = NULL; delete cpu_graph; cpu_graph = NULL; delete dev_graph; dev_graph = NULL; } TEST(ParallelOperations, LoadBalanceTransform) { LoadBalanceTransformTestWrapper(); } TEST(ParallelOperations, LoadBalanceSearch) { const size_t d_partition_id = 0; CUDA_ERROR(cudaSetDevice(d_partition_id)); CudaContext *context = CudaContextManager::GetCudaContextManager()->GetCudaContext( d_partition_id); const int segments_num = 5; int total = 0; std::vector<int> h_segments(segments_num + 1); for (int i = 0; i < segments_num; ++i) { h_segments[i] = total; total += i + 1; } h_segments[segments_num] = total; std::vector<int> exp_output(total); for (int seg_id = 0; seg_id < segments_num; ++seg_id) { for (size_t j = h_segments[seg_id]; j < h_segments[seg_id + 1]; ++j) { exp_output[j] = seg_id; } } DeviceArray<int> *d_segments = new DeviceArray<int>(segments_num + 1, context); DeviceArray<int> *output = new DeviceArray<int>(total, context); HToD(d_segments->GetArray(), h_segments.data(), segments_num + 1); GpuUtils::LoadBalance::LoadBalanceSearch( total, d_segments->GetArray(), segments_num, output->GetArray(), context); Verify(output, exp_output.data(), total); delete d_segments; d_segments = NULL; delete output; output = NULL; } TEST(ParallelOperations, Scan) { const size_t d_partition_id = 0; CUDA_ERROR(cudaSetDevice(d_partition_id)); CudaContext *context = CudaContextManager::GetCudaContextManager()->GetCudaContext( d_partition_id); const int N = 10; int sum = 0; std::vector<int> h_input(N); std::vector<int> h_output(N + 1); for (int i = 0; i < N; ++i) { h_input[i] = i; h_output[i] = sum; sum += i; } h_output[N] = sum; DeviceArray<int> *d_input = new DeviceArray<int>(N, context); HToD(d_input->GetArray(), h_input.data(), N); DeviceArray<int> *d_output = new DeviceArray<int>(N + 1, context); GpuUtils::Scan::ExclusiveSum(d_input->GetArray(), N, d_output->GetArray(), d_output->GetArray() + N, context); std::vector<int> actual_output(N + 1); DToH(actual_output.data(), d_output->GetArray(), N + 1); for 
(int i = 0; i <= N; ++i) { ASSERT_EQ(actual_output[i], h_output[i]); } delete d_input; d_input = NULL; delete d_output; d_output = NULL; } static void SegmentReduceWrapper() { const size_t d_partition_id = 0; CUDA_ERROR(cudaSetDevice(d_partition_id)); CudaContext *context = CudaContextManager::GetCudaContextManager()->GetCudaContext( d_partition_id); const int kWidth = 3; const int kSegmentsNum = 100; const int N = kWidth * kSegmentsNum; std::vector<int> h_segments(kSegmentsNum + 1); int prefix_sum = 0; for (int i = 0; i <= kSegmentsNum; ++i) { h_segments[i] = prefix_sum; prefix_sum += kWidth; } std::vector<int> h_values(N); for (int i = 0; i < kSegmentsNum; ++i) { for (int j = h_segments[i]; j < h_segments[i + 1]; ++j) { h_values[j] = i; } } DeviceArray<int> *d_values = new DeviceArray<int>(N, context); HToD(d_values->GetArray(), h_values.data(), N); int *d_values_array = d_values->GetArray(); auto f = [=] DEVICE(int index) { return d_values_array[index]; }; std::vector<int> exp_output(kSegmentsNum); for (int i = 0; i < kSegmentsNum; ++i) { exp_output[i] = kWidth * i; } DeviceArray<int> *d_segments = new DeviceArray<int>(kSegmentsNum + 1, context); HToD(d_segments->GetArray(), h_segments.data(), kSegmentsNum + 1); DeviceArray<int> *d_output = new DeviceArray<int>(kSegmentsNum, context); GpuUtils::SegReduce::TransformSegReduce(f, N, d_segments->GetArray(), kSegmentsNum, d_output->GetArray(), 0, context); std::vector<int> actual_output(kSegmentsNum); DToH(actual_output.data(), d_output->GetArray(), kSegmentsNum); for (int i = 0; i < kSegmentsNum; ++i) { ASSERT_EQ(actual_output[i], exp_output[i]); } delete d_values; d_values = NULL; delete d_output; d_output = NULL; delete d_segments; d_segments = NULL; } TEST(ParallelOperations, SegmentReduce) { SegmentReduceWrapper(); } TEST(ParallelOperations, Transform) { const size_t d_partition_id = 0; CUDA_ERROR(cudaSetDevice(d_partition_id)); CudaContext *context = CudaContextManager::GetCudaContextManager()->GetCudaContext( d_partition_id); const size_t N = 10; std::vector<int> h_input(N, 0); DeviceArray<int> *d_input = new DeviceArray<int>(N, context); HToD(d_input->GetArray(), h_input.data(), N); GpuUtils::Transform::Apply<ADD>(d_input->GetArray(), N, 10, context); std::vector<int> actual_output(N); DToH(actual_output.data(), d_input->GetArray(), N); for (size_t i = 0; i < N; ++i) { ASSERT_EQ(actual_output[i], 10); } delete d_input; d_input = NULL; } static void TransformLambdaWrapper() { const size_t d_partition_id = 0; CUDA_ERROR(cudaSetDevice(d_partition_id)); CudaContext *context = CudaContextManager::GetCudaContextManager()->GetCudaContext( d_partition_id); const size_t N = 10; std::vector<int> h_input(N); for (size_t i = 0; i < N; ++i) { h_input[i] = i; } DeviceArray<int> *d_input = new DeviceArray<int>(N, context); DeviceArray<int> *d_output = new DeviceArray<int>(N, context); HToD(d_input->GetArray(), h_input.data(), N); int *d_input_array = d_input->GetArray(); int *d_output_array = d_output->GetArray(); GpuUtils::Transform::Transform( [=] DEVICE(int index) { d_output_array[index] = d_input_array[index] + index; }, N, context); std::vector<int> actual_output(N); DToH(actual_output.data(), d_output->GetArray(), N); for (size_t i = 0; i < N; ++i) { ASSERT_EQ(actual_output[i], h_input[i] + i); } delete d_input; d_input = NULL; delete d_output; d_output = NULL; } TEST(ParallelOperations, TransformLambda) { TransformLambdaWrapper(); } // ==================================================== TEST(Tools, BatchManagerTestOneLevel) { size_t dev_id 
= 0; CUDA_ERROR(cudaSetDevice(dev_id)); CudaContext *context = CudaContextManager::GetCudaContextManager()->GetCudaContext(dev_id); size_t batch_size = 4; BatchManager *batch_manager = new BatchManager(context, batch_size); size_t parent_count = 3; batch_manager->OrganizeBatch(parent_count, sizeof(int)); for (int i = 0; i < parent_count; ++i) { BatchSpec batch_spec = batch_manager->GetBatch(i); ASSERT_EQ(i, batch_spec.GetBatchLeftEnd()); ASSERT_EQ(i, batch_spec.GetBatchRightEnd()); } delete batch_manager; batch_manager = NULL; } // ================================================= TEST(BasicUtils, CacheCudaContextTest) { const size_t d_partition_id = 0; CUDA_ERROR(cudaSetDevice(d_partition_id)); DeviceMemoryInfo *dev_mem = new DeviceMemoryInfo(d_partition_id, 1ULL * 1024 * 1024 * 1024 * 8); CacheCudaContext *context = new CacheCudaContext(dev_mem, 0); size_t before_size = context->GetDeviceMemoryInfo()->GetAvailableMemorySize(); size_t cache_size = 10000000; context->MallocCache(cache_size); context->SetMallocFromCache(true); const int N = 10000; { mem_t<int> arr(N, *context); CUDA_ERROR(cudaMemset(arr.data(), 0, sizeof(int) * N)); } { int *d_arr = (int *)context->Malloc(N * sizeof(int)); CUDA_ERROR(cudaMemset(d_arr, 0, sizeof(int) * N)); } context->SetMallocFromCache(false); context->FreeCache(); size_t after_size = context->GetDeviceMemoryInfo()->GetAvailableMemorySize(); ASSERT_EQ(before_size, after_size); delete context; context = NULL; delete dev_mem; dev_mem = NULL; } static void CnmemCudaContextTestWrapper(CudaContextType cuda_context_type) { CudaContextManager *manager = new CudaContextManager(1, cuda_context_type); const size_t d_partition_id = 0; CUDA_ERROR(cudaSetDevice(d_partition_id)); CnmemCudaContext *context = static_cast<CnmemCudaContext *>(manager->GetCudaContext(d_partition_id)); size_t before_size = context->GetDeviceMemoryInfo()->GetAvailableMemorySize(); size_t large_base = 1024ULL * 1024 * 1024; size_t small_base = 1024; ///// test small allocation std::vector<void *> mem_ptrs; std::vector<size_t> mem_sizes; for (size_t i = 0; i < 10; ++i) { void *ptr = context->Malloc((i + 1) * small_base); mem_sizes.push_back((i + 1) * small_base); mem_ptrs.push_back(ptr); } for (size_t i = 0; i < 10; ++i) { if (i % 2 == 1) { context->Free(mem_ptrs[i], mem_sizes[i]); } } for (size_t i = 0; i < 10; ++i) { if (i % 2 == 0) { context->Free(mem_ptrs[i], mem_sizes[i]); } } // test large allocation mem_ptrs.resize(0); mem_sizes.resize(0); for (size_t i = 0; i < 10; ++i) { void *ptr = context->Malloc((i + 1) * large_base); mem_sizes.push_back((i + 1) * large_base); mem_ptrs.push_back(ptr); } for (size_t i = 0; i < 10; ++i) { if (i % 2 == 1) { context->Free(mem_ptrs[i], mem_sizes[i]); } } for (size_t i = 0; i < 10; ++i) { if (i % 2 == 0) { context->Free(mem_ptrs[i], mem_sizes[i]); } } size_t after_size = context->GetDeviceMemoryInfo()->GetAvailableMemorySize(); ASSERT_EQ(before_size, after_size); delete manager; manager = NULL; } TEST(BasicUtils, CnmemCudaContextTest) { CnmemCudaContextTestWrapper(CudaContextType::CNMEM); } TEST(BasicUtils, CnmemManagedCudaContextTest) { CnmemCudaContextTestWrapper(CudaContextType::CNMEM_MANAGED); } CudaContextManager *CudaContextManager::gCudaContextManager = NULL; int main(int argc, char **argv) { ::testing::InitGoogleTest(&argc, argv); CudaContextManager::CreateCudaContextManager(2, BASIC); return RUN_ALL_TESTS(); }
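The Scan test above checks GpuUtils::Scan::ExclusiveSum against a host-built prefix sum whose extra slot holds the grand total. The function below restates that expected layout as a small host-side reference; it is only an illustrative sketch and is not part of the test sources.

#include <vector>

// Reference for the layout checked in TEST(ParallelOperations, Scan):
// output[i] holds the sum of input[0..i-1]; the extra slot output[N] holds the total,
// matching the d_output->GetArray() + N argument passed to ExclusiveSum.
std::vector<int> ExclusiveSumReference(const std::vector<int>& input) {
    std::vector<int> output(input.size() + 1);
    int running = 0;
    for (size_t i = 0; i < input.size(); ++i) {
        output[i] = running;   // prefix sum excluding input[i]
        running += input[i];
    }
    output[input.size()] = running;  // grand total
    return output;
}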
a6d81bbfbf32462ac56f6e7107eacc37cab9a381.hip
// !!! This is a file automatically generated by hipify!!!
//========================================================================================================================================================================================================200
//	SET_DEVICE CODE
//========================================================================================================================================================================================================200

//======================================================================================================================================================150
//	INCLUDE/DEFINE
//======================================================================================================================================================150

#include "device.h"				// (in library path specified to compiler)

//======================================================================================================================================================150
//	FUNCTIONS
//======================================================================================================================================================150

//====================================================================================================100
//	SET DEVICE
//====================================================================================================100

void setdevice(void){

	// variables
	int num_devices;
	int device;

	// work
	hipGetDeviceCount(&num_devices);
	if (num_devices > 1) {

		// variables
		int max_multiprocessors;
		int max_device;
		hipDeviceProp_t properties;

		// initialize variables
		max_multiprocessors = 0;
		max_device = 0;

		for (device = 0; device < num_devices; device++) {
			hipGetDeviceProperties(&properties, device);
			if (max_multiprocessors < properties.multiProcessorCount) {
				max_multiprocessors = properties.multiProcessorCount;
				max_device = device;
			}
		}
		hipSetDevice(max_device);
	}

}

//====================================================================================================100
//	GET LAST ERROR
//====================================================================================================100

void checkCUDAError(const char *msg)
{
	hipError_t err = hipGetLastError();
	if( hipSuccess != err) {
		// fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
		printf("Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
		fflush(NULL);
		exit(EXIT_FAILURE);
	}
}

//========================================================================================================================================================================================================200
//	END SET_DEVICE CODE
//========================================================================================================================================================================================================200
a6d81bbfbf32462ac56f6e7107eacc37cab9a381.cu
//========================================================================================================================================================================================================200
//	SET_DEVICE CODE
//========================================================================================================================================================================================================200

//======================================================================================================================================================150
//	INCLUDE/DEFINE
//======================================================================================================================================================150

#include "device.h"				// (in library path specified to compiler)

//======================================================================================================================================================150
//	FUNCTIONS
//======================================================================================================================================================150

//====================================================================================================100
//	SET DEVICE
//====================================================================================================100

void setdevice(void){

	// variables
	int num_devices;
	int device;

	// work
	cudaGetDeviceCount(&num_devices);
	if (num_devices > 1) {

		// variables
		int max_multiprocessors;
		int max_device;
		cudaDeviceProp properties;

		// initialize variables
		max_multiprocessors = 0;
		max_device = 0;

		for (device = 0; device < num_devices; device++) {
			cudaGetDeviceProperties(&properties, device);
			if (max_multiprocessors < properties.multiProcessorCount) {
				max_multiprocessors = properties.multiProcessorCount;
				max_device = device;
			}
		}
		cudaSetDevice(max_device);
	}

}

//====================================================================================================100
//	GET LAST ERROR
//====================================================================================================100

void checkCUDAError(const char *msg)
{
	cudaError_t err = cudaGetLastError();
	if( cudaSuccess != err) {
		// fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
		printf("Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
		fflush(NULL);
		exit(EXIT_FAILURE);
	}
}

//========================================================================================================================================================================================================200
//	END SET_DEVICE CODE
//========================================================================================================================================================================================================200
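A hypothetical usage of the two helpers defined above (setdevice and checkCUDAError); the main function and the launch it brackets are illustrative only and not part of the original file.

// Forward declarations of the helpers above (normally provided by device.h).
void setdevice(void);
void checkCUDAError(const char *msg);

int main(void)
{
	setdevice();                            // pick the GPU with the most multiprocessors
	// ... allocate buffers and launch kernels here ...
	checkCUDAError("after kernel launch");  // exits with the CUDA error string on failure
	return 0;
}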
ee67375f386be3ad9dac863ded617cb8280e190b.hip
// !!! This is a file automatically generated by hipify!!!
#include "IntersectionTest.h"

TestResult IntersectionTest::runTest(TestData* data)
{
	TestResult retResult;
	retResult.intersectionResults.resize(data->triangleCount);
	//std::vector<IntersectionResult> resultVector(data->triangleCount);

	//if (result != nullptr)
	//{
	//	hipFree(result);
	//}
	if (result == nullptr)
		hipMalloc((void**) &result, data->triangleCount * sizeof(IntersectionResult));

	// start timer
	StopWatch sw;
	sw.start();

	test(data);
	hipDeviceSynchronize();

	// end timer
	retResult.duration = sw.getTimeInSeconds();

	// collect results
	hipMemcpy(retResult.intersectionResults.data(), result,
		data->triangleCount * sizeof(IntersectionResult),
		hipMemcpyKind::hipMemcpyDeviceToHost);

	return retResult;
}

IntersectionTest::~IntersectionTest()
{
	hipFree(result);
}
ee67375f386be3ad9dac863ded617cb8280e190b.cu
#include "IntersectionTest.h"

TestResult IntersectionTest::runTest(TestData* data)
{
	TestResult retResult;
	retResult.intersectionResults.resize(data->triangleCount);
	//std::vector<IntersectionResult> resultVector(data->triangleCount);

	//if (result != nullptr)
	//{
	//	cudaFree(result);
	//}
	if (result == nullptr)
		cudaMalloc((void**) &result, data->triangleCount * sizeof(IntersectionResult));

	// start timer
	StopWatch sw;
	sw.start();

	test(data);
	cudaDeviceSynchronize();

	// end timer
	retResult.duration = sw.getTimeInSeconds();

	// collect results
	cudaMemcpy(retResult.intersectionResults.data(), result,
		data->triangleCount * sizeof(IntersectionResult),
		cudaMemcpyKind::cudaMemcpyDeviceToHost);

	return retResult;
}

IntersectionTest::~IntersectionTest()
{
	cudaFree(result);
}
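runTest() measures the GPU work with a host StopWatch bracketed by a device synchronize. The helper below sketches an equivalent measurement using CUDA events; it is an illustration only (TimeWithEvents and launch_work are not part of IntersectionTest).

#include <cuda_runtime.h>

// Times one invocation of a callable GPU workload in milliseconds using CUDA events.
template <typename F>
float TimeWithEvents(F&& launch_work)
{
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);

	cudaEventRecord(start);
	launch_work();                    // e.g. the test(data) call measured above
	cudaEventRecord(stop);
	cudaEventSynchronize(stop);       // block until the recorded work has finished

	float ms = 0.0f;
	cudaEventElapsedTime(&ms, start, stop);

	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	return ms;
}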
3b9c9e0e1a531d61ade4c10f4a1411c34ffad4cf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include<cuda_runtime.h> #include "device_launch_parameters.h" #include "HalconCpp.h" #include "common_val.h" using namespace HalconCpp; using namespace std; unsigned char* d_src, * d_dst; double* d_matrix; __global__ void d_affine(unsigned char* src, double* matrix, int width, int height, int dstWidth, int dstHeight, unsigned char* dst) { unsigned int r = blockDim.y * blockIdx.y + threadIdx.y; unsigned int c = blockDim.x * blockIdx.x + threadIdx.x; if ((c < dstWidth) && (r < dstHeight)) { double refX = matrix[0] * (c - ((double)dstWidth / 2)) + matrix[1] * (r - ((double)dstHeight / 2)) + ((double)width / 2); double refY = matrix[3] * (c - ((double)dstWidth / 2)) + matrix[4] * (r - ((double)dstHeight / 2)) + ((double)height / 2); int rX = (int)(refX + 0.5); int rY = (int)(refY + 0.5); unsigned char val = 0; if (rX >= 0 && rY >= 0 && rX < width && rY < height) { int idxRef = rY * width + rX; val = src[idxRef]; } int idx = (r * dstWidth) + c; if (idx < dstWidth * dstHeight) { dst[idx] = val; } } return; } #define BLOCKWIDTH 128 #define BLOCKHEIGHT 8 void gpu_affine() { HObject image; HalconCpp::ReadImage(&image, READIMAGE); HObject imgR, imgG, imgB; Decompose3(image, &imgR, &imgG, &imgB); HObject gray; Rgb3ToGray(imgR, imgG, imgB, &gray); HTuple p, tp, w, h; GetImagePointer1(gray, &p, &tp, &w, &h); HTuple hommat2dIdentity; HomMat2dIdentity(&hommat2dIdentity); HTuple hommat2dscale; HomMat2dScale(hommat2dIdentity, 2, 2, 0, 0, &hommat2dscale); HTuple hommat2dinvert; HomMat2dInvert(hommat2dscale, &hommat2dinvert); HTuple hommat2drotate; double rad = (45.0 / 180.0) * PI; HomMat2dRotate(hommat2dinvert, rad, 0, 0, &hommat2drotate); unsigned char* ptr = (unsigned char*)p.L(); int length = w.I() * h.I(); int length_2 = w.I() * 2 * h.I() * 2; size_t matrixSize = sizeof(double) * 6; unsigned char* buffer = new unsigned char[length_2]; hipMalloc((void**)&d_src, length); hipMalloc((void**)&d_dst, length_2); hipMalloc((void**)&d_matrix, matrixSize); clock_t start = clock(); hipMemcpy(d_src, ptr, length, hipMemcpyHostToDevice); hipMemcpy(d_dst, buffer, length_2, hipMemcpyHostToDevice); hipMemcpy(d_matrix, hommat2drotate.ToDArr(), matrixSize, hipMemcpyHostToDevice); int width = w.I() * 2; int height = h.I() * 2; hipFuncAttributes attributes; hipFuncGetAttributes(&attributes, d_affine); cout << "thread/block:" << attributes.maxThreadsPerBlock << endl; dim3 grid((width + BLOCKWIDTH-1)/ BLOCKWIDTH, (height + BLOCKHEIGHT-1) / BLOCKHEIGHT); dim3 block(BLOCKWIDTH, BLOCKHEIGHT); double s = HSystem::CountSeconds(); d_affine << < grid, block >> > (d_src, d_matrix, w.I(), h.I(), w.I() * 2, h.I() * 2, d_dst); double e = HSystem::CountSeconds(); cout << (e-s) * 1000 << ":only" << endl; hipMemcpy(buffer, d_dst, length_2, hipMemcpyDeviceToHost); hipFree(d_src); hipFree(d_dst); hipFree(d_matrix); clock_t end = clock(); cout << end - start << ":" << CLOCKS_PER_SEC << endl; HImage dst; GenImage1(&dst, "byte", w.I() * 2, h.I() * 2, (Hlong)buffer); WriteImage(dst, "tiff", 0, "gpu_affine"); //for (int i = 0; i < width; i++) //{ // cout << "[" << +buffer[i] << "]"; //} //cout << endl; delete[] buffer; }
3b9c9e0e1a531d61ade4c10f4a1411c34ffad4cf.cu
#include <stdio.h> #include<cuda_runtime.h> #include "device_launch_parameters.h" #include "HalconCpp.h" #include "common_val.h" using namespace HalconCpp; using namespace std; unsigned char* d_src, * d_dst; double* d_matrix; __global__ void d_affine(unsigned char* src, double* matrix, int width, int height, int dstWidth, int dstHeight, unsigned char* dst) { unsigned int r = blockDim.y * blockIdx.y + threadIdx.y; unsigned int c = blockDim.x * blockIdx.x + threadIdx.x; if ((c < dstWidth) && (r < dstHeight)) { double refX = matrix[0] * (c - ((double)dstWidth / 2)) + matrix[1] * (r - ((double)dstHeight / 2)) + ((double)width / 2); double refY = matrix[3] * (c - ((double)dstWidth / 2)) + matrix[4] * (r - ((double)dstHeight / 2)) + ((double)height / 2); int rX = (int)(refX + 0.5); int rY = (int)(refY + 0.5); unsigned char val = 0; if (rX >= 0 && rY >= 0 && rX < width && rY < height) { int idxRef = rY * width + rX; val = src[idxRef]; } int idx = (r * dstWidth) + c; if (idx < dstWidth * dstHeight) { dst[idx] = val; } } return; } #define BLOCKWIDTH 128 #define BLOCKHEIGHT 8 void gpu_affine() { HObject image; HalconCpp::ReadImage(&image, READIMAGE); HObject imgR, imgG, imgB; Decompose3(image, &imgR, &imgG, &imgB); HObject gray; Rgb3ToGray(imgR, imgG, imgB, &gray); HTuple p, tp, w, h; GetImagePointer1(gray, &p, &tp, &w, &h); HTuple hommat2dIdentity; HomMat2dIdentity(&hommat2dIdentity); HTuple hommat2dscale; HomMat2dScale(hommat2dIdentity, 2, 2, 0, 0, &hommat2dscale); HTuple hommat2dinvert; HomMat2dInvert(hommat2dscale, &hommat2dinvert); HTuple hommat2drotate; double rad = (45.0 / 180.0) * PI; HomMat2dRotate(hommat2dinvert, rad, 0, 0, &hommat2drotate); unsigned char* ptr = (unsigned char*)p.L(); int length = w.I() * h.I(); int length_2 = w.I() * 2 * h.I() * 2; size_t matrixSize = sizeof(double) * 6; unsigned char* buffer = new unsigned char[length_2]; cudaMalloc((void**)&d_src, length); cudaMalloc((void**)&d_dst, length_2); cudaMalloc((void**)&d_matrix, matrixSize); clock_t start = clock(); cudaMemcpy(d_src, ptr, length, cudaMemcpyHostToDevice); cudaMemcpy(d_dst, buffer, length_2, cudaMemcpyHostToDevice); cudaMemcpy(d_matrix, hommat2drotate.ToDArr(), matrixSize, cudaMemcpyHostToDevice); int width = w.I() * 2; int height = h.I() * 2; cudaFuncAttributes attributes; cudaFuncGetAttributes(&attributes, d_affine); cout << "thread/block:" << attributes.maxThreadsPerBlock << endl; dim3 grid((width + BLOCKWIDTH-1)/ BLOCKWIDTH, (height + BLOCKHEIGHT-1) / BLOCKHEIGHT); dim3 block(BLOCKWIDTH, BLOCKHEIGHT); double s = HSystem::CountSeconds(); d_affine << < grid, block >> > (d_src, d_matrix, w.I(), h.I(), w.I() * 2, h.I() * 2, d_dst); double e = HSystem::CountSeconds(); cout << (e-s) * 1000 << ":only" << endl; cudaMemcpy(buffer, d_dst, length_2, cudaMemcpyDeviceToHost); cudaFree(d_src); cudaFree(d_dst); cudaFree(d_matrix); clock_t end = clock(); cout << end - start << ":" << CLOCKS_PER_SEC << endl; HImage dst; GenImage1(&dst, "byte", w.I() * 2, h.I() * 2, (Hlong)buffer); WriteImage(dst, "tiff", 0, "gpu_affine"); //for (int i = 0; i < width; i++) //{ // cout << "[" << +buffer[i] << "]"; //} //cout << endl; delete[] buffer; }
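gpu_affine() sizes its launch grid with ceiling division so that every output pixel is covered even when the image dimensions are not multiples of the block size. The helper below restates that arithmetic as a standalone sketch; it is not part of the original file.

#include <cuda_runtime.h>

// Ceiling division used when sizing the grid: enough blocks to cover n items.
static inline unsigned int DivUp(unsigned int n, unsigned int block)
{
    return (n + block - 1) / block;
}

// Example for a 2000 x 1500 output with the 128 x 8 blocks used above:
//   dim3 grid(DivUp(2000, 128), DivUp(1500, 8));   // 16 x 188 blocks
//   dim3 block(128, 8);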
0aba6b58a6e21af7b2b098cf9419082ab1641eef.hip
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.

#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"

#include "ShapeConvexPolygon.h"

namespace hpmc
{

namespace detail
{

//! HPMC kernels for ShapeConvexPolygon
template hipError_t gpu_hpmc_free_volume<ShapeConvexPolygon>(const hpmc_free_volume_args_t &args,
                                                             const typename ShapeConvexPolygon::param_type *d_params);
template hipError_t gpu_hpmc_update<ShapeConvexPolygon>(const hpmc_args_t& args,
                                                        const typename ShapeConvexPolygon::param_type *d_params);
template void gpu_hpmc_implicit_count_overlaps<ShapeConvexPolygon>(const hpmc_implicit_args_t& args,
                                                                   const typename ShapeConvexPolygon::param_type *d_params);
template hipError_t gpu_hpmc_implicit_accept_reject<ShapeConvexPolygon>(const hpmc_implicit_args_t& args,
                                                                        const typename ShapeConvexPolygon::param_type *d_params);

}; // end namespace detail

} // end namespace hpmc
0aba6b58a6e21af7b2b098cf9419082ab1641eef.cu
// Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.

#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"

#include "ShapeConvexPolygon.h"

namespace hpmc
{

namespace detail
{

//! HPMC kernels for ShapeConvexPolygon
template cudaError_t gpu_hpmc_free_volume<ShapeConvexPolygon>(const hpmc_free_volume_args_t &args,
                                                              const typename ShapeConvexPolygon::param_type *d_params);
template cudaError_t gpu_hpmc_update<ShapeConvexPolygon>(const hpmc_args_t& args,
                                                         const typename ShapeConvexPolygon::param_type *d_params);
template void gpu_hpmc_implicit_count_overlaps<ShapeConvexPolygon>(const hpmc_implicit_args_t& args,
                                                                   const typename ShapeConvexPolygon::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_accept_reject<ShapeConvexPolygon>(const hpmc_implicit_args_t& args,
                                                                         const typename ShapeConvexPolygon::param_type *d_params);

}; // end namespace detail

} // end namespace hpmc
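The file above contains only explicit template instantiations: the templated GPU launchers are declared in headers and instantiated once per shape type in their own .cu translation unit. The fragment below is a minimal, generic illustration of that pattern; gpu_do_work and MyShape are placeholders, not HOOMD names.

#include <cuda_runtime.h>

// Templated launcher; in HOOMD the definition comes from the included .cuh headers.
template <class Shape>
cudaError_t gpu_do_work(const typename Shape::param_type* d_params)
{
    (void)d_params;               // a real launcher would launch a Shape-specialized kernel here
    return cudaSuccess;
}

struct MyShape
{
    struct param_type { float radius; };
};

// One explicit instantiation per shape type, mirroring the ShapeConvexPolygon lines above,
// keeps the template code confined to a single translation unit per shape.
template cudaError_t gpu_do_work<MyShape>(const typename MyShape::param_type* d_params);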
2a4c9990dede20c8e89f9e43f0ecd91cdf0d9b4a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/top_k_op.h" #include "paddle/fluid/platform/assert.h" #include "paddle/fluid/platform/cuda_device_function.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; template <typename T> struct Pair { __device__ __forceinline__ Pair() {} __device__ __forceinline__ Pair(T value, int64_t id) : v(value), id(id) {} __device__ __forceinline__ void set(T value, int64_t id) { v = value; id = id; } __device__ __forceinline__ void operator=(const Pair<T>& in) { v = in.v; id = in.id; } __device__ __forceinline__ bool operator<(const T value) const { return (v < value); } __device__ __forceinline__ bool operator<(const Pair<T>& in) const { return (v < in.v) || ((v == in.v) && (id > in.id)); } __device__ __forceinline__ bool operator>(const Pair<T>& in) const { return (v > in.v) || ((v == in.v) && (id < in.id)); } T v; int64_t id; }; template <typename T> __device__ __forceinline__ void AddTo(Pair<T> topk[], const Pair<T>& p, int beam_size) { for (int k = beam_size - 2; k >= 0; k--) { if (topk[k] < p) { topk[k + 1] = topk[k]; } else { topk[k + 1] = p; return; } } topk[0] = p; } template <typename T, int beam_size> __device__ __forceinline__ void AddTo(Pair<T> topk[], const Pair<T>& p) { for (int k = beam_size - 2; k >= 0; k--) { if (topk[k] < p) { topk[k + 1] = topk[k]; } else { topk[k + 1] = p; return; } } topk[0] = p; } template <typename T, int BlockSize> __device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* src, int idx, int dim, int beam_size) { while (idx < dim) { if (topk[beam_size - 1] < src[idx]) { Pair<T> tmp(src[idx], idx); AddTo<T>(topk, tmp, beam_size); } idx += BlockSize; } } template <typename T, int BlockSize> __device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* src, int idx, int dim, const Pair<T>& max, int beam_size) { while (idx < dim) { if (topk[beam_size - 1] < src[idx]) { Pair<T> tmp(src[idx], idx); if (tmp < max) { AddTo<T>(topk, tmp, beam_size); } } idx += BlockSize; } } template <typename T, int BlockSize> __device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* val, int* col, int idx, int dim, int beam_size) { while (idx < dim) { if (topk[beam_size - 1] < val[idx]) { Pair<T> tmp(val[idx], col[idx]); AddTo<T>(topk, tmp, beam_size); } idx += BlockSize; } } template <typename T, int BlockSize> __device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* val, int* col, int idx, int dim, const Pair<T>& max, int beam_size) { while (idx < dim) { if (topk[beam_size - 1] < val[idx]) { Pair<T> tmp(val[idx], col[idx]); if (tmp < max) { AddTo<T>(topk, tmp, beam_size); } } idx += BlockSize; } } template <typename T, int MaxLength, int BlockSize> __device__ __forceinline__ void ThreadGetTopK(Pair<T> topk[], int* beam, int beam_size, const T* src, bool* firstStep, bool* is_empty, Pair<T>* max, 
int dim, const int tid) { if (*beam > 0) { int length = (*beam) < beam_size ? *beam : beam_size; if (*firstStep) { *firstStep = false; GetTopK<T, BlockSize>(topk, src, tid, dim, length); } else { for (int k = 0; k < MaxLength; k++) { if (k < MaxLength - (*beam)) { topk[k] = topk[k + *beam]; } else { topk[k].set(-INFINITY, -1); } } if (!(*is_empty)) { GetTopK<T, BlockSize>(topk + MaxLength - *beam, src, tid, dim, *max, length); } } *max = topk[MaxLength - 1]; if ((*max).v == -1) *is_empty = true; *beam = 0; } } template <typename T, int MaxLength, int BlockSize> __device__ __forceinline__ void ThreadGetTopK(Pair<T> topk[], int* beam, int beam_size, const T* val, int* col, bool* firstStep, bool* is_empty, Pair<T>* max, int dim, const int tid) { if (*beam > 0) { int length = (*beam) < beam_size ? *beam : beam_size; if (*firstStep) { *firstStep = false; GetTopK<T, BlockSize>(topk, val, col, tid, dim, length); } else { for (int k = 0; k < MaxLength; k++) { if (k < MaxLength - *beam) { topk[k] = topk[k + *beam]; } else { topk[k].set(-INFINITY, -1); } } if (!(*is_empty)) { GetTopK<T, BlockSize>(topk + MaxLength - *beam, val, col, tid, dim, max, length); } } *max = topk[MaxLength - 1]; if ((*max).v == -1) *is_empty = true; *beam = 0; } } template <typename T, int MaxLength, int BlockSize> __device__ __forceinline__ void BlockReduce(Pair<T>* sh_topk, int* maxid, Pair<T> topk[], T** topVal, int64_t** topIds, int* beam, int* k, const int tid, const int warp) { while (true) { __syncthreads(); if (tid < BlockSize / 2) { if (sh_topk[tid] < sh_topk[tid + BlockSize / 2]) { maxid[tid] = tid + BlockSize / 2; } else { maxid[tid] = tid; } } __syncthreads(); for (int stride = BlockSize / 4; stride > 0; stride = stride / 2) { if (tid < stride) { if (sh_topk[maxid[tid]] < sh_topk[maxid[tid + stride]]) { maxid[tid] = maxid[tid + stride]; } } __syncthreads(); } __syncthreads(); if (tid == 0) { **topVal = sh_topk[maxid[0]].v; **topIds = sh_topk[maxid[0]].id; (*topVal)++; (*topIds)++; } if (tid == maxid[0]) (*beam)++; if (--(*k) == 0) break; __syncthreads(); if (tid == maxid[0]) { if (*beam < MaxLength) { sh_topk[tid] = topk[*beam]; } } // NOTE(zcd): temporary solution unsigned mask = 0u; CREATE_SHFL_MASK(mask, true); if (maxid[0] / 32 == warp) { if (platform::CudaShuffleSync(mask, *beam, (maxid[0]) % 32, 32) == MaxLength) break; } } } /** * Each block compute one sample. * In a block: * 1. every thread get top MaxLength value; * 2. merge to sh_topk, block reduce and get max value; * 3. go to the second setp, until one thread's topk value is null; * 4. go to the first setp, until get the topk value. 
*/ template <typename T, int MaxLength, int BlockSize> __global__ void KeMatrixTopK(T* output, int output_stride, int64_t* indices, const T* src, int lds, int dim, int k, int grid_dim, int num) { __shared__ Pair<T> sh_topk[BlockSize]; const int tid = threadIdx.x; const int warp = threadIdx.x / 32; const int bid = blockIdx.x; for (int i = bid; i < num; i += grid_dim) { int top_num = k; __shared__ int maxid[BlockSize / 2]; T* out = output + i * output_stride; int64_t* inds = indices + i * k; Pair<T> topk[MaxLength]; int beam = MaxLength; Pair<T> max; bool is_empty = false; bool firststep = true; for (int j = 0; j < MaxLength; j++) { topk[j].set(-INFINITY, -1); } while (top_num) { ThreadGetTopK<T, MaxLength, BlockSize>( topk, &beam, k, src + i * lds, &firststep, &is_empty, &max, dim, tid); sh_topk[tid] = topk[0]; BlockReduce<T, MaxLength, BlockSize>(sh_topk, maxid, topk, &out, &inds, &beam, &top_num, tid, warp); } } } inline static int GetDesiredBlockDim(int dim) { if (dim > 128) { return 256; } else if (dim > 64) { return 128; } else if (dim > 32) { return 64; } else { return 32; } } #define FIXED_BLOCK_DIM_BASE(dim, ...) \ case (dim): { \ constexpr auto kBlockDim = (dim); \ __VA_ARGS__; \ } break #define FIXED_BLOCK_DIM(...) \ FIXED_BLOCK_DIM_BASE(256, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(128, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(64, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(32, ##__VA_ARGS__) template <typename T> class TopkOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), "It must use CUDAPlace."); auto* input = ctx.Input<Tensor>("X"); auto* output = ctx.Output<Tensor>("Out"); auto* indices = ctx.Output<Tensor>("Indices"); size_t k = static_cast<int>(ctx.Attr<int>("k")); const T* input_data = input->data<T>(); T* output_data = output->mutable_data<T>(ctx.GetPlace()); // FIXME(typhoonzero): data is always converted to type T? int64_t* indices_data = indices->mutable_data<int64_t>(ctx.GetPlace()); framework::DDim inputdims = input->dims(); const size_t input_height = framework::product( framework::slice_ddim(inputdims, 0, inputdims.size() - 1)); const size_t input_width = inputdims[inputdims.size() - 1]; if (k > input_width) k = input_width; // NOTE: pass lds and dim same to input width. // NOTE: old matrix implementation of stride is different to eigen. // TODO(typhoonzero): refine this kernel. const int kMaxHeight = 2048; int gridx = input_height < kMaxHeight ? input_height : kMaxHeight; auto& dev_ctx = ctx.cuda_device_context(); switch (GetDesiredBlockDim(input_width)) { FIXED_BLOCK_DIM( hipLaunchKernelGGL(( KeMatrixTopK<T, 5, kBlockDim>), dim3(gridx), dim3(kBlockDim), 0, dev_ctx.stream(), output_data, k, indices_data, input_data, input_width, input_width, static_cast<int>(k), gridx, input_height)); default: PADDLE_THROW("Error"); } } }; #undef FIXED_BLOCK_DIM_BASE #undef FIXED_BLOCK_DIM } // namespace operators } // namespace paddle REGISTER_OP_CUDA_KERNEL(top_k, paddle::operators::TopkOpCUDAKernel<float>, paddle::operators::TopkOpCUDAKernel<double>);
2a4c9990dede20c8e89f9e43f0ecd91cdf0d9b4a.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/top_k_op.h" #include "paddle/fluid/platform/assert.h" #include "paddle/fluid/platform/cuda_device_function.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; template <typename T> struct Pair { __device__ __forceinline__ Pair() {} __device__ __forceinline__ Pair(T value, int64_t id) : v(value), id(id) {} __device__ __forceinline__ void set(T value, int64_t id) { v = value; id = id; } __device__ __forceinline__ void operator=(const Pair<T>& in) { v = in.v; id = in.id; } __device__ __forceinline__ bool operator<(const T value) const { return (v < value); } __device__ __forceinline__ bool operator<(const Pair<T>& in) const { return (v < in.v) || ((v == in.v) && (id > in.id)); } __device__ __forceinline__ bool operator>(const Pair<T>& in) const { return (v > in.v) || ((v == in.v) && (id < in.id)); } T v; int64_t id; }; template <typename T> __device__ __forceinline__ void AddTo(Pair<T> topk[], const Pair<T>& p, int beam_size) { for (int k = beam_size - 2; k >= 0; k--) { if (topk[k] < p) { topk[k + 1] = topk[k]; } else { topk[k + 1] = p; return; } } topk[0] = p; } template <typename T, int beam_size> __device__ __forceinline__ void AddTo(Pair<T> topk[], const Pair<T>& p) { for (int k = beam_size - 2; k >= 0; k--) { if (topk[k] < p) { topk[k + 1] = topk[k]; } else { topk[k + 1] = p; return; } } topk[0] = p; } template <typename T, int BlockSize> __device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* src, int idx, int dim, int beam_size) { while (idx < dim) { if (topk[beam_size - 1] < src[idx]) { Pair<T> tmp(src[idx], idx); AddTo<T>(topk, tmp, beam_size); } idx += BlockSize; } } template <typename T, int BlockSize> __device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* src, int idx, int dim, const Pair<T>& max, int beam_size) { while (idx < dim) { if (topk[beam_size - 1] < src[idx]) { Pair<T> tmp(src[idx], idx); if (tmp < max) { AddTo<T>(topk, tmp, beam_size); } } idx += BlockSize; } } template <typename T, int BlockSize> __device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* val, int* col, int idx, int dim, int beam_size) { while (idx < dim) { if (topk[beam_size - 1] < val[idx]) { Pair<T> tmp(val[idx], col[idx]); AddTo<T>(topk, tmp, beam_size); } idx += BlockSize; } } template <typename T, int BlockSize> __device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* val, int* col, int idx, int dim, const Pair<T>& max, int beam_size) { while (idx < dim) { if (topk[beam_size - 1] < val[idx]) { Pair<T> tmp(val[idx], col[idx]); if (tmp < max) { AddTo<T>(topk, tmp, beam_size); } } idx += BlockSize; } } template <typename T, int MaxLength, int BlockSize> __device__ __forceinline__ void ThreadGetTopK(Pair<T> topk[], int* beam, int beam_size, const T* src, bool* firstStep, bool* is_empty, Pair<T>* max, int dim, const int tid) { if (*beam > 0) { int length = (*beam) < beam_size ? 
*beam : beam_size; if (*firstStep) { *firstStep = false; GetTopK<T, BlockSize>(topk, src, tid, dim, length); } else { for (int k = 0; k < MaxLength; k++) { if (k < MaxLength - (*beam)) { topk[k] = topk[k + *beam]; } else { topk[k].set(-INFINITY, -1); } } if (!(*is_empty)) { GetTopK<T, BlockSize>(topk + MaxLength - *beam, src, tid, dim, *max, length); } } *max = topk[MaxLength - 1]; if ((*max).v == -1) *is_empty = true; *beam = 0; } } template <typename T, int MaxLength, int BlockSize> __device__ __forceinline__ void ThreadGetTopK(Pair<T> topk[], int* beam, int beam_size, const T* val, int* col, bool* firstStep, bool* is_empty, Pair<T>* max, int dim, const int tid) { if (*beam > 0) { int length = (*beam) < beam_size ? *beam : beam_size; if (*firstStep) { *firstStep = false; GetTopK<T, BlockSize>(topk, val, col, tid, dim, length); } else { for (int k = 0; k < MaxLength; k++) { if (k < MaxLength - *beam) { topk[k] = topk[k + *beam]; } else { topk[k].set(-INFINITY, -1); } } if (!(*is_empty)) { GetTopK<T, BlockSize>(topk + MaxLength - *beam, val, col, tid, dim, max, length); } } *max = topk[MaxLength - 1]; if ((*max).v == -1) *is_empty = true; *beam = 0; } } template <typename T, int MaxLength, int BlockSize> __device__ __forceinline__ void BlockReduce(Pair<T>* sh_topk, int* maxid, Pair<T> topk[], T** topVal, int64_t** topIds, int* beam, int* k, const int tid, const int warp) { while (true) { __syncthreads(); if (tid < BlockSize / 2) { if (sh_topk[tid] < sh_topk[tid + BlockSize / 2]) { maxid[tid] = tid + BlockSize / 2; } else { maxid[tid] = tid; } } __syncthreads(); for (int stride = BlockSize / 4; stride > 0; stride = stride / 2) { if (tid < stride) { if (sh_topk[maxid[tid]] < sh_topk[maxid[tid + stride]]) { maxid[tid] = maxid[tid + stride]; } } __syncthreads(); } __syncthreads(); if (tid == 0) { **topVal = sh_topk[maxid[0]].v; **topIds = sh_topk[maxid[0]].id; (*topVal)++; (*topIds)++; } if (tid == maxid[0]) (*beam)++; if (--(*k) == 0) break; __syncthreads(); if (tid == maxid[0]) { if (*beam < MaxLength) { sh_topk[tid] = topk[*beam]; } } // NOTE(zcd): temporary solution unsigned mask = 0u; CREATE_SHFL_MASK(mask, true); if (maxid[0] / 32 == warp) { if (platform::CudaShuffleSync(mask, *beam, (maxid[0]) % 32, 32) == MaxLength) break; } } } /** * Each block compute one sample. * In a block: * 1. every thread get top MaxLength value; * 2. merge to sh_topk, block reduce and get max value; * 3. go to the second setp, until one thread's topk value is null; * 4. go to the first setp, until get the topk value. 
*/ template <typename T, int MaxLength, int BlockSize> __global__ void KeMatrixTopK(T* output, int output_stride, int64_t* indices, const T* src, int lds, int dim, int k, int grid_dim, int num) { __shared__ Pair<T> sh_topk[BlockSize]; const int tid = threadIdx.x; const int warp = threadIdx.x / 32; const int bid = blockIdx.x; for (int i = bid; i < num; i += grid_dim) { int top_num = k; __shared__ int maxid[BlockSize / 2]; T* out = output + i * output_stride; int64_t* inds = indices + i * k; Pair<T> topk[MaxLength]; int beam = MaxLength; Pair<T> max; bool is_empty = false; bool firststep = true; for (int j = 0; j < MaxLength; j++) { topk[j].set(-INFINITY, -1); } while (top_num) { ThreadGetTopK<T, MaxLength, BlockSize>( topk, &beam, k, src + i * lds, &firststep, &is_empty, &max, dim, tid); sh_topk[tid] = topk[0]; BlockReduce<T, MaxLength, BlockSize>(sh_topk, maxid, topk, &out, &inds, &beam, &top_num, tid, warp); } } } inline static int GetDesiredBlockDim(int dim) { if (dim > 128) { return 256; } else if (dim > 64) { return 128; } else if (dim > 32) { return 64; } else { return 32; } } #define FIXED_BLOCK_DIM_BASE(dim, ...) \ case (dim): { \ constexpr auto kBlockDim = (dim); \ __VA_ARGS__; \ } break #define FIXED_BLOCK_DIM(...) \ FIXED_BLOCK_DIM_BASE(256, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(128, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(64, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(32, ##__VA_ARGS__) template <typename T> class TopkOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), "It must use CUDAPlace."); auto* input = ctx.Input<Tensor>("X"); auto* output = ctx.Output<Tensor>("Out"); auto* indices = ctx.Output<Tensor>("Indices"); size_t k = static_cast<int>(ctx.Attr<int>("k")); const T* input_data = input->data<T>(); T* output_data = output->mutable_data<T>(ctx.GetPlace()); // FIXME(typhoonzero): data is always converted to type T? int64_t* indices_data = indices->mutable_data<int64_t>(ctx.GetPlace()); framework::DDim inputdims = input->dims(); const size_t input_height = framework::product( framework::slice_ddim(inputdims, 0, inputdims.size() - 1)); const size_t input_width = inputdims[inputdims.size() - 1]; if (k > input_width) k = input_width; // NOTE: pass lds and dim same to input width. // NOTE: old matrix implementation of stride is different to eigen. // TODO(typhoonzero): refine this kernel. const int kMaxHeight = 2048; int gridx = input_height < kMaxHeight ? input_height : kMaxHeight; auto& dev_ctx = ctx.cuda_device_context(); switch (GetDesiredBlockDim(input_width)) { FIXED_BLOCK_DIM( KeMatrixTopK<T, 5, kBlockDim><<<gridx, kBlockDim, 0, dev_ctx.stream()>>>( output_data, k, indices_data, input_data, input_width, input_width, static_cast<int>(k), gridx, input_height)); default: PADDLE_THROW("Error"); } } }; #undef FIXED_BLOCK_DIM_BASE #undef FIXED_BLOCK_DIM } // namespace operators } // namespace paddle REGISTER_OP_CUDA_KERNEL(top_k, paddle::operators::TopkOpCUDAKernel<float>, paddle::operators::TopkOpCUDAKernel<double>);
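The block comment above summarizes the per-block top-k strategy of KeMatrixTopK. For reference, the host function below computes the same per-row answer: the k largest values in descending order with their column indices, ties broken toward the smaller index (matching Pair<T>::operator<). It is a sketch for comparison only, not PaddlePaddle code.

#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

void TopKRowReference(const float* row, int dim, int k,
                      std::vector<float>* values, std::vector<int64_t>* indices)
{
    std::vector<std::pair<float, int64_t>> entries(dim);
    for (int i = 0; i < dim; ++i) entries[i] = std::make_pair(row[i], static_cast<int64_t>(i));
    // Descending by value; on equal values the smaller index wins.
    std::sort(entries.begin(), entries.end(),
              [](const std::pair<float, int64_t>& a, const std::pair<float, int64_t>& b) {
                  return a.first > b.first || (a.first == b.first && a.second < b.second);
              });
    values->resize(k);
    indices->resize(k);
    for (int i = 0; i < k; ++i) {
        (*values)[i] = entries[i].first;
        (*indices)[i] = entries[i].second;
    }
}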
5b303a1bbe128c9cc2c0be84843de4301f71cbae.hip
// !!! This is a file automatically generated by hipify!!!
#include "mtbs_cu.h"

#include <pthread.h>

#include "stream.h"

extern unsigned n_streams;

static vStrm_t *vStrms;
static unsigned idx_allocator;

static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;

vstream_t
create_vstream(void)
{
	vstrm_t vstrm;

	pthread_mutex_lock(&mutex);
	vstrm = &vStrms[idx_allocator];
	idx_allocator = (idx_allocator + 1) % n_streams;
	vstrm->refcnt++;
	pthread_mutex_unlock(&mutex);

	return vstrm;
}

void
destroy_vstream(vstream_t strm)
{
	vstrm_t vstrm = (vstrm_t)strm;

	pthread_mutex_lock(&mutex);
	assert(vstrm->refcnt > 0);
	vstrm->refcnt--;
	pthread_mutex_unlock(&mutex);
}

void
init_streams(void)
{
	unsigned i;

	vStrms = (vStrm_t *)malloc(sizeof(vStrm_t) * n_streams);
	for (i = 0; i < n_streams; i++) {
		hipError_t res;

		// The driver-API call cuStreamCreate(&s, CU_STREAM_NON_BLOCKING) in the original
		// source maps to the HIP runtime call hipStreamCreateWithFlags(&s, hipStreamNonBlocking).
		res = hipStreamCreateWithFlags(&vStrms[i].cudaStrm, hipStreamNonBlocking);
		if (res != hipSuccess) {
			error("stream creation failed: too many streams?");
			exit(11);
		}
		vStrms[i].refcnt = 0;
	}
}
5b303a1bbe128c9cc2c0be84843de4301f71cbae.cu
#include "mtbs_cu.h"

#include <pthread.h>

#include "stream.h"

extern unsigned n_streams;

static vStrm_t *vStrms;
static unsigned idx_allocator;

static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;

vstream_t
create_vstream(void)
{
	vstrm_t vstrm;

	pthread_mutex_lock(&mutex);
	vstrm = &vStrms[idx_allocator];
	idx_allocator = (idx_allocator + 1) % n_streams;
	vstrm->refcnt++;
	pthread_mutex_unlock(&mutex);

	return vstrm;
}

void
destroy_vstream(vstream_t strm)
{
	vstrm_t vstrm = (vstrm_t)strm;

	pthread_mutex_lock(&mutex);
	assert(vstrm->refcnt > 0);
	vstrm->refcnt--;
	pthread_mutex_unlock(&mutex);
}

void
init_streams(void)
{
	unsigned i;

	vStrms = (vStrm_t *)malloc(sizeof(vStrm_t) * n_streams);
	for (i = 0; i < n_streams; i++) {
		CUresult res;

		res = cuStreamCreate(&vStrms[i].cudaStrm, CU_STREAM_NON_BLOCKING);
		if (res != CUDA_SUCCESS) {
			error("stream creation failed: too many streams?");
			exit(11);
		}
		vStrms[i].refcnt = 0;
	}
}
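A hypothetical usage sketch for the round-robin virtual-stream pool above: each create_vstream() call hands back one of the n_streams pre-created streams and bumps its reference count, so many logical clients share a fixed set of hardware streams. It assumes the vstream_t declarations from mtbs_cu.h / stream.h and is not part of the original source.

// Assumes the declarations from mtbs_cu.h / stream.h used by the file above.
void submit_two_jobs(void)
{
	vstream_t a = create_vstream();   // round-robin: a and b may alias the same CUDA stream
	vstream_t b = create_vstream();

	// ... enqueue kernels or copies on the underlying streams ...

	destroy_vstream(a);               // only drops the refcount; the pool owns the streams
	destroy_vstream(b);
}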
83fa906f2e24d593eb4426b19bc0666c7f8b46cd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2017 NVIDIA Corporation * * The U.S. Department of Energy funded the development of this software * under subcontract B609478 with Lawrence Livermore National Security, LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cstdio> #include "snap_types.h" #include "accessor.h" #include "snap_cuda_help.h" using namespace LegionRuntime::Arrays; using namespace LegionRuntime::Accessor; // Some bounds for use of GPU kernels, can be modified easily // Be careful about memory usage, modifying MAX_X_CHUNK and // MAX_Y_CHUNK will influence how much local memory must be // allocated for each kernel #define MAX_ANGLES 2048 #define MAX_X_CHUNK 16 #define MAX_Y_CHUNK 16 // Don't use the __constant__ qualifier here! // Each thread in a warp will be indexing on // a per angle basis and we don't want replays // when they don't all hit the same constant index __device__ double device_ec[8/*corners*/*4/*moments*/*MAX_ANGLES]; __device__ double device_mu[MAX_ANGLES]; __device__ double device_eta[MAX_ANGLES]; __device__ double device_xi[MAX_ANGLES]; __device__ double device_w[MAX_ANGLES]; __host__ void initialize_gpu_context(const double *ec_h, const double *mu_h, const double *eta_h, const double *xi_h, const double *w_h, const int num_angles, const int num_moments, const int num_octants, const int nx_per_chunk, const int ny_per_chunk) { // Check the bounds first if (num_angles > MAX_ANGLES) printf("ERROR: adjust MAX_ANGLES in gpu_sweep.cu to %d", num_angles); assert(num_angles <= MAX_ANGLES); if (nx_per_chunk > MAX_X_CHUNK) printf("ERROR: adjust MAX_X_CHUNK in gpu_sweep.cu to %d", nx_per_chunk); assert(nx_per_chunk <= MAX_X_CHUNK); if (ny_per_chunk > MAX_Y_CHUNK) printf("ERROR: adjust MAX_Y_CHUNK in gpu_sweep.cu to %d", ny_per_chunk); assert(ny_per_chunk <= MAX_Y_CHUNK); hipMemcpyToSymbol(device_ec, ec_h, num_angles * num_moments * num_octants * sizeof(double)); hipMemcpyToSymbol(device_mu, mu_h, num_angles * sizeof(double)); hipMemcpyToSymbol(device_eta, eta_h, num_angles * sizeof(double)); hipMemcpyToSymbol(device_xi, xi_h, num_angles * sizeof(double)); hipMemcpyToSymbol(device_w, w_h, num_angles * sizeof(double)); } // This is from expxs but it uses the same constants template<int GROUPS> __global__ void gpu_geometry_param(const PointerBuffer<GROUPS,double> xs_ptrs, PointerBuffer<GROUPS,double> dinv_ptrs, const ByteOffsetArray<3> xs_offsets, const ByteOffsetArray<3> dinv_offsets, const ConstBuffer<GROUPS,double> vdelt, const double hi, const double hj, const double hk, const int angles_per_thread) { const int x = blockIdx.x; const int y = blockIdx.y; const int z = blockIdx.z; for (int i = 0; i < angles_per_thread; i++) { const int ang = i * blockDim.x + threadIdx.x; const double sum = hi * device_mu[ang] + hj * device_eta[ang] + hk * device_xi[ang]; #pragma unroll for (int g = 0; g < GROUPS; g++) { const double *xs_ptr = xs_ptrs[g] + x * xs_offsets[0] + y * xs_offsets[1] + z * xs_offsets[2]; double xs; // 
Cache this at all levels since it is shared across all threads in the CTA asm volatile("ld.global.ca.f64 %0, [%1];" : "=d"(xs) : "l"(xs_ptr) : "memory"); double result = 1.0 / (xs + vdelt[g] + sum); double *dinv_ptr = dinv_ptrs[g] + x * dinv_offsets[0] + y * dinv_offsets[1] + z * dinv_offsets[2]; asm volatile("st.global.cs.f64 [%0], %1;" : : "l"(dinv_ptr+ang), "d"(result) : "memory"); } } } __host__ void run_geometry_param(const std::vector<double*> &xs_ptrs, const std::vector<double*> &dinv_ptrs, const ByteOffset xs_offsets[3], const ByteOffset dinv_offsets[3], const std::vector<double> &vdelts, const double hi, const double hj, const double hk, const Rect<3> &subgrid_bounds, const int num_angles) { // Figure out the launch bounds, then dispatch const int x_range = (subgrid_bounds.hi[0] - subgrid_bounds.lo[0]) + 1; const int y_range = (subgrid_bounds.hi[1] - subgrid_bounds.lo[1]) + 1; const int z_range = (subgrid_bounds.hi[2] - subgrid_bounds.lo[2]) + 1; const int max_threads_per_cta = 1024; const int angles_per_thread = (num_angles + max_threads_per_cta - 1) / max_threads_per_cta; // Have to be evenly divisible for now assert((num_angles % angles_per_thread) == 0); const int threads_per_cta = num_angles / angles_per_thread; dim3 block(threads_per_cta, 1, 1); dim3 grid(x_range, y_range, z_range); // TODO: Replace template foolishness with terra assert(xs_ptrs.size() == dinv_ptrs.size()); switch (xs_ptrs.size()) { case 1: { gpu_geometry_param<1><<<grid,block>>>( PointerBuffer<1,double>(xs_ptrs), PointerBuffer<1,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<1,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 2: { gpu_geometry_param<2><<<grid,block>>>( PointerBuffer<2,double>(xs_ptrs), PointerBuffer<2,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<2,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 3: { gpu_geometry_param<3><<<grid,block>>>( PointerBuffer<3,double>(xs_ptrs), PointerBuffer<3,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<3,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 4: { gpu_geometry_param<4><<<grid,block>>>( PointerBuffer<4,double>(xs_ptrs), PointerBuffer<4,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<4,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 5: { gpu_geometry_param<5><<<grid,block>>>( PointerBuffer<5,double>(xs_ptrs), PointerBuffer<5,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<5,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 6: { gpu_geometry_param<6><<<grid,block>>>( PointerBuffer<6,double>(xs_ptrs), PointerBuffer<6,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<6,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 7: { gpu_geometry_param<7><<<grid,block>>>( PointerBuffer<7,double>(xs_ptrs), PointerBuffer<7,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<7,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 8: { gpu_geometry_param<8><<<grid,block>>>( PointerBuffer<8,double>(xs_ptrs), PointerBuffer<8,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<8,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 9: { gpu_geometry_param<9><<<grid,block>>>( 
PointerBuffer<9,double>(xs_ptrs), PointerBuffer<9,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<9,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 10: { gpu_geometry_param<10><<<grid,block>>>( PointerBuffer<10,double>(xs_ptrs), PointerBuffer<10,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<10,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 11: { gpu_geometry_param<11><<<grid,block>>>( PointerBuffer<11,double>(xs_ptrs), PointerBuffer<11,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<11,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 12: { gpu_geometry_param<12><<<grid,block>>>( PointerBuffer<12,double>(xs_ptrs), PointerBuffer<12,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<12,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 13: { gpu_geometry_param<13><<<grid,block>>>( PointerBuffer<13,double>(xs_ptrs), PointerBuffer<13,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<13,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 14: { gpu_geometry_param<14><<<grid,block>>>( PointerBuffer<14,double>(xs_ptrs), PointerBuffer<14,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<14,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 15: { gpu_geometry_param<15><<<grid,block>>>( PointerBuffer<15,double>(xs_ptrs), PointerBuffer<15,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<15,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 16: { gpu_geometry_param<16><<<grid,block>>>( PointerBuffer<16,double>(xs_ptrs), PointerBuffer<16,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<16,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 24: { gpu_geometry_param<24><<<grid,block>>>( PointerBuffer<24,double>(xs_ptrs), PointerBuffer<24,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<24,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 32: { gpu_geometry_param<32><<<grid,block>>>( PointerBuffer<32,double>(xs_ptrs), PointerBuffer<32,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<32,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 40: { gpu_geometry_param<40><<<grid,block>>>( PointerBuffer<40,double>(xs_ptrs), PointerBuffer<40,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<40,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 48: { gpu_geometry_param<48><<<grid,block>>>( PointerBuffer<48,double>(xs_ptrs), PointerBuffer<48,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<48,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 56: { gpu_geometry_param<56><<<grid,block>>>( PointerBuffer<56,double>(xs_ptrs), PointerBuffer<56,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<56,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 64: { gpu_geometry_param<64><<<grid,block>>>( PointerBuffer<64,double>(xs_ptrs), PointerBuffer<64,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), 
ConstBuffer<64,double>(vdelts), hi, hj, hk, angles_per_thread); break; } default: assert(false); // need more cases } } __device__ __forceinline__ ByteOffset operator*(const ByteOffsetArray<2> &offsets, const Point<2> &point) { return (offsets[0] * point.x[0] + offsets[1] * point.x[1]); } __device__ __forceinline__ ByteOffset operator*(const ByteOffsetArray<3> &offsets, const Point<3> &point) { return (offsets[0] * point.x[0] + offsets[1] * point.x[1] + offsets[2] * point.x[2]); } __device__ __forceinline__ void ourAtomicAdd(double *ptr, double value) { #if __CUDA_ARCH__ < 600 unsigned long long int* address_as_ull = (unsigned long long int*)ptr; unsigned long long int old = *address_as_ull; unsigned long long int assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(value + __longlong_as_double(assumed))); } while (assumed != old); #else // We have double precision atomicAdd starting in Pascal atomicAdd(ptr, value); #endif } template<int DIM> __device__ __forceinline__ double angle_read(const double *ptr, const ByteOffsetArray<DIM> &offset, const Point<DIM> &point, int ang) { ptr += (offset * point); ptr += ang * blockDim.x + threadIdx.x; double result; asm volatile("ld.global.cs.f64 %0, [%1];" : "=d"(result) : "l"(ptr) : "memory"); return result; } template<int DIM> __device__ __forceinline__ void angle_write(double *ptr, const ByteOffsetArray<DIM> &offset, const Point<DIM> &point, int ang, double val) { ptr += (offset * point); ptr += ang * blockDim.x + threadIdx.x; asm volatile("st.global.cs.f64 [%0], %1;" : : "l"(ptr), "d"(val) : "memory"); } __device__ __forceinline__ Point<2> ghostx_point(const Point<3> &local_point) { Point<2> ghost; ghost.x[0] = local_point.x[1]; // y ghost.x[1] = local_point.x[2]; // z return ghost; } __device__ __forceinline__ Point<2> ghosty_point(const Point<3> &local_point) { Point<2> ghost; ghost.x[0] = local_point.x[0]; // x ghost.x[1] = local_point.x[2]; // z return ghost; } __device__ __forceinline__ Point<2> ghostz_point(const Point<3> &local_point) { Point<2> ghost; ghost.x[0] = local_point.x[0]; // x ghost.x[1] = local_point.x[1]; // y return ghost; } template<int THR_ANGLES> __global__ void gpu_time_dependent_sweep_with_fixup(const Point<3> origin, const MomentQuad *qtot_ptr, double *flux_ptr, MomentTriple *fluxm_ptr, const double *dinv_ptr, const double *time_flux_in_ptr, double *time_flux_out_ptr, const double *t_xs_ptr, double *ghostx_ptr, double *ghosty_ptr, double *ghostz_ptr, const double *qim_ptr, const ByteOffsetArray<3> qtot_offsets, const ByteOffsetArray<3> flux_offsets, const ByteOffsetArray<3> fluxm_offsets, const ByteOffsetArray<3> dinv_offsets, const ByteOffsetArray<3> time_flux_in_offsets, const ByteOffsetArray<3> time_flux_out_offsets, const ByteOffsetArray<3> t_xs_offsets, const ByteOffsetArray<2> ghostx_offsets, const ByteOffsetArray<2> ghosty_offsets, const ByteOffsetArray<2> ghostz_offsets, const ByteOffsetArray<3> qim_offsets, const int x_range, const int y_range, const int z_range, const int corner, const bool stride_x_positive, const bool stride_y_positive, const bool stride_z_positive, const bool mms_source, const int num_moments, const double hi, const double hj, const double hk, const double vdelt) { __shared__ int int_trampoline[32]; __shared__ double double_trampoline[32]; double psi[THR_ANGLES]; double pc[THR_ANGLES]; double psii[THR_ANGLES]; double psij[THR_ANGLES]; double psik[THR_ANGLES]; double hv_x[THR_ANGLES]; double hv_y[THR_ANGLES]; double hv_z[THR_ANGLES]; double 
hv_t[THR_ANGLES]; double fx_hv_x[THR_ANGLES]; double fx_hv_y[THR_ANGLES]; double fx_hv_z[THR_ANGLES]; double fx_hv_t[THR_ANGLES]; double time_flux_in[THR_ANGLES]; const int num_angles = THR_ANGLES * blockDim.x; const int corner_offset = corner * num_angles * num_moments; unsigned laneid; asm volatile("mov.u32 %0, %laneid;" : "=r"(laneid) : ); unsigned warpid = threadIdx.x >> 5; // These will be intentionally spilled to local memory // because the CUDA compiler can't statically understand // all their accesses, which is where we actualy want them double yflux_pencil[MAX_X_CHUNK][THR_ANGLES]; double zflux_plane[MAX_Y_CHUNK][MAX_X_CHUNK][THR_ANGLES]; const double tolr = 1.0e-12; for (int z = 0; z < z_range; z++) { for (int y = 0; y < y_range; y++) { for (int x = 0; x < x_range; x++) { // Figure out the local point that we are working on Point<3> local_point = origin; if (stride_x_positive) local_point.x[0] += x; else local_point.x[0] -= x; if (stride_y_positive) local_point.x[1] += y; else local_point.x[1] -= y; if (stride_z_positive) local_point.x[2] += z; else local_point.x[2] -= z; // Compute the angular source MomentQuad quad = *(qtot_ptr + qtot_offsets * local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psi[ang] = quad[0]; if (num_moments > 1) { for (int l = 1; l < num_moments; l++) { const int moment_offset = corner_offset + l * num_angles; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { psi[ang] += device_ec[moment_offset+ang*blockDim.x+threadIdx.x] * quad[l]; } } } // If we're doing MMS if (mms_source) { #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psi[ang] += angle_read<3>(qim_ptr, qim_offsets, local_point, ang); } // Compute the initial solution #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] = psi[ang]; // X ghost cells if (x == 0) { // Ghost cell array Point<2> ghost_point = ghostx_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psii[ang] = angle_read<2>(ghostx_ptr, ghostx_offsets, ghost_point, ang); } // Else nothing: psii already contains next flux #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] += psii[ang] * device_mu[ang*blockDim.x + threadIdx.x] * hi; // Y ghost cells if (y == 0) { // Ghost cell array Point<2> ghost_point = ghosty_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psij[ang] = angle_read<2>(ghosty_ptr, ghosty_offsets, ghost_point, ang); } else { // Local array #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psij[ang] = yflux_pencil[x][ang]; } #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] += psij[ang] * device_eta[ang * blockDim.x + threadIdx.x] * hj; // Z ghost cells if (z == 0) { // Ghost cell array Point<2> ghost_point = ghostz_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psik[ang] = angle_read<2>(ghostz_ptr, ghostz_offsets, ghost_point, ang); } else { // Local array #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psik[ang] = zflux_plane[y][x][ang]; } #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] += psik[ang] * device_xi[ang * blockDim.x + threadIdx.x] * hk; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { time_flux_in[ang] = angle_read<3>(time_flux_in_ptr, time_flux_in_offsets, local_point, ang); pc[ang] += vdelt * time_flux_in[ang]; } #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { double dinv = angle_read<3>(dinv_ptr, dinv_offsets, local_point, ang); pc[ang] *= dinv; } // DO THE FIXUP #pragma unroll for 
(int ang = 0; ang < THR_ANGLES; ang++) hv_x[ang] = 1.0; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) hv_y[ang] = 1.0; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) hv_z[ang] = 1.0; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) hv_t[ang] = 1.0; const double t_xs = *(t_xs_ptr + t_xs_offsets * local_point); int old_negative_fluxes = 0; while (true) { unsigned negative_fluxes = 0; // Figure out how many negative fluxes we have #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { fx_hv_x[ang] = 2.0 * pc[ang] - psii[ang]; if (fx_hv_x[ang] < 0.0) { hv_x[ang] = 0.0; negative_fluxes++; } } #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { fx_hv_y[ang] = 2.0 * pc[ang] - psij[ang]; if (fx_hv_y[ang] < 0.0) { hv_y[ang] = 0.0; negative_fluxes++; } } #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { fx_hv_z[ang] = 2.0 * pc[ang] - psik[ang]; if (fx_hv_z[ang] < 0.0) { hv_z[ang] = 0.0; negative_fluxes++; } } #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { fx_hv_t[ang] = 2.0 * pc[ang] - time_flux_in[ang]; if (fx_hv_t[ang] < 0.0) { hv_t[ang] = 0.0; negative_fluxes++; } } // CTA-wide reduction #pragma unroll for (int i = 16; i >= 1; i /= 2) negative_fluxes += __shfl_xor(negative_fluxes, i, 32); // Initialize if (warpid == 0) int_trampoline[laneid] = 0; __syncthreads(); if (laneid == 0) int_trampoline[warpid] = negative_fluxes; __syncthreads(); negative_fluxes = int_trampoline[laneid]; #pragma unroll for (int i = 16; i >= 1; i /= 2) negative_fluxes += __shfl_xor(negative_fluxes, i, 32); // All threads have the same negative flux count now if (negative_fluxes == old_negative_fluxes) break; old_negative_fluxes = negative_fluxes; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { pc[ang] = psi[ang] + 0.5 * ( psii[ang] * device_mu[ang*blockDim.x + threadIdx.x] * hi * (1.0 + hv_x[ang]) + psij[ang] * device_eta[ang*blockDim.x + threadIdx.x] * hj * (1.0 + hv_y[ang]) + psik[ang] * device_xi[ang*blockDim.x + threadIdx.x] * hk * (1.0 + hv_z[ang]) + time_flux_in[ang] * vdelt * (1.0 + hv_t[ang]) ); double den = (pc[ang] <= 0.0) ? 
0.0 : (t_xs + device_mu[ang*blockDim.x + threadIdx.x] * hi * hv_x[ang] + device_eta[ang*blockDim.x + threadIdx.x] * hj * hv_y[ang] + device_xi[ang*blockDim.x + threadIdx.x] * hk * hv_z[ang] + vdelt * hv_t[ang]); if (den < tolr) pc[ang] = 0.0; else pc[ang] /= den; } } // Fixup done so compute the update values #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psii[ang] = fx_hv_x[ang] * hv_x[ang]; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psij[ang] = fx_hv_y[ang] * hv_y[ang]; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psik[ang] = fx_hv_z[ang] * hv_z[ang]; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { double time_flux_out = fx_hv_t[ang] * hv_t[ang]; angle_write<3>(time_flux_out_ptr, time_flux_out_offsets, local_point, ang, time_flux_out); } // Write out the ghost regions // X ghost if (x == (x_range - 1)) { Point<2> ghost_point = ghostx_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) angle_write<2>(ghostx_ptr, ghostx_offsets, ghost_point, ang, psii[ang]); } // Y ghost if (y == (y_range - 1)) { Point<2> ghost_point = ghosty_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) angle_write<2>(ghosty_ptr, ghosty_offsets, ghost_point, ang, psij[ang]); } else { // Write to the pencil #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) yflux_pencil[x][ang] = psij[ang]; } // Z ghost if (z == (z_range - 1)) { Point<2> ghost_point = ghostz_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) angle_write<2>(ghostz_ptr, ghostz_offsets, ghost_point, ang, psik[ang]); } else { #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) zflux_plane[y][x][ang] = psik[ang]; } // Finally we apply reductions to the flux moments double total = 0.0; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { psi[ang] = device_w[ang * blockDim.x + threadIdx.x] * pc[ang]; total += psi[ang]; } // CTA-wide reduction to one warp and then down to one thread #pragma unroll for (int i = 16; i >= 1; i /= 2) { int hi_part = __shfl_xor(__double2hiint(total), i, 32); int lo_part = __shfl_xor(__double2loint(total), i, 32); total += __hiloint2double(hi_part,lo_part); } if (warpid == 0) double_trampoline[laneid] = 0.0; __syncthreads(); if (laneid == 0) double_trampoline[warpid] = total; __syncthreads(); if (warpid == 0) { total = double_trampoline[laneid]; #pragma unroll for (int i = 16; i >= 1; i /= 2) { int hi_part = __shfl_xor(__double2hiint(total), i, 32); int lo_part = __shfl_xor(__double2loint(total), i, 32); total += __hiloint2double(hi_part,lo_part); } // Do the reduction if (laneid == 0) { double *local_flux = flux_ptr + flux_offsets * local_point; ourAtomicAdd(local_flux, total); } } if (num_moments > 1) { for (int l = 1; l < num_moments; l++) { const int offset = l * num_angles + corner * num_angles * num_moments; total = 0.0; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) total += device_ec[offset + ang] * psi[ang]; __syncthreads(); if (warpid == 0) double_trampoline[laneid] = 0.0; __syncthreads(); if (laneid == 0) double_trampoline[warpid] = total; __syncthreads(); if (warpid == 0) total = double_trampoline[laneid]; #pragma unroll for (int i = 16; i >= 1; i /= 2) { int hi_part = __shfl_xor(__double2hiint(total), i, 32); int lo_part = __shfl_xor(__double2loint(total), i, 32); total += __hiloint2double(hi_part,lo_part); } if (laneid == 0) { double *local_fluxm = (double*)(fluxm_ptr + fluxm_offsets * local_point); local_fluxm += (l-1); ourAtomicAdd(local_fluxm, total); } } 
} } } } } template<int THR_ANGLES> __global__ void gpu_time_dependent_sweep_without_fixup(const Point<3> origin, const MomentQuad *qtot_ptr, double *flux_ptr, MomentTriple *fluxm_ptr, const double *dinv_ptr, const double *time_flux_in_ptr, double *time_flux_out_ptr, double *ghostx_ptr, double *ghosty_ptr, double *ghostz_ptr, const double *qim_ptr, const ByteOffsetArray<3> qtot_offsets, const ByteOffsetArray<3> flux_offsets, const ByteOffsetArray<3> fluxm_offsets, const ByteOffsetArray<3> dinv_offsets, const ByteOffsetArray<3> time_flux_in_offsets, const ByteOffsetArray<3> time_flux_out_offsets, const ByteOffsetArray<2> ghostx_offsets, const ByteOffsetArray<2> ghosty_offsets, const ByteOffsetArray<2> ghostz_offsets, const ByteOffsetArray<3> qim_offsets, const int x_range, const int y_range, const int z_range, const int corner, const bool stride_x_positive, const bool stride_y_positive, const bool stride_z_positive, const bool mms_source, const int num_moments, const double hi, const double hj, const double hk, const double vdelt) { __shared__ double double_trampoline[32]; double psi[THR_ANGLES]; double pc[THR_ANGLES]; double psii[THR_ANGLES]; double psij[THR_ANGLES]; double psik[THR_ANGLES]; double time_flux_in[THR_ANGLES]; const int num_angles = THR_ANGLES * blockDim.x; const int corner_offset = corner * num_angles * num_moments; unsigned laneid; asm volatile("mov.u32 %0, %laneid;" : "=r"(laneid) : ); unsigned warpid = threadIdx.x >> 5; // These will be intentionally spilled to local memory // because the CUDA compiler can't statically understand // all their accesses, which is where we actualy want them double yflux_pencil[MAX_X_CHUNK][THR_ANGLES]; double zflux_plane[MAX_Y_CHUNK][MAX_X_CHUNK][THR_ANGLES]; for (int z = 0; z < z_range; z++) { for (int y = 0; y < y_range; y++) { for (int x = 0; x < x_range; x++) { // Figure out the local point that we are working on Point<3> local_point = origin; if (stride_x_positive) local_point.x[0] += x; else local_point.x[0] -= x; if (stride_y_positive) local_point.x[1] += y; else local_point.x[1] -= y; if (stride_z_positive) local_point.x[2] += z; else local_point.x[2] -= z; // Compute the angular source MomentQuad quad = *(qtot_ptr + qtot_offsets * local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psi[ang] = quad[0]; if (num_moments > 1) { for (int l = 1; l < num_moments; l++) { const int moment_offset = corner_offset + l * num_angles; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { psi[ang] += device_ec[moment_offset+ang*blockDim.x+threadIdx.x] * quad[l]; } } } // If we're doing MMS if (mms_source) { #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psi[ang] += angle_read<3>(qim_ptr, qim_offsets, local_point, ang); } // Compute the initial solution #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] = psi[ang]; // X ghost cells if (x == 0) { // Ghost cell array Point<2> ghost_point = ghostx_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psii[ang] = angle_read<2>(ghostx_ptr, ghostx_offsets, ghost_point, ang); } // Else nothing: psii already contains next flux #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] += psii[ang] * device_mu[ang*blockDim.x + threadIdx.x] * hi; // Y ghost cells if (y == 0) { // Ghost cell array Point<2> ghost_point = ghosty_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psij[ang] = angle_read<2>(ghosty_ptr, ghosty_offsets, ghost_point, ang); } else { // Local array #pragma unroll for (int ang = 
0; ang < THR_ANGLES; ang++) psij[ang] = yflux_pencil[x][ang]; } #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] += psij[ang] * device_eta[ang * blockDim.x + threadIdx.x] * hj; // Z ghost cells if (z == 0) { // Ghost cell array Point<2> ghost_point = ghostz_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psik[ang] = angle_read<2>(ghostz_ptr, ghostz_offsets, ghost_point, ang); } else { // Local array #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psik[ang] = zflux_plane[y][x][ang]; } #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] += psik[ang] * device_xi[ang * blockDim.x + threadIdx.x] * hk; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { time_flux_in[ang] = angle_read<3>(time_flux_in_ptr, time_flux_in_offsets, local_point, ang); pc[ang] += vdelt * time_flux_in[ang]; } #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { double dinv = angle_read<3>(dinv_ptr, dinv_offsets, local_point, ang); pc[ang] *= dinv; } // NO FIXUP #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psii[ang] = 2.0 * pc[ang] - psii[ang]; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psij[ang] = 2.0 * pc[ang] - psij[ang]; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psik[ang] = 2.0 * pc[ang] - psik[ang]; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { double time_flux_out = 2.0 * pc[ang] - time_flux_in[ang]; angle_write<3>(time_flux_out_ptr, time_flux_out_offsets, local_point, ang, time_flux_out); } // Write out the ghost regions // X ghost if (x == (x_range - 1)) { Point<2> ghost_point = ghostx_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) angle_write<2>(ghostx_ptr, ghostx_offsets, ghost_point, ang, psii[ang]); } // Y ghost if (y == (y_range - 1)) { Point<2> ghost_point = ghosty_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) angle_write<2>(ghosty_ptr, ghosty_offsets, ghost_point, ang, psij[ang]); } else { // Write to the pencil #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) yflux_pencil[x][ang] = psij[ang]; } // Z ghost if (z == (z_range - 1)) { Point<2> ghost_point = ghostz_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) angle_write<2>(ghostz_ptr, ghostz_offsets, ghost_point, ang, psik[ang]); } else { #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) zflux_plane[y][x][ang] = psik[ang]; } // Finally we apply reductions to the flux moments double total = 0.0; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { psi[ang] = device_w[ang * blockDim.x + threadIdx.x] * pc[ang]; total += psi[ang]; } // CTA-wide reduction to one warp and then down to one thread #pragma unroll for (int i = 16; i >= 1; i /= 2) { int hi_part = __shfl_xor(__double2hiint(total), i, 32); int lo_part = __shfl_xor(__double2loint(total), i, 32); total += __hiloint2double(hi_part,lo_part); } if (warpid == 0) double_trampoline[laneid] = 0.0; __syncthreads(); if (laneid == 0) double_trampoline[warpid] = total; __syncthreads(); if (warpid == 0) { total = double_trampoline[laneid]; #pragma unroll for (int i = 16; i >= 1; i /= 2) { int hi_part = __shfl_xor(__double2hiint(total), i, 32); int lo_part = __shfl_xor(__double2loint(total), i, 32); total += __hiloint2double(hi_part,lo_part); } // Do the reduction if (laneid == 0) { double *local_flux = flux_ptr + flux_offsets * local_point; ourAtomicAdd(local_flux, total); } } if (num_moments > 1) { for (int l = 1; l < num_moments; l++) { const int offset = l * 
num_angles + corner * num_angles * num_moments; total = 0.0; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) total += device_ec[offset + ang] * psi[ang]; __syncthreads(); if (warpid == 0) double_trampoline[laneid] = 0.0; __syncthreads(); if (laneid == 0) double_trampoline[warpid] = total; __syncthreads(); if (warpid == 0) total = double_trampoline[laneid]; #pragma unroll for (int i = 16; i >= 1; i /= 2) { int hi_part = __shfl_xor(__double2hiint(total), i, 32); int lo_part = __shfl_xor(__double2loint(total), i, 32); total += __hiloint2double(hi_part,lo_part); } if (laneid == 0) { double *local_fluxm = (double*)(fluxm_ptr + fluxm_offsets * local_point); local_fluxm += (l-1); ourAtomicAdd(local_fluxm, total); } } } } } } } template<int THR_ANGLES> __global__ void gpu_time_independent_sweep_with_fixup(const Point<3> origin, const MomentQuad *qtot_ptr, double *flux_ptr, MomentTriple *fluxm_ptr, const double *dinv_ptr, const double *t_xs_ptr, double *ghostx_ptr, double *ghosty_ptr, double *ghostz_ptr, const double *qim_ptr, const ByteOffsetArray<3> qtot_offsets, const ByteOffsetArray<3> flux_offsets, const ByteOffsetArray<3> fluxm_offsets, const ByteOffsetArray<3> dinv_offsets, const ByteOffsetArray<3> t_xs_offsets, const ByteOffsetArray<2> ghostx_offsets, const ByteOffsetArray<2> ghosty_offsets, const ByteOffsetArray<2> ghostz_offsets, const ByteOffsetArray<3> qim_offsets, const int x_range, const int y_range, const int z_range, const int corner, const bool stride_x_positive, const bool stride_y_positive, const bool stride_z_positive, const bool mms_source, const int num_moments, const double hi, const double hj, const double hk) { __shared__ int int_trampoline[32]; __shared__ double double_trampoline[32]; double psi[THR_ANGLES]; double pc[THR_ANGLES]; double psii[THR_ANGLES]; double psij[THR_ANGLES]; double psik[THR_ANGLES]; double hv_x[THR_ANGLES]; double hv_y[THR_ANGLES]; double hv_z[THR_ANGLES]; double fx_hv_x[THR_ANGLES]; double fx_hv_y[THR_ANGLES]; double fx_hv_z[THR_ANGLES]; const int num_angles = THR_ANGLES * blockDim.x; const int corner_offset = corner * num_angles * num_moments; unsigned laneid; asm volatile("mov.u32 %0, %laneid;" : "=r"(laneid) : ); unsigned warpid = threadIdx.x >> 5; // These will be intentionally spilled to local memory // because the CUDA compiler can't statically understand // all their accesses, which is where we actualy want them double yflux_pencil[MAX_X_CHUNK][THR_ANGLES]; double zflux_plane[MAX_Y_CHUNK][MAX_X_CHUNK][THR_ANGLES]; const double tolr = 1.0e-12; for (int z = 0; z < z_range; z++) { for (int y = 0; y < y_range; y++) { for (int x = 0; x < x_range; x++) { // Figure out the local point that we are working on Point<3> local_point = origin; if (stride_x_positive) local_point.x[0] += x; else local_point.x[0] -= x; if (stride_y_positive) local_point.x[1] += y; else local_point.x[1] -= y; if (stride_z_positive) local_point.x[2] += z; else local_point.x[2] -= z; // Compute the angular source MomentQuad quad = *(qtot_ptr + qtot_offsets * local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psi[ang] = quad[0]; if (num_moments > 1) { for (int l = 1; l < num_moments; l++) { const int moment_offset = corner_offset + l * num_angles; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { psi[ang] += device_ec[moment_offset+ang*blockDim.x+threadIdx.x] * quad[l]; } } } // If we're doing MMS if (mms_source) { #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psi[ang] += angle_read<3>(qim_ptr, qim_offsets, local_point, ang); } 
// Compute the initial solution #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] = psi[ang]; // X ghost cells if (x == 0) { // Ghost cell array Point<2> ghost_point = ghostx_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psii[ang] = angle_read<2>(ghostx_ptr, ghostx_offsets, ghost_point, ang); } // Else nothing: psii already contains next flux #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] += psii[ang] * device_mu[ang*blockDim.x + threadIdx.x] * hi; // Y ghost cells if (y == 0) { // Ghost cell array Point<2> ghost_point = ghosty_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psij[ang] = angle_read<2>(ghosty_ptr, ghosty_offsets, ghost_point, ang); } else { // Local array #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psij[ang] = yflux_pencil[x][ang]; } #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] += psij[ang] * device_eta[ang * blockDim.x + threadIdx.x] * hj; // Z ghost cells if (z == 0) { // Ghost cell array Point<2> ghost_point = ghostz_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psik[ang] = angle_read<2>(ghostz_ptr, ghostz_offsets, ghost_point, ang); } else { // Local array #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psik[ang] = zflux_plane[y][x][ang]; } #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] += psik[ang] * device_xi[ang * blockDim.x + threadIdx.x] * hk; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { double dinv = angle_read<3>(dinv_ptr, dinv_offsets, local_point, ang); pc[ang] *= dinv; } // DO THE FIXUP #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) hv_x[ang] = 1.0; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) hv_y[ang] = 1.0; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) hv_z[ang] = 1.0; const double t_xs = *(t_xs_ptr + t_xs_offsets * local_point); int old_negative_fluxes = 0; while (true) { unsigned negative_fluxes = 0; // Figure out how many negative fluxes we have #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { fx_hv_x[ang] = 2.0 * pc[ang] - psii[ang]; if (fx_hv_x[ang] < 0.0) { hv_x[ang] = 0.0; negative_fluxes++; } } #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { fx_hv_y[ang] = 2.0 * pc[ang] - psij[ang]; if (fx_hv_y[ang] < 0.0) { hv_y[ang] = 0.0; negative_fluxes++; } } #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { fx_hv_z[ang] = 2.0 * pc[ang] - psik[ang]; if (fx_hv_z[ang] < 0.0) { hv_z[ang] = 0.0; negative_fluxes++; } } // CTA-wide reduction #pragma unroll for (int i = 16; i >= 1; i /= 2) negative_fluxes += __shfl_xor(negative_fluxes, i, 32); // Initialize if (warpid == 0) int_trampoline[laneid] = 0; __syncthreads(); if (laneid == 0) int_trampoline[warpid] = negative_fluxes; __syncthreads(); negative_fluxes = int_trampoline[laneid]; #pragma unroll for (int i = 16; i >= 1; i /= 2) negative_fluxes += __shfl_xor(negative_fluxes, i, 32); // All threads have the same negative flux count now if (negative_fluxes == old_negative_fluxes) break; old_negative_fluxes = negative_fluxes; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { pc[ang] = psi[ang] + 0.5 * ( psii[ang] * device_mu[ang*blockDim.x + threadIdx.x] * hi * (1.0 + hv_x[ang]) + psij[ang] * device_eta[ang*blockDim.x + threadIdx.x] * hj * (1.0 + hv_y[ang]) + psik[ang] * device_xi[ang*blockDim.x + threadIdx.x] * hk * (1.0 + hv_z[ang]) ); double den = (pc[ang] <= 0.0) ? 
0.0 : (t_xs + device_mu[ang*blockDim.x + threadIdx.x] * hi * hv_x[ang] + device_eta[ang*blockDim.x + threadIdx.x] * hj * hv_y[ang] + device_xi[ang*blockDim.x + threadIdx.x] * hk * hv_z[ang]); if (den < tolr) pc[ang] = 0.0; else pc[ang] /= den; } } // Fixup done so compute the update values #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psii[ang] = fx_hv_x[ang] * hv_x[ang]; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psij[ang] = fx_hv_y[ang] * hv_y[ang]; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psik[ang] = fx_hv_z[ang] * hv_z[ang]; // Write out the ghost regions // X ghost if (x == (x_range - 1)) { Point<2> ghost_point = ghostx_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) angle_write<2>(ghostx_ptr, ghostx_offsets, ghost_point, ang, psii[ang]); } // Y ghost if (y == (y_range - 1)) { Point<2> ghost_point = ghosty_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) angle_write<2>(ghosty_ptr, ghosty_offsets, ghost_point, ang, psij[ang]); } else { // Write to the pencil #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) yflux_pencil[x][ang] = psij[ang]; } // Z ghost if (z == (z_range - 1)) { Point<2> ghost_point = ghostz_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) angle_write<2>(ghostz_ptr, ghostz_offsets, ghost_point, ang, psik[ang]); } else { #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) zflux_plane[y][x][ang] = psik[ang]; } // Finally we apply reductions to the flux moments double total = 0.0; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { psi[ang] = device_w[ang * blockDim.x + threadIdx.x] * pc[ang]; total += psi[ang]; } // CTA-wide reduction to one warp and then down to one thread #pragma unroll for (int i = 16; i >= 1; i /= 2) { int hi_part = __shfl_xor(__double2hiint(total), i, 32); int lo_part = __shfl_xor(__double2loint(total), i, 32); total += __hiloint2double(hi_part,lo_part); } if (warpid == 0) double_trampoline[laneid] = 0.0; __syncthreads(); if (laneid == 0) double_trampoline[warpid] = total; __syncthreads(); if (warpid == 0) { total = double_trampoline[laneid]; #pragma unroll for (int i = 16; i >= 1; i /= 2) { int hi_part = __shfl_xor(__double2hiint(total), i, 32); int lo_part = __shfl_xor(__double2loint(total), i, 32); total += __hiloint2double(hi_part,lo_part); } // Do the reduction if (laneid == 0) { double *local_flux = flux_ptr + flux_offsets * local_point; ourAtomicAdd(local_flux, total); } } if (num_moments > 1) { for (int l = 1; l < num_moments; l++) { const int offset = l * num_angles + corner * num_angles * num_moments; total = 0.0; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) total += device_ec[offset + ang] * psi[ang]; __syncthreads(); if (warpid == 0) double_trampoline[laneid] = 0.0; __syncthreads(); if (laneid == 0) double_trampoline[warpid] = total; __syncthreads(); if (warpid == 0) total = double_trampoline[laneid]; #pragma unroll for (int i = 16; i >= 1; i /= 2) { int hi_part = __shfl_xor(__double2hiint(total), i, 32); int lo_part = __shfl_xor(__double2loint(total), i, 32); total += __hiloint2double(hi_part,lo_part); } if (laneid == 0) { double *local_fluxm = (double*)(fluxm_ptr + fluxm_offsets * local_point); local_fluxm += (l-1); ourAtomicAdd(local_fluxm, total); } } } } } } } template<int THR_ANGLES> __global__ void gpu_time_independent_sweep_without_fixup(const Point<3> origin, const MomentQuad *qtot_ptr, double *flux_ptr, MomentTriple *fluxm_ptr, const double *dinv_ptr, const double 
*t_xs_ptr, double *ghostx_ptr, double *ghosty_ptr, double *ghostz_ptr, const double *qim_ptr, const ByteOffsetArray<3> qtot_offsets, const ByteOffsetArray<3> flux_offsets, const ByteOffsetArray<3> fluxm_offsets, const ByteOffsetArray<3> dinv_offsets, const ByteOffsetArray<3> t_xs_offsets, const ByteOffsetArray<2> ghostx_offsets, const ByteOffsetArray<2> ghosty_offsets, const ByteOffsetArray<2> ghostz_offsets, const ByteOffsetArray<3> qim_offsets, const int x_range, const int y_range, const int z_range, const int corner, const bool stride_x_positive, const bool stride_y_positive, const bool stride_z_positive, const bool mms_source, const int num_moments, const double hi, const double hj, const double hk) { __shared__ double double_trampoline[32]; double psi[THR_ANGLES]; double pc[THR_ANGLES]; double psii[THR_ANGLES]; double psij[THR_ANGLES]; double psik[THR_ANGLES]; const int num_angles = THR_ANGLES * blockDim.x; const int corner_offset = corner * num_angles * num_moments; unsigned laneid; asm volatile("mov.u32 %0, %laneid;" : "=r"(laneid) : ); unsigned warpid = threadIdx.x >> 5; // These will be intentionally spilled to local memory // because the CUDA compiler can't statically understand // all their accesses, which is where we actualy want them double yflux_pencil[MAX_X_CHUNK][THR_ANGLES]; double zflux_plane[MAX_Y_CHUNK][MAX_X_CHUNK][THR_ANGLES]; for (int z = 0; z < z_range; z++) { for (int y = 0; y < y_range; y++) { for (int x = 0; x < x_range; x++) { // Figure out the local point that we are working on Point<3> local_point = origin; if (stride_x_positive) local_point.x[0] += x; else local_point.x[0] -= x; if (stride_y_positive) local_point.x[1] += y; else local_point.x[1] -= y; if (stride_z_positive) local_point.x[2] += z; else local_point.x[2] -= z; // Compute the angular source MomentQuad quad = *(qtot_ptr + qtot_offsets * local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psi[ang] = quad[0]; if (num_moments > 1) { for (int l = 1; l < num_moments; l++) { const int moment_offset = corner_offset + l * num_angles; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { psi[ang] += device_ec[moment_offset+ang*blockDim.x+threadIdx.x] * quad[l]; } } } // If we're doing MMS if (mms_source) { #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psi[ang] += angle_read<3>(qim_ptr, qim_offsets, local_point, ang); } // Compute the initial solution #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] = psi[ang]; // X ghost cells if (x == 0) { // Ghost cell array Point<2> ghost_point = ghostx_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psii[ang] = angle_read<2>(ghostx_ptr, ghostx_offsets, ghost_point, ang); } // Else nothing: psii already contains next flux #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] += psii[ang] * device_mu[ang*blockDim.x + threadIdx.x] * hi; // Y ghost cells if (y == 0) { // Ghost cell array Point<2> ghost_point = ghosty_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psij[ang] = angle_read<2>(ghosty_ptr, ghosty_offsets, ghost_point, ang); } else { // Local array #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psij[ang] = yflux_pencil[x][ang]; } #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] += psij[ang] * device_eta[ang * blockDim.x + threadIdx.x] * hj; // Z ghost cells if (z == 0) { // Ghost cell array Point<2> ghost_point = ghostz_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psik[ang] = 
angle_read<2>(ghostz_ptr, ghostz_offsets, ghost_point, ang); } else { // Local array #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psik[ang] = zflux_plane[y][x][ang]; } #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] += psik[ang] * device_xi[ang * blockDim.x + threadIdx.x] * hk; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { double dinv = angle_read<3>(dinv_ptr, dinv_offsets, local_point, ang); pc[ang] *= dinv; } // NO FIXUP #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psii[ang] = 2.0 * pc[ang] - psii[ang]; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psij[ang] = 2.0 * pc[ang] - psij[ang]; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psik[ang] = 2.0 * pc[ang] - psik[ang]; // Write out the ghost regions // X ghost if (x == (x_range - 1)) { Point<2> ghost_point = ghostx_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) angle_write<2>(ghostx_ptr, ghostx_offsets, ghost_point, ang, psii[ang]); } // Y ghost if (y == (y_range - 1)) { Point<2> ghost_point = ghosty_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) angle_write<2>(ghosty_ptr, ghosty_offsets, ghost_point, ang, psij[ang]); } else { // Write to the pencil #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) yflux_pencil[x][ang] = psij[ang]; } // Z ghost if (z == (z_range - 1)) { Point<2> ghost_point = ghostz_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) angle_write<2>(ghostz_ptr, ghostz_offsets, ghost_point, ang, psik[ang]); } else { #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) zflux_plane[y][x][ang] = psik[ang]; } // Finally we apply reductions to the flux moments double total = 0.0; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { psi[ang] = device_w[ang * blockDim.x + threadIdx.x] * pc[ang]; total += psi[ang]; } // CTA-wide reduction to one warp and then down to one thread #pragma unroll for (int i = 16; i >= 1; i /= 2) { int hi_part = __shfl_xor(__double2hiint(total), i, 32); int lo_part = __shfl_xor(__double2loint(total), i, 32); total += __hiloint2double(hi_part,lo_part); } if (warpid == 0) double_trampoline[laneid] = 0.0; __syncthreads(); if (laneid == 0) double_trampoline[warpid] = total; __syncthreads(); if (warpid == 0) { total = double_trampoline[laneid]; #pragma unroll for (int i = 16; i >= 1; i /= 2) { int hi_part = __shfl_xor(__double2hiint(total), i, 32); int lo_part = __shfl_xor(__double2loint(total), i, 32); total += __hiloint2double(hi_part,lo_part); } // Do the reduction if (laneid == 0) { double *local_flux = flux_ptr + flux_offsets * local_point; ourAtomicAdd(local_flux, total); } } if (num_moments > 1) { for (int l = 1; l < num_moments; l++) { const int offset = l * num_angles + corner * num_angles * num_moments; total = 0.0; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) total += device_ec[offset + ang] * psi[ang]; __syncthreads(); if (warpid == 0) double_trampoline[laneid] = 0.0; __syncthreads(); if (laneid == 0) double_trampoline[warpid] = total; __syncthreads(); if (warpid == 0) total = double_trampoline[laneid]; #pragma unroll for (int i = 16; i >= 1; i /= 2) { int hi_part = __shfl_xor(__double2hiint(total), i, 32); int lo_part = __shfl_xor(__double2loint(total), i, 32); total += __hiloint2double(hi_part,lo_part); } if (laneid == 0) { double *local_fluxm = (double*)(fluxm_ptr + fluxm_offsets * local_point); local_fluxm += (l-1); ourAtomicAdd(local_fluxm, total); } } } } } } } __host__ void 
run_gpu_sweep(const Point<3> origin, const MomentQuad *qtot_ptr, double *flux_ptr, MomentTriple *fluxm_ptr, const double *dinv_ptr, const double *time_flux_in_ptr, double *time_flux_out_ptr, const double *t_xs_ptr, double *ghostx_ptr, double *ghosty_ptr, double *ghostz_ptr, const double *qim_ptr, const ByteOffset qtot_offsets[3], const ByteOffset flux_offsets[3], const ByteOffset fluxm_offsets[3], const ByteOffset dinv_offsets[3], const ByteOffset time_flux_in_offsets[3], const ByteOffset time_flux_out_offsets[3], const ByteOffset t_xs_offsets[3], const ByteOffset ghostx_offsets[2], const ByteOffset ghosty_offsets[2], const ByteOffset ghostz_offsets[2], const ByteOffset qim_offsets[3], const int x_range, const int y_range, const int z_range, const int corner, const bool stride_x_positive, const bool stride_y_positive, const bool stride_z_positive, const bool mms_source, const int num_moments, const double hi, const double hj, const double hk, const double vdelt, const int num_angles, const bool fixup) { // Figure out how many angles per thread we need const int max_threads_per_cta = 1024; const int angles_per_thread = (num_angles + max_threads_per_cta - 1) / max_threads_per_cta; // Have to be evenly divisible for now assert((num_angles % angles_per_thread) == 0); const int threads_per_cta = num_angles / angles_per_thread; dim3 block(threads_per_cta, 1, 1); // Teehee screw SKED! dim3 grid(1,1,1); if (fixup) { // Need fixup if (vdelt != 0.0) { // Time dependent switch (angles_per_thread) { case 1: { gpu_time_dependent_sweep_with_fixup<1><<<grid,block>>>(origin, qtot_ptr, flux_ptr, fluxm_ptr, dinv_ptr, time_flux_in_ptr, time_flux_out_ptr, t_xs_ptr, ghostx_ptr, ghosty_ptr, ghostz_ptr, qim_ptr, ByteOffsetArray<3>(qtot_offsets), ByteOffsetArray<3>(flux_offsets), ByteOffsetArray<3>(fluxm_offsets), ByteOffsetArray<3>(dinv_offsets), ByteOffsetArray<3>(time_flux_in_offsets), ByteOffsetArray<3>(time_flux_out_offsets), ByteOffsetArray<3>(t_xs_offsets), ByteOffsetArray<2>(ghostx_offsets), ByteOffsetArray<2>(ghosty_offsets), ByteOffsetArray<2>(ghostz_offsets), ByteOffsetArray<3>(qim_offsets), x_range, y_range, z_range, corner, stride_x_positive, stride_y_positive, stride_z_positive, mms_source, num_moments, hi, hj, hk, vdelt); break; } case 2: { gpu_time_dependent_sweep_with_fixup<2><<<grid,block>>>(origin, qtot_ptr, flux_ptr, fluxm_ptr, dinv_ptr, time_flux_in_ptr, time_flux_out_ptr, t_xs_ptr, ghostx_ptr, ghosty_ptr, ghostz_ptr, qim_ptr, ByteOffsetArray<3>(qtot_offsets), ByteOffsetArray<3>(flux_offsets), ByteOffsetArray<3>(fluxm_offsets), ByteOffsetArray<3>(dinv_offsets), ByteOffsetArray<3>(time_flux_in_offsets), ByteOffsetArray<3>(time_flux_out_offsets), ByteOffsetArray<3>(t_xs_offsets), ByteOffsetArray<2>(ghostx_offsets), ByteOffsetArray<2>(ghosty_offsets), ByteOffsetArray<2>(ghostz_offsets), ByteOffsetArray<3>(qim_offsets), x_range, y_range, z_range, corner, stride_x_positive, stride_y_positive, stride_z_positive, mms_source, num_moments, hi, hj, hk, vdelt); break; } default: printf("OH SNAP! That is a lot of angles! 
Add more cases!\n"); assert(false); } } else { // Time independent switch (angles_per_thread) { case 1: { gpu_time_independent_sweep_with_fixup<1><<<grid,block>>>(origin, qtot_ptr, flux_ptr, fluxm_ptr, dinv_ptr, t_xs_ptr, ghostx_ptr, ghosty_ptr, ghostz_ptr, qim_ptr, ByteOffsetArray<3>(qtot_offsets), ByteOffsetArray<3>(flux_offsets), ByteOffsetArray<3>(fluxm_offsets), ByteOffsetArray<3>(dinv_offsets), ByteOffsetArray<3>(t_xs_offsets), ByteOffsetArray<2>(ghostx_offsets), ByteOffsetArray<2>(ghosty_offsets), ByteOffsetArray<2>(ghostz_offsets), ByteOffsetArray<3>(qim_offsets), x_range, y_range, z_range, corner, stride_x_positive, stride_y_positive, stride_z_positive, mms_source, num_moments, hi, hj, hk); break; } case 2: { gpu_time_independent_sweep_with_fixup<2><<<grid,block>>>(origin, qtot_ptr, flux_ptr, fluxm_ptr, dinv_ptr, t_xs_ptr, ghostx_ptr, ghosty_ptr, ghostz_ptr, qim_ptr, ByteOffsetArray<3>(qtot_offsets), ByteOffsetArray<3>(flux_offsets), ByteOffsetArray<3>(fluxm_offsets), ByteOffsetArray<3>(dinv_offsets), ByteOffsetArray<3>(t_xs_offsets), ByteOffsetArray<2>(ghostx_offsets), ByteOffsetArray<2>(ghosty_offsets), ByteOffsetArray<2>(ghostz_offsets), ByteOffsetArray<3>(qim_offsets), x_range, y_range, z_range, corner, stride_x_positive, stride_y_positive, stride_z_positive, mms_source, num_moments, hi, hj, hk); break; } default: printf("ON SNAP! That is a lot of angles! Add more cases!\n"); assert(false); } } } else { // No fixup if (vdelt != 0.0) { // Time dependent switch (angles_per_thread) { case 1: { gpu_time_dependent_sweep_without_fixup<1><<<grid,block>>>(origin, qtot_ptr, flux_ptr, fluxm_ptr, dinv_ptr, time_flux_in_ptr, time_flux_out_ptr, ghostx_ptr, ghosty_ptr, ghostz_ptr, qim_ptr, ByteOffsetArray<3>(qtot_offsets), ByteOffsetArray<3>(flux_offsets), ByteOffsetArray<3>(fluxm_offsets), ByteOffsetArray<3>(dinv_offsets), ByteOffsetArray<3>(time_flux_in_offsets), ByteOffsetArray<3>(time_flux_out_offsets), ByteOffsetArray<2>(ghostx_offsets), ByteOffsetArray<2>(ghosty_offsets), ByteOffsetArray<2>(ghostz_offsets), ByteOffsetArray<3>(qim_offsets), x_range, y_range, z_range, corner, stride_x_positive, stride_y_positive, stride_z_positive, mms_source, num_moments, hi, hj, hk, vdelt); break; } case 2: { gpu_time_dependent_sweep_without_fixup<2><<<grid,block>>>(origin, qtot_ptr, flux_ptr, fluxm_ptr, dinv_ptr, time_flux_in_ptr, time_flux_out_ptr, ghostx_ptr, ghosty_ptr, ghostz_ptr, qim_ptr, ByteOffsetArray<3>(qtot_offsets), ByteOffsetArray<3>(flux_offsets), ByteOffsetArray<3>(fluxm_offsets), ByteOffsetArray<3>(dinv_offsets), ByteOffsetArray<3>(time_flux_in_offsets), ByteOffsetArray<3>(time_flux_out_offsets), ByteOffsetArray<2>(ghostx_offsets), ByteOffsetArray<2>(ghosty_offsets), ByteOffsetArray<2>(ghostz_offsets), ByteOffsetArray<3>(qim_offsets), x_range, y_range, z_range, corner, stride_x_positive, stride_y_positive, stride_z_positive, mms_source, num_moments, hi, hj, hk, vdelt); break; } default: printf("OH SNAP! That is a lot of angles! 
Add more cases!\n");
          assert(false);
      }
    } else {
      // Time independent
      switch (angles_per_thread)
      {
        case 1:
          {
            gpu_time_independent_sweep_without_fixup<1><<<grid,block>>>(origin,
                qtot_ptr, flux_ptr, fluxm_ptr, dinv_ptr, t_xs_ptr,
                ghostx_ptr, ghosty_ptr, ghostz_ptr, qim_ptr,
                ByteOffsetArray<3>(qtot_offsets),
                ByteOffsetArray<3>(flux_offsets),
                ByteOffsetArray<3>(fluxm_offsets),
                ByteOffsetArray<3>(dinv_offsets),
                ByteOffsetArray<3>(t_xs_offsets),
                ByteOffsetArray<2>(ghostx_offsets),
                ByteOffsetArray<2>(ghosty_offsets),
                ByteOffsetArray<2>(ghostz_offsets),
                ByteOffsetArray<3>(qim_offsets),
                x_range, y_range, z_range, corner,
                stride_x_positive, stride_y_positive, stride_z_positive,
                mms_source, num_moments, hi, hj, hk);
            break;
          }
        case 2:
          {
            gpu_time_independent_sweep_without_fixup<2><<<grid,block>>>(origin,
                qtot_ptr, flux_ptr, fluxm_ptr, dinv_ptr, t_xs_ptr,
                ghostx_ptr, ghosty_ptr, ghostz_ptr, qim_ptr,
                ByteOffsetArray<3>(qtot_offsets),
                ByteOffsetArray<3>(flux_offsets),
                ByteOffsetArray<3>(fluxm_offsets),
                ByteOffsetArray<3>(dinv_offsets),
                ByteOffsetArray<3>(t_xs_offsets),
                ByteOffsetArray<2>(ghostx_offsets),
                ByteOffsetArray<2>(ghosty_offsets),
                ByteOffsetArray<2>(ghostz_offsets),
                ByteOffsetArray<3>(qim_offsets),
                x_range, y_range, z_range, corner,
                stride_x_positive, stride_y_positive, stride_z_positive,
                mms_source, num_moments, hi, hj, hk);
            break;
          }
        default:
          printf("OH SNAP! That is a lot of angles! Add more cases!\n");
          assert(false);
      }
    }
  }
}
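// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the generated SNAP file above: the sweep
// kernels accumulate each cell's flux with a warp-level butterfly reduction
// over the two 32-bit halves of a double, a 32-slot shared-memory
// "trampoline" that combines the per-warp partial sums, and a CAS-based
// atomicAdd fallback for GPUs older than Pascal. The minimal CUDA kernel
// below shows that same pattern in isolation; the names block_sum_kernel,
// sketch_atomic_add, d_in, and d_out are hypothetical, it assumes compilation
// as a .cu file with nvcc, and on CUDA 9.0+ __shfl_xor would be replaced by
// __shfl_xor_sync.
// ---------------------------------------------------------------------------

__device__ __forceinline__
void sketch_atomic_add(double *ptr, double value)
{
#if __CUDA_ARCH__ < 600
  // Pre-Pascal: emulate a double-precision atomicAdd with a 64-bit CAS loop
  unsigned long long int *address_as_ull = (unsigned long long int*)ptr;
  unsigned long long int old = *address_as_ull, assumed;
  do {
    assumed = old;
    old = atomicCAS(address_as_ull, assumed,
                    __double_as_longlong(value + __longlong_as_double(assumed)));
  } while (assumed != old);
#else
  // Native double-precision atomicAdd is available from Pascal onward
  atomicAdd(ptr, value);
#endif
}

__global__
void block_sum_kernel(const double *d_in, double *d_out, const int n)
{
  __shared__ double trampoline[32]; // one slot per possible warp in the CTA
  const unsigned tid = blockIdx.x * blockDim.x + threadIdx.x;
  const unsigned laneid = threadIdx.x & 31;
  const unsigned warpid = threadIdx.x >> 5;

  double total = (tid < n) ? d_in[tid] : 0.0;

  // Warp-level butterfly reduction: shuffle the two 32-bit halves of a double
  #pragma unroll
  for (int i = 16; i >= 1; i /= 2) {
    int hi_part = __shfl_xor(__double2hiint(total), i, 32);
    int lo_part = __shfl_xor(__double2loint(total), i, 32);
    total += __hiloint2double(hi_part, lo_part);
  }

  // Park each warp's partial sum in shared memory (lane 0 of every warp)
  if (warpid == 0)
    trampoline[laneid] = 0.0;
  __syncthreads();
  if (laneid == 0)
    trampoline[warpid] = total;
  __syncthreads();

  // Warp 0 reduces the per-warp partials and one thread commits the result
  if (warpid == 0) {
    total = trampoline[laneid];
    #pragma unroll
    for (int i = 16; i >= 1; i /= 2) {
      int hi_part = __shfl_xor(__double2hiint(total), i, 32);
      int lo_part = __shfl_xor(__double2loint(total), i, 32);
      total += __hiloint2double(hi_part, lo_part);
    }
    if (laneid == 0)
      sketch_atomic_add(d_out, total);
  }
}

// A typical launch, assuming n inputs and a zero-initialized d_out:
//   block_sum_kernel<<<(n + 255) / 256, 256>>>(d_in, d_out, n);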
83fa906f2e24d593eb4426b19bc0666c7f8b46cd.cu
/* Copyright 2017 NVIDIA Corporation * * The U.S. Department of Energy funded the development of this software * under subcontract B609478 with Lawrence Livermore National Security, LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cstdio> #include "snap_types.h" #include "accessor.h" #include "snap_cuda_help.h" using namespace LegionRuntime::Arrays; using namespace LegionRuntime::Accessor; // Some bounds for use of GPU kernels, can be modified easily // Be careful about memory usage, modifying MAX_X_CHUNK and // MAX_Y_CHUNK will influence how much local memory must be // allocated for each kernel #define MAX_ANGLES 2048 #define MAX_X_CHUNK 16 #define MAX_Y_CHUNK 16 // Don't use the __constant__ qualifier here! // Each thread in a warp will be indexing on // a per angle basis and we don't want replays // when they don't all hit the same constant index __device__ double device_ec[8/*corners*/*4/*moments*/*MAX_ANGLES]; __device__ double device_mu[MAX_ANGLES]; __device__ double device_eta[MAX_ANGLES]; __device__ double device_xi[MAX_ANGLES]; __device__ double device_w[MAX_ANGLES]; __host__ void initialize_gpu_context(const double *ec_h, const double *mu_h, const double *eta_h, const double *xi_h, const double *w_h, const int num_angles, const int num_moments, const int num_octants, const int nx_per_chunk, const int ny_per_chunk) { // Check the bounds first if (num_angles > MAX_ANGLES) printf("ERROR: adjust MAX_ANGLES in gpu_sweep.cu to %d", num_angles); assert(num_angles <= MAX_ANGLES); if (nx_per_chunk > MAX_X_CHUNK) printf("ERROR: adjust MAX_X_CHUNK in gpu_sweep.cu to %d", nx_per_chunk); assert(nx_per_chunk <= MAX_X_CHUNK); if (ny_per_chunk > MAX_Y_CHUNK) printf("ERROR: adjust MAX_Y_CHUNK in gpu_sweep.cu to %d", ny_per_chunk); assert(ny_per_chunk <= MAX_Y_CHUNK); cudaMemcpyToSymbol(device_ec, ec_h, num_angles * num_moments * num_octants * sizeof(double)); cudaMemcpyToSymbol(device_mu, mu_h, num_angles * sizeof(double)); cudaMemcpyToSymbol(device_eta, eta_h, num_angles * sizeof(double)); cudaMemcpyToSymbol(device_xi, xi_h, num_angles * sizeof(double)); cudaMemcpyToSymbol(device_w, w_h, num_angles * sizeof(double)); } // This is from expxs but it uses the same constants template<int GROUPS> __global__ void gpu_geometry_param(const PointerBuffer<GROUPS,double> xs_ptrs, PointerBuffer<GROUPS,double> dinv_ptrs, const ByteOffsetArray<3> xs_offsets, const ByteOffsetArray<3> dinv_offsets, const ConstBuffer<GROUPS,double> vdelt, const double hi, const double hj, const double hk, const int angles_per_thread) { const int x = blockIdx.x; const int y = blockIdx.y; const int z = blockIdx.z; for (int i = 0; i < angles_per_thread; i++) { const int ang = i * blockDim.x + threadIdx.x; const double sum = hi * device_mu[ang] + hj * device_eta[ang] + hk * device_xi[ang]; #pragma unroll for (int g = 0; g < GROUPS; g++) { const double *xs_ptr = xs_ptrs[g] + x * xs_offsets[0] + y * xs_offsets[1] + z * xs_offsets[2]; double xs; // Cache this at all levels since it is shared across all threads in the CTA asm 
volatile("ld.global.ca.f64 %0, [%1];" : "=d"(xs) : "l"(xs_ptr) : "memory"); double result = 1.0 / (xs + vdelt[g] + sum); double *dinv_ptr = dinv_ptrs[g] + x * dinv_offsets[0] + y * dinv_offsets[1] + z * dinv_offsets[2]; asm volatile("st.global.cs.f64 [%0], %1;" : : "l"(dinv_ptr+ang), "d"(result) : "memory"); } } } __host__ void run_geometry_param(const std::vector<double*> &xs_ptrs, const std::vector<double*> &dinv_ptrs, const ByteOffset xs_offsets[3], const ByteOffset dinv_offsets[3], const std::vector<double> &vdelts, const double hi, const double hj, const double hk, const Rect<3> &subgrid_bounds, const int num_angles) { // Figure out the launch bounds, then dispatch const int x_range = (subgrid_bounds.hi[0] - subgrid_bounds.lo[0]) + 1; const int y_range = (subgrid_bounds.hi[1] - subgrid_bounds.lo[1]) + 1; const int z_range = (subgrid_bounds.hi[2] - subgrid_bounds.lo[2]) + 1; const int max_threads_per_cta = 1024; const int angles_per_thread = (num_angles + max_threads_per_cta - 1) / max_threads_per_cta; // Have to be evenly divisible for now assert((num_angles % angles_per_thread) == 0); const int threads_per_cta = num_angles / angles_per_thread; dim3 block(threads_per_cta, 1, 1); dim3 grid(x_range, y_range, z_range); // TODO: Replace template foolishness with terra assert(xs_ptrs.size() == dinv_ptrs.size()); switch (xs_ptrs.size()) { case 1: { gpu_geometry_param<1><<<grid,block>>>( PointerBuffer<1,double>(xs_ptrs), PointerBuffer<1,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<1,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 2: { gpu_geometry_param<2><<<grid,block>>>( PointerBuffer<2,double>(xs_ptrs), PointerBuffer<2,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<2,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 3: { gpu_geometry_param<3><<<grid,block>>>( PointerBuffer<3,double>(xs_ptrs), PointerBuffer<3,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<3,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 4: { gpu_geometry_param<4><<<grid,block>>>( PointerBuffer<4,double>(xs_ptrs), PointerBuffer<4,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<4,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 5: { gpu_geometry_param<5><<<grid,block>>>( PointerBuffer<5,double>(xs_ptrs), PointerBuffer<5,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<5,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 6: { gpu_geometry_param<6><<<grid,block>>>( PointerBuffer<6,double>(xs_ptrs), PointerBuffer<6,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<6,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 7: { gpu_geometry_param<7><<<grid,block>>>( PointerBuffer<7,double>(xs_ptrs), PointerBuffer<7,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<7,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 8: { gpu_geometry_param<8><<<grid,block>>>( PointerBuffer<8,double>(xs_ptrs), PointerBuffer<8,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<8,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 9: { gpu_geometry_param<9><<<grid,block>>>( PointerBuffer<9,double>(xs_ptrs), PointerBuffer<9,double>(dinv_ptrs), 
ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<9,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 10: { gpu_geometry_param<10><<<grid,block>>>( PointerBuffer<10,double>(xs_ptrs), PointerBuffer<10,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<10,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 11: { gpu_geometry_param<11><<<grid,block>>>( PointerBuffer<11,double>(xs_ptrs), PointerBuffer<11,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<11,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 12: { gpu_geometry_param<12><<<grid,block>>>( PointerBuffer<12,double>(xs_ptrs), PointerBuffer<12,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<12,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 13: { gpu_geometry_param<13><<<grid,block>>>( PointerBuffer<13,double>(xs_ptrs), PointerBuffer<13,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<13,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 14: { gpu_geometry_param<14><<<grid,block>>>( PointerBuffer<14,double>(xs_ptrs), PointerBuffer<14,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<14,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 15: { gpu_geometry_param<15><<<grid,block>>>( PointerBuffer<15,double>(xs_ptrs), PointerBuffer<15,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<15,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 16: { gpu_geometry_param<16><<<grid,block>>>( PointerBuffer<16,double>(xs_ptrs), PointerBuffer<16,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<16,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 24: { gpu_geometry_param<24><<<grid,block>>>( PointerBuffer<24,double>(xs_ptrs), PointerBuffer<24,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<24,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 32: { gpu_geometry_param<32><<<grid,block>>>( PointerBuffer<32,double>(xs_ptrs), PointerBuffer<32,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<32,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 40: { gpu_geometry_param<40><<<grid,block>>>( PointerBuffer<40,double>(xs_ptrs), PointerBuffer<40,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<40,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 48: { gpu_geometry_param<48><<<grid,block>>>( PointerBuffer<48,double>(xs_ptrs), PointerBuffer<48,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<48,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 56: { gpu_geometry_param<56><<<grid,block>>>( PointerBuffer<56,double>(xs_ptrs), PointerBuffer<56,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<56,double>(vdelts), hi, hj, hk, angles_per_thread); break; } case 64: { gpu_geometry_param<64><<<grid,block>>>( PointerBuffer<64,double>(xs_ptrs), PointerBuffer<64,double>(dinv_ptrs), ByteOffsetArray<3>(xs_offsets), ByteOffsetArray<3>(dinv_offsets), ConstBuffer<64,double>(vdelts), hi, hj, hk, angles_per_thread); break; } default: 
assert(false); // need more cases } } __device__ __forceinline__ ByteOffset operator*(const ByteOffsetArray<2> &offsets, const Point<2> &point) { return (offsets[0] * point.x[0] + offsets[1] * point.x[1]); } __device__ __forceinline__ ByteOffset operator*(const ByteOffsetArray<3> &offsets, const Point<3> &point) { return (offsets[0] * point.x[0] + offsets[1] * point.x[1] + offsets[2] * point.x[2]); } __device__ __forceinline__ void ourAtomicAdd(double *ptr, double value) { #if __CUDA_ARCH__ < 600 unsigned long long int* address_as_ull = (unsigned long long int*)ptr; unsigned long long int old = *address_as_ull; unsigned long long int assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(value + __longlong_as_double(assumed))); } while (assumed != old); #else // We have double precision atomicAdd starting in Pascal atomicAdd(ptr, value); #endif } template<int DIM> __device__ __forceinline__ double angle_read(const double *ptr, const ByteOffsetArray<DIM> &offset, const Point<DIM> &point, int ang) { ptr += (offset * point); ptr += ang * blockDim.x + threadIdx.x; double result; asm volatile("ld.global.cs.f64 %0, [%1];" : "=d"(result) : "l"(ptr) : "memory"); return result; } template<int DIM> __device__ __forceinline__ void angle_write(double *ptr, const ByteOffsetArray<DIM> &offset, const Point<DIM> &point, int ang, double val) { ptr += (offset * point); ptr += ang * blockDim.x + threadIdx.x; asm volatile("st.global.cs.f64 [%0], %1;" : : "l"(ptr), "d"(val) : "memory"); } __device__ __forceinline__ Point<2> ghostx_point(const Point<3> &local_point) { Point<2> ghost; ghost.x[0] = local_point.x[1]; // y ghost.x[1] = local_point.x[2]; // z return ghost; } __device__ __forceinline__ Point<2> ghosty_point(const Point<3> &local_point) { Point<2> ghost; ghost.x[0] = local_point.x[0]; // x ghost.x[1] = local_point.x[2]; // z return ghost; } __device__ __forceinline__ Point<2> ghostz_point(const Point<3> &local_point) { Point<2> ghost; ghost.x[0] = local_point.x[0]; // x ghost.x[1] = local_point.x[1]; // y return ghost; } template<int THR_ANGLES> __global__ void gpu_time_dependent_sweep_with_fixup(const Point<3> origin, const MomentQuad *qtot_ptr, double *flux_ptr, MomentTriple *fluxm_ptr, const double *dinv_ptr, const double *time_flux_in_ptr, double *time_flux_out_ptr, const double *t_xs_ptr, double *ghostx_ptr, double *ghosty_ptr, double *ghostz_ptr, const double *qim_ptr, const ByteOffsetArray<3> qtot_offsets, const ByteOffsetArray<3> flux_offsets, const ByteOffsetArray<3> fluxm_offsets, const ByteOffsetArray<3> dinv_offsets, const ByteOffsetArray<3> time_flux_in_offsets, const ByteOffsetArray<3> time_flux_out_offsets, const ByteOffsetArray<3> t_xs_offsets, const ByteOffsetArray<2> ghostx_offsets, const ByteOffsetArray<2> ghosty_offsets, const ByteOffsetArray<2> ghostz_offsets, const ByteOffsetArray<3> qim_offsets, const int x_range, const int y_range, const int z_range, const int corner, const bool stride_x_positive, const bool stride_y_positive, const bool stride_z_positive, const bool mms_source, const int num_moments, const double hi, const double hj, const double hk, const double vdelt) { __shared__ int int_trampoline[32]; __shared__ double double_trampoline[32]; double psi[THR_ANGLES]; double pc[THR_ANGLES]; double psii[THR_ANGLES]; double psij[THR_ANGLES]; double psik[THR_ANGLES]; double hv_x[THR_ANGLES]; double hv_y[THR_ANGLES]; double hv_z[THR_ANGLES]; double hv_t[THR_ANGLES]; double fx_hv_x[THR_ANGLES]; double fx_hv_y[THR_ANGLES]; double 
fx_hv_z[THR_ANGLES]; double fx_hv_t[THR_ANGLES]; double time_flux_in[THR_ANGLES]; const int num_angles = THR_ANGLES * blockDim.x; const int corner_offset = corner * num_angles * num_moments; unsigned laneid; asm volatile("mov.u32 %0, %laneid;" : "=r"(laneid) : ); unsigned warpid = threadIdx.x >> 5; // These will be intentionally spilled to local memory // because the CUDA compiler can't statically understand // all their accesses, which is where we actualy want them double yflux_pencil[MAX_X_CHUNK][THR_ANGLES]; double zflux_plane[MAX_Y_CHUNK][MAX_X_CHUNK][THR_ANGLES]; const double tolr = 1.0e-12; for (int z = 0; z < z_range; z++) { for (int y = 0; y < y_range; y++) { for (int x = 0; x < x_range; x++) { // Figure out the local point that we are working on Point<3> local_point = origin; if (stride_x_positive) local_point.x[0] += x; else local_point.x[0] -= x; if (stride_y_positive) local_point.x[1] += y; else local_point.x[1] -= y; if (stride_z_positive) local_point.x[2] += z; else local_point.x[2] -= z; // Compute the angular source MomentQuad quad = *(qtot_ptr + qtot_offsets * local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psi[ang] = quad[0]; if (num_moments > 1) { for (int l = 1; l < num_moments; l++) { const int moment_offset = corner_offset + l * num_angles; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { psi[ang] += device_ec[moment_offset+ang*blockDim.x+threadIdx.x] * quad[l]; } } } // If we're doing MMS if (mms_source) { #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psi[ang] += angle_read<3>(qim_ptr, qim_offsets, local_point, ang); } // Compute the initial solution #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] = psi[ang]; // X ghost cells if (x == 0) { // Ghost cell array Point<2> ghost_point = ghostx_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psii[ang] = angle_read<2>(ghostx_ptr, ghostx_offsets, ghost_point, ang); } // Else nothing: psii already contains next flux #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] += psii[ang] * device_mu[ang*blockDim.x + threadIdx.x] * hi; // Y ghost cells if (y == 0) { // Ghost cell array Point<2> ghost_point = ghosty_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psij[ang] = angle_read<2>(ghosty_ptr, ghosty_offsets, ghost_point, ang); } else { // Local array #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psij[ang] = yflux_pencil[x][ang]; } #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] += psij[ang] * device_eta[ang * blockDim.x + threadIdx.x] * hj; // Z ghost cells if (z == 0) { // Ghost cell array Point<2> ghost_point = ghostz_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psik[ang] = angle_read<2>(ghostz_ptr, ghostz_offsets, ghost_point, ang); } else { // Local array #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psik[ang] = zflux_plane[y][x][ang]; } #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] += psik[ang] * device_xi[ang * blockDim.x + threadIdx.x] * hk; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { time_flux_in[ang] = angle_read<3>(time_flux_in_ptr, time_flux_in_offsets, local_point, ang); pc[ang] += vdelt * time_flux_in[ang]; } #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { double dinv = angle_read<3>(dinv_ptr, dinv_offsets, local_point, ang); pc[ang] *= dinv; } // DO THE FIXUP #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) hv_x[ang] = 1.0; #pragma unroll for (int 
ang = 0; ang < THR_ANGLES; ang++) hv_y[ang] = 1.0; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) hv_z[ang] = 1.0; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) hv_t[ang] = 1.0; const double t_xs = *(t_xs_ptr + t_xs_offsets * local_point); int old_negative_fluxes = 0; while (true) { unsigned negative_fluxes = 0; // Figure out how many negative fluxes we have #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { fx_hv_x[ang] = 2.0 * pc[ang] - psii[ang]; if (fx_hv_x[ang] < 0.0) { hv_x[ang] = 0.0; negative_fluxes++; } } #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { fx_hv_y[ang] = 2.0 * pc[ang] - psij[ang]; if (fx_hv_y[ang] < 0.0) { hv_y[ang] = 0.0; negative_fluxes++; } } #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { fx_hv_z[ang] = 2.0 * pc[ang] - psik[ang]; if (fx_hv_z[ang] < 0.0) { hv_z[ang] = 0.0; negative_fluxes++; } } #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { fx_hv_t[ang] = 2.0 * pc[ang] - time_flux_in[ang]; if (fx_hv_t[ang] < 0.0) { hv_t[ang] = 0.0; negative_fluxes++; } } // CTA-wide reduction #pragma unroll for (int i = 16; i >= 1; i /= 2) negative_fluxes += __shfl_xor(negative_fluxes, i, 32); // Initialize if (warpid == 0) int_trampoline[laneid] = 0; __syncthreads(); if (laneid == 0) int_trampoline[warpid] = negative_fluxes; __syncthreads(); negative_fluxes = int_trampoline[laneid]; #pragma unroll for (int i = 16; i >= 1; i /= 2) negative_fluxes += __shfl_xor(negative_fluxes, i, 32); // All threads have the same negative flux count now if (negative_fluxes == old_negative_fluxes) break; old_negative_fluxes = negative_fluxes; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { pc[ang] = psi[ang] + 0.5 * ( psii[ang] * device_mu[ang*blockDim.x + threadIdx.x] * hi * (1.0 + hv_x[ang]) + psij[ang] * device_eta[ang*blockDim.x + threadIdx.x] * hj * (1.0 + hv_y[ang]) + psik[ang] * device_xi[ang*blockDim.x + threadIdx.x] * hk * (1.0 + hv_z[ang]) + time_flux_in[ang] * vdelt * (1.0 + hv_t[ang]) ); double den = (pc[ang] <= 0.0) ? 
0.0 : (t_xs + device_mu[ang*blockDim.x + threadIdx.x] * hi * hv_x[ang] + device_eta[ang*blockDim.x + threadIdx.x] * hj * hv_y[ang] + device_xi[ang*blockDim.x + threadIdx.x] * hk * hv_z[ang] + vdelt * hv_t[ang]); if (den < tolr) pc[ang] = 0.0; else pc[ang] /= den; } } // Fixup done so compute the update values #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psii[ang] = fx_hv_x[ang] * hv_x[ang]; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psij[ang] = fx_hv_y[ang] * hv_y[ang]; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psik[ang] = fx_hv_z[ang] * hv_z[ang]; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { double time_flux_out = fx_hv_t[ang] * hv_t[ang]; angle_write<3>(time_flux_out_ptr, time_flux_out_offsets, local_point, ang, time_flux_out); } // Write out the ghost regions // X ghost if (x == (x_range - 1)) { Point<2> ghost_point = ghostx_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) angle_write<2>(ghostx_ptr, ghostx_offsets, ghost_point, ang, psii[ang]); } // Y ghost if (y == (y_range - 1)) { Point<2> ghost_point = ghosty_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) angle_write<2>(ghosty_ptr, ghosty_offsets, ghost_point, ang, psij[ang]); } else { // Write to the pencil #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) yflux_pencil[x][ang] = psij[ang]; } // Z ghost if (z == (z_range - 1)) { Point<2> ghost_point = ghostz_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) angle_write<2>(ghostz_ptr, ghostz_offsets, ghost_point, ang, psik[ang]); } else { #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) zflux_plane[y][x][ang] = psik[ang]; } // Finally we apply reductions to the flux moments double total = 0.0; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { psi[ang] = device_w[ang * blockDim.x + threadIdx.x] * pc[ang]; total += psi[ang]; } // CTA-wide reduction to one warp and then down to one thread #pragma unroll for (int i = 16; i >= 1; i /= 2) { int hi_part = __shfl_xor(__double2hiint(total), i, 32); int lo_part = __shfl_xor(__double2loint(total), i, 32); total += __hiloint2double(hi_part,lo_part); } if (warpid == 0) double_trampoline[laneid] = 0.0; __syncthreads(); if (laneid == 0) double_trampoline[warpid] = total; __syncthreads(); if (warpid == 0) { total = double_trampoline[laneid]; #pragma unroll for (int i = 16; i >= 1; i /= 2) { int hi_part = __shfl_xor(__double2hiint(total), i, 32); int lo_part = __shfl_xor(__double2loint(total), i, 32); total += __hiloint2double(hi_part,lo_part); } // Do the reduction if (laneid == 0) { double *local_flux = flux_ptr + flux_offsets * local_point; ourAtomicAdd(local_flux, total); } } if (num_moments > 1) { for (int l = 1; l < num_moments; l++) { const int offset = l * num_angles + corner * num_angles * num_moments; total = 0.0; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) total += device_ec[offset + ang] * psi[ang]; __syncthreads(); if (warpid == 0) double_trampoline[laneid] = 0.0; __syncthreads(); if (laneid == 0) double_trampoline[warpid] = total; __syncthreads(); if (warpid == 0) total = double_trampoline[laneid]; #pragma unroll for (int i = 16; i >= 1; i /= 2) { int hi_part = __shfl_xor(__double2hiint(total), i, 32); int lo_part = __shfl_xor(__double2loint(total), i, 32); total += __hiloint2double(hi_part,lo_part); } if (laneid == 0) { double *local_fluxm = (double*)(fluxm_ptr + fluxm_offsets * local_point); local_fluxm += (l-1); ourAtomicAdd(local_fluxm, total); } } 
} } } } } template<int THR_ANGLES> __global__ void gpu_time_dependent_sweep_without_fixup(const Point<3> origin, const MomentQuad *qtot_ptr, double *flux_ptr, MomentTriple *fluxm_ptr, const double *dinv_ptr, const double *time_flux_in_ptr, double *time_flux_out_ptr, double *ghostx_ptr, double *ghosty_ptr, double *ghostz_ptr, const double *qim_ptr, const ByteOffsetArray<3> qtot_offsets, const ByteOffsetArray<3> flux_offsets, const ByteOffsetArray<3> fluxm_offsets, const ByteOffsetArray<3> dinv_offsets, const ByteOffsetArray<3> time_flux_in_offsets, const ByteOffsetArray<3> time_flux_out_offsets, const ByteOffsetArray<2> ghostx_offsets, const ByteOffsetArray<2> ghosty_offsets, const ByteOffsetArray<2> ghostz_offsets, const ByteOffsetArray<3> qim_offsets, const int x_range, const int y_range, const int z_range, const int corner, const bool stride_x_positive, const bool stride_y_positive, const bool stride_z_positive, const bool mms_source, const int num_moments, const double hi, const double hj, const double hk, const double vdelt) { __shared__ double double_trampoline[32]; double psi[THR_ANGLES]; double pc[THR_ANGLES]; double psii[THR_ANGLES]; double psij[THR_ANGLES]; double psik[THR_ANGLES]; double time_flux_in[THR_ANGLES]; const int num_angles = THR_ANGLES * blockDim.x; const int corner_offset = corner * num_angles * num_moments; unsigned laneid; asm volatile("mov.u32 %0, %laneid;" : "=r"(laneid) : ); unsigned warpid = threadIdx.x >> 5; // These will be intentionally spilled to local memory // because the CUDA compiler can't statically understand // all their accesses, which is where we actualy want them double yflux_pencil[MAX_X_CHUNK][THR_ANGLES]; double zflux_plane[MAX_Y_CHUNK][MAX_X_CHUNK][THR_ANGLES]; for (int z = 0; z < z_range; z++) { for (int y = 0; y < y_range; y++) { for (int x = 0; x < x_range; x++) { // Figure out the local point that we are working on Point<3> local_point = origin; if (stride_x_positive) local_point.x[0] += x; else local_point.x[0] -= x; if (stride_y_positive) local_point.x[1] += y; else local_point.x[1] -= y; if (stride_z_positive) local_point.x[2] += z; else local_point.x[2] -= z; // Compute the angular source MomentQuad quad = *(qtot_ptr + qtot_offsets * local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psi[ang] = quad[0]; if (num_moments > 1) { for (int l = 1; l < num_moments; l++) { const int moment_offset = corner_offset + l * num_angles; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { psi[ang] += device_ec[moment_offset+ang*blockDim.x+threadIdx.x] * quad[l]; } } } // If we're doing MMS if (mms_source) { #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psi[ang] += angle_read<3>(qim_ptr, qim_offsets, local_point, ang); } // Compute the initial solution #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] = psi[ang]; // X ghost cells if (x == 0) { // Ghost cell array Point<2> ghost_point = ghostx_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psii[ang] = angle_read<2>(ghostx_ptr, ghostx_offsets, ghost_point, ang); } // Else nothing: psii already contains next flux #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] += psii[ang] * device_mu[ang*blockDim.x + threadIdx.x] * hi; // Y ghost cells if (y == 0) { // Ghost cell array Point<2> ghost_point = ghosty_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psij[ang] = angle_read<2>(ghosty_ptr, ghosty_offsets, ghost_point, ang); } else { // Local array #pragma unroll for (int ang = 
0; ang < THR_ANGLES; ang++) psij[ang] = yflux_pencil[x][ang]; } #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] += psij[ang] * device_eta[ang * blockDim.x + threadIdx.x] * hj; // Z ghost cells if (z == 0) { // Ghost cell array Point<2> ghost_point = ghostz_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psik[ang] = angle_read<2>(ghostz_ptr, ghostz_offsets, ghost_point, ang); } else { // Local array #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psik[ang] = zflux_plane[y][x][ang]; } #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] += psik[ang] * device_xi[ang * blockDim.x + threadIdx.x] * hk; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { time_flux_in[ang] = angle_read<3>(time_flux_in_ptr, time_flux_in_offsets, local_point, ang); pc[ang] += vdelt * time_flux_in[ang]; } #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { double dinv = angle_read<3>(dinv_ptr, dinv_offsets, local_point, ang); pc[ang] *= dinv; } // NO FIXUP #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psii[ang] = 2.0 * pc[ang] - psii[ang]; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psij[ang] = 2.0 * pc[ang] - psij[ang]; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psik[ang] = 2.0 * pc[ang] - psik[ang]; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { double time_flux_out = 2.0 * pc[ang] - time_flux_in[ang]; angle_write<3>(time_flux_out_ptr, time_flux_out_offsets, local_point, ang, time_flux_out); } // Write out the ghost regions // X ghost if (x == (x_range - 1)) { Point<2> ghost_point = ghostx_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) angle_write<2>(ghostx_ptr, ghostx_offsets, ghost_point, ang, psii[ang]); } // Y ghost if (y == (y_range - 1)) { Point<2> ghost_point = ghosty_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) angle_write<2>(ghosty_ptr, ghosty_offsets, ghost_point, ang, psij[ang]); } else { // Write to the pencil #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) yflux_pencil[x][ang] = psij[ang]; } // Z ghost if (z == (z_range - 1)) { Point<2> ghost_point = ghostz_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) angle_write<2>(ghostz_ptr, ghostz_offsets, ghost_point, ang, psik[ang]); } else { #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) zflux_plane[y][x][ang] = psik[ang]; } // Finally we apply reductions to the flux moments double total = 0.0; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { psi[ang] = device_w[ang * blockDim.x + threadIdx.x] * pc[ang]; total += psi[ang]; } // CTA-wide reduction to one warp and then down to one thread #pragma unroll for (int i = 16; i >= 1; i /= 2) { int hi_part = __shfl_xor(__double2hiint(total), i, 32); int lo_part = __shfl_xor(__double2loint(total), i, 32); total += __hiloint2double(hi_part,lo_part); } if (warpid == 0) double_trampoline[laneid] = 0.0; __syncthreads(); if (laneid == 0) double_trampoline[warpid] = total; __syncthreads(); if (warpid == 0) { total = double_trampoline[laneid]; #pragma unroll for (int i = 16; i >= 1; i /= 2) { int hi_part = __shfl_xor(__double2hiint(total), i, 32); int lo_part = __shfl_xor(__double2loint(total), i, 32); total += __hiloint2double(hi_part,lo_part); } // Do the reduction if (laneid == 0) { double *local_flux = flux_ptr + flux_offsets * local_point; ourAtomicAdd(local_flux, total); } } if (num_moments > 1) { for (int l = 1; l < num_moments; l++) { const int offset = l * 
num_angles + corner * num_angles * num_moments; total = 0.0; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) total += device_ec[offset + ang] * psi[ang]; __syncthreads(); if (warpid == 0) double_trampoline[laneid] = 0.0; __syncthreads(); if (laneid == 0) double_trampoline[warpid] = total; __syncthreads(); if (warpid == 0) total = double_trampoline[laneid]; #pragma unroll for (int i = 16; i >= 1; i /= 2) { int hi_part = __shfl_xor(__double2hiint(total), i, 32); int lo_part = __shfl_xor(__double2loint(total), i, 32); total += __hiloint2double(hi_part,lo_part); } if (laneid == 0) { double *local_fluxm = (double*)(fluxm_ptr + fluxm_offsets * local_point); local_fluxm += (l-1); ourAtomicAdd(local_fluxm, total); } } } } } } } template<int THR_ANGLES> __global__ void gpu_time_independent_sweep_with_fixup(const Point<3> origin, const MomentQuad *qtot_ptr, double *flux_ptr, MomentTriple *fluxm_ptr, const double *dinv_ptr, const double *t_xs_ptr, double *ghostx_ptr, double *ghosty_ptr, double *ghostz_ptr, const double *qim_ptr, const ByteOffsetArray<3> qtot_offsets, const ByteOffsetArray<3> flux_offsets, const ByteOffsetArray<3> fluxm_offsets, const ByteOffsetArray<3> dinv_offsets, const ByteOffsetArray<3> t_xs_offsets, const ByteOffsetArray<2> ghostx_offsets, const ByteOffsetArray<2> ghosty_offsets, const ByteOffsetArray<2> ghostz_offsets, const ByteOffsetArray<3> qim_offsets, const int x_range, const int y_range, const int z_range, const int corner, const bool stride_x_positive, const bool stride_y_positive, const bool stride_z_positive, const bool mms_source, const int num_moments, const double hi, const double hj, const double hk) { __shared__ int int_trampoline[32]; __shared__ double double_trampoline[32]; double psi[THR_ANGLES]; double pc[THR_ANGLES]; double psii[THR_ANGLES]; double psij[THR_ANGLES]; double psik[THR_ANGLES]; double hv_x[THR_ANGLES]; double hv_y[THR_ANGLES]; double hv_z[THR_ANGLES]; double fx_hv_x[THR_ANGLES]; double fx_hv_y[THR_ANGLES]; double fx_hv_z[THR_ANGLES]; const int num_angles = THR_ANGLES * blockDim.x; const int corner_offset = corner * num_angles * num_moments; unsigned laneid; asm volatile("mov.u32 %0, %laneid;" : "=r"(laneid) : ); unsigned warpid = threadIdx.x >> 5; // These will be intentionally spilled to local memory // because the CUDA compiler can't statically understand // all their accesses, which is where we actualy want them double yflux_pencil[MAX_X_CHUNK][THR_ANGLES]; double zflux_plane[MAX_Y_CHUNK][MAX_X_CHUNK][THR_ANGLES]; const double tolr = 1.0e-12; for (int z = 0; z < z_range; z++) { for (int y = 0; y < y_range; y++) { for (int x = 0; x < x_range; x++) { // Figure out the local point that we are working on Point<3> local_point = origin; if (stride_x_positive) local_point.x[0] += x; else local_point.x[0] -= x; if (stride_y_positive) local_point.x[1] += y; else local_point.x[1] -= y; if (stride_z_positive) local_point.x[2] += z; else local_point.x[2] -= z; // Compute the angular source MomentQuad quad = *(qtot_ptr + qtot_offsets * local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psi[ang] = quad[0]; if (num_moments > 1) { for (int l = 1; l < num_moments; l++) { const int moment_offset = corner_offset + l * num_angles; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { psi[ang] += device_ec[moment_offset+ang*blockDim.x+threadIdx.x] * quad[l]; } } } // If we're doing MMS if (mms_source) { #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psi[ang] += angle_read<3>(qim_ptr, qim_offsets, local_point, ang); } 
// Compute the initial solution #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] = psi[ang]; // X ghost cells if (x == 0) { // Ghost cell array Point<2> ghost_point = ghostx_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psii[ang] = angle_read<2>(ghostx_ptr, ghostx_offsets, ghost_point, ang); } // Else nothing: psii already contains next flux #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] += psii[ang] * device_mu[ang*blockDim.x + threadIdx.x] * hi; // Y ghost cells if (y == 0) { // Ghost cell array Point<2> ghost_point = ghosty_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psij[ang] = angle_read<2>(ghosty_ptr, ghosty_offsets, ghost_point, ang); } else { // Local array #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psij[ang] = yflux_pencil[x][ang]; } #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] += psij[ang] * device_eta[ang * blockDim.x + threadIdx.x] * hj; // Z ghost cells if (z == 0) { // Ghost cell array Point<2> ghost_point = ghostz_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psik[ang] = angle_read<2>(ghostz_ptr, ghostz_offsets, ghost_point, ang); } else { // Local array #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psik[ang] = zflux_plane[y][x][ang]; } #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] += psik[ang] * device_xi[ang * blockDim.x + threadIdx.x] * hk; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { double dinv = angle_read<3>(dinv_ptr, dinv_offsets, local_point, ang); pc[ang] *= dinv; } // DO THE FIXUP #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) hv_x[ang] = 1.0; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) hv_y[ang] = 1.0; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) hv_z[ang] = 1.0; const double t_xs = *(t_xs_ptr + t_xs_offsets * local_point); int old_negative_fluxes = 0; while (true) { unsigned negative_fluxes = 0; // Figure out how many negative fluxes we have #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { fx_hv_x[ang] = 2.0 * pc[ang] - psii[ang]; if (fx_hv_x[ang] < 0.0) { hv_x[ang] = 0.0; negative_fluxes++; } } #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { fx_hv_y[ang] = 2.0 * pc[ang] - psij[ang]; if (fx_hv_y[ang] < 0.0) { hv_y[ang] = 0.0; negative_fluxes++; } } #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { fx_hv_z[ang] = 2.0 * pc[ang] - psik[ang]; if (fx_hv_z[ang] < 0.0) { hv_z[ang] = 0.0; negative_fluxes++; } } // CTA-wide reduction #pragma unroll for (int i = 16; i >= 1; i /= 2) negative_fluxes += __shfl_xor(negative_fluxes, i, 32); // Initialize if (warpid == 0) int_trampoline[laneid] = 0; __syncthreads(); if (laneid == 0) int_trampoline[warpid] = negative_fluxes; __syncthreads(); negative_fluxes = int_trampoline[laneid]; #pragma unroll for (int i = 16; i >= 1; i /= 2) negative_fluxes += __shfl_xor(negative_fluxes, i, 32); // All threads have the same negative flux count now if (negative_fluxes == old_negative_fluxes) break; old_negative_fluxes = negative_fluxes; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { pc[ang] = psi[ang] + 0.5 * ( psii[ang] * device_mu[ang*blockDim.x + threadIdx.x] * hi * (1.0 + hv_x[ang]) + psij[ang] * device_eta[ang*blockDim.x + threadIdx.x] * hj * (1.0 + hv_y[ang]) + psik[ang] * device_xi[ang*blockDim.x + threadIdx.x] * hk * (1.0 + hv_z[ang]) ); double den = (pc[ang] <= 0.0) ? 
0.0 : (t_xs + device_mu[ang*blockDim.x + threadIdx.x] * hi * hv_x[ang] + device_eta[ang*blockDim.x + threadIdx.x] * hj * hv_y[ang] + device_xi[ang*blockDim.x + threadIdx.x] * hk * hv_z[ang]); if (den < tolr) pc[ang] = 0.0; else pc[ang] /= den; } } // Fixup done so compute the update values #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psii[ang] = fx_hv_x[ang] * hv_x[ang]; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psij[ang] = fx_hv_y[ang] * hv_y[ang]; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psik[ang] = fx_hv_z[ang] * hv_z[ang]; // Write out the ghost regions // X ghost if (x == (x_range - 1)) { Point<2> ghost_point = ghostx_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) angle_write<2>(ghostx_ptr, ghostx_offsets, ghost_point, ang, psii[ang]); } // Y ghost if (y == (y_range - 1)) { Point<2> ghost_point = ghosty_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) angle_write<2>(ghosty_ptr, ghosty_offsets, ghost_point, ang, psij[ang]); } else { // Write to the pencil #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) yflux_pencil[x][ang] = psij[ang]; } // Z ghost if (z == (z_range - 1)) { Point<2> ghost_point = ghostz_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) angle_write<2>(ghostz_ptr, ghostz_offsets, ghost_point, ang, psik[ang]); } else { #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) zflux_plane[y][x][ang] = psik[ang]; } // Finally we apply reductions to the flux moments double total = 0.0; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { psi[ang] = device_w[ang * blockDim.x + threadIdx.x] * pc[ang]; total += psi[ang]; } // CTA-wide reduction to one warp and then down to one thread #pragma unroll for (int i = 16; i >= 1; i /= 2) { int hi_part = __shfl_xor(__double2hiint(total), i, 32); int lo_part = __shfl_xor(__double2loint(total), i, 32); total += __hiloint2double(hi_part,lo_part); } if (warpid == 0) double_trampoline[laneid] = 0.0; __syncthreads(); if (laneid == 0) double_trampoline[warpid] = total; __syncthreads(); if (warpid == 0) { total = double_trampoline[laneid]; #pragma unroll for (int i = 16; i >= 1; i /= 2) { int hi_part = __shfl_xor(__double2hiint(total), i, 32); int lo_part = __shfl_xor(__double2loint(total), i, 32); total += __hiloint2double(hi_part,lo_part); } // Do the reduction if (laneid == 0) { double *local_flux = flux_ptr + flux_offsets * local_point; ourAtomicAdd(local_flux, total); } } if (num_moments > 1) { for (int l = 1; l < num_moments; l++) { const int offset = l * num_angles + corner * num_angles * num_moments; total = 0.0; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) total += device_ec[offset + ang] * psi[ang]; __syncthreads(); if (warpid == 0) double_trampoline[laneid] = 0.0; __syncthreads(); if (laneid == 0) double_trampoline[warpid] = total; __syncthreads(); if (warpid == 0) total = double_trampoline[laneid]; #pragma unroll for (int i = 16; i >= 1; i /= 2) { int hi_part = __shfl_xor(__double2hiint(total), i, 32); int lo_part = __shfl_xor(__double2loint(total), i, 32); total += __hiloint2double(hi_part,lo_part); } if (laneid == 0) { double *local_fluxm = (double*)(fluxm_ptr + fluxm_offsets * local_point); local_fluxm += (l-1); ourAtomicAdd(local_fluxm, total); } } } } } } } template<int THR_ANGLES> __global__ void gpu_time_independent_sweep_without_fixup(const Point<3> origin, const MomentQuad *qtot_ptr, double *flux_ptr, MomentTriple *fluxm_ptr, const double *dinv_ptr, const double 
*t_xs_ptr, double *ghostx_ptr, double *ghosty_ptr, double *ghostz_ptr, const double *qim_ptr, const ByteOffsetArray<3> qtot_offsets, const ByteOffsetArray<3> flux_offsets, const ByteOffsetArray<3> fluxm_offsets, const ByteOffsetArray<3> dinv_offsets, const ByteOffsetArray<3> t_xs_offsets, const ByteOffsetArray<2> ghostx_offsets, const ByteOffsetArray<2> ghosty_offsets, const ByteOffsetArray<2> ghostz_offsets, const ByteOffsetArray<3> qim_offsets, const int x_range, const int y_range, const int z_range, const int corner, const bool stride_x_positive, const bool stride_y_positive, const bool stride_z_positive, const bool mms_source, const int num_moments, const double hi, const double hj, const double hk) { __shared__ double double_trampoline[32]; double psi[THR_ANGLES]; double pc[THR_ANGLES]; double psii[THR_ANGLES]; double psij[THR_ANGLES]; double psik[THR_ANGLES]; const int num_angles = THR_ANGLES * blockDim.x; const int corner_offset = corner * num_angles * num_moments; unsigned laneid; asm volatile("mov.u32 %0, %laneid;" : "=r"(laneid) : ); unsigned warpid = threadIdx.x >> 5; // These will be intentionally spilled to local memory // because the CUDA compiler can't statically understand // all their accesses, which is where we actualy want them double yflux_pencil[MAX_X_CHUNK][THR_ANGLES]; double zflux_plane[MAX_Y_CHUNK][MAX_X_CHUNK][THR_ANGLES]; for (int z = 0; z < z_range; z++) { for (int y = 0; y < y_range; y++) { for (int x = 0; x < x_range; x++) { // Figure out the local point that we are working on Point<3> local_point = origin; if (stride_x_positive) local_point.x[0] += x; else local_point.x[0] -= x; if (stride_y_positive) local_point.x[1] += y; else local_point.x[1] -= y; if (stride_z_positive) local_point.x[2] += z; else local_point.x[2] -= z; // Compute the angular source MomentQuad quad = *(qtot_ptr + qtot_offsets * local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psi[ang] = quad[0]; if (num_moments > 1) { for (int l = 1; l < num_moments; l++) { const int moment_offset = corner_offset + l * num_angles; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { psi[ang] += device_ec[moment_offset+ang*blockDim.x+threadIdx.x] * quad[l]; } } } // If we're doing MMS if (mms_source) { #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psi[ang] += angle_read<3>(qim_ptr, qim_offsets, local_point, ang); } // Compute the initial solution #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] = psi[ang]; // X ghost cells if (x == 0) { // Ghost cell array Point<2> ghost_point = ghostx_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psii[ang] = angle_read<2>(ghostx_ptr, ghostx_offsets, ghost_point, ang); } // Else nothing: psii already contains next flux #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] += psii[ang] * device_mu[ang*blockDim.x + threadIdx.x] * hi; // Y ghost cells if (y == 0) { // Ghost cell array Point<2> ghost_point = ghosty_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psij[ang] = angle_read<2>(ghosty_ptr, ghosty_offsets, ghost_point, ang); } else { // Local array #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psij[ang] = yflux_pencil[x][ang]; } #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] += psij[ang] * device_eta[ang * blockDim.x + threadIdx.x] * hj; // Z ghost cells if (z == 0) { // Ghost cell array Point<2> ghost_point = ghostz_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psik[ang] = 
angle_read<2>(ghostz_ptr, ghostz_offsets, ghost_point, ang); } else { // Local array #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psik[ang] = zflux_plane[y][x][ang]; } #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) pc[ang] += psik[ang] * device_xi[ang * blockDim.x + threadIdx.x] * hk; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { double dinv = angle_read<3>(dinv_ptr, dinv_offsets, local_point, ang); pc[ang] *= dinv; } // NO FIXUP #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psii[ang] = 2.0 * pc[ang] - psii[ang]; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psij[ang] = 2.0 * pc[ang] - psij[ang]; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) psik[ang] = 2.0 * pc[ang] - psik[ang]; // Write out the ghost regions // X ghost if (x == (x_range - 1)) { Point<2> ghost_point = ghostx_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) angle_write<2>(ghostx_ptr, ghostx_offsets, ghost_point, ang, psii[ang]); } // Y ghost if (y == (y_range - 1)) { Point<2> ghost_point = ghosty_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) angle_write<2>(ghosty_ptr, ghosty_offsets, ghost_point, ang, psij[ang]); } else { // Write to the pencil #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) yflux_pencil[x][ang] = psij[ang]; } // Z ghost if (z == (z_range - 1)) { Point<2> ghost_point = ghostz_point(local_point); #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) angle_write<2>(ghostz_ptr, ghostz_offsets, ghost_point, ang, psik[ang]); } else { #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) zflux_plane[y][x][ang] = psik[ang]; } // Finally we apply reductions to the flux moments double total = 0.0; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) { psi[ang] = device_w[ang * blockDim.x + threadIdx.x] * pc[ang]; total += psi[ang]; } // CTA-wide reduction to one warp and then down to one thread #pragma unroll for (int i = 16; i >= 1; i /= 2) { int hi_part = __shfl_xor(__double2hiint(total), i, 32); int lo_part = __shfl_xor(__double2loint(total), i, 32); total += __hiloint2double(hi_part,lo_part); } if (warpid == 0) double_trampoline[laneid] = 0.0; __syncthreads(); if (laneid == 0) double_trampoline[warpid] = total; __syncthreads(); if (warpid == 0) { total = double_trampoline[laneid]; #pragma unroll for (int i = 16; i >= 1; i /= 2) { int hi_part = __shfl_xor(__double2hiint(total), i, 32); int lo_part = __shfl_xor(__double2loint(total), i, 32); total += __hiloint2double(hi_part,lo_part); } // Do the reduction if (laneid == 0) { double *local_flux = flux_ptr + flux_offsets * local_point; ourAtomicAdd(local_flux, total); } } if (num_moments > 1) { for (int l = 1; l < num_moments; l++) { const int offset = l * num_angles + corner * num_angles * num_moments; total = 0.0; #pragma unroll for (int ang = 0; ang < THR_ANGLES; ang++) total += device_ec[offset + ang] * psi[ang]; __syncthreads(); if (warpid == 0) double_trampoline[laneid] = 0.0; __syncthreads(); if (laneid == 0) double_trampoline[warpid] = total; __syncthreads(); if (warpid == 0) total = double_trampoline[laneid]; #pragma unroll for (int i = 16; i >= 1; i /= 2) { int hi_part = __shfl_xor(__double2hiint(total), i, 32); int lo_part = __shfl_xor(__double2loint(total), i, 32); total += __hiloint2double(hi_part,lo_part); } if (laneid == 0) { double *local_fluxm = (double*)(fluxm_ptr + fluxm_offsets * local_point); local_fluxm += (l-1); ourAtomicAdd(local_fluxm, total); } } } } } } } __host__ void 
run_gpu_sweep(const Point<3> origin, const MomentQuad *qtot_ptr, double *flux_ptr, MomentTriple *fluxm_ptr, const double *dinv_ptr, const double *time_flux_in_ptr, double *time_flux_out_ptr, const double *t_xs_ptr, double *ghostx_ptr, double *ghosty_ptr, double *ghostz_ptr, const double *qim_ptr, const ByteOffset qtot_offsets[3], const ByteOffset flux_offsets[3], const ByteOffset fluxm_offsets[3], const ByteOffset dinv_offsets[3], const ByteOffset time_flux_in_offsets[3], const ByteOffset time_flux_out_offsets[3], const ByteOffset t_xs_offsets[3], const ByteOffset ghostx_offsets[2], const ByteOffset ghosty_offsets[2], const ByteOffset ghostz_offsets[2], const ByteOffset qim_offsets[3], const int x_range, const int y_range, const int z_range, const int corner, const bool stride_x_positive, const bool stride_y_positive, const bool stride_z_positive, const bool mms_source, const int num_moments, const double hi, const double hj, const double hk, const double vdelt, const int num_angles, const bool fixup) { // Figure out how many angles per thread we need const int max_threads_per_cta = 1024; const int angles_per_thread = (num_angles + max_threads_per_cta - 1) / max_threads_per_cta; // Have to be evenly divisible for now assert((num_angles % angles_per_thread) == 0); const int threads_per_cta = num_angles / angles_per_thread; dim3 block(threads_per_cta, 1, 1); // Teehee screw SKED! dim3 grid(1,1,1); if (fixup) { // Need fixup if (vdelt != 0.0) { // Time dependent switch (angles_per_thread) { case 1: { gpu_time_dependent_sweep_with_fixup<1><<<grid,block>>>(origin, qtot_ptr, flux_ptr, fluxm_ptr, dinv_ptr, time_flux_in_ptr, time_flux_out_ptr, t_xs_ptr, ghostx_ptr, ghosty_ptr, ghostz_ptr, qim_ptr, ByteOffsetArray<3>(qtot_offsets), ByteOffsetArray<3>(flux_offsets), ByteOffsetArray<3>(fluxm_offsets), ByteOffsetArray<3>(dinv_offsets), ByteOffsetArray<3>(time_flux_in_offsets), ByteOffsetArray<3>(time_flux_out_offsets), ByteOffsetArray<3>(t_xs_offsets), ByteOffsetArray<2>(ghostx_offsets), ByteOffsetArray<2>(ghosty_offsets), ByteOffsetArray<2>(ghostz_offsets), ByteOffsetArray<3>(qim_offsets), x_range, y_range, z_range, corner, stride_x_positive, stride_y_positive, stride_z_positive, mms_source, num_moments, hi, hj, hk, vdelt); break; } case 2: { gpu_time_dependent_sweep_with_fixup<2><<<grid,block>>>(origin, qtot_ptr, flux_ptr, fluxm_ptr, dinv_ptr, time_flux_in_ptr, time_flux_out_ptr, t_xs_ptr, ghostx_ptr, ghosty_ptr, ghostz_ptr, qim_ptr, ByteOffsetArray<3>(qtot_offsets), ByteOffsetArray<3>(flux_offsets), ByteOffsetArray<3>(fluxm_offsets), ByteOffsetArray<3>(dinv_offsets), ByteOffsetArray<3>(time_flux_in_offsets), ByteOffsetArray<3>(time_flux_out_offsets), ByteOffsetArray<3>(t_xs_offsets), ByteOffsetArray<2>(ghostx_offsets), ByteOffsetArray<2>(ghosty_offsets), ByteOffsetArray<2>(ghostz_offsets), ByteOffsetArray<3>(qim_offsets), x_range, y_range, z_range, corner, stride_x_positive, stride_y_positive, stride_z_positive, mms_source, num_moments, hi, hj, hk, vdelt); break; } default: printf("OH SNAP! That is a lot of angles! 
Add more cases!\n"); assert(false); } } else { // Time independent switch (angles_per_thread) { case 1: { gpu_time_independent_sweep_with_fixup<1><<<grid,block>>>(origin, qtot_ptr, flux_ptr, fluxm_ptr, dinv_ptr, t_xs_ptr, ghostx_ptr, ghosty_ptr, ghostz_ptr, qim_ptr, ByteOffsetArray<3>(qtot_offsets), ByteOffsetArray<3>(flux_offsets), ByteOffsetArray<3>(fluxm_offsets), ByteOffsetArray<3>(dinv_offsets), ByteOffsetArray<3>(t_xs_offsets), ByteOffsetArray<2>(ghostx_offsets), ByteOffsetArray<2>(ghosty_offsets), ByteOffsetArray<2>(ghostz_offsets), ByteOffsetArray<3>(qim_offsets), x_range, y_range, z_range, corner, stride_x_positive, stride_y_positive, stride_z_positive, mms_source, num_moments, hi, hj, hk); break; } case 2: { gpu_time_independent_sweep_with_fixup<2><<<grid,block>>>(origin, qtot_ptr, flux_ptr, fluxm_ptr, dinv_ptr, t_xs_ptr, ghostx_ptr, ghosty_ptr, ghostz_ptr, qim_ptr, ByteOffsetArray<3>(qtot_offsets), ByteOffsetArray<3>(flux_offsets), ByteOffsetArray<3>(fluxm_offsets), ByteOffsetArray<3>(dinv_offsets), ByteOffsetArray<3>(t_xs_offsets), ByteOffsetArray<2>(ghostx_offsets), ByteOffsetArray<2>(ghosty_offsets), ByteOffsetArray<2>(ghostz_offsets), ByteOffsetArray<3>(qim_offsets), x_range, y_range, z_range, corner, stride_x_positive, stride_y_positive, stride_z_positive, mms_source, num_moments, hi, hj, hk); break; } default: printf("ON SNAP! That is a lot of angles! Add more cases!\n"); assert(false); } } } else { // No fixup if (vdelt != 0.0) { // Time dependent switch (angles_per_thread) { case 1: { gpu_time_dependent_sweep_without_fixup<1><<<grid,block>>>(origin, qtot_ptr, flux_ptr, fluxm_ptr, dinv_ptr, time_flux_in_ptr, time_flux_out_ptr, ghostx_ptr, ghosty_ptr, ghostz_ptr, qim_ptr, ByteOffsetArray<3>(qtot_offsets), ByteOffsetArray<3>(flux_offsets), ByteOffsetArray<3>(fluxm_offsets), ByteOffsetArray<3>(dinv_offsets), ByteOffsetArray<3>(time_flux_in_offsets), ByteOffsetArray<3>(time_flux_out_offsets), ByteOffsetArray<2>(ghostx_offsets), ByteOffsetArray<2>(ghosty_offsets), ByteOffsetArray<2>(ghostz_offsets), ByteOffsetArray<3>(qim_offsets), x_range, y_range, z_range, corner, stride_x_positive, stride_y_positive, stride_z_positive, mms_source, num_moments, hi, hj, hk, vdelt); break; } case 2: { gpu_time_dependent_sweep_without_fixup<2><<<grid,block>>>(origin, qtot_ptr, flux_ptr, fluxm_ptr, dinv_ptr, time_flux_in_ptr, time_flux_out_ptr, ghostx_ptr, ghosty_ptr, ghostz_ptr, qim_ptr, ByteOffsetArray<3>(qtot_offsets), ByteOffsetArray<3>(flux_offsets), ByteOffsetArray<3>(fluxm_offsets), ByteOffsetArray<3>(dinv_offsets), ByteOffsetArray<3>(time_flux_in_offsets), ByteOffsetArray<3>(time_flux_out_offsets), ByteOffsetArray<2>(ghostx_offsets), ByteOffsetArray<2>(ghosty_offsets), ByteOffsetArray<2>(ghostz_offsets), ByteOffsetArray<3>(qim_offsets), x_range, y_range, z_range, corner, stride_x_positive, stride_y_positive, stride_z_positive, mms_source, num_moments, hi, hj, hk, vdelt); break; } default: printf("OH SNAP! That is a lot of angles! 
Add more cases!\n"); assert(false); } } else { // Time independent switch (angles_per_thread) { case 1: { gpu_time_independent_sweep_without_fixup<1><<<grid,block>>>(origin, qtot_ptr, flux_ptr, fluxm_ptr, dinv_ptr, t_xs_ptr, ghostx_ptr, ghosty_ptr, ghostz_ptr, qim_ptr, ByteOffsetArray<3>(qtot_offsets), ByteOffsetArray<3>(flux_offsets), ByteOffsetArray<3>(fluxm_offsets), ByteOffsetArray<3>(dinv_offsets), ByteOffsetArray<3>(t_xs_offsets), ByteOffsetArray<2>(ghostx_offsets), ByteOffsetArray<2>(ghosty_offsets), ByteOffsetArray<2>(ghostz_offsets), ByteOffsetArray<3>(qim_offsets), x_range, y_range, z_range, corner, stride_x_positive, stride_y_positive, stride_z_positive, mms_source, num_moments, hi, hj, hk); break; } case 2: { gpu_time_independent_sweep_without_fixup<2><<<grid,block>>>(origin, qtot_ptr, flux_ptr, fluxm_ptr, dinv_ptr, t_xs_ptr, ghostx_ptr, ghosty_ptr, ghostz_ptr, qim_ptr, ByteOffsetArray<3>(qtot_offsets), ByteOffsetArray<3>(flux_offsets), ByteOffsetArray<3>(fluxm_offsets), ByteOffsetArray<3>(dinv_offsets), ByteOffsetArray<3>(t_xs_offsets), ByteOffsetArray<2>(ghostx_offsets), ByteOffsetArray<2>(ghosty_offsets), ByteOffsetArray<2>(ghostz_offsets), ByteOffsetArray<3>(qim_offsets), x_range, y_range, z_range, corner, stride_x_positive, stride_y_positive, stride_z_positive, mms_source, num_moments, hi, hj, hk); break; } default: printf("ON SNAP! That is a lot of angles! Add more cases!\n"); assert(false); } } } }
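/*
 * A minimal, standalone CUDA sketch -- not part of the SNAP file above -- of two
 * idioms its sweep kernels lean on: the atomicCAS emulation of double-precision
 * atomicAdd used by ourAtomicAdd() on pre-Pascal GPUs, and the warp-level
 * reduction of a double performed by shuffling its high and low 32-bit halves.
 * The helper and kernel names here (atomic_add_double, reduce_and_accumulate)
 * are illustrative only, and the sketch uses the CUDA 9+ __shfl_xor_sync
 * intrinsic where the file above uses the older __shfl_xor form.
 */
#include <cstdio>
#include <cuda_runtime.h>

__device__ __forceinline__ void atomic_add_double(double *ptr, double value)
{
#if __CUDA_ARCH__ < 600
  // Pre-Pascal: emulate the add with atomicCAS on the 64-bit integer view of the double
  unsigned long long int *address_as_ull = (unsigned long long int*)ptr;
  unsigned long long int old = *address_as_ull, assumed;
  do {
    assumed = old;
    old = atomicCAS(address_as_ull, assumed,
                    __double_as_longlong(value + __longlong_as_double(assumed)));
  } while (assumed != old);
#else
  atomicAdd(ptr, value); // native double-precision atomicAdd from Pascal onward
#endif
}

__global__ void reduce_and_accumulate(const double *in, double *out, int n)
{
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  double total = (tid < n) ? in[tid] : 0.0;
  // Butterfly reduction across the warp: shuffle the hi/lo halves of the double
  for (int i = 16; i >= 1; i /= 2) {
    int hi_part = __shfl_xor_sync(0xffffffff, __double2hiint(total), i, 32);
    int lo_part = __shfl_xor_sync(0xffffffff, __double2loint(total), i, 32);
    total += __hiloint2double(hi_part, lo_part);
  }
  // Lane 0 of each warp now holds the warp sum; fold it into the global result
  if ((threadIdx.x & 31) == 0)
    atomic_add_double(out, total);
}

int main()
{
  const int n = 1024;
  double *d_in, *d_out, h_out = 0.0;
  cudaMalloc((void**)&d_in, n * sizeof(double));
  cudaMalloc((void**)&d_out, sizeof(double));
  // Fill the input with ones so the expected sum is exactly n
  double *h_in = new double[n];
  for (int i = 0; i < n; i++) h_in[i] = 1.0;
  cudaMemcpy(d_in, h_in, n * sizeof(double), cudaMemcpyHostToDevice);
  cudaMemcpy(d_out, &h_out, sizeof(double), cudaMemcpyHostToDevice);
  reduce_and_accumulate<<<(n + 255) / 256, 256>>>(d_in, d_out, n);
  cudaMemcpy(&h_out, d_out, sizeof(double), cudaMemcpyDeviceToHost);
  printf("sum = %f (expected %d)\n", h_out, n);
  cudaFree(d_in); cudaFree(d_out); delete[] h_in;
  return 0;
}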
79fff1335244e9f3a813d321a56c1dabbc487de3.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "axpb_y_f32.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

// Block shapes and matrix sizes swept by the benchmark harness
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);   // how many of the matrix sizes to sweep
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float a = 2;
            float *x = NULL;
            hipMalloc(&x, XSIZE*YSIZE);
            float b = 2;
            float *y = NULL;
            hipMalloc(&y, XSIZE*YSIZE);
            int len = 1;
            // Round the launch extent up to a multiple of the block shape
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            // Force context creation, then one synchronized launch to absorb first-launch overhead
            hipFree(0);
            hipLaunchKernelGGL(axpb_y_f32, dim3(gridBlock), dim3(threadBlock), 0, 0, a, x, b, y, len);
            hipDeviceSynchronize();
            // Warm-up launches
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(axpb_y_f32, dim3(gridBlock), dim3(threadBlock), 0, 0, a, x, b, y, len);
            }
            // Timed loop: measures the time to enqueue 1000 launches on the default stream
            // (no device synchronize before the end timestamp is taken)
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(axpb_y_f32, dim3(gridBlock), dim3(threadBlock), 0, 0, a, x, b, y, len);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')'
                 << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
79fff1335244e9f3a813d321a56c1dabbc487de3.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "axpb_y_f32.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

// Block shapes and matrix sizes swept by the benchmark harness
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);   // how many of the matrix sizes to sweep
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float a = 2;
            float *x = NULL;
            cudaMalloc(&x, XSIZE*YSIZE);
            float b = 2;
            float *y = NULL;
            cudaMalloc(&y, XSIZE*YSIZE);
            int len = 1;
            // Round the launch extent up to a multiple of the block shape
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            // Force context creation, then one synchronized launch to absorb first-launch overhead
            cudaFree(0);
            axpb_y_f32<<<gridBlock,threadBlock>>>(a, x, b, y, len);
            cudaDeviceSynchronize();
            // Warm-up launches
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                axpb_y_f32<<<gridBlock,threadBlock>>>(a, x, b, y, len);
            }
            // Timed loop: measures the time to enqueue 1000 launches on the default stream
            // (no device synchronize before the end timestamp is taken)
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                axpb_y_f32<<<gridBlock,threadBlock>>>(a, x, b, y, len);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')'
                 << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
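/*
 * The timed loops in the two harness files above enqueue 1000 kernel launches on
 * the default stream and read steady_clock without a trailing device
 * synchronization, so they effectively measure launch/enqueue cost. A common
 * alternative, sketched below under the assumption that the same axpb_y_f32.cu
 * header is available, brackets the launches with CUDA events and waits for
 * completion before reading the elapsed time. The helper name
 * time_axpb_launches and the fixed problem size are illustrative only.
 */
#include <cstdio>
#include <cuda_runtime.h>
#include "axpb_y_f32.cu"   // same kernel source the harness above includes

// Hypothetical helper: time `iters` launches of axpb_y_f32 with CUDA events.
static float time_axpb_launches(dim3 grid, dim3 block, float a, float *x,
                                float b, float *y, int len, int iters)
{
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start);
  for (int i = 0; i < iters; i++)
    axpb_y_f32<<<grid, block>>>(a, x, b, y, len);
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);             // wait until all launches have finished
  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop); // elapsed GPU time in milliseconds
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  return ms;
}

int main()
{
  const int XSIZE = 240, YSIZE = 240, len = 1;
  float *x = NULL, *y = NULL;
  cudaMalloc((void**)&x, sizeof(float) * XSIZE * YSIZE);
  cudaMalloc((void**)&y, sizeof(float) * XSIZE * YSIZE);
  dim3 grid(XSIZE / 8, YSIZE / 8), block(8, 8);
  float ms = time_axpb_launches(grid, block, 2.0f, x, 2.0f, y, len, 1000);
  printf("1000 launches: %f ms\n", ms);
  cudaFree(x);
  cudaFree(y);
  return 0;
}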
aa4b4b6262a0e04a701493972f8ec41747e64c0f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @generated from magmablas/zsymmetrize_tiles.cu normal z -> c, Tue Feb 9 16:05:33 2016 @author Mark Gates */ #include "magma_internal.h" #define NB 64 /* Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix. Grid is ceil(m/NB) x ntile. Each tile is m x m, and is divided into block rows, each NB x m. Each block has NB threads. Each thread copies one row, iterating across all columns below diagonal. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void csymmetrize_tiles_lower( int m, magmaFloatComplex *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.y*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; magmaFloatComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaFloatComplex *dAend = dA + i*ldda; while( dA < dAend ) { *dAT = MAGMA_C_CONJ(*dA); // upper := lower dA += ldda; dAT += 1; } } } // only difference with _lower version is direction dA=dAT instead of dAT=dA. __global__ void csymmetrize_tiles_upper( int m, magmaFloatComplex *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.y*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; magmaFloatComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaFloatComplex *dAend = dA + i*ldda; while( dA < dAend ) { *dA = MAGMA_C_CONJ(*dAT); // lower := upper dA += ldda; dAT += 1; } } } /** Purpose ------- CSYMMETRIZE_TILES copies lower triangle to upper triangle, or vice-versa, to make some blocks of dA into general representations of a symmetric block. This processes NTILE blocks, typically the diagonal blocks. Each block is offset by mstride rows and nstride columns from the previous block. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA that is valid on input. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] m INTEGER The number of rows & columns of each square block of dA. M >= 0. @param[in,out] dA COMPLEX array, dimension (LDDA,N) The matrix dA. N = m + nstride*(ntile-1). @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1, m + mstride*(ntile-1)). @param[in] ntile INTEGER Number of blocks to symmetrize. ntile >= 0. @param[in] mstride INTEGER Row offset from start of one block to start of next block. mstride >= 0. Either (mstride >= m) or (nstride >= m), to prevent m-by-m tiles from overlapping. @param[in] nstride INTEGER Column offset from start of one block to start of next block. nstride >= 0. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_csymmetrize_tiles_q( magma_uplo_t uplo, magma_int_t m, magmaFloatComplex_ptr dA, magma_int_t ldda, magma_int_t ntile, magma_int_t mstride, magma_int_t nstride, magma_queue_t queue ) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) info = -1; else if ( m < 0 ) info = -2; else if ( ldda < max(1,m + mstride*(ntile-1)) ) info = -5; else if ( ntile < 0 ) info = -6; else if ( mstride < 0 ) info = -7; else if ( nstride < 0 ) info = -8; else if ( mstride < m && nstride < m ) // only one must be >= m. info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || ntile == 0 ) return; dim3 threads( NB, 1 ); dim3 grid( magma_ceildiv( m, NB ), ntile ); //printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x ); if ( uplo == MagmaUpper ) { hipLaunchKernelGGL(( csymmetrize_tiles_upper) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dA, ldda, mstride, nstride ); } else { hipLaunchKernelGGL(( csymmetrize_tiles_lower) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dA, ldda, mstride, nstride ); } }
aa4b4b6262a0e04a701493972f8ec41747e64c0f.cu
/* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @generated from magmablas/zsymmetrize_tiles.cu normal z -> c, Tue Feb 9 16:05:33 2016 @author Mark Gates */ #include "magma_internal.h" #define NB 64 /* Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix. Grid is ceil(m/NB) x ntile. Each tile is m x m, and is divided into block rows, each NB x m. Each block has NB threads. Each thread copies one row, iterating across all columns below diagonal. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void csymmetrize_tiles_lower( int m, magmaFloatComplex *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.y*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; magmaFloatComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaFloatComplex *dAend = dA + i*ldda; while( dA < dAend ) { *dAT = MAGMA_C_CONJ(*dA); // upper := lower dA += ldda; dAT += 1; } } } // only difference with _lower version is direction dA=dAT instead of dAT=dA. __global__ void csymmetrize_tiles_upper( int m, magmaFloatComplex *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.y*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; magmaFloatComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaFloatComplex *dAend = dA + i*ldda; while( dA < dAend ) { *dA = MAGMA_C_CONJ(*dAT); // lower := upper dA += ldda; dAT += 1; } } } /** Purpose ------- CSYMMETRIZE_TILES copies lower triangle to upper triangle, or vice-versa, to make some blocks of dA into general representations of a symmetric block. This processes NTILE blocks, typically the diagonal blocks. Each block is offset by mstride rows and nstride columns from the previous block. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA that is valid on input. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] m INTEGER The number of rows & columns of each square block of dA. M >= 0. @param[in,out] dA COMPLEX array, dimension (LDDA,N) The matrix dA. N = m + nstride*(ntile-1). @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1, m + mstride*(ntile-1)). @param[in] ntile INTEGER Number of blocks to symmetrize. ntile >= 0. @param[in] mstride INTEGER Row offset from start of one block to start of next block. mstride >= 0. Either (mstride >= m) or (nstride >= m), to prevent m-by-m tiles from overlapping. @param[in] nstride INTEGER Column offset from start of one block to start of next block. nstride >= 0. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_csymmetrize_tiles_q( magma_uplo_t uplo, magma_int_t m, magmaFloatComplex_ptr dA, magma_int_t ldda, magma_int_t ntile, magma_int_t mstride, magma_int_t nstride, magma_queue_t queue ) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) info = -1; else if ( m < 0 ) info = -2; else if ( ldda < max(1,m + mstride*(ntile-1)) ) info = -5; else if ( ntile < 0 ) info = -6; else if ( mstride < 0 ) info = -7; else if ( nstride < 0 ) info = -8; else if ( mstride < m && nstride < m ) // only one must be >= m. info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || ntile == 0 ) return; dim3 threads( NB, 1 ); dim3 grid( magma_ceildiv( m, NB ), ntile ); //printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x ); if ( uplo == MagmaUpper ) { csymmetrize_tiles_upper <<< grid, threads, 0, queue->cuda_stream() >>> ( m, dA, ldda, mstride, nstride ); } else { csymmetrize_tiles_lower <<< grid, threads, 0, queue->cuda_stream() >>> ( m, dA, ldda, mstride, nstride ); } }
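The two dialects above implement the same tile-wise copy, so the access pattern is easy to study in isolation. Below is a minimal, self-contained CUDA sketch (the kernel name symmetrize_tiles_lower_float and the test sizes are illustrative assumptions, not part of MAGMA) that mirrors csymmetrize_tiles_lower for real-valued data: one thread per row of each m-by-m diagonal tile, copying the lower triangle into the upper triangle, with tiles spaced mstride rows and nstride columns apart in a column-major matrix of leading dimension ldda.

#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

#define NB 64

// Float analogue of csymmetrize_tiles_lower: block (bx, by) handles rows
// [bx*NB, bx*NB+NB) of tile `by`; each thread copies A(i,j) of its tile
// into A(j,i) for all j < i, i.e. upper := lower.
__global__ void symmetrize_tiles_lower_float(int m, float *dA, int ldda,
                                             int mstride, int nstride)
{
    dA += blockIdx.y * (mstride + (size_t)nstride * ldda);  // tile's top-left corner
    int i = blockIdx.x * NB + threadIdx.x;
    if (i >= m) return;
    for (int j = 0; j < i; ++j)
        dA[j + (size_t)i * ldda] = dA[i + (size_t)j * ldda];
}

int main()
{
    const int m = 4, ntile = 2, mstride = m, nstride = m;
    const int ldda  = m + mstride * (ntile - 1);   // rows spanned by all tiles
    const int ncols = m + nstride * (ntile - 1);   // columns spanned by all tiles
    std::vector<float> hA((size_t)ldda * ncols, 0.0f);

    // Fill the lower triangle of each diagonal tile with recognizable values.
    for (int t = 0; t < ntile; ++t)
        for (int j = 0; j < m; ++j)
            for (int i = j; i < m; ++i)
                hA[(t * mstride + i) + (size_t)(t * nstride + j) * ldda] = 100.0f * t + 10.0f * i + j;

    float *dA = nullptr;
    cudaMalloc(&dA, hA.size() * sizeof(float));
    cudaMemcpy(dA, hA.data(), hA.size() * sizeof(float), cudaMemcpyHostToDevice);

    dim3 threads(NB, 1);
    dim3 grid((m + NB - 1) / NB, ntile);           // same grid shape as the MAGMA wrapper
    symmetrize_tiles_lower_float<<<grid, threads>>>(m, dA, ldda, mstride, nstride);

    cudaMemcpy(hA.data(), dA, hA.size() * sizeof(float), cudaMemcpyDeviceToHost);
    printf("tile 0: A(0,1) = %g (expected 10, mirrored from A(1,0))\n", hA[0 + 1 * (size_t)ldda]);
    cudaFree(dA);
    return 0;
}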
eda5e1b68d61f356f0c02edd54a8f96b64b320f0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author GS <[email protected]> // #include <array/NDArrayFactory.h> #include <exceptions/cuda_exception.h> #include <helpers/ConstantTadHelper.h> #include <helpers/PointersManager.h> #include <helpers/ShapeUtils.h> #include <helpers/TAD.h> #include <ops/declarable/helpers/segment.h> #include <ops/declarable/helpers/segment_common.h> #include <execution/cuda/LaunchDims.h> namespace sd { namespace ops { namespace helpers { // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static SD_KERNEL void unsortedSegmentSqrtNLinearKernel(T* input, sd::LongType const* inputShape, I* indices, sd::LongType const* indicesShape, sd::LongType* starts, sd::LongType* lengths, sd::LongType numOfClasses, T* output, sd::LongType const* outputShape) { __shared__ sd::LongType xLen, zLen; if (threadIdx.x == 0) { xLen = shape::length(inputShape); zLen = shape::length(outputShape); } __syncthreads(); auto start = threadIdx.x + blockIdx.x * blockDim.x; auto step = blockDim.x * gridDim.x; for (auto idx = start; idx < xLen; idx += step) { auto yIndex = shape::getIndexOffset(idx, indicesShape); auto segment = indices[yIndex]; auto zIndex = shape::getIndexOffset(segment, outputShape); if (lengths[segment] == 0) continue; auto xIndex = shape::getIndexOffset(idx, inputShape); if(xIndex >= xLen) continue; sd::math::atomics::sd_atomicAdd(&output[zIndex], input[xIndex] / sd::math::sd_sqrt<sd::LongType, T>(lengths[segment])); } } // -------------------------------------------------------------------------------------------------------------- // // SegmentSqrtN kernel template <typename T, typename I> static SD_KERNEL void segmentSqrtNTadKernel(T* inputBuf, sd::LongType const* inputShape, sd::LongType const* inputTads, sd::LongType const* inputTadOffsets, I* indices, sd::LongType* starts, sd::LongType* lengths, sd::LongType numOfClasses, void* outputBuf, sd::LongType const* outputShape, sd::LongType const* outputTads, sd::LongType const* outputTadOffsets, sd::LongType numIndices) { if(blockIdx.x >= numIndices) return; __shared__ sd::LongType len, total; if (threadIdx.x == 0) { total = shape::sizeAt(inputShape, 0); len = shape::length(inputTads); } __syncthreads(); for (auto idx = blockIdx.x; idx < total; idx += gridDim.x) { auto segment = indices[idx]; auto x = inputBuf + inputTadOffsets[idx]; auto z = reinterpret_cast<T*>(outputBuf) + outputTadOffsets[segment]; auto start = starts[segment]; auto finish = start + lengths[segment]; for (auto e = threadIdx.x; e < len; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputTads); 
auto zIndex = shape::getIndexOffset(e, outputTads); sd::math::atomics::sd_atomicAdd(&z[zIndex], x[xIndex] / sd::math::sd_sqrt<sd::LongType, T>(lengths[segment])); } } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static void unsortedSegmentSqrtNFunctor_(sd::LaunchContext* context, NDArray* input, NDArray* indices, sd::LongType numOfClasses, NDArray* output) { auto stream = context->getCudaStream(); NDArray classesRangesBegs = NDArrayFactory::create<sd::LongType>('c', {numOfClasses}, context); NDArray classesRangesLens = NDArrayFactory::create<sd::LongType>('c', {numOfClasses}, context); classesRangesBegs.assign(indices->lengthOf()); classesRangesLens.assign(0); dim3 dims= getLaunchDims("segmentSqrtN"); fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens); sd::LongType* begins = reinterpret_cast<sd::LongType*>(classesRangesBegs.specialBuffer()); sd::LongType* lengths = reinterpret_cast<sd::LongType*>(classesRangesLens.specialBuffer()); output->nullify(); if (input->isVector()) { hipLaunchKernelGGL(( unsortedSegmentSqrtNLinearKernel<T, I>), dim3(dims.x), dim3(dims.y), dims.z, *stream, input->dataBuffer()->specialAsT<T>(), input->specialShapeInfo(), indices->dataBuffer()->specialAsT<I>(), indices->specialShapeInfo(), begins, lengths, numOfClasses, output->dataBuffer()->specialAsT<T>(), output->specialShapeInfo()); } else { output->nullify(); sd::LongType zero = 0; std::vector<sd::LongType> *dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), 1,&zero); auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions); auto inputTads = packX->specialShapeInfo(); auto inputTadOffsets = packX->specialOffsets(); auto outputTads = packZ->specialShapeInfo(); auto outputTadOffsets = packZ->specialOffsets(); dims.x = input->sizeAt(0); hipLaunchKernelGGL(( segmentSqrtNTadKernel<T, I>), dim3(dims.x), dim3(dims.y), dims.z, *stream, input->dataBuffer()->specialAsT<T>(), input->specialShapeInfo(), inputTads, inputTadOffsets, indices->dataBuffer()->specialAsT<I>(), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets, indices->lengthOf()); delete dimensions; } } // -------------------------------------------------------------------------------------------------------------- // void unsortedSegmentSqrtNFunctor(sd::LaunchContext* context, NDArray* input, NDArray* indices, sd::LongType numOfClasses, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices}); BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), unsortedSegmentSqrtNFunctor_, (context, input, indices, numOfClasses, output), SD_FLOAT_TYPES, SD_INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices}); } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static SD_KERNEL void segmentSqrtNBPLinearKernel(void* inputBuf, sd::LongType const* inputShape, void* eps, sd::LongType const* epsShape, void* indicesBuf, sd::LongType const* indicesShape, sd::LongType* lengths, void* outputBuf, sd::LongType const* outputShape) { __shared__ T* x; __shared__ T* gradIn; __shared__ T* gradOut; __shared__ I* y; __shared__ T* z; __shared__ sd::LongType xLen, gradLen; if (threadIdx.x == 0) { xLen = 
shape::length(inputShape); x = reinterpret_cast<T*>(inputBuf); y = reinterpret_cast<I*>(indicesBuf); z = reinterpret_cast<T*>(outputBuf); gradOut = reinterpret_cast<T*>(eps); gradLen = shape::length(epsShape); } __syncthreads(); auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = gridDim.x * blockDim.x; for (auto e = start; e < xLen; e += step) { auto zOffset = shape::getIndexOffset(e, outputShape); auto xOffset = shape::getIndexOffset(e, inputShape); auto yOffset = shape::getIndexOffset(e, indicesShape); auto classIndex = y[yOffset]; auto gradOffsetO = shape::getIndexOffset(classIndex, epsShape); z[zOffset] = T(gradOut[gradOffsetO] / math::sd_sqrt<sd::LongType, float>(lengths[classIndex])); } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static SD_KERNEL void segmentSqrtNBPTadKernel(void* inputBuf, sd::LongType const* inputShape, void* eps, sd::LongType const* epsShape, void* indicesBuf, sd::LongType const* indicesShape, sd::LongType* lengths, void* outputBuf, sd::LongType const* outputShape, sd::LongType const* inputTad, sd::LongType const* inputOffsets, sd::LongType const* gradOutTad, sd::LongType const* gradOutOffsets, sd::LongType const* outTad, sd::LongType const* outOffsets) { __shared__ T* x; __shared__ T* gradOut; __shared__ I* y; __shared__ T* z; __shared__ sd::LongType xLen, yLen, gradLen, currentLen; if (threadIdx.x == 0) { xLen = shape::length(inputShape); x = reinterpret_cast<T*>(inputBuf); y = reinterpret_cast<I*>(indicesBuf); z = reinterpret_cast<T*>(outputBuf); yLen = shape::length(indicesShape); gradOut = reinterpret_cast<T*>(eps); gradLen = shape::length(epsShape); currentLen = shape::length(outTad); } __syncthreads(); for (auto i = blockIdx.x; i < yLen; i += gridDim.x) { auto segment = y[i]; // yIndex]; T* currentOut = z + outOffsets[i]; T* outGrad = gradOut + gradOutOffsets[segment]; for (auto e = threadIdx.x; e < currentLen; e += blockDim.x) { auto zIndex = shape::getIndexOffset(e, outTad); auto gradIndex = shape::getIndexOffset(e, gradOutTad); if (lengths[segment] > 0) currentOut[zIndex] = T(outGrad[gradIndex] / math::sd_sqrt<sd::LongType, float>(lengths[segment])); } } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static sd::Status unsortedSegmentSqrtNFunctorBP_(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut, sd::LongType numOfClasses, NDArray* output) { auto stream = context->getCudaStream(); NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); auto numClasses = indices->e<sd::LongType>(indices->lengthOf() - 1) + 1; NDArray classesRangesLens = NDArrayFactory::create<sd::LongType>('c', {numClasses}, context); NDArray classesRangesBegs = NDArrayFactory::create<sd::LongType>('c', {numClasses}, context); classesRangesBegs.assign(indices->lengthOf()); classesRangesLens.assign(0); fillUpSegments(indices, numClasses, classesRangesBegs, classesRangesLens); sd::LongType* begins = reinterpret_cast<sd::LongType*>(classesRangesBegs.specialBuffer()); sd::LongType* lengths = reinterpret_cast<sd::LongType*>(classesRangesLens.specialBuffer()); if (input->isVector()) { sd::LongType loop_size = input->lengthOf(); auto numOfClasses = gradOut->lengthOf(); hipLaunchKernelGGL(( segmentSqrtNBPLinearKernel<T, I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(), 
input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), lengths, output->specialBuffer(), output->specialShapeInfo()); } else { sd::LongType zero = 0; std::vector<sd::LongType> *dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), 1,&zero); auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions); auto packGradOut = sd::ConstantTadHelper::getInstance().tadForDimensions(gradOut->shapeInfo(), dimensions); auto inputTads = packX->specialShapeInfo(); auto inputTadOffsets = packX->specialOffsets(); auto outputTads = packZ->specialShapeInfo(); auto outputTadOffsets = packZ->specialOffsets(); auto gradOutTads = packGradOut->specialShapeInfo(); auto gradOutTadOffsets = packGradOut->specialOffsets(); dim3 segmentBpTad2 = segmentBpTad(indices->lengthOf(),input->lengthOf()); hipLaunchKernelGGL(( segmentSqrtNBPTadKernel<T, I>), dim3(segmentBpTad2.y), dim3(segmentBpTad2.x), segmentBpTad2.z, *stream, input->specialBuffer(), input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), lengths, output->specialBuffer(), output->specialShapeInfo(), inputTads, inputTadOffsets, gradOutTads, gradOutTadOffsets, outputTads, outputTadOffsets); delete dimensions; } NDArray::registerSpecialUse({output}, {input, indices, gradOut}); return sd::Status::OK; } // -------------------------------------------------------------------------------------------------------------- // sd::Status unsortedSegmentSqrtNFunctorBP(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut, sd::LongType numOfClasses, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return unsortedSegmentSqrtNFunctorBP_, (context, input, indices, gradOut, numOfClasses, output), SD_FLOAT_TYPES, SD_INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices, gradOut}); } } // namespace helpers } // namespace ops } // namespace sd
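Stripped of the NDArray/TAD plumbing, the forward rule these helpers implement is simply out[seg[i]] += in[i] / sqrt(count[seg[i]]), accumulated with atomics because the segment indices are unsorted. A minimal, self-contained CUDA sketch of the 1-D case follows; the kernel name segment_sqrtn_1d, the host-computed counts, and the test data are illustrative assumptions, not part of the library.

#include <cuda_runtime.h>
#include <cstdio>
#include <cmath>

// Each thread handles one input element: out[seg] += in[i] / sqrt(count[seg]).
// Mirrors unsortedSegmentSqrtNLinearKernel for the vector case, with the
// per-segment counts precomputed on the host instead of via fillUpSegments.
__global__ void segment_sqrtn_1d(const float *in, const int *seg,
                                 const int *count, int n, float *out)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    int s = seg[i];
    if (count[s] == 0) return;
    atomicAdd(&out[s], in[i] / sqrtf((float)count[s]));
}

int main()
{
    const int n = 6, numClasses = 3;
    float hIn[n]  = {1, 2, 3, 4, 5, 6};
    int   hSeg[n] = {0, 0, 1, 1, 1, 2};        // unsorted indices also work
    int   hCnt[numClasses] = {2, 3, 1};        // elements per segment
    float hOut[numClasses] = {0, 0, 0};

    float *dIn, *dOut; int *dSeg, *dCnt;
    cudaMalloc(&dIn, sizeof(hIn));   cudaMalloc(&dOut, sizeof(hOut));
    cudaMalloc(&dSeg, sizeof(hSeg)); cudaMalloc(&dCnt, sizeof(hCnt));
    cudaMemcpy(dIn, hIn, sizeof(hIn), cudaMemcpyHostToDevice);
    cudaMemcpy(dSeg, hSeg, sizeof(hSeg), cudaMemcpyHostToDevice);
    cudaMemcpy(dCnt, hCnt, sizeof(hCnt), cudaMemcpyHostToDevice);
    cudaMemset(dOut, 0, sizeof(hOut));

    segment_sqrtn_1d<<<1, 128>>>(dIn, dSeg, dCnt, n, dOut);
    cudaMemcpy(hOut, dOut, sizeof(hOut), cudaMemcpyDeviceToHost);

    // expected: {(1+2)/sqrt(2), (3+4+5)/sqrt(3), 6/sqrt(1)}
    for (int c = 0; c < numClasses; ++c) printf("out[%d] = %f\n", c, hOut[c]);
    cudaFree(dIn); cudaFree(dOut); cudaFree(dSeg); cudaFree(dCnt);
    return 0;
}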
eda5e1b68d61f356f0c02edd54a8f96b64b320f0.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author GS <[email protected]> // #include <array/NDArrayFactory.h> #include <exceptions/cuda_exception.h> #include <helpers/ConstantTadHelper.h> #include <helpers/PointersManager.h> #include <helpers/ShapeUtils.h> #include <helpers/TAD.h> #include <ops/declarable/helpers/segment.h> #include <ops/declarable/helpers/segment_common.h> #include <execution/cuda/LaunchDims.h> namespace sd { namespace ops { namespace helpers { // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static SD_KERNEL void unsortedSegmentSqrtNLinearKernel(T* input, sd::LongType const* inputShape, I* indices, sd::LongType const* indicesShape, sd::LongType* starts, sd::LongType* lengths, sd::LongType numOfClasses, T* output, sd::LongType const* outputShape) { __shared__ sd::LongType xLen, zLen; if (threadIdx.x == 0) { xLen = shape::length(inputShape); zLen = shape::length(outputShape); } __syncthreads(); auto start = threadIdx.x + blockIdx.x * blockDim.x; auto step = blockDim.x * gridDim.x; for (auto idx = start; idx < xLen; idx += step) { auto yIndex = shape::getIndexOffset(idx, indicesShape); auto segment = indices[yIndex]; auto zIndex = shape::getIndexOffset(segment, outputShape); if (lengths[segment] == 0) continue; auto xIndex = shape::getIndexOffset(idx, inputShape); if(xIndex >= xLen) continue; sd::math::atomics::sd_atomicAdd(&output[zIndex], input[xIndex] / sd::math::sd_sqrt<sd::LongType, T>(lengths[segment])); } } // -------------------------------------------------------------------------------------------------------------- // // SegmentSqrtN kernel template <typename T, typename I> static SD_KERNEL void segmentSqrtNTadKernel(T* inputBuf, sd::LongType const* inputShape, sd::LongType const* inputTads, sd::LongType const* inputTadOffsets, I* indices, sd::LongType* starts, sd::LongType* lengths, sd::LongType numOfClasses, void* outputBuf, sd::LongType const* outputShape, sd::LongType const* outputTads, sd::LongType const* outputTadOffsets, sd::LongType numIndices) { if(blockIdx.x >= numIndices) return; __shared__ sd::LongType len, total; if (threadIdx.x == 0) { total = shape::sizeAt(inputShape, 0); len = shape::length(inputTads); } __syncthreads(); for (auto idx = blockIdx.x; idx < total; idx += gridDim.x) { auto segment = indices[idx]; auto x = inputBuf + inputTadOffsets[idx]; auto z = reinterpret_cast<T*>(outputBuf) + outputTadOffsets[segment]; auto start = starts[segment]; auto finish = start + lengths[segment]; for (auto e = threadIdx.x; e < len; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputTads); auto zIndex = shape::getIndexOffset(e, outputTads); 
sd::math::atomics::sd_atomicAdd(&z[zIndex], x[xIndex] / sd::math::sd_sqrt<sd::LongType, T>(lengths[segment])); } } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static void unsortedSegmentSqrtNFunctor_(sd::LaunchContext* context, NDArray* input, NDArray* indices, sd::LongType numOfClasses, NDArray* output) { auto stream = context->getCudaStream(); NDArray classesRangesBegs = NDArrayFactory::create<sd::LongType>('c', {numOfClasses}, context); NDArray classesRangesLens = NDArrayFactory::create<sd::LongType>('c', {numOfClasses}, context); classesRangesBegs.assign(indices->lengthOf()); classesRangesLens.assign(0); dim3 dims= getLaunchDims("segmentSqrtN"); fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens); sd::LongType* begins = reinterpret_cast<sd::LongType*>(classesRangesBegs.specialBuffer()); sd::LongType* lengths = reinterpret_cast<sd::LongType*>(classesRangesLens.specialBuffer()); output->nullify(); if (input->isVector()) { unsortedSegmentSqrtNLinearKernel<T, I><<<dims.x, dims.y, dims.z, *stream>>>( input->dataBuffer()->specialAsT<T>(), input->specialShapeInfo(), indices->dataBuffer()->specialAsT<I>(), indices->specialShapeInfo(), begins, lengths, numOfClasses, output->dataBuffer()->specialAsT<T>(), output->specialShapeInfo()); } else { output->nullify(); sd::LongType zero = 0; std::vector<sd::LongType> *dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), 1,&zero); auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions); auto inputTads = packX->specialShapeInfo(); auto inputTadOffsets = packX->specialOffsets(); auto outputTads = packZ->specialShapeInfo(); auto outputTadOffsets = packZ->specialOffsets(); dims.x = input->sizeAt(0); segmentSqrtNTadKernel<T, I><<<dims.x, dims.y, dims.z, *stream>>>( input->dataBuffer()->specialAsT<T>(), input->specialShapeInfo(), inputTads, inputTadOffsets, indices->dataBuffer()->specialAsT<I>(), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets, indices->lengthOf()); delete dimensions; } } // -------------------------------------------------------------------------------------------------------------- // void unsortedSegmentSqrtNFunctor(sd::LaunchContext* context, NDArray* input, NDArray* indices, sd::LongType numOfClasses, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices}); BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), unsortedSegmentSqrtNFunctor_, (context, input, indices, numOfClasses, output), SD_FLOAT_TYPES, SD_INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices}); } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static SD_KERNEL void segmentSqrtNBPLinearKernel(void* inputBuf, sd::LongType const* inputShape, void* eps, sd::LongType const* epsShape, void* indicesBuf, sd::LongType const* indicesShape, sd::LongType* lengths, void* outputBuf, sd::LongType const* outputShape) { __shared__ T* x; __shared__ T* gradIn; __shared__ T* gradOut; __shared__ I* y; __shared__ T* z; __shared__ sd::LongType xLen, gradLen; if (threadIdx.x == 0) { xLen = shape::length(inputShape); x = reinterpret_cast<T*>(inputBuf); y = reinterpret_cast<I*>(indicesBuf); z = 
reinterpret_cast<T*>(outputBuf); gradOut = reinterpret_cast<T*>(eps); gradLen = shape::length(epsShape); } __syncthreads(); auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = gridDim.x * blockDim.x; for (auto e = start; e < xLen; e += step) { auto zOffset = shape::getIndexOffset(e, outputShape); auto xOffset = shape::getIndexOffset(e, inputShape); auto yOffset = shape::getIndexOffset(e, indicesShape); auto classIndex = y[yOffset]; auto gradOffsetO = shape::getIndexOffset(classIndex, epsShape); z[zOffset] = T(gradOut[gradOffsetO] / math::sd_sqrt<sd::LongType, float>(lengths[classIndex])); } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static SD_KERNEL void segmentSqrtNBPTadKernel(void* inputBuf, sd::LongType const* inputShape, void* eps, sd::LongType const* epsShape, void* indicesBuf, sd::LongType const* indicesShape, sd::LongType* lengths, void* outputBuf, sd::LongType const* outputShape, sd::LongType const* inputTad, sd::LongType const* inputOffsets, sd::LongType const* gradOutTad, sd::LongType const* gradOutOffsets, sd::LongType const* outTad, sd::LongType const* outOffsets) { __shared__ T* x; __shared__ T* gradOut; __shared__ I* y; __shared__ T* z; __shared__ sd::LongType xLen, yLen, gradLen, currentLen; if (threadIdx.x == 0) { xLen = shape::length(inputShape); x = reinterpret_cast<T*>(inputBuf); y = reinterpret_cast<I*>(indicesBuf); z = reinterpret_cast<T*>(outputBuf); yLen = shape::length(indicesShape); gradOut = reinterpret_cast<T*>(eps); gradLen = shape::length(epsShape); currentLen = shape::length(outTad); } __syncthreads(); for (auto i = blockIdx.x; i < yLen; i += gridDim.x) { auto segment = y[i]; // yIndex]; T* currentOut = z + outOffsets[i]; T* outGrad = gradOut + gradOutOffsets[segment]; for (auto e = threadIdx.x; e < currentLen; e += blockDim.x) { auto zIndex = shape::getIndexOffset(e, outTad); auto gradIndex = shape::getIndexOffset(e, gradOutTad); if (lengths[segment] > 0) currentOut[zIndex] = T(outGrad[gradIndex] / math::sd_sqrt<sd::LongType, float>(lengths[segment])); } } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static sd::Status unsortedSegmentSqrtNFunctorBP_(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut, sd::LongType numOfClasses, NDArray* output) { auto stream = context->getCudaStream(); NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); auto numClasses = indices->e<sd::LongType>(indices->lengthOf() - 1) + 1; NDArray classesRangesLens = NDArrayFactory::create<sd::LongType>('c', {numClasses}, context); NDArray classesRangesBegs = NDArrayFactory::create<sd::LongType>('c', {numClasses}, context); classesRangesBegs.assign(indices->lengthOf()); classesRangesLens.assign(0); fillUpSegments(indices, numClasses, classesRangesBegs, classesRangesLens); sd::LongType* begins = reinterpret_cast<sd::LongType*>(classesRangesBegs.specialBuffer()); sd::LongType* lengths = reinterpret_cast<sd::LongType*>(classesRangesLens.specialBuffer()); if (input->isVector()) { sd::LongType loop_size = input->lengthOf(); auto numOfClasses = gradOut->lengthOf(); segmentSqrtNBPLinearKernel<T, I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>( input->specialBuffer(), input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), lengths, 
output->specialBuffer(), output->specialShapeInfo()); } else { sd::LongType zero = 0; std::vector<sd::LongType> *dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), 1,&zero); auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions); auto packGradOut = sd::ConstantTadHelper::getInstance().tadForDimensions(gradOut->shapeInfo(), dimensions); auto inputTads = packX->specialShapeInfo(); auto inputTadOffsets = packX->specialOffsets(); auto outputTads = packZ->specialShapeInfo(); auto outputTadOffsets = packZ->specialOffsets(); auto gradOutTads = packGradOut->specialShapeInfo(); auto gradOutTadOffsets = packGradOut->specialOffsets(); dim3 segmentBpTad2 = segmentBpTad(indices->lengthOf(),input->lengthOf()); segmentSqrtNBPTadKernel<T, I><<<segmentBpTad2.y, segmentBpTad2.x, segmentBpTad2.z, *stream>>>( input->specialBuffer(), input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), lengths, output->specialBuffer(), output->specialShapeInfo(), inputTads, inputTadOffsets, gradOutTads, gradOutTadOffsets, outputTads, outputTadOffsets); delete dimensions; } NDArray::registerSpecialUse({output}, {input, indices, gradOut}); return sd::Status::OK; } // -------------------------------------------------------------------------------------------------------------- // sd::Status unsortedSegmentSqrtNFunctorBP(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut, sd::LongType numOfClasses, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return unsortedSegmentSqrtNFunctorBP_, (context, input, indices, gradOut, numOfClasses, output), SD_FLOAT_TYPES, SD_INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices, gradOut}); } } // namespace helpers } // namespace ops } // namespace sd
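The backward kernels above reduce to a broadcast: every element of segment s contributes to the forward output with weight 1/sqrt(count[s]), so the gradient w.r.t. each input element is simply gradOut[seg[i]] / sqrt(count[seg[i]]). A self-contained 1-D sketch of that scatter follows; the names and test data are illustrative assumptions, independent of the NDArray machinery.

#include <cuda_runtime.h>
#include <cstdio>
#include <cmath>

// Backward rule of segment-sqrt(N): gradIn[i] = gradOut[seg[i]] / sqrt(count[seg[i]]).
// Mirrors segmentSqrtNBPLinearKernel for the vector case.
__global__ void segment_sqrtn_bp_1d(const float *gradOut, const int *seg,
                                    const int *count, int n, float *gradIn)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    int s = seg[i];
    gradIn[i] = (count[s] > 0) ? gradOut[s] / sqrtf((float)count[s]) : 0.0f;
}

int main()
{
    const int n = 5, numClasses = 2;
    float hGo[numClasses] = {10.0f, 30.0f};    // upstream gradient, one value per segment
    int   hSeg[n] = {0, 1, 1, 0, 1};
    int   hCnt[numClasses] = {2, 3};
    float hGi[n];

    float *dGo, *dGi; int *dSeg, *dCnt;
    cudaMalloc(&dGo, sizeof(hGo)); cudaMalloc(&dGi, sizeof(hGi));
    cudaMalloc(&dSeg, sizeof(hSeg)); cudaMalloc(&dCnt, sizeof(hCnt));
    cudaMemcpy(dGo, hGo, sizeof(hGo), cudaMemcpyHostToDevice);
    cudaMemcpy(dSeg, hSeg, sizeof(hSeg), cudaMemcpyHostToDevice);
    cudaMemcpy(dCnt, hCnt, sizeof(hCnt), cudaMemcpyHostToDevice);

    segment_sqrtn_bp_1d<<<(n + 127) / 128, 128>>>(dGo, dSeg, dCnt, n, dGi);
    cudaMemcpy(hGi, dGi, sizeof(hGi), cudaMemcpyDeviceToHost);

    // expected: segment-0 positions get 10/sqrt(2), segment-1 positions get 30/sqrt(3)
    for (int i = 0; i < n; ++i) printf("gradIn[%d] = %f\n", i, hGi[i]);
    cudaFree(dGo); cudaFree(dGi); cudaFree(dSeg); cudaFree(dCnt);
    return 0;
}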
ff1bd15d5887f0571e5360540be647a941088d2b.hip
// !!! This is a file automatically generated by hipify!!!
// Bilinear image interpolation using the shared memory optimization technique.
// In this method the image is interpolated in 3 phases, although the shared memory optimization did not give much speedup in this case.
#include <Windows.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <stdio.h>
#include <time.h>
#include <iostream>
#include <device_atomic_functions.h>
#include <hip/device_functions.h>
#include <stdlib.h>

__global__ void bilinear1(unsigned char *new_image, const unsigned char *image, int rows, int cols) // First phase of bilinear interpolation
{
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    int row = threadIdx.y + blockDim.y * blockIdx.y;
    int index = row * cols / 2 + col;
    row *= 2;
    col *= 2;
    new_image[row * cols + col] = image[index];
    __syncthreads();
}

__global__ void bilinear2(unsigned char *new_image, const unsigned char *image, int rows, int cols) // Second phase of image interpolation
{
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    int row = threadIdx.y + blockDim.y * blockIdx.y;
    int index = row * cols / 2 + col;
    row *= 2;
    col *= 2;
    new_image[(row + 1) * cols + col + 1] = (new_image[(row + 2) * cols + col] + new_image[(row * cols + col + 2)] +
                                             new_image[(row + 2) * cols + col + 2] + new_image[(row * cols + col)]) / 4;
    __syncthreads();
}

__global__ void bilinear3(unsigned char *new_image, const unsigned char *image, int rows, int cols) // Third phase of image interpolation
{
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    int row = threadIdx.y + blockDim.y * blockIdx.y;
    int index = row * cols / 2 + col;
    row *= 2;
    col *= 2;
    new_image[row * cols + col + 1] = (new_image[row * cols + col] + new_image[(row - 1) * cols + col + 1] +
                                       new_image[row * cols + col + 2] + new_image[(row + 1) * cols + col + 1]) / 4;
    new_image[(row + 1) * cols + col] = (new_image[row * cols + col] + new_image[(row + 1) * cols + col - 1] +
                                         new_image[(row + 1) * cols + col + 1] + new_image[(row + 2) * cols + col]) / 4;
    __syncthreads();
}
ff1bd15d5887f0571e5360540be647a941088d2b.cu
// Bilinear image interpolation using the shared memory optimization technique.
// In this method the image is interpolated in 3 phases, although the shared memory optimization did not give much speedup in this case.
#include <Windows.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <stdio.h>
#include <time.h>
#include <iostream>
#include <device_atomic_functions.h>
#include <device_functions.h>
#include <stdlib.h>

__global__ void bilinear1(unsigned char *new_image, const unsigned char *image, int rows, int cols) // First phase of bilinear interpolation
{
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    int row = threadIdx.y + blockDim.y * blockIdx.y;
    int index = row * cols / 2 + col;
    row *= 2;
    col *= 2;
    new_image[row * cols + col] = image[index];
    __syncthreads();
}

__global__ void bilinear2(unsigned char *new_image, const unsigned char *image, int rows, int cols) // Second phase of image interpolation
{
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    int row = threadIdx.y + blockDim.y * blockIdx.y;
    int index = row * cols / 2 + col;
    row *= 2;
    col *= 2;
    new_image[(row + 1) * cols + col + 1] = (new_image[(row + 2) * cols + col] + new_image[(row * cols + col + 2)] +
                                             new_image[(row + 2) * cols + col + 2] + new_image[(row * cols + col)]) / 4;
    __syncthreads();
}

__global__ void bilinear3(unsigned char *new_image, const unsigned char *image, int rows, int cols) // Third phase of image interpolation
{
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    int row = threadIdx.y + blockDim.y * blockIdx.y;
    int index = row * cols / 2 + col;
    row *= 2;
    col *= 2;
    new_image[row * cols + col + 1] = (new_image[row * cols + col] + new_image[(row - 1) * cols + col + 1] +
                                       new_image[row * cols + col + 2] + new_image[(row + 1) * cols + col + 1]) / 4;
    new_image[(row + 1) * cols + col] = (new_image[row * cols + col] + new_image[(row + 1) * cols + col - 1] +
                                         new_image[(row + 1) * cols + col + 1] + new_image[(row + 2) * cols + col]) / 4;
    __syncthreads();
}
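The three phases above expect the host to launch them back-to-back with one thread per source pixel: the output is the 2x-upscaled rows x cols image and the input is the rows/2 x cols/2 source. The original host code is not included here, so the following is only a hedged sketch of such a driver; the image size, block shape, padding scheme, and test pattern are assumptions, and it relies on the bilinear1/2/3 kernels above being compiled in the same translation unit. The kernels carry no bounds checks, so the sketch picks dimensions that divide the block size evenly and over-allocates the output so the neighbour reads at the image border stay inside the allocation (border values are therefore only approximate).

#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

int main()
{
    // Output is rows x cols; the source image is the half-resolution rows/2 x cols/2.
    const int rows = 512, cols = 512;
    const int srcRows = rows / 2, srcCols = cols / 2;

    std::vector<unsigned char> hSrc((size_t)srcRows * srcCols);
    for (size_t i = 0; i < hSrc.size(); ++i) hSrc[i] = (unsigned char)(i % 256);
    std::vector<unsigned char> hDst((size_t)rows * cols);

    unsigned char *dSrc = nullptr, *dAlloc = nullptr;
    cudaMalloc(&dSrc, hSrc.size());
    cudaMemcpy(dSrc, hSrc.data(), hSrc.size(), cudaMemcpyHostToDevice);

    // Phases 2 and 3 read one output row above and up to two rows below the pixel
    // they write, so pad the allocation and offset the base pointer by one row.
    size_t padded = (size_t)(rows + 4) * cols;
    cudaMalloc(&dAlloc, padded);
    cudaMemset(dAlloc, 0, padded);
    unsigned char *dDst = dAlloc + cols;

    dim3 block(16, 16);
    dim3 grid(srcCols / block.x, srcRows / block.y);     // one thread per source pixel

    bilinear1<<<grid, block>>>(dDst, dSrc, rows, cols);  // scatter source pixels to even (row, col)
    bilinear2<<<grid, block>>>(dDst, dSrc, rows, cols);  // fill odd-odd centres from the four diagonal neighbours
    bilinear3<<<grid, block>>>(dDst, dSrc, rows, cols);  // fill the remaining even-odd / odd-even positions
    cudaDeviceSynchronize();

    cudaMemcpy(hDst.data(), dDst, hDst.size(), cudaMemcpyDeviceToHost);
    printf("upscaled(2,3) = %d\n", hDst[2 * (size_t)cols + 3]);

    cudaFree(dSrc);
    cudaFree(dAlloc);
    return 0;
}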
7a4a3365c7a09bf3b3bc6ce945b83dc578991961.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2020, Vijay Thakkar ([email protected]). **************************************************************************************************/ ////////////////////////////////////////////////////////////////////// // THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY // ////////////////////////////////////////////////////////////////////// #include "benchmark/benchmark.h" #include "cuasr/gemm/device/default_srgemm_configuration.h" #include "cuasr/gemm/device/srgemm.h" #include "cuasr/functional.h" #include "harness.h" //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 8 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_8x32x8_8x32x1_2x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = 
Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x32x8_16x32x1_4x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x64x8_16x64x1_4x8_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // 
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x32x8_32x32x1_8x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x64x8_32x64x1_8x8_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x64x8_32x64x1_8x8_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 1 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x32x8_64x32x1_8x8_8x4_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = 
cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x32x8_64x32x1_8x8_8x4_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 8 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_8x32x8_8x16x1_2x2_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 8 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = 
cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_8x64x8_8x32x1_2x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x32x8_16x16x1_4x2_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void 
BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x64x8_16x32x1_4x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x128x8_16x64x1_4x8_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / 
Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x32x8_32x16x1_4x4_8x4_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } 
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x64x8_32x32x1_8x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x128x8_32x64x1_8x8_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x128x8_32x64x1_8x8_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x64x8_64x32x1_8x8_8x4_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); 
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x64x8_64x32x1_8x8_8x4_1x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
//   Threads / Warp: 4 x 8
//    Warps / Block: 2 x 1
//      Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
  using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                  //
      cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::RowMajor,                            //
      precision, cutlass::layout::RowMajor,                            //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x32x8_16x32x1_4x4_4x8_2x1)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
//   Threads / Warp: 4 x 8
//    Warps / Block: 2 x 1
//      Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
  using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                  //
      cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::RowMajor,                            //
      precision, cutlass::layout::RowMajor,                            //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup
bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x32x8_32x32x1_8x4_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x64x8_32x64x1_8x8_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x64x8_32x64x1_8x8_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 1 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x32x8_64x32x1_8x8_8x4_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, 
OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x32x8_64x32x1_8x8_8x4_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x32x8_8x16x1_2x2_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // 
AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x64x8_8x32x1_2x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x32x8_16x16x1_4x2_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = 
Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x64x8_16x32x1_4x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x128x8_16x64x1_4x8_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename 
cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x32x8_32x16x1_4x4_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x64x8_32x32x1_8x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 64 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x128x8_32x64x1_8x8_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = 
cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x128x8_32x64x1_8x8_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x32x8_64x16x1_8x4_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x64x8_64x32x1_8x8_8x4_2x2(benchmark::State &state) { const auto N = 
static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x64x8_64x32x1_8x8_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x64x16_8x16x1_2x2_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 128 x 16 #if 
defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x128x16_8x32x1_2x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x32x8_16x8x1_2x2_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif 
//////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x64x8_16x16x1_4x2_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, 
benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x128x8_16x32x1_4x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x256x8_16x64x1_4x8_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x256x8_16x64x1_4x8_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { 
benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x64x8_32x16x1_4x4_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 64 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x128x8_32x32x1_8x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x128x8_32x32x1_8x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 64 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x256x8_32x64x1_8x8_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x256x8_32x64x1_8x8_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x128x8_64x32x1_8x8_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x128x8_64x32x1_8x8_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, 
cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x32x8_8x16x1_2x2_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x32x8_16x16x1_4x2_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using 
EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x64x8_16x32x1_4x4_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x32x8_32x16x1_4x4_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x64x8_32x32x1_8x4_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, 
precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x64x8_32x32x1_8x4_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x128x8_32x64x1_8x8_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x128x8_32x64x1_8x8_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 256 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_256x32x8_64x16x1_8x4_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; 
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_256x32x8_64x16x1_8x4_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 256 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_256x64x8_64x32x1_8x8_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_256x64x8_64x32x1_8x8_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 32 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = 
cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x64x16_8x16x1_2x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 32 x 128 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x128x16_8x32x1_2x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 64 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void 
BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x32x16_16x8x1_2x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x64x8_16x16x1_4x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / 
Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x128x8_16x32x1_4x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x128x8_16x32x1_4x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x256x8_16x64x1_4x8_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } 
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x256x8_16x64x1_4x8_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 128 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x32x16_32x8x1_4x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x64x8_32x16x1_4x4_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); 
hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x64x8_32x16x1_4x4_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x128x8_32x32x1_8x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x128x8_32x32x1_8x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 256 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_256x64x8_64x16x1_8x4_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 
      2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}

BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_256x64x8_64x16x1_8x4_8x4_4x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif
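// ----------------------------------------------------------------------------
// Sketch (not part of the generated file): the BENCHMARK() calls in this
// translation unit only *register* cases with Google Benchmark; the
// executable's main() is supplied by a separate driver in the build. The
// disabled snippet below is a minimal illustration of such a driver, shown
// only as an assumption about how these registrations are typically run.
#if 0  // illustrative only; the real driver lives in its own translation unit
#include "benchmark/benchmark.h"
// Expands to main(): parses --benchmark_* flags (e.g. --benchmark_filter=...)
// and runs every BENCHMARK() registration that the CUASR_BENCH_LEVEL guards
// above compiled into the binary.
BENCHMARK_MAIN();
#endif
// ----------------------------------------------------------------------------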
7a4a3365c7a09bf3b3bc6ce945b83dc578991961.cu
/*************************************************************************************************** * Copyright (c) 2020, Vijay Thakkar ([email protected]). **************************************************************************************************/ ////////////////////////////////////////////////////////////////////// // THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY // ////////////////////////////////////////////////////////////////////// #include "benchmark/benchmark.h" #include "cuasr/gemm/device/default_srgemm_configuration.h" #include "cuasr/gemm/device/srgemm.h" #include "cuasr/functional.h" #include "harness.h" //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 8 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_8x32x8_8x32x1_2x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = 
cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x32x8_16x32x1_4x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x64x8_16x64x1_4x8_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, 
cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x32x8_32x32x1_8x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x64x8_32x64x1_8x8_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x64x8_32x64x1_8x8_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 1 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x32x8_64x32x1_8x8_8x4_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using 
Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x32x8_64x32x1_8x8_8x4_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 8 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_8x32x8_8x16x1_2x2_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 8 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = 
cutlass::gemm::GemmShape<8, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_8x64x8_8x32x1_2x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x32x8_16x16x1_4x2_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) { const auto N = 
static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x64x8_16x32x1_4x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x128x8_16x64x1_4x8_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 2 // Threadblock: 32 x 32 x 8 #if 
defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x32x8_32x16x1_4x4_8x4_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x64x8_32x32x1_8x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif 
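// ----------------------------------------------------------------------------
// Sketch (for illustration only): the per-configuration comment headers in
// this file encode the SIMT tiling hierarchy, and a benchmark name such as
// ..._128x64x8_32x32x1_8x4_4x8_4x2 follows the same pattern for M and N:
//   warp tile (M,N)        = threads/warp (M,N) * elements/thread (M,N)
//   threadblock tile (M,N) = warp tile (M,N)    * warps/block (M,N)
// The disabled check below spells this out for that one configuration, using
// the same cutlass::gemm::GemmShape aliases the benchmarks themselves use.
#if 0  // illustrative only
#include "cutlass/gemm/gemm.h"
namespace tiling_sketch {
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape        = cutlass::gemm::GemmShape<32, 32, 8>;
// 4x8 threads per warp, each computing an 8x4 fragment -> 32x32 warp tile.
static_assert(WarpShape::kM == 4 * 8, "warp M = threads_M * elements_M");
static_assert(WarpShape::kN == 8 * 4, "warp N = threads_N * elements_N");
// 4x2 warps per threadblock -> 128x64 threadblock tile.
static_assert(ThreadblockShape::kM == WarpShape::kM * 4, "block M = warp M * warps_M");
static_assert(ThreadblockShape::kN == WarpShape::kN * 2, "block N = warp N * warps_N");
} // namespace tiling_sketch
#endif
// ----------------------------------------------------------------------------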
//////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x128x8_32x64x1_8x8_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x128x8_32x64x1_8x8_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x64x8_64x32x1_8x8_8x4_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, 
benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x64x8_64x32x1_8x8_8x4_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x32x8_16x32x1_4x4_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { 
benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x32x8_32x32x1_8x4_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x64x8_32x64x1_8x8_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x64x8_32x64x1_8x8_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 1 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x32x8_64x32x1_8x8_8x4_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x32x8_64x32x1_8x8_8x4_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x32x8_8x16x1_2x2_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, 
      precision, cutlass::layout::RowMajor, //
      precision, cutlass::layout::ColumnMajor, //
      precision, OpClass, SmArch, //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    cudaDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}

BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x64x8_8x32x1_2x4_4x8_2x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
  using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass, //
      cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm< //
      AddOp, MultOp, //
      precision, cutlass::layout::RowMajor, //
      precision, cutlass::layout::RowMajor, //
      precision, cutlass::layout::ColumnMajor, //
      precision, OpClass, SmArch, //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    cudaDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}

BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x32x8_16x16x1_4x2_4x8_2x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
  using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass, //
      cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm< //
      AddOp, MultOp, //
      precision, cutlass::layout::RowMajor, //
      precision, cutlass::layout::RowMajor, //
      precision, cutlass::layout::ColumnMajor, //
      precision, OpClass, SmArch, //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    cudaDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}

BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x64x8_16x32x1_4x4_4x8_2x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
  using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass, //
      cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm< //
      AddOp, MultOp, //
      precision, cutlass::layout::RowMajor, //
      precision, cutlass::layout::RowMajor, //
      precision, cutlass::layout::ColumnMajor, //
      precision, OpClass, SmArch, //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    cudaDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}

BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x128x8_16x64x1_4x8_4x8_2x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
  using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision,
OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x32x8_32x16x1_4x4_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x64x8_32x32x1_8x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 64 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x128x8_32x64x1_8x8_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using 
InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x128x8_32x64x1_8x8_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x32x8_64x16x1_8x4_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x64x8_64x32x1_8x8_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = 
cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x64x8_64x32x1_8x8_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x64x16_8x16x1_2x2_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 128 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void 
BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_16x128x16_8x32x1_2x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x32x8_16x8x1_2x2_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / 
Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x64x8_16x16x1_4x2_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } 
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x128x8_16x32x1_4x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x256x8_16x64x1_4x8_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x256x8_16x64x1_4x8_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); 
cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x64x8_32x16x1_4x4_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 64 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x128x8_32x32x1_8x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x128x8_32x32x1_8x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 64 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x256x8_32x64x1_8x8_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; 
// setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x256x8_32x64x1_8x8_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x128x8_64x32x1_8x8_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x128x8_64x32x1_8x8_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // 
precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x32x8_8x16x1_2x2_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x32x8_16x16x1_4x2_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = 
cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x64x8_16x32x1_4x4_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x32x8_32x16x1_4x4_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x64x8_32x32x1_8x4_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, 
cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x64x8_32x32x1_8x4_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x128x8_32x64x1_8x8_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x128x8_32x64x1_8x8_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 256 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_256x32x8_64x16x1_8x4_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 
1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_256x32x8_64x16x1_8x4_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 256 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_256x64x8_64x32x1_8x8_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_256x64x8_64x32x1_8x8_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 32 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using 
ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x64x16_8x16x1_2x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 32 x 128 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_32x128x16_8x32x1_2x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 64 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State 
&state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x32x16_16x8x1_2x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x64x8_16x16x1_4x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 128 x 8 #if 
defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x128x8_16x32x1_4x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x128x8_16x32x1_4x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x256x8_16x64x1_4x8_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_64x256x8_16x64x1_4x8_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif 
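////////////////////////////////////////////////////////////////////////////////
// Note (a hedged reading of the generated identifiers, not part of the
// benchmark definitions themselves): each function name in this file appears
// to encode, in order, the threadblock tile (MxNxK), the warp tile with a
// trailing x1 split factor, the elements computed per thread, the thread
// arrangement per warp, and the warp arrangement per threadblock, matching
// the comment block above each #if guard. Every variant instantiates the same
// (binary_or, binary_and) SRGEMM with row-major A and B and a column-major
// output, and reports 2 * N * N * N operations per iteration as an
// iteration-invariant rate counter labeled "Flop/s".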
//////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 128 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x32x16_32x8x1_4x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x64x8_32x16x1_4x4_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, 
benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x64x8_32x16x1_4x4_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x128x8_32x32x1_8x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_128x128x8_32x32x1_8x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 256 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_256x64x8_64x16x1_8x4_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::ColumnMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) 
{ benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n_256x64x8_64x16x1_8x4_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif
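Every benchmark body in the file above differs only in its ThreadblockShape and WarpShape; the operator configuration, the Srgemm instantiation, the harness setup, the timing loop, and the FLOP counter are repeated verbatim. The sketch below shows how that repetition could be factored into a single function template. It reuses only types that already appear in the file (DefaultSemiRingConfiguration, Srgemm, BenchHarness) and assumes the same headers are included; the template name and the BENCHMARK_TEMPLATE registration are illustrative, not part of the generated benchmark set.

// Sketch only: factors the repeated SM50 binary_or/binary_and SSRGEMM benchmark
// body over ThreadblockShape and WarpShape. Assumes the same cuASR/CUTLASS and
// Google Benchmark headers as the file above.
template <typename ThreadblockShape, typename WarpShape>
static void BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision        = float;
  using OpClass          = cutlass::arch::OpClassSimt;
  using SmArch           = cutlass::arch::Sm50;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<
      precision, precision, precision, precision, OpClass,
      cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;

  using Srgemm = cuasr::gemm::device::Srgemm<
      typename Config::AdditionOp, typename Config::MultiplicationOp,
      precision, cutlass::layout::RowMajor,
      precision, cutlass::layout::RowMajor,
      precision, cutlass::layout::ColumnMajor,
      precision, OpClass, SmArch,
      ThreadblockShape, WarpShape, InstructionShape,
      typename Config::EpilogueOutputOp,
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness and run the same timing loop as the expanded benchmarks
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    cudaDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"]
      = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}

// Example registration matching the 128x128x8 / 32x32x8 configuration above:
// BENCHMARK_TEMPLATE(BM_SM50_device_binary_or_binary_and_ssrgemm_tt_n,
//                    cutlass::gemm::GemmShape<128, 128, 8>,
//                    cutlass::gemm::GemmShape<32, 32, 8>)
//     ->RangeMultiplier(2)->Range(256, 4096);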
6f2cbc89f58fd7d9be85f997bfe41d6ffaeb2dae.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

#define TOLERANCE 0.00001
#define TRUE 1
#define FALSE 0

long usecs();
void initialize(double **A, int rows, int cols);
int calc_serial(double **A, int rows, int cols, int iters, double tolerance);
int calc_serial_v1(double **A, int rows, int cols, int iters, double tolerance);
int calc_omp(double **A, int rows, int cols, int iters, double tolerance, int num_threads);
int calc_gpu(double **A, int rows, int cols, int iters, double tolerance);
double verify(double **A, double **B, int rows, int cols);

// Jacobi-style 5-point stencil: each thread updates one point of w from its
// four neighbours in r (one block per row, one thread per column).
__global__ void calc_kernel(double* w, double* r, int rows, int cols, double tolerance)
{
    int row = blockIdx.x;
    int col = threadIdx.x;
    int idx = row*blockDim.x + col;
    if (row < rows && row > 0 && col < cols) {
        w[idx] = 0.2*(r[idx+1] + r[idx - 1] + r[(row-1)*blockDim.x + col] + r[(row+1)*blockDim.x + col]);
    }
}
6f2cbc89f58fd7d9be85f997bfe41d6ffaeb2dae.cu
#include "includes.h" #define TOLERANCE 0.00001 #define TRUE 1 #define FALSE 0 long usecs(); void initialize(double **A, int rows, int cols); int calc_serial(double **A, int rows, int cols, int iters, double tolerance); int calc_serial_v1(double **A, int rows, int cols, int iters, double tolerance); int calc_omp(double **A, int rows, int cols, int iters, double tolerance, int num_threads); int calc_gpu(double **A, int rows, int cols, int iters, double tolerance); double verify(double **A, double **B, int rows, int cols); __global__ void calc_kernel(double* w, double* r, int rows, int cols, double tolerance) { int row = blockIdx.x; int col = threadIdx.x; int idx = row*blockDim.x + col; if (row < rows && row > 0 && col < cols) { w[idx] = 0.2*(r[idx+1] + r[idx - 1] + r[(row-1)*blockDim.x + col] + r[(row+1)*blockDim.x + col]); } }
e6205b3528568f8f083d3eecfd518e3714c9244f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. // Thrust code needs to be compiled with nvcc #include <memory> #include "core/providers/cuda/shared_inc/cuda_utils.h" #include "core/providers/cuda/cu_inc/common.cuh" #include "cudnn_common.h" namespace onnxruntime { namespace cuda { std::once_flag GridDim::s_cachedDevicePropsInitFlag; std::vector<hipDeviceProp_t> GridDim::s_cachedDeviceProps; template <typename T> __global__ void _Fill( T* output_data, T val, CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); output_data[id] = val; } template <typename T> void Fill(T* output, T value, int64_t count) { int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock)); CUDA_LONG N = static_cast<CUDA_LONG>(count); hipLaunchKernelGGL(( _Fill<T>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0, output, value, N); } template <typename T> class ConstantBufferImpl : public IConstantBuffer<T> { public: ConstantBufferImpl(T val) : val_(val), buffer_(nullptr), count_(0) { } ~ConstantBufferImpl() { if (buffer_) hipFree(buffer_); } virtual const T* GetBuffer(size_t count) { if (count > count_) { if (buffer_) { hipFree(buffer_); buffer_ = nullptr; } CUDA_CALL_THROW(hipMalloc(&buffer_, count * sizeof(T))); count_ = count; Fill(buffer_, val_, count); } return buffer_; } private: T* buffer_; size_t count_; T val_; }; template <typename T> std::unique_ptr<IConstantBuffer<T>> CreateConstantOnes() { return std::make_unique<ConstantBufferImpl<T>>(Consts<T>::One); } template std::unique_ptr<IConstantBuffer<float>> CreateConstantOnes<float>(); template std::unique_ptr<IConstantBuffer<double>> CreateConstantOnes<double>(); template std::unique_ptr<IConstantBuffer<half>> CreateConstantOnes<half>(); #define SPECIALIZED_FILL(T) \ template void Fill<T>(T* output, T value, int64_t count); SPECIALIZED_FILL(int8_t) SPECIALIZED_FILL(int16_t) SPECIALIZED_FILL(int32_t) SPECIALIZED_FILL(int64_t) } // namespace cuda } // namespace onnxruntime
e6205b3528568f8f083d3eecfd518e3714c9244f.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. // Thrust code needs to be compiled with nvcc #include <memory> #include "core/providers/cuda/shared_inc/cuda_utils.h" #include "core/providers/cuda/cu_inc/common.cuh" #include "cudnn_common.h" namespace onnxruntime { namespace cuda { std::once_flag GridDim::s_cachedDevicePropsInitFlag; std::vector<cudaDeviceProp> GridDim::s_cachedDeviceProps; template <typename T> __global__ void _Fill( T* output_data, T val, CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); output_data[id] = val; } template <typename T> void Fill(T* output, T value, int64_t count) { int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock)); CUDA_LONG N = static_cast<CUDA_LONG>(count); _Fill<T><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(output, value, N); } template <typename T> class ConstantBufferImpl : public IConstantBuffer<T> { public: ConstantBufferImpl(T val) : val_(val), buffer_(nullptr), count_(0) { } ~ConstantBufferImpl() { if (buffer_) cudaFree(buffer_); } virtual const T* GetBuffer(size_t count) { if (count > count_) { if (buffer_) { cudaFree(buffer_); buffer_ = nullptr; } CUDA_CALL_THROW(cudaMalloc(&buffer_, count * sizeof(T))); count_ = count; Fill(buffer_, val_, count); } return buffer_; } private: T* buffer_; size_t count_; T val_; }; template <typename T> std::unique_ptr<IConstantBuffer<T>> CreateConstantOnes() { return std::make_unique<ConstantBufferImpl<T>>(Consts<T>::One); } template std::unique_ptr<IConstantBuffer<float>> CreateConstantOnes<float>(); template std::unique_ptr<IConstantBuffer<double>> CreateConstantOnes<double>(); template std::unique_ptr<IConstantBuffer<half>> CreateConstantOnes<half>(); #define SPECIALIZED_FILL(T) \ template void Fill<T>(T* output, T value, int64_t count); SPECIALIZED_FILL(int8_t) SPECIALIZED_FILL(int16_t) SPECIALIZED_FILL(int32_t) SPECIALIZED_FILL(int64_t) } // namespace cuda } // namespace onnxruntime
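The pair above defines the templated _Fill kernel, the Fill launcher, and a cached ConstantBufferImpl behind CreateConstantOnes. A short usage sketch follows; the function name example_fill_usage and the buffer size are made up for illustration, and it only uses instantiations the file explicitly provides (Fill<int32_t> via SPECIALIZED_FILL and CreateConstantOnes<float>).

// Minimal usage sketch for the helpers defined above (not part of onnxruntime itself).
#include <cuda_runtime.h>

void example_fill_usage()
{
  using namespace onnxruntime::cuda;

  const int64_t count = 1024;
  int32_t *d_data = nullptr;
  cudaMalloc(&d_data, count * sizeof(int32_t));

  // Launches _Fill with one thread per element; the grid size is derived from
  // GridDim::maxThreadsPerBlock inside Fill.
  Fill<int32_t>(d_data, /*value=*/42, count);

  // ConstantBufferImpl caches its allocation and only regrows when a larger
  // count is requested, so repeated GetBuffer calls reuse the same memory.
  auto ones = CreateConstantOnes<float>();
  const float *d_ones = ones->GetBuffer(count);
  (void)d_ones;

  cudaDeviceSynchronize();
  cudaFree(d_data);
}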
6653989d4869967fc34ce71b18e743266cdd1c0b.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "gradient.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *u = NULL;
            hipMalloc(&u, XSIZE*YSIZE);
            float *g = NULL;
            hipMalloc(&g, XSIZE*YSIZE);
            int nx = 1;
            int ny = 1;
            // Round the launch extents up to a multiple of the block dimensions
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL((gradient), dim3(gridBlock), dim3(threadBlock), 0, 0, u, g, nx, ny);
            hipDeviceSynchronize();
            // warm-up launches
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL((gradient), dim3(gridBlock), dim3(threadBlock), 0, 0, u, g, nx, ny);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL((gradient), dim3(gridBlock), dim3(threadBlock), 0, 0, u, g, nx, ny);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
6653989d4869967fc34ce71b18e743266cdd1c0b.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "gradient.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *u = NULL;
            cudaMalloc(&u, XSIZE*YSIZE);
            float *g = NULL;
            cudaMalloc(&g, XSIZE*YSIZE);
            int nx = 1;
            int ny = 1;
            // Round the launch extents up to a multiple of the block dimensions
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            gradient<<<gridBlock,threadBlock>>>(u, g, nx, ny);
            cudaDeviceSynchronize();
            // warm-up launches
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                gradient<<<gridBlock,threadBlock>>>(u, g, nx, ny);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                gradient<<<gridBlock,threadBlock>>>(u, g, nx, ny);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
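In the timing loop above, end is sampled immediately after the last asynchronous kernel launch with no synchronization, so the reported interval may largely reflect launch overhead rather than kernel execution time. One hedged alternative is to bracket the loop with CUDA events, as in the sketch below; the helper name is invented, and gradient, gridBlock, threadBlock, u, g, nx, ny are assumed to be the same objects set up in the benchmark above.

// Sketch: event-based timing for the same launch loop. Returns microseconds for
// `iters` back-to-back launches, measured after all of them have completed.
static float time_gradient_usecs(dim3 gridBlock, dim3 threadBlock,
                                 float *u, float *g, int nx, int ny, int iters = 1000) {
    cudaEvent_t ev_start, ev_stop;
    cudaEventCreate(&ev_start);
    cudaEventCreate(&ev_stop);

    cudaEventRecord(ev_start);
    for (int i = 0; i < iters; ++i) {
        gradient<<<gridBlock, threadBlock>>>(u, g, nx, ny);
    }
    cudaEventRecord(ev_stop);
    cudaEventSynchronize(ev_stop);   // wait until every launch has finished

    float msecs = 0.0f;
    cudaEventElapsedTime(&msecs, ev_start, ev_stop);
    cudaEventDestroy(ev_start);
    cudaEventDestroy(ev_stop);
    return msecs * 1000.0f;
}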
22fe40aa30a111c45668122b76786ecc5ef51f28.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. 
// You should wrap your allocation and copying statements like we've done in the // code we're supplying you. Here is an example of the unsafe way to allocate // memory on the GPU: // // hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "reference_calc.cpp" #include "utils.h" __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // TODO // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. // int2 has 2 integer components and an instruction const int2 p = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int m = p.y * numCols + p.x; // Boundary Conditions check from instructions if(p.x >= numCols || p.y >= numRows){ return; } float result = 0.0f; for(int i_y = 0; i_y < filterWidth; i_y ++){ for(int i_x = 0; i_x < filterWidth; i_x ++){ int w_x = p.x + i_x - filterWidth/2; int w_y = p.y + i_y - filterWidth/2; w_x = min(max(w_x, 0), numCols - 1); w_y = min(max(w_y, 0), numRows - 1); float f_val = filter[i_y * filterWidth + i_x]; float i_val = static_cast<float>(inputChannel[w_y * numCols + w_x]); result += f_val * i_val; } } outputChannel[m] = result; } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // TODO // // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. 
You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } const int2 p = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int m = p.y * numCols + p.x; // Boundary Conditions check from instructions if(p.x >= numCols || p.y >= numRows){ return; } redChannel[m] = inputImageRGBA[m].x; greenChannel[m] = inputImageRGBA[m].y; blueChannel[m] = inputImageRGBA[m].z; } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. __global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //TODO: //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with hipMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth)); //TODO: //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice); //Remember to use checkCudaErrors! checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { //TODO: Set reasonable block size (i.e., number of threads per block) const dim3 blockSize(32, 32, 1); const int rowSize = (numRows + 32 - 1) / 32; const int colSize = (numCols + 32 -1) / 32 ; //TODO: //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. 
const dim3 gridSize(colSize, rowSize, 1); //TODO: Launch a kernel for separating the RGBA image into different color channels hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //TODO: Call your convolution kernel here 3 times, once for each color channel. hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); // Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(hipFree(d_red)); checkCudaErrors(hipFree(d_green)); checkCudaErrors(hipFree(d_blue)); checkCudaErrors(hipFree(d_filter)); }
22fe40aa30a111c45668122b76786ecc5ef51f28.cu
// Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. // You should wrap your allocation and copying statements like we've done in the // code we're supplying you. 
Here is an example of the unsafe way to allocate // memory on the GPU: // // cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "reference_calc.cpp" #include "utils.h" __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // TODO // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. // int2 has 2 integer components and an instruction const int2 p = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int m = p.y * numCols + p.x; // Boundary Conditions check from instructions if(p.x >= numCols || p.y >= numRows){ return; } float result = 0.0f; for(int i_y = 0; i_y < filterWidth; i_y ++){ for(int i_x = 0; i_x < filterWidth; i_x ++){ int w_x = p.x + i_x - filterWidth/2; int w_y = p.y + i_y - filterWidth/2; w_x = min(max(w_x, 0), numCols - 1); w_y = min(max(w_y, 0), numRows - 1); float f_val = filter[i_y * filterWidth + i_x]; float i_val = static_cast<float>(inputChannel[w_y * numCols + w_x]); result += f_val * i_val; } } outputChannel[m] = result; } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // TODO // // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. 
You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } const int2 p = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int m = p.y * numCols + p.x; // Boundary Conditions check from instructions if(p.x >= numCols || p.y >= numRows){ return; } redChannel[m] = inputImageRGBA[m].x; greenChannel[m] = inputImageRGBA[m].y; blueChannel[m] = inputImageRGBA[m].z; } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. __global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //TODO: //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with cudaMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth)); //TODO: //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice); //Remember to use checkCudaErrors! checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { //TODO: Set reasonable block size (i.e., number of threads per block) const dim3 blockSize(32, 32, 1); const int rowSize = (numRows + 32 - 1) / 32; const int colSize = (numCols + 32 -1) / 32 ; //TODO: //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. 
const dim3 gridSize(colSize, rowSize, 1); //TODO: Launch a kernel for separating the RGBA image into different color channels separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); //TODO: Call your convolution kernel here 3 times, once for each color channel. gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); // Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. recombineChannels<<<gridSize, blockSize>>>(d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(cudaFree(d_red)); checkCudaErrors(cudaFree(d_green)); checkCudaErrors(cudaFree(d_blue)); checkCudaErrors(cudaFree(d_filter)); }
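The assignment text above suggests shared memory as a follow-up optimization. A hedged sketch of one way to do it is shown below: each block stages a (blockDim.x + filterWidth - 1)^2 tile of the input channel, with clamped borders, into dynamic shared memory and then convolves from the tile. The kernel name, the square-block assumption, and the dynamic shared-memory launch are illustrative choices, not the course's reference solution.

// Sketch: shared-memory variant of gaussian_blur. Launch with
//   gaussian_blur_shared<<<gridSize, blockSize, tileW * tileW>>>(...)
// where tileW = blockDim.x + filterWidth - 1 (assumes square thread blocks).
__global__ void gaussian_blur_shared(const unsigned char* const inputChannel,
                                     unsigned char* const outputChannel,
                                     int numRows, int numCols,
                                     const float* const filter, const int filterWidth)
{
  extern __shared__ unsigned char tile[];

  const int halo  = filterWidth / 2;
  const int tileW = blockDim.x + filterWidth - 1;

  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;

  // Cooperatively load the tile (including the halo), clamping reads to the image.
  for (int ty = threadIdx.y; ty < tileW; ty += blockDim.y) {
    for (int tx = threadIdx.x; tx < tileW; tx += blockDim.x) {
      int src_x = min(max((int)(blockIdx.x * blockDim.x) + tx - halo, 0), numCols - 1);
      int src_y = min(max((int)(blockIdx.y * blockDim.y) + ty - halo, 0), numRows - 1);
      tile[ty * tileW + tx] = inputChannel[src_y * numCols + src_x];
    }
  }
  __syncthreads();

  if (col >= numCols || row >= numRows)
    return;

  // The output pixel sits at tile offset (threadIdx + halo); neighbour (fx, fy)
  // of the filter therefore maps to tile index (threadIdx + f).
  float result = 0.0f;
  for (int fy = 0; fy < filterWidth; ++fy) {
    for (int fx = 0; fx < filterWidth; ++fx) {
      result += filter[fy * filterWidth + fx]
              * tile[(threadIdx.y + fy) * tileW + (threadIdx.x + fx)];
    }
  }
  outputChannel[row * numCols + col] = result;
}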
b2f8149cc61458bd9459061cb292ff4dc31fe216.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// Element-wise vector addition: C[i] = A[i] + B[i] for the first ds elements
__global__ void vadd(const float *A, const float *B, float *C, int ds){
  int idx = threadIdx.x+blockDim.x*blockIdx.x;
  if (idx < ds) C[idx] = A[idx] + B[idx];
}
b2f8149cc61458bd9459061cb292ff4dc31fe216.cu
#include "includes.h" __global__ void vadd(const float *A, const float *B, float *C, int ds){ int idx = threadIdx.x+blockDim.x*blockIdx.x; if (idx < ds) C[idx] = A[idx] + B[idx]; }
822601605bdb5a8d05ff70ae5a040648ce29e04d.hip
// !!! This is a file automatically generated by hipify!!! #include <opencv2/cudafeatures2d.hpp> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "labeling_algorithms.h" #include "register.h" #define BLOCK_ROWS 16 #define BLOCK_COLS 16 using namespace cv; namespace { // // This is a block-based algorithm. // Blocks are 2x2 sized, with internal pixels named as: // +---+ // |a b| // |c d| // +---+ // // Neighbour blocks of block X are named as: // +-+-+-+ // |P|Q|R| // +-+-+-+ // |S|X| // +-+-+ // enum class Info : unsigned char { a = 0, b = 1, c = 2, d = 3, P = 4, Q = 5, R = 6, S = 7 }; // Only use it with unsigned numeric types template <typename T> __device__ __forceinline__ unsigned char HasBit(T bitmap, Info pos) { return (bitmap >> static_cast<unsigned char>(pos)) & 1; } template <typename T> __device__ __forceinline__ unsigned char HasBit(T bitmap, unsigned char pos) { return (bitmap >> pos) & 1; } // Only use it with unsigned numeric types __device__ __forceinline__ void SetBit(unsigned char &bitmap, Info pos) { bitmap |= (1 << static_cast<unsigned char>(pos)); } // Returns the root index of the UFTree __device__ unsigned Find(const int *s_buf, unsigned n) { while (s_buf[n] != n) { n = s_buf[n]; } return n; } // Merges the UFTrees of a and b, linking one root to the other __device__ void Union(int *s_buf, unsigned a, unsigned b) { bool done; do { a = Find(s_buf, a); b = Find(s_buf, b); if (a < b) { int old = atomicMin(s_buf + b, a); done = (old == b); b = old; } else if (b < a) { int old = atomicMin(s_buf + a, b); done = (old == a); a = old; } else { done = true; } } while (!done); } __global__ void InitLabeling(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels, unsigned char *last_pixel) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned img_index = row * img.step + col; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { unsigned P = 0; // Bitmask representing two kinds of information // Bits 0, 1, 2, 3 are set if pixel a, b, c, d are foreground, respectively // Bits 4, 5, 6, 7 are set if block P, Q, R, S need to be merged to X in Merge phase unsigned char info = 0; char buffer[4]; *(reinterpret_cast<int*>(buffer)) = 0; // Read pairs of consecutive values in memory at once if (col + 1 < img.cols) { // This does not depend on endianness *(reinterpret_cast<int16_t*>(buffer)) = *(reinterpret_cast<int16_t*>(img.data + img_index)); if (row + 1 < img.rows) { *(reinterpret_cast<int16_t*>(buffer + 2)) = *(reinterpret_cast<int16_t*>(img.data + img_index + img.step)); } } else { buffer[0] = img.data[img_index]; if (row + 1 < img.rows) { buffer[2] = img.data[img_index + img.step]; } } if (buffer[0]) { P |= 0x777; SetBit(info, Info::a); } if (buffer[1]) { P |= (0x777 << 1); SetBit(info, Info::b); } if (buffer[2]) { P |= (0x777 << 4); SetBit(info, Info::c); } if (buffer[3]) { SetBit(info, Info::d); } if (col == 0) { P &= 0xEEEE; } if (col + 1 >= img.cols) { P &= 0x3333; } else if (col + 2 >= img.cols) { P &= 0x7777; } if (row == 0) { P &= 0xFFF0; } if (row + 1 >= img.rows) { P &= 0x00FF; } else if (row + 2 >= img.rows) { P &= 0x0FFF; } // P is now ready to be used to find neighbour blocks // P value avoids range errors int father_offset = 0; // P square if (HasBit(P, 0) && img.data[img_index - img.step - 1]) { father_offset = -(2 * (labels.step / labels.elem_size) + 2); } // Q square if ((HasBit(P, 1) && img.data[img_index - 
img.step]) || (HasBit(P, 2) && img.data[img_index + 1 - img.step])) { if (!father_offset) { father_offset = -(2 * (labels.step / labels.elem_size)); } else { SetBit(info, Info::Q); } } // R square if (HasBit(P, 3) && img.data[img_index + 2 - img.step]) { if (!father_offset) { father_offset = -(2 * (labels.step / labels.elem_size) - 2); } else { SetBit(info, Info::R); } } // S square if ((HasBit(P, 4) && img.data[img_index - 1]) || (HasBit(P, 8) && img.data[img_index + img.step - 1])) { if (!father_offset) { father_offset = -2; } else { SetBit(info, Info::S); } } labels.data[labels_index] = labels_index + father_offset; if (col + 1 < labels.cols) { last_pixel = reinterpret_cast<unsigned char *>(labels.data + labels_index + 1); } else if (row + 1 < labels.rows) { last_pixel = reinterpret_cast<unsigned char *>(labels.data + labels_index + labels.step / labels.elem_size); } *last_pixel = info; } } __global__ void Merge(cuda::PtrStepSzi labels, unsigned char *last_pixel) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { if (col + 1 < labels.cols) { last_pixel = reinterpret_cast<unsigned char *>(labels.data + labels_index + 1); } else if (row + 1 < labels.rows) { last_pixel = reinterpret_cast<unsigned char *>(labels.data + labels_index + labels.step / labels.elem_size); } unsigned char info = *last_pixel; if (HasBit(info, Info::Q)) { Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size)); } if (HasBit(info, Info::R)) { Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size) + 2); } if (HasBit(info, Info::S)) { Union(labels.data, labels_index, labels_index - 2); } } } __global__ void Compression(cuda::PtrStepSzi labels) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { unsigned label = labels.data[labels_index]; if (label < labels_index) { labels[labels_index] = Find(labels.data, label); } } } __global__ void FinalLabeling(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels, unsigned char *last_pixel) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { int label; unsigned char info; unsigned long long buffer; if (col + 1 < labels.cols) { buffer = *reinterpret_cast<unsigned long long *>(labels.data + labels_index); label = (buffer & (0xFFFFFFFF)) + 1; info = (buffer >> 32) & 0xFFFFFFFF; } else { label = labels[labels_index] + 1; if (row + 1 < labels.rows) { info = labels[labels_index + labels.step / labels.elem_size]; } else { info = *last_pixel; } } if (col + 1 < labels.cols) { *reinterpret_cast<unsigned long long *>(labels.data + labels_index) = (static_cast<unsigned long long>(HasBit(info, Info::b) * label) << 32) | (HasBit(info, Info::a) * label); if (row + 1 < labels.rows) { *reinterpret_cast<unsigned long long *>(labels.data + labels_index + labels.step / labels.elem_size) = (static_cast<unsigned long long>(HasBit(info, Info::d) * label) << 32) | (HasBit(info, Info::c) * label); } } else { labels[labels_index] = HasBit(info, Info::a) * label; if (row + 1 < labels.rows) { 
labels[labels_index + (labels.step / labels.elem_size)] = HasBit(info, Info::c) * label; } } } } } class BKE : public GpuLabeling2D<Connectivity2D::CONN_8> { private: dim3 grid_size_; dim3 block_size_; unsigned char *last_pixel_; bool last_pixel_allocated_; public: BKE() {} void PerformLabeling() { d_img_labels_.create(d_img_.size(), CV_32SC1); last_pixel_allocated_ = false; if ((d_img_.rows == 1 || d_img_.cols == 1) && !((d_img_.rows + d_img_.cols) % 2)) { hipMalloc(&last_pixel_, sizeof(unsigned char)); last_pixel_allocated_ = true; } else { last_pixel_ = d_img_labels_.data + ((d_img_labels_.rows - 2) * d_img_labels_.step) + (d_img_labels_.cols - 2) * d_img_labels_.elemSize(); } grid_size_ = dim3((((d_img_.cols + 1) / 2) + BLOCK_COLS - 1) / BLOCK_COLS, (((d_img_.rows + 1) / 2) + BLOCK_ROWS - 1) / BLOCK_ROWS, 1); block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1); InitLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_pixel_); //Mat1i init_blocks; //d_img_labels_.download(init_blocks); //cuda::GpuMat d_init_labels = d_img_labels_.clone(); //FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_init_labels); //Mat1i init_labels; //d_init_labels.download(init_labels); //d_init_labels.release(); Compression << <grid_size_, block_size_ >> > (d_img_labels_); //Mat1i compr_blocks; //d_img_labels_.download(compr_blocks); //cuda::GpuMat d_compr_labels = d_img_labels_.clone(); //FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_compr_labels); //Mat1i compr_labels; //d_compr_labels.download(compr_labels); //d_compr_labels.release(); //cuda::GpuMat d_expanded_connections; //d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1); //ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections); //Mat1b expanded_connections; //d_expanded_connections.download(expanded_connections); //d_expanded_connections.release(); Merge << <grid_size_, block_size_ >> > (d_img_labels_, last_pixel_); //Mat1i merge_blocks; //d_img_labels_.download(merge_blocks); //cuda::GpuMat d_merge_labels = d_img_labels_.clone(); //FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_merge_labels); //Mat1i merge_labels; //d_merge_labels.download(merge_labels); //d_merge_labels.release(); Compression << <grid_size_, block_size_ >> > (d_img_labels_); //Mat1i final_blocks; //d_img_labels_.download(final_blocks); FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_pixel_); //d_img_labels_.download(img_labels_); if (last_pixel_allocated_) { hipFree(last_pixel_); } hipDeviceSynchronize(); } private: void Alloc() { d_img_labels_.create(d_img_.size(), CV_32SC1); if ((d_img_.rows == 1 || d_img_.cols == 1) && !((d_img_.rows + d_img_.cols) % 2)) { hipMalloc(&last_pixel_, sizeof(unsigned char)); last_pixel_allocated_ = true; } else { last_pixel_ = d_img_labels_.data + ((d_img_labels_.rows - 2) * d_img_labels_.step) + (d_img_labels_.cols - 2) * d_img_labels_.elemSize(); } } void Dealloc() { if (last_pixel_allocated_) { hipFree(last_pixel_); } } double MemoryTransferHostToDevice() { perf_.start(); d_img_.upload(img_); perf_.stop(); return perf_.last(); } void MemoryTransferDeviceToHost() { d_img_labels_.download(img_labels_); } void AllScans() { grid_size_ = dim3((((d_img_.cols + 1) / 2) + BLOCK_COLS - 1) / BLOCK_COLS, (((d_img_.rows + 1) / 2) + BLOCK_ROWS - 1) / BLOCK_ROWS, 1); block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1); InitLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_pixel_); Compression << 
<grid_size_, block_size_ >> > (d_img_labels_); //cuda::GpuMat d_expanded_connections; //d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1); //ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections); //Mat1b expanded_connections; //d_expanded_connections.download(expanded_connections); //d_expanded_connections.release(); //Mat1i init_labels; //d_block_labels_.download(init_labels); Merge << <grid_size_, block_size_ >> > (d_img_labels_, last_pixel_); //Mat1i block_info_final; //d_img_labels_.download(block_info_final); Compression << <grid_size_, block_size_ >> > (d_img_labels_); FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_pixel_); hipDeviceSynchronize(); } public: void PerformLabelingWithSteps() { perf_.start(); Alloc(); perf_.stop(); double alloc_timing = perf_.last(); perf_.start(); AllScans(); perf_.stop(); perf_.store(Step(StepType::ALL_SCANS), perf_.last()); perf_.start(); Dealloc(); perf_.stop(); double dealloc_timing = perf_.last(); perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing); } }; REGISTER_LABELING(BKE);
822601605bdb5a8d05ff70ae5a040648ce29e04d.cu
#include <opencv2/cudafeatures2d.hpp> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "labeling_algorithms.h" #include "register.h" #define BLOCK_ROWS 16 #define BLOCK_COLS 16 using namespace cv; namespace { // // This is a block-based algorithm. // Blocks are 2x2 sized, with internal pixels named as: // +---+ // |a b| // |c d| // +---+ // // Neighbour blocks of block X are named as: // +-+-+-+ // |P|Q|R| // +-+-+-+ // |S|X| // +-+-+ // enum class Info : unsigned char { a = 0, b = 1, c = 2, d = 3, P = 4, Q = 5, R = 6, S = 7 }; // Only use it with unsigned numeric types template <typename T> __device__ __forceinline__ unsigned char HasBit(T bitmap, Info pos) { return (bitmap >> static_cast<unsigned char>(pos)) & 1; } template <typename T> __device__ __forceinline__ unsigned char HasBit(T bitmap, unsigned char pos) { return (bitmap >> pos) & 1; } // Only use it with unsigned numeric types __device__ __forceinline__ void SetBit(unsigned char &bitmap, Info pos) { bitmap |= (1 << static_cast<unsigned char>(pos)); } // Returns the root index of the UFTree __device__ unsigned Find(const int *s_buf, unsigned n) { while (s_buf[n] != n) { n = s_buf[n]; } return n; } // Merges the UFTrees of a and b, linking one root to the other __device__ void Union(int *s_buf, unsigned a, unsigned b) { bool done; do { a = Find(s_buf, a); b = Find(s_buf, b); if (a < b) { int old = atomicMin(s_buf + b, a); done = (old == b); b = old; } else if (b < a) { int old = atomicMin(s_buf + a, b); done = (old == a); a = old; } else { done = true; } } while (!done); } __global__ void InitLabeling(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels, unsigned char *last_pixel) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned img_index = row * img.step + col; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { unsigned P = 0; // Bitmask representing two kinds of information // Bits 0, 1, 2, 3 are set if pixel a, b, c, d are foreground, respectively // Bits 4, 5, 6, 7 are set if block P, Q, R, S need to be merged to X in Merge phase unsigned char info = 0; char buffer[4]; *(reinterpret_cast<int*>(buffer)) = 0; // Read pairs of consecutive values in memory at once if (col + 1 < img.cols) { // This does not depend on endianness *(reinterpret_cast<int16_t*>(buffer)) = *(reinterpret_cast<int16_t*>(img.data + img_index)); if (row + 1 < img.rows) { *(reinterpret_cast<int16_t*>(buffer + 2)) = *(reinterpret_cast<int16_t*>(img.data + img_index + img.step)); } } else { buffer[0] = img.data[img_index]; if (row + 1 < img.rows) { buffer[2] = img.data[img_index + img.step]; } } if (buffer[0]) { P |= 0x777; SetBit(info, Info::a); } if (buffer[1]) { P |= (0x777 << 1); SetBit(info, Info::b); } if (buffer[2]) { P |= (0x777 << 4); SetBit(info, Info::c); } if (buffer[3]) { SetBit(info, Info::d); } if (col == 0) { P &= 0xEEEE; } if (col + 1 >= img.cols) { P &= 0x3333; } else if (col + 2 >= img.cols) { P &= 0x7777; } if (row == 0) { P &= 0xFFF0; } if (row + 1 >= img.rows) { P &= 0x00FF; } else if (row + 2 >= img.rows) { P &= 0x0FFF; } // P is now ready to be used to find neighbour blocks // P value avoids range errors int father_offset = 0; // P square if (HasBit(P, 0) && img.data[img_index - img.step - 1]) { father_offset = -(2 * (labels.step / labels.elem_size) + 2); } // Q square if ((HasBit(P, 1) && img.data[img_index - img.step]) || (HasBit(P, 2) && img.data[img_index + 1 - 
img.step])) { if (!father_offset) { father_offset = -(2 * (labels.step / labels.elem_size)); } else { SetBit(info, Info::Q); } } // R square if (HasBit(P, 3) && img.data[img_index + 2 - img.step]) { if (!father_offset) { father_offset = -(2 * (labels.step / labels.elem_size) - 2); } else { SetBit(info, Info::R); } } // S square if ((HasBit(P, 4) && img.data[img_index - 1]) || (HasBit(P, 8) && img.data[img_index + img.step - 1])) { if (!father_offset) { father_offset = -2; } else { SetBit(info, Info::S); } } labels.data[labels_index] = labels_index + father_offset; if (col + 1 < labels.cols) { last_pixel = reinterpret_cast<unsigned char *>(labels.data + labels_index + 1); } else if (row + 1 < labels.rows) { last_pixel = reinterpret_cast<unsigned char *>(labels.data + labels_index + labels.step / labels.elem_size); } *last_pixel = info; } } __global__ void Merge(cuda::PtrStepSzi labels, unsigned char *last_pixel) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { if (col + 1 < labels.cols) { last_pixel = reinterpret_cast<unsigned char *>(labels.data + labels_index + 1); } else if (row + 1 < labels.rows) { last_pixel = reinterpret_cast<unsigned char *>(labels.data + labels_index + labels.step / labels.elem_size); } unsigned char info = *last_pixel; if (HasBit(info, Info::Q)) { Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size)); } if (HasBit(info, Info::R)) { Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size) + 2); } if (HasBit(info, Info::S)) { Union(labels.data, labels_index, labels_index - 2); } } } __global__ void Compression(cuda::PtrStepSzi labels) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { unsigned label = labels.data[labels_index]; if (label < labels_index) { labels[labels_index] = Find(labels.data, label); } } } __global__ void FinalLabeling(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels, unsigned char *last_pixel) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { int label; unsigned char info; unsigned long long buffer; if (col + 1 < labels.cols) { buffer = *reinterpret_cast<unsigned long long *>(labels.data + labels_index); label = (buffer & (0xFFFFFFFF)) + 1; info = (buffer >> 32) & 0xFFFFFFFF; } else { label = labels[labels_index] + 1; if (row + 1 < labels.rows) { info = labels[labels_index + labels.step / labels.elem_size]; } else { info = *last_pixel; } } if (col + 1 < labels.cols) { *reinterpret_cast<unsigned long long *>(labels.data + labels_index) = (static_cast<unsigned long long>(HasBit(info, Info::b) * label) << 32) | (HasBit(info, Info::a) * label); if (row + 1 < labels.rows) { *reinterpret_cast<unsigned long long *>(labels.data + labels_index + labels.step / labels.elem_size) = (static_cast<unsigned long long>(HasBit(info, Info::d) * label) << 32) | (HasBit(info, Info::c) * label); } } else { labels[labels_index] = HasBit(info, Info::a) * label; if (row + 1 < labels.rows) { labels[labels_index + (labels.step / labels.elem_size)] = 
HasBit(info, Info::c) * label; } } } } } class BKE : public GpuLabeling2D<Connectivity2D::CONN_8> { private: dim3 grid_size_; dim3 block_size_; unsigned char *last_pixel_; bool last_pixel_allocated_; public: BKE() {} void PerformLabeling() { d_img_labels_.create(d_img_.size(), CV_32SC1); last_pixel_allocated_ = false; if ((d_img_.rows == 1 || d_img_.cols == 1) && !((d_img_.rows + d_img_.cols) % 2)) { cudaMalloc(&last_pixel_, sizeof(unsigned char)); last_pixel_allocated_ = true; } else { last_pixel_ = d_img_labels_.data + ((d_img_labels_.rows - 2) * d_img_labels_.step) + (d_img_labels_.cols - 2) * d_img_labels_.elemSize(); } grid_size_ = dim3((((d_img_.cols + 1) / 2) + BLOCK_COLS - 1) / BLOCK_COLS, (((d_img_.rows + 1) / 2) + BLOCK_ROWS - 1) / BLOCK_ROWS, 1); block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1); InitLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_pixel_); //Mat1i init_blocks; //d_img_labels_.download(init_blocks); //cuda::GpuMat d_init_labels = d_img_labels_.clone(); //FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_init_labels); //Mat1i init_labels; //d_init_labels.download(init_labels); //d_init_labels.release(); Compression << <grid_size_, block_size_ >> > (d_img_labels_); //Mat1i compr_blocks; //d_img_labels_.download(compr_blocks); //cuda::GpuMat d_compr_labels = d_img_labels_.clone(); //FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_compr_labels); //Mat1i compr_labels; //d_compr_labels.download(compr_labels); //d_compr_labels.release(); //cuda::GpuMat d_expanded_connections; //d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1); //ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections); //Mat1b expanded_connections; //d_expanded_connections.download(expanded_connections); //d_expanded_connections.release(); Merge << <grid_size_, block_size_ >> > (d_img_labels_, last_pixel_); //Mat1i merge_blocks; //d_img_labels_.download(merge_blocks); //cuda::GpuMat d_merge_labels = d_img_labels_.clone(); //FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_merge_labels); //Mat1i merge_labels; //d_merge_labels.download(merge_labels); //d_merge_labels.release(); Compression << <grid_size_, block_size_ >> > (d_img_labels_); //Mat1i final_blocks; //d_img_labels_.download(final_blocks); FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_pixel_); //d_img_labels_.download(img_labels_); if (last_pixel_allocated_) { cudaFree(last_pixel_); } cudaDeviceSynchronize(); } private: void Alloc() { d_img_labels_.create(d_img_.size(), CV_32SC1); if ((d_img_.rows == 1 || d_img_.cols == 1) && !((d_img_.rows + d_img_.cols) % 2)) { cudaMalloc(&last_pixel_, sizeof(unsigned char)); last_pixel_allocated_ = true; } else { last_pixel_ = d_img_labels_.data + ((d_img_labels_.rows - 2) * d_img_labels_.step) + (d_img_labels_.cols - 2) * d_img_labels_.elemSize(); } } void Dealloc() { if (last_pixel_allocated_) { cudaFree(last_pixel_); } } double MemoryTransferHostToDevice() { perf_.start(); d_img_.upload(img_); perf_.stop(); return perf_.last(); } void MemoryTransferDeviceToHost() { d_img_labels_.download(img_labels_); } void AllScans() { grid_size_ = dim3((((d_img_.cols + 1) / 2) + BLOCK_COLS - 1) / BLOCK_COLS, (((d_img_.rows + 1) / 2) + BLOCK_ROWS - 1) / BLOCK_ROWS, 1); block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1); InitLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_pixel_); Compression << <grid_size_, block_size_ >> > (d_img_labels_); //cuda::GpuMat 
d_expanded_connections; //d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1); //ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections); //Mat1b expanded_connections; //d_expanded_connections.download(expanded_connections); //d_expanded_connections.release(); //Mat1i init_labels; //d_block_labels_.download(init_labels); Merge << <grid_size_, block_size_ >> > (d_img_labels_, last_pixel_); //Mat1i block_info_final; //d_img_labels_.download(block_info_final); Compression << <grid_size_, block_size_ >> > (d_img_labels_); FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_pixel_); cudaDeviceSynchronize(); } public: void PerformLabelingWithSteps() { perf_.start(); Alloc(); perf_.stop(); double alloc_timing = perf_.last(); perf_.start(); AllScans(); perf_.stop(); perf_.store(Step(StepType::ALL_SCANS), perf_.last()); perf_.start(); Dealloc(); perf_.stop(); double dealloc_timing = perf_.last(); perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing); } }; REGISTER_LABELING(BKE);
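The Find/Union pair above is the core of the merge phase; as a quick reference, the same link-the-smaller-root rule can be reproduced on the host with a plain parent array (single-threaded, so the atomicMin retry loop is unnecessary). This is an illustrative sketch, not part of the original file:

#include <cassert>
#include <vector>

// Host-side analogue of the device Find/Union above: a label is an index into
// a flat parent array, and a root is an index that points to itself.
static unsigned find_root(const std::vector<int> &p, unsigned n) {
    while (p[n] != static_cast<int>(n)) n = p[n];
    return n;
}

static void merge(std::vector<int> &p, unsigned a, unsigned b) {
    a = find_root(p, a);
    b = find_root(p, b);
    if (a < b) p[b] = a;        // link the larger root under the smaller one,
    else if (b < a) p[a] = b;   // mirroring the atomicMin-based device code
}

int main() {
    std::vector<int> parent = {0, 1, 2, 3, 4};  // five singleton trees
    merge(parent, 3, 1);                        // {1, 3} now share root 1
    merge(parent, 4, 3);                        // {1, 3, 4} now share root 1
    assert(find_root(parent, 4) == 1);
    return 0;
}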
3d50978dd5a44f98481a45684fca8c1bafb64260.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "checkerboard.cuh"

__global__ void CheckerboardKernel(float* output, const int width, const int height)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    const int j = blockDim.y * blockIdx.y + threadIdx.y;

    const int center_x = width / 2;
    const int center_y = height / 2;

    if (i >= width || j >= height) {
        return;
    }

    float dist = (i - center_x)*(i - center_x) + (j - center_y)*(j - center_y);
    dist = sqrtf(dist);
    float dist_smaller = dist - floorf(dist);
    float dist_larger = 1.0f - dist;
    float result = dist_smaller < dist_larger ? dist_smaller : dist_larger;
    result = 1.0f - (result * 4.0f);
    //float result = (i + j % 2 == 0) ? -0.25f : 1.0f;

    output[(j * width) + i] = result;
}

void CheckerboardLauncher(float *output, const int width, const int height)
{
#ifdef CUDA_KERNEL_TIMING
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start);
#endif // CUDA_KERNEL_TIMING

    dim3 block(32, 32, 1);
    // Round up so the grid covers widths/heights that are not multiples of 32.
    dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y, 1);
    // Launch configuration is grid first, then block.
    hipLaunchKernelGGL((CheckerboardKernel), dim3(grid), dim3(block), 0, 0, output, width, height);

    // Confirm launch is good
    cudaAssert(hipGetLastError());

    // Synchronize device to complete kernel
    cudaAssert(hipDeviceSynchronize());

#ifdef CUDA_KERNEL_TIMING
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    float elapsed = 0.0f;
    hipEventElapsedTime(&elapsed, start, stop);
    printf("Checkerboard Kernel execution time in ms: %f\n", elapsed);
#endif // CUDA_KERNEL_TIMING
}
3d50978dd5a44f98481a45684fca8c1bafb64260.cu
#include "checkerboard.cuh" __global__ void CheckerboardKernel(float* output, const int width, const int height) { const int i = blockDim.x * blockIdx.x + threadIdx.x; const int j = blockDim.y * blockIdx.y + threadIdx.y; const int center_x = width / 2; const int center_y = height / 2; if (i >= width || j >= height) { return; } float dist = (i - center_x)*(i - center_x) + (j - center_y)*(j - center_y); dist = sqrtf(dist); float dist_smaller = dist - floorf(dist); float dist_larger = 1.0f - dist; float result = dist_smaller < dist_larger ? dist_smaller : dist_larger; result = 1.0f - (result * 4.0f); //float result = (i + j % 2 == 0) ? -0.25f : 1.0f; output[(j * width) + i] = result; } void CheckerboardLauncher(float *output, const int width, const int height) { #ifdef CUDA_KERNEL_TIMING cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); #endif // CUDA_KERNEL_TIMING dim3 block(32, 32, 1); dim3 grid(width / block.x, height / block.y, 1); CheckerboardKernel<<<block, grid>>>(output, width, height); // Confirm launch is good cudaAssert(cudaGetLastError()); // Synchronize device to complete kernel cudaAssert(cudaDeviceSynchronize()); #ifdef CUDA_KERNEL_TIMING cudaEventRecord(stop); cudaEventSynchronize(stop); float elapsed = 0.0f; cudaEventElapsedTime(&elapsed, start, stop); printf("Checkerboard Kernel execution time in ms: %f\n", elapsed); #endif // CUDA_KERNEL_TIMING }
bbe3d9508faedfa091209a79408b1c7b9ffc2bf1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
 *
 * cuFindTriangles.cu -- The kernel that calculates the Number of triangles into
 *                       a graph given the CSR format of its Adjacency Matrix
 *
 * Michail Iason Pavlidis <[email protected]>
 * John Flionis <[email protected]>
 *
 ******************************************************************************/

#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "readCSV.h"
#include "cuFindTriangles.h"

__global__
/* Kernel function that zeros the number of triangles variable */
void cuZeroVariable(int* nT) {

    (*nT) = 0;
}

__global__
/* Kernel function that finds the number of triangles formed in the graph */
void cuFindTriangles(csrFormat A, int N, int* nT) {

    // Each thread processes a different row
    int index = threadIdx.x + blockIdx.x*blockDim.x;
    int stride = blockDim.x * gridDim.x;

    // Iterate over rows
    for (int row = index; row < N; row += stride) {

        // Iterate over columns
        for (int j = A.csrRowPtr[row]; j < A.csrRowPtr[row+1]; j++) {

            int col = A.csrColInd[j];   // [row, col] = position of 1 horizontally

            if ( col>row ) {
                // OPTIMIZATION: Due to symmetry, nT of the upper half array is
                // equal to half the nT, thus additions are cut down to half !
                int beginPtr_csr_row = A.csrRowPtr[row];
                int beginPtr_csc_col = A.csrRowPtr[col];

                // Multiplication of A[:,col] * A[row,:]
                for (int k = beginPtr_csc_col; k < A.csrRowPtr[col+1]; k++) {

                    int csc_row = A.csrColInd[k];   // [csr_row, k] = position of 1 vertically

                    for (int l = beginPtr_csr_row; l < A.csrRowPtr[row+1]; l++) {

                        int csr_col = A.csrColInd[l];

                        if ( csc_row == csr_col )
                            atomicAdd( nT, 1 );
                        else if ( csr_col > csc_row ) {
                            // OPTIMIZATION: when col>row no need to go further,
                            // continue to the next col, plus for further optimization
                            // keep track of the beginPtr_csr_row where the previous
                            // iteration stopped, so that no time is wasted in rechecking
                            beginPtr_csr_row = l;
                            break;
                        }
                    }
                }
            }
        }
    }
}
bbe3d9508faedfa091209a79408b1c7b9ffc2bf1.cu
/******************************************************************************
 *
 * cuFindTriangles.cu -- The kernel that calculates the Number of triangles into
 *                       a graph given the CSR format of its Adjacency Matrix
 *
 * Michail Iason Pavlidis <[email protected]>
 * John Flionis <[email protected]>
 *
 ******************************************************************************/

#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "readCSV.h"
#include "cuFindTriangles.h"

__global__
/* Kernel function that zeros the number of triangles variable */
void cuZeroVariable(int* nT) {

    (*nT) = 0;
}

__global__
/* Kernel function that finds the number of triangles formed in the graph */
void cuFindTriangles(csrFormat A, int N, int* nT) {

    // Each thread processes a different row
    int index = threadIdx.x + blockIdx.x*blockDim.x;
    int stride = blockDim.x * gridDim.x;

    // Iterate over rows
    for (int row = index; row < N; row += stride) {

        // Iterate over columns
        for (int j = A.csrRowPtr[row]; j < A.csrRowPtr[row+1]; j++) {

            int col = A.csrColInd[j];   // [row, col] = position of 1 horizontally

            if ( col>row ) {
                // OPTIMIZATION: Due to symmetry, nT of the upper half array is
                // equal to half the nT, thus additions are cut down to half !
                int beginPtr_csr_row = A.csrRowPtr[row];
                int beginPtr_csc_col = A.csrRowPtr[col];

                // Multiplication of A[:,col] * A[row,:]
                for (int k = beginPtr_csc_col; k < A.csrRowPtr[col+1]; k++) {

                    int csc_row = A.csrColInd[k];   // [csr_row, k] = position of 1 vertically

                    for (int l = beginPtr_csr_row; l < A.csrRowPtr[row+1]; l++) {

                        int csr_col = A.csrColInd[l];

                        if ( csc_row == csr_col )
                            atomicAdd( nT, 1 );
                        else if ( csr_col > csc_row ) {
                            // OPTIMIZATION: when col>row no need to go further,
                            // continue to the next col, plus for further optimization
                            // keep track of the beginPtr_csr_row where the previous
                            // iteration stopped, so that no time is wasted in rechecking
                            beginPtr_csr_row = l;
                            break;
                        }
                    }
                }
            }
        }
    }
}
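As a sanity check for the kernel above, the same upper-triangular common-neighbour count can be computed on the host directly from the CSR arrays. Only csrRowPtr and csrColInd are used (the rest of csrFormat is not needed), and the result should match *nT as long as column indices are sorted within each row, which the kernel's early-exit optimization also assumes:

#include <vector>

// Host reference: for every edge (row, col) with col > row, count common
// neighbours by intersecting the two sorted adjacency lists, as the kernel does.
int countTrianglesCSR(const std::vector<int> &rowPtr,
                      const std::vector<int> &colInd, int N) {
    int nT = 0;
    for (int row = 0; row < N; ++row) {
        for (int j = rowPtr[row]; j < rowPtr[row + 1]; ++j) {
            int col = colInd[j];
            if (col <= row) continue;                    // upper triangle only
            int a = rowPtr[row], b = rowPtr[col];
            while (a < rowPtr[row + 1] && b < rowPtr[col + 1]) {
                if (colInd[a] == colInd[b])     { ++nT; ++a; ++b; }
                else if (colInd[a] < colInd[b]) { ++a; }
                else                            { ++b; }
            }
        }
    }
    return nT;
}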
4d0405eb7b5af54407a73ccc7c803584bf683303.hip
// !!! This is a file automatically generated by hipify!!! /* * purpose: just a simple check whether a matrix, A, is composed * of eigenvectors only, in which case A^t x A = E * hence the inverse, A^-1, is simply the transpose, A^t, * resulting in the unit matrix, E, by the above matrix * matrix multiplication; * n.b. here we want to make use of CUBLAS but check out * the feasibility of CUDA-managed unified memory * rather than the forth-and-back-copied variant * using hipMalloc() * compile: nvcc chck_ev_v3.cu -lcublas * result: unfortunately, this doesn't seem to work in a * straightforward way */ #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include "rocblas.h" int main(int argc, char **argv) { int N, i, j, Adim, Bdim, Cdim; double alpha, beta, *A, **A2D, *B, **B2D, *C, **C2D; hipblasStatus_t stat; hipblasHandle_t handle; hipblasOperation_t Atype, Btype; // memory allocation and parameter set up N = 5; Adim = N; Bdim = N; Cdim = N; alpha = (double) 1; beta = (double) 0; hipMallocManaged(&A, N * N * sizeof(double)); hipMallocManaged(&B, N * N * sizeof(double)); hipMallocManaged(&C, N * N * sizeof(double)); A2D = (double **) malloc(N * sizeof(double *)); B2D = (double **) malloc(N * sizeof(double *)); C2D = (double **) malloc(N * sizeof(double *)); for (i = 0; i < N; i++) { A2D[i] = (double *) malloc(N * sizeof(double)); B2D[i] = (double *) malloc(N * sizeof(double)); C2D[i] = (double *) malloc(N * sizeof(double)); } // set up matrix A2D[][] supposedly consisting of just eigenvectors A2D[0][0] = 0.30; A2D[0][1] = -0.61; A2D[0][2] = 0.40; A2D[0][3] = 0.37; A2D[0][4] = -0.49; A2D[1][0] = 0.51; A2D[1][1] = -0.29; A2D[1][2] = -0.41; A2D[1][3] = 0.36; A2D[1][4] = 0.61; A2D[2][0] = 0.08; A2D[2][1] = -0.38; A2D[2][2] = -0.66; A2D[2][3] = -0.50; A2D[2][4] = -0.40; A2D[3][0] = 0.00; A2D[3][1] = -0.45; A2D[3][2] = 0.46; A2D[3][3] = -0.62; A2D[3][4] = 0.46; A2D[4][0] = 0.80; A2D[4][1] = 0.45; A2D[4][2] = 0.17; A2D[4][3] = -0.31; A2D[4][4] = -0.16; // get the inverse of A2D[][] from simply the transpose (if really just eigenvectors) for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { B2D[i][j] = A2D[j][i]; } } // print out initial matrix content printf(" Matrix to be sent into DGEMM\n"); for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { printf("%10.2lf", A2D[i][j]); } printf("\n"); } // copy content of A2D[][] and B2D[][] into their linear versions A[] and B[] --- column wise ! for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { A[(i*N)+j] = A2D[j][i]; B[(i*N)+j] = B2D[j][i]; } } // cublas: initiate the CUBLAS context stat = hipblasCreate(&handle); // cublas: set a couple of other CUBLAS parameters Atype = HIPBLAS_OP_N; Btype = HIPBLAS_OP_N; // call BLAS routine DGEMM --- only pointers as arguments ! stat = hipblasDgemm(handle, Atype, Btype, Adim, Bdim, Cdim, &alpha, &B[0], Bdim, &A[0], Adim, &beta, &C[0], Cdim); if ( stat != HIPBLAS_STATUS_SUCCESS ) { printf("CUBLAS error \n"); exit(99); } // print out results, hence the unit matrix if the assumption above was correct printf(" Matrix matrix product\n"); for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { C2D[i][j] = C[(j*N)+i]; printf("%10.2lf", C2D[i][j]); } printf("\n"); } // and free up allocated memory for (i = N-1; i >= 0; i--) { free(C2D[i]); free(B2D[i]); free(A2D[i]); } free(C2D); free(B2D); free(A2D); return(0); }
4d0405eb7b5af54407a73ccc7c803584bf683303.cu
/* * purpose: just a simple check whether a matrix, A, is composed * of eigenvectors only, in which case A^t x A = E * hence the inverse, A^-1, is simply the transpose, A^t, * resulting in the unit matrix, E, by the above matrix * matrix multiplication; * n.b. here we want to make use of CUBLAS but check out * the feasibility of CUDA-managed unified memory * rather than the forth-and-back-copied variant * using cudaMalloc() * compile: nvcc chck_ev_v3.cu -lcublas * result: unfortunately, this doesn't seem to work in a * straightforward way */ #include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> #include "cublas_v2.h" int main(int argc, char **argv) { int N, i, j, Adim, Bdim, Cdim; double alpha, beta, *A, **A2D, *B, **B2D, *C, **C2D; cublasStatus_t stat; cublasHandle_t handle; cublasOperation_t Atype, Btype; // memory allocation and parameter set up N = 5; Adim = N; Bdim = N; Cdim = N; alpha = (double) 1; beta = (double) 0; cudaMallocManaged(&A, N * N * sizeof(double)); cudaMallocManaged(&B, N * N * sizeof(double)); cudaMallocManaged(&C, N * N * sizeof(double)); A2D = (double **) malloc(N * sizeof(double *)); B2D = (double **) malloc(N * sizeof(double *)); C2D = (double **) malloc(N * sizeof(double *)); for (i = 0; i < N; i++) { A2D[i] = (double *) malloc(N * sizeof(double)); B2D[i] = (double *) malloc(N * sizeof(double)); C2D[i] = (double *) malloc(N * sizeof(double)); } // set up matrix A2D[][] supposedly consisting of just eigenvectors A2D[0][0] = 0.30; A2D[0][1] = -0.61; A2D[0][2] = 0.40; A2D[0][3] = 0.37; A2D[0][4] = -0.49; A2D[1][0] = 0.51; A2D[1][1] = -0.29; A2D[1][2] = -0.41; A2D[1][3] = 0.36; A2D[1][4] = 0.61; A2D[2][0] = 0.08; A2D[2][1] = -0.38; A2D[2][2] = -0.66; A2D[2][3] = -0.50; A2D[2][4] = -0.40; A2D[3][0] = 0.00; A2D[3][1] = -0.45; A2D[3][2] = 0.46; A2D[3][3] = -0.62; A2D[3][4] = 0.46; A2D[4][0] = 0.80; A2D[4][1] = 0.45; A2D[4][2] = 0.17; A2D[4][3] = -0.31; A2D[4][4] = -0.16; // get the inverse of A2D[][] from simply the transpose (if really just eigenvectors) for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { B2D[i][j] = A2D[j][i]; } } // print out initial matrix content printf(" Matrix to be sent into DGEMM\n"); for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { printf("%10.2lf", A2D[i][j]); } printf("\n"); } // copy content of A2D[][] and B2D[][] into their linear versions A[] and B[] --- column wise ! for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { A[(i*N)+j] = A2D[j][i]; B[(i*N)+j] = B2D[j][i]; } } // cublas: initiate the CUBLAS context stat = cublasCreate(&handle); // cublas: set a couple of other CUBLAS parameters Atype = CUBLAS_OP_N; Btype = CUBLAS_OP_N; // call BLAS routine DGEMM --- only pointers as arguments ! stat = cublasDgemm(handle, Atype, Btype, Adim, Bdim, Cdim, &alpha, &B[0], Bdim, &A[0], Adim, &beta, &C[0], Cdim); if ( stat != CUBLAS_STATUS_SUCCESS ) { printf("CUBLAS error \n"); exit(99); } // print out results, hence the unit matrix if the assumption above was correct printf(" Matrix matrix product\n"); for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { C2D[i][j] = C[(j*N)+i]; printf("%10.2lf", C2D[i][j]); } printf("\n"); } // and free up allocated memory for (i = N-1; i >= 0; i--) { free(C2D[i]); free(B2D[i]); free(A2D[i]); } free(C2D); free(B2D); free(A2D); return(0); }
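One plausible reason the managed-memory variant above "doesn't seem to work in a straightforward way" is that cublasDgemm is asynchronous, so the host print loop may read C before the GPU has written it. The sketch below, written with the CUDA names used in the .cu version (the .hip version would use the hip equivalents), shows what the tail of main() could do: synchronize before touching C, then release the handle and the managed buffers. This is a suggestion, not a confirmed diagnosis:

#include <cuda_runtime.h>
#include "cublas_v2.h"

/* Sketch: what the end of main() above could do once the GEMM has been
 * queued; handle, A, B and C are the variables from that file. */
static void finish_and_cleanup(cublasHandle_t handle, double *A, double *B, double *C)
{
    /* cublasDgemm is asynchronous: wait before the host reads the managed C[] */
    cudaDeviceSynchronize();

    /* ... the print loop over C goes here ... */

    cublasDestroy(handle);
    cudaFree(A);
    cudaFree(B);
    cudaFree(C);
}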
d83f5d0e1815a100b16bd03f5306d31951a0dd0c.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2018 Stanford * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" Tensor FFModel::flat(std::string name, Tensor input) { assert(input.numDim == 4); //assert(strategies.find(name) != strategies.end()); //ParallelConfig pc = strategies[name]; Flat *flat = new Flat(*this, name, input); layers.push_back(flat); return flat->output; } Flat::Flat(FFModel& model, const std::string& pcname, const Tensor& _input) : Op(pcname, _input) { task_is = IndexSpaceT<2>(model.get_or_create_task_is(2, pcname)); Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; Rect<2> part_rect = runtime->get_index_space_domain(ctx, task_is); int num_par_c = part_rect.hi[0] - part_rect.lo[0] + 1; int num_par_n = part_rect.hi[1] - part_rect.lo[1] + 1; // Assert data parallelism for operators with dim changes assert(num_par_c == 1); int out_dim = _input.adim[0] * _input.adim[1] * _input.adim[2]; int batch_size = _input.adim[3]; // Create output tensor { const int dims[2] = {batch_size, out_dim}; output = model.create_tensor<2>(dims, task_is, DT_FLOAT); } model.create_data_parallel_partition_with_diff_dims<4, 2>( _input, task_is, input_lps[0], input_grad_lps[0]); #ifdef DEADCODE Rect<2, coord_t> output_rect(Point<2>(0, 0), Point<2>(output_c-1, output_n-1)); IndexSpaceT<2> output_is = runtime->create_index_space(ctx, output_rect); LogicalRegion output_lr = runtime->create_logical_region(ctx, output_is, fs); LogicalRegion output_grad_lr = runtime->create_logical_region(ctx, output_is, fs); Transform<2, 2, coord_t> transform; //int extent_c = input.pdim[0] * input.pdim[1] * input.pdim[2]; //int extent_n = input.pdim[3]; // We assume equal partition for load balancing assert(output_c % fc_num_par_c == 0); assert(output_n % fc_num_par_n == 0); int extent_c = output_c / fc_num_par_c; int extent_n = output_n / fc_num_par_n; Rect<2, coord_t> extent(Point<2>(0, 0), Point<2>(extent_c-1,extent_n-1)); transform[0][0] = extent_c; transform[0][1] = 0; transform[1][0] = 0; transform[1][1] = extent_n; IndexPartition output_ip = runtime->create_partition_by_restriction(ctx, output_is, task_is_2d, transform, extent); assert(runtime->is_index_partition_disjoint(ctx, output_ip)); assert(runtime->is_index_partition_complete(ctx, output_ip)); LogicalPartition output_lp = runtime->get_logical_partition(ctx, output_lr, output_ip); LogicalPartition output_grad_lp = runtime->get_logical_partition(ctx, output_grad_lr, output_ip); output.numDim = 2; output.adim[0] = output_c; output.adim[1] = output_n; output.pdim[0] = extent_c; output.pdim[1] = extent_n; output.region = output_lr; output.region_grad = output_grad_lr; output.part = output_lp; output.part_grad = output_grad_lp; printf("Create flat layer: input(N=%d C=%d H=%d W=%d) -> output(N=%d C=%d)\n", _input.adim[3], _input.adim[2], _input.adim[1], _input.adim[0], output.adim[1], output.adim[0]); FieldSpace proj_fs = runtime->create_field_space(ctx); { FieldAllocator allocator = 
runtime->create_field_allocator(ctx, proj_fs); allocator.allocate_field(sizeof(Rect<2>), FID_DATA); } LogicalRegion proj_lr = runtime->create_logical_region(ctx, task_is_3d, proj_fs); InlineLauncher launcher( RegionRequirement(proj_lr, WRITE_DISCARD, EXCLUSIVE, proj_lr) .add_field(FID_DATA)); PhysicalRegion proj_pr = runtime->map_region(ctx, launcher); proj_pr.wait_until_valid(); coord_t subtotal = 0; { const FieldAccessor<WRITE_DISCARD, Rect<2>, 3, coord_t, Realm::AffineAccessor<Rect<2>, 3, coord_t> > ra(proj_pr, FID_DATA); Rect<3> rect = runtime->get_index_space_domain(ctx, task_is_3d); for(PointInRectIterator<3> pir(rect); pir(); ++pir) { IndexSpace subspace = runtime->get_index_subspace(_input.part.get_index_partition(), *pir); Rect<3> subrect = runtime->get_index_space_domain(ctx, subspace); // Currently we assume the size of each subregion is divisible by output_n (i.e., batch size) assert(subrect.volume() % output_n == 0); coord_t subsize = subrect.volume() / output_n; ra[*pir] = Rect<2>(Point<2>(subtotal, 0), Point<2>(subtotal + subsize - 1, output_n - 1)); subtotal += subsize; } } runtime->unmap_region(ctx, proj_pr); Transform<3, 3, coord_t> proj_trans; proj_trans[0][0] = 1; proj_trans[0][1] = 0; proj_trans[0][2] = 0; proj_trans[1][0] = 0; proj_trans[1][1] = 1; proj_trans[1][2] = 0; proj_trans[2][0] = 0; proj_trans[2][1] = 0; proj_trans[2][2] = 1; Rect<3, coord_t> proj_extent(Point<3>(0, 0, 0), Point<3>(0, 0, 0)); IndexPartition proj_ip = runtime->create_partition_by_restriction(ctx, task_is_3d, task_is_3d, proj_trans, proj_extent); LogicalPartition proj_lp = runtime->get_logical_partition(ctx, proj_lr, proj_ip); IndexPartition flat_ip = runtime->create_partition_by_image_range(ctx, output_is, proj_lp, proj_lr, FID_DATA, task_is_3d); assert(runtime->is_index_partition_disjoint(ctx, flat_ip)); assert(runtime->is_index_partition_complete(ctx, flat_ip)); flat_lp = runtime->get_logical_partition(ctx, output_lr, flat_ip); flat_grad_lp = runtime->get_logical_partition(ctx, output_grad_lr, flat_ip); return; /* Transform<2, 3, coord_t> flat_trans; flat_trans[0][0] = input.pdim[0] * input.pdim[1] * input.adim[2]; flat_trans[0][1] = input.adim[0] * input.pdim[1] * input.adim[2]; flat_trans[0][2] = 0; flat_trans[1][0] = 0; flat_trans[1][1] = 0; flat_trans[1][2] = input.pdim[3]; IndexPartition flat_ip = runtime->create_partition_by_restriction(ctx, output_is, part_is_3d, flat_trans, extent); flat_lp = runtime->get_logical_partition(ctx, output_lr, flat_ip); */ #endif } OpMeta* Flat::init_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { FFHandler handler = *((const FFHandler*) task->local_args); FlatMeta* m = new FlatMeta(handler); return m; } void Flat::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<2> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<2> it(rect); it(); it++) { FFHandler handle = ff.handlers[idx++]; argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); } IndexLauncher launcher(FLAT_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(Flat)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); FutureMap fm = runtime->execute_index_space(ctx, launcher); fm.wait_all_results(); idx = 0; for (PointInRectIterator<2> it(rect); it(); it++) { meta[idx++] = fm.get_result<OpMeta*>(*it); } } /* regions[0](I): input regions[1](O): output */ void 
Flat::forward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 2); assert(task->regions.size() == 2); TensorAccessorR<float, 4> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, 2> acc_output( regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/); assert(acc_input.rect.volume() == acc_output.rect.volume()); checkCUDA(hipMemcpyAsync(acc_output.ptr, acc_input.ptr, acc_input.rect.volume() * sizeof(float), hipMemcpyDeviceToDevice)); } void Flat::forward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<2> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<2> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(FLAT_FWD_TASK_ID, task_is, TaskArgument(NULL, 0), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(output.part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, output.region)); launcher.add_field(1, FID_DATA); runtime->execute_index_space(ctx, launcher); } /* regions[0](O) : input_grad regions[1](I) : output_grad */ void Flat::backward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 2); assert(task->regions.size() == 2); TensorAccessorW<float, 4> acc_input_grad( regions[0], task->regions[0], FID_DATA, ctx, runtime, true/*readOutput*/); TensorAccessorR<float, 2> acc_output_grad( regions[1], task->regions[1], FID_DATA, ctx, runtime); assert(acc_input_grad.rect.volume() == acc_output_grad.rect.volume()); checkCUDA(hipMemcpyAsync(acc_input_grad.ptr, acc_output_grad.ptr, acc_input_grad.rect.volume() * sizeof(float), hipMemcpyDeviceToDevice)); } void Flat::backward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<2> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<2> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(FLAT_BWD_TASK_ID, task_is, TaskArgument(NULL, 0), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_grad_lps[0], 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(output.part_grad, 0/*projection id*/, READ_ONLY, EXCLUSIVE, output.region_grad)); launcher.add_field(1, FID_DATA); runtime->execute_index_space(ctx, launcher); }
d83f5d0e1815a100b16bd03f5306d31951a0dd0c.cu
/* Copyright 2018 Stanford * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" Tensor FFModel::flat(std::string name, Tensor input) { assert(input.numDim == 4); //assert(strategies.find(name) != strategies.end()); //ParallelConfig pc = strategies[name]; Flat *flat = new Flat(*this, name, input); layers.push_back(flat); return flat->output; } Flat::Flat(FFModel& model, const std::string& pcname, const Tensor& _input) : Op(pcname, _input) { task_is = IndexSpaceT<2>(model.get_or_create_task_is(2, pcname)); Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; Rect<2> part_rect = runtime->get_index_space_domain(ctx, task_is); int num_par_c = part_rect.hi[0] - part_rect.lo[0] + 1; int num_par_n = part_rect.hi[1] - part_rect.lo[1] + 1; // Assert data parallelism for operators with dim changes assert(num_par_c == 1); int out_dim = _input.adim[0] * _input.adim[1] * _input.adim[2]; int batch_size = _input.adim[3]; // Create output tensor { const int dims[2] = {batch_size, out_dim}; output = model.create_tensor<2>(dims, task_is, DT_FLOAT); } model.create_data_parallel_partition_with_diff_dims<4, 2>( _input, task_is, input_lps[0], input_grad_lps[0]); #ifdef DEADCODE Rect<2, coord_t> output_rect(Point<2>(0, 0), Point<2>(output_c-1, output_n-1)); IndexSpaceT<2> output_is = runtime->create_index_space(ctx, output_rect); LogicalRegion output_lr = runtime->create_logical_region(ctx, output_is, fs); LogicalRegion output_grad_lr = runtime->create_logical_region(ctx, output_is, fs); Transform<2, 2, coord_t> transform; //int extent_c = input.pdim[0] * input.pdim[1] * input.pdim[2]; //int extent_n = input.pdim[3]; // We assume equal partition for load balancing assert(output_c % fc_num_par_c == 0); assert(output_n % fc_num_par_n == 0); int extent_c = output_c / fc_num_par_c; int extent_n = output_n / fc_num_par_n; Rect<2, coord_t> extent(Point<2>(0, 0), Point<2>(extent_c-1,extent_n-1)); transform[0][0] = extent_c; transform[0][1] = 0; transform[1][0] = 0; transform[1][1] = extent_n; IndexPartition output_ip = runtime->create_partition_by_restriction(ctx, output_is, task_is_2d, transform, extent); assert(runtime->is_index_partition_disjoint(ctx, output_ip)); assert(runtime->is_index_partition_complete(ctx, output_ip)); LogicalPartition output_lp = runtime->get_logical_partition(ctx, output_lr, output_ip); LogicalPartition output_grad_lp = runtime->get_logical_partition(ctx, output_grad_lr, output_ip); output.numDim = 2; output.adim[0] = output_c; output.adim[1] = output_n; output.pdim[0] = extent_c; output.pdim[1] = extent_n; output.region = output_lr; output.region_grad = output_grad_lr; output.part = output_lp; output.part_grad = output_grad_lp; printf("Create flat layer: input(N=%d C=%d H=%d W=%d) -> output(N=%d C=%d)\n", _input.adim[3], _input.adim[2], _input.adim[1], _input.adim[0], output.adim[1], output.adim[0]); FieldSpace proj_fs = runtime->create_field_space(ctx); { FieldAllocator allocator = runtime->create_field_allocator(ctx, proj_fs); 
allocator.allocate_field(sizeof(Rect<2>), FID_DATA); } LogicalRegion proj_lr = runtime->create_logical_region(ctx, task_is_3d, proj_fs); InlineLauncher launcher( RegionRequirement(proj_lr, WRITE_DISCARD, EXCLUSIVE, proj_lr) .add_field(FID_DATA)); PhysicalRegion proj_pr = runtime->map_region(ctx, launcher); proj_pr.wait_until_valid(); coord_t subtotal = 0; { const FieldAccessor<WRITE_DISCARD, Rect<2>, 3, coord_t, Realm::AffineAccessor<Rect<2>, 3, coord_t> > ra(proj_pr, FID_DATA); Rect<3> rect = runtime->get_index_space_domain(ctx, task_is_3d); for(PointInRectIterator<3> pir(rect); pir(); ++pir) { IndexSpace subspace = runtime->get_index_subspace(_input.part.get_index_partition(), *pir); Rect<3> subrect = runtime->get_index_space_domain(ctx, subspace); // Currently we assume the size of each subregion is divisible by output_n (i.e., batch size) assert(subrect.volume() % output_n == 0); coord_t subsize = subrect.volume() / output_n; ra[*pir] = Rect<2>(Point<2>(subtotal, 0), Point<2>(subtotal + subsize - 1, output_n - 1)); subtotal += subsize; } } runtime->unmap_region(ctx, proj_pr); Transform<3, 3, coord_t> proj_trans; proj_trans[0][0] = 1; proj_trans[0][1] = 0; proj_trans[0][2] = 0; proj_trans[1][0] = 0; proj_trans[1][1] = 1; proj_trans[1][2] = 0; proj_trans[2][0] = 0; proj_trans[2][1] = 0; proj_trans[2][2] = 1; Rect<3, coord_t> proj_extent(Point<3>(0, 0, 0), Point<3>(0, 0, 0)); IndexPartition proj_ip = runtime->create_partition_by_restriction(ctx, task_is_3d, task_is_3d, proj_trans, proj_extent); LogicalPartition proj_lp = runtime->get_logical_partition(ctx, proj_lr, proj_ip); IndexPartition flat_ip = runtime->create_partition_by_image_range(ctx, output_is, proj_lp, proj_lr, FID_DATA, task_is_3d); assert(runtime->is_index_partition_disjoint(ctx, flat_ip)); assert(runtime->is_index_partition_complete(ctx, flat_ip)); flat_lp = runtime->get_logical_partition(ctx, output_lr, flat_ip); flat_grad_lp = runtime->get_logical_partition(ctx, output_grad_lr, flat_ip); return; /* Transform<2, 3, coord_t> flat_trans; flat_trans[0][0] = input.pdim[0] * input.pdim[1] * input.adim[2]; flat_trans[0][1] = input.adim[0] * input.pdim[1] * input.adim[2]; flat_trans[0][2] = 0; flat_trans[1][0] = 0; flat_trans[1][1] = 0; flat_trans[1][2] = input.pdim[3]; IndexPartition flat_ip = runtime->create_partition_by_restriction(ctx, output_is, part_is_3d, flat_trans, extent); flat_lp = runtime->get_logical_partition(ctx, output_lr, flat_ip); */ #endif } OpMeta* Flat::init_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { FFHandler handler = *((const FFHandler*) task->local_args); FlatMeta* m = new FlatMeta(handler); return m; } void Flat::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<2> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<2> it(rect); it(); it++) { FFHandler handle = ff.handlers[idx++]; argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); } IndexLauncher launcher(FLAT_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(Flat)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); FutureMap fm = runtime->execute_index_space(ctx, launcher); fm.wait_all_results(); idx = 0; for (PointInRectIterator<2> it(rect); it(); it++) { meta[idx++] = fm.get_result<OpMeta*>(*it); } } /* regions[0](I): input regions[1](O): output */ void Flat::forward_task(const Task *task, const 
std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 2); assert(task->regions.size() == 2); TensorAccessorR<float, 4> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, 2> acc_output( regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/); assert(acc_input.rect.volume() == acc_output.rect.volume()); checkCUDA(cudaMemcpyAsync(acc_output.ptr, acc_input.ptr, acc_input.rect.volume() * sizeof(float), cudaMemcpyDeviceToDevice)); } void Flat::forward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<2> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<2> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(FLAT_FWD_TASK_ID, task_is, TaskArgument(NULL, 0), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(output.part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, output.region)); launcher.add_field(1, FID_DATA); runtime->execute_index_space(ctx, launcher); } /* regions[0](O) : input_grad regions[1](I) : output_grad */ void Flat::backward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 2); assert(task->regions.size() == 2); TensorAccessorW<float, 4> acc_input_grad( regions[0], task->regions[0], FID_DATA, ctx, runtime, true/*readOutput*/); TensorAccessorR<float, 2> acc_output_grad( regions[1], task->regions[1], FID_DATA, ctx, runtime); assert(acc_input_grad.rect.volume() == acc_output_grad.rect.volume()); checkCUDA(cudaMemcpyAsync(acc_input_grad.ptr, acc_output_grad.ptr, acc_input_grad.rect.volume() * sizeof(float), cudaMemcpyDeviceToDevice)); } void Flat::backward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<2> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<2> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(FLAT_BWD_TASK_ID, task_is, TaskArgument(NULL, 0), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_grad_lps[0], 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(output.part_grad, 0/*projection id*/, READ_ONLY, EXCLUSIVE, output.region_grad)); launcher.add_field(1, FID_DATA); runtime->execute_index_space(ctx, launcher); }
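The forward and backward tasks above are plain device-to-device copies; that works because flattening a dense (N, C, H, W) tensor into (N, C*H*W) does not move any element when both tensors use the same dense layout. A small host-side check of that index identity, illustrated for a row-major layout with illustrative sizes:

#include <cassert>

// Minimal check that flattening NCHW into (N, C*H*W) is the identity on
// linear offsets, which is why Flat::forward_task can use a single
// cudaMemcpyAsync instead of a gather kernel.
int main() {
    const int N = 2, C = 3, H = 4, W = 5;
    for (int n = 0; n < N; ++n)
        for (int c = 0; c < C; ++c)
            for (int h = 0; h < H; ++h)
                for (int w = 0; w < W; ++w) {
                    int idx4 = ((n * C + c) * H + h) * W + w;
                    int k    = (c * H + h) * W + w;
                    int idx2 = n * (C * H * W) + k;
                    assert(idx4 == idx2);
                }
    return 0;
}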
008f67ebb4861c1cceda3a833e382f9393dd2f8a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "AbstractAPI.h" #include "interfaces/cuda/Internals.h" #include <cassert> #include <device.h> namespace device { template <typename T> __global__ void kernel_scaleArray(T *array, const T scalar, const size_t numElements) { size_t index = threadIdx.x + blockIdx.x * blockDim.x; if (index < numElements) { array[index] *= scalar; } } template <typename T> void Algorithms::scaleArray(T *devArray, T scalar, const size_t numElements, void* streamPtr) { dim3 block(64, 1, 1); dim3 grid = internals::computeGrid1D(block, numElements); auto stream = reinterpret_cast<internals::deviceStreamT>(streamPtr); hipLaunchKernelGGL(( kernel_scaleArray), dim3(grid), dim3(block), 0, stream, devArray, scalar, numElements); CHECK_ERR; } template void Algorithms::scaleArray(real *devArray, real scalar, const size_t numElements, void* streamPtr); template void Algorithms::scaleArray(int *devArray, int scalar, const size_t numElements, void* streamPtr); template void Algorithms::scaleArray(char *devArray, char scalar, const size_t numElements, void* streamPtr); //-------------------------------------------------------------------------------------------------- template <typename T> __global__ void kernel_fillArray(T *array, T scalar, const size_t numElements) { size_t index = threadIdx.x + blockIdx.x * blockDim.x; if (index < numElements) { array[index] = scalar; } } template <typename T> void Algorithms::fillArray(T *devArray, const T scalar, const size_t numElements, void* streamPtr) { dim3 block(64, 1, 1); dim3 grid = internals::computeGrid1D(block, numElements); auto stream = reinterpret_cast<internals::deviceStreamT>(streamPtr); hipLaunchKernelGGL(( kernel_fillArray), dim3(grid), dim3(block), 0, stream, devArray, scalar, numElements); CHECK_ERR; } template void Algorithms::fillArray(real *devArray, real scalar, const size_t numElements, void* streamPtr); template void Algorithms::fillArray(int *devArray, int scalar, const size_t numElements, void* streamPtr); template void Algorithms::fillArray(unsigned *devArray, unsigned scalar, const size_t numElements, void* streamPtr); template void Algorithms::fillArray(char *devArray, char scalar, const size_t numElements, void* streamPtr); //-------------------------------------------------------------------------------------------------- __global__ void kernel_touchMemory(real *ptr, size_t size, bool clean) { int id = threadIdx.x + blockIdx.x * blockDim.x; if (id < size) { if (clean) { ptr[id] = 0; } else { real value = ptr[id]; // Do something dummy here. We just need to check the pointers point to valid memory locations. // Avoid compiler optimization. Possibly, implement a dummy code with asm. 
value += 1; value -= 1; } } } void Algorithms::touchMemory(real *ptr, size_t size, bool clean, void* streamPtr) { dim3 block(256, 1, 1); dim3 grid = internals::computeGrid1D(block, size); auto stream = reinterpret_cast<internals::deviceStreamT>(streamPtr); hipLaunchKernelGGL(( kernel_touchMemory), dim3(grid), dim3(block), 0, stream, ptr, size, clean); CHECK_ERR; } //-------------------------------------------------------------------------------------------------- __global__ void kernel_incrementalAdd( real** out, real *base, size_t increment, size_t numElements) { int id = threadIdx.x + blockIdx.x * blockDim.x; if (id < numElements) { out[id] = base + id * increment; } } void Algorithms::incrementalAdd( real** out, real *base, size_t increment, size_t numElements, void* streamPtr) { dim3 block(256, 1, 1); dim3 grid = internals::computeGrid1D(block, numElements); auto stream = reinterpret_cast<internals::deviceStreamT>(streamPtr); hipLaunchKernelGGL(( kernel_incrementalAdd), dim3(grid), dim3(block), 0, stream, out, base, increment, numElements); CHECK_ERR; } } // namespace device
008f67ebb4861c1cceda3a833e382f9393dd2f8a.cu
#include "AbstractAPI.h" #include "interfaces/cuda/Internals.h" #include <cassert> #include <device.h> namespace device { template <typename T> __global__ void kernel_scaleArray(T *array, const T scalar, const size_t numElements) { size_t index = threadIdx.x + blockIdx.x * blockDim.x; if (index < numElements) { array[index] *= scalar; } } template <typename T> void Algorithms::scaleArray(T *devArray, T scalar, const size_t numElements, void* streamPtr) { dim3 block(64, 1, 1); dim3 grid = internals::computeGrid1D(block, numElements); auto stream = reinterpret_cast<internals::deviceStreamT>(streamPtr); kernel_scaleArray<<<grid, block, 0, stream>>>(devArray, scalar, numElements); CHECK_ERR; } template void Algorithms::scaleArray(real *devArray, real scalar, const size_t numElements, void* streamPtr); template void Algorithms::scaleArray(int *devArray, int scalar, const size_t numElements, void* streamPtr); template void Algorithms::scaleArray(char *devArray, char scalar, const size_t numElements, void* streamPtr); //-------------------------------------------------------------------------------------------------- template <typename T> __global__ void kernel_fillArray(T *array, T scalar, const size_t numElements) { size_t index = threadIdx.x + blockIdx.x * blockDim.x; if (index < numElements) { array[index] = scalar; } } template <typename T> void Algorithms::fillArray(T *devArray, const T scalar, const size_t numElements, void* streamPtr) { dim3 block(64, 1, 1); dim3 grid = internals::computeGrid1D(block, numElements); auto stream = reinterpret_cast<internals::deviceStreamT>(streamPtr); kernel_fillArray<<<grid, block, 0, stream>>>(devArray, scalar, numElements); CHECK_ERR; } template void Algorithms::fillArray(real *devArray, real scalar, const size_t numElements, void* streamPtr); template void Algorithms::fillArray(int *devArray, int scalar, const size_t numElements, void* streamPtr); template void Algorithms::fillArray(unsigned *devArray, unsigned scalar, const size_t numElements, void* streamPtr); template void Algorithms::fillArray(char *devArray, char scalar, const size_t numElements, void* streamPtr); //-------------------------------------------------------------------------------------------------- __global__ void kernel_touchMemory(real *ptr, size_t size, bool clean) { int id = threadIdx.x + blockIdx.x * blockDim.x; if (id < size) { if (clean) { ptr[id] = 0; } else { real value = ptr[id]; // Do something dummy here. We just need to check the pointers point to valid memory locations. // Avoid compiler optimization. Possibly, implement a dummy code with asm. 
value += 1; value -= 1; } } } void Algorithms::touchMemory(real *ptr, size_t size, bool clean, void* streamPtr) { dim3 block(256, 1, 1); dim3 grid = internals::computeGrid1D(block, size); auto stream = reinterpret_cast<internals::deviceStreamT>(streamPtr); kernel_touchMemory<<<grid, block, 0, stream>>>(ptr, size, clean); CHECK_ERR; } //-------------------------------------------------------------------------------------------------- __global__ void kernel_incrementalAdd( real** out, real *base, size_t increment, size_t numElements) { int id = threadIdx.x + blockIdx.x * blockDim.x; if (id < numElements) { out[id] = base + id * increment; } } void Algorithms::incrementalAdd( real** out, real *base, size_t increment, size_t numElements, void* streamPtr) { dim3 block(256, 1, 1); dim3 grid = internals::computeGrid1D(block, numElements); auto stream = reinterpret_cast<internals::deviceStreamT>(streamPtr); kernel_incrementalAdd<<<grid, block, 0, stream>>>(out, base, increment, numElements); CHECK_ERR; } } // namespace device
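internals::computeGrid1D is defined elsewhere (interfaces/cuda/Internals.h); the shape it is assumed to have here is the usual ceiling division of the element count by the block size, sketched below. This is an assumption for illustration, not the project's actual implementation:

#include <cuda_runtime.h>

// Presumed shape of internals::computeGrid1D: a 1D grid with enough blocks
// of the given size to cover numElements (assumption, for illustration only).
static dim3 computeGrid1D(const dim3 &block, size_t numElements) {
    size_t numBlocks = (numElements + block.x - 1) / block.x;  // ceiling division
    return dim3(static_cast<unsigned>(numBlocks), 1, 1);
}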
ee78b61fac700aa4a212098a78eea6f938b261a5.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void d_putgaps(float *sne7, float *snaw, int *aw2ali, const int snno)
{
    //sino index
    int sni = threadIdx.x + blockIdx.y*blockDim.x;

    //sino bin index
    int awi = blockIdx.x;

    if (sni<snno) {
        sne7[aw2ali[awi] * snno + sni] = snaw[awi*snno + sni];
    }
}
ee78b61fac700aa4a212098a78eea6f938b261a5.cu
#include "includes.h" __global__ void d_putgaps(float *sne7, float *snaw, int *aw2ali, const int snno) { //sino index int sni = threadIdx.x + blockIdx.y*blockDim.x; //sino bin index int awi = blockIdx.x; if (sni<snno) { sne7[aw2ali[awi] * snno + sni] = snaw[awi*snno + sni]; } }
2d520c09392e3b140dab92d54aa6052336007a80.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <text/subword/detail/codepoint_metadata.ah> #include <text/subword/detail/tokenizer_utils.cuh> #include <nvtext/detail/load_hash_file.hpp> #include <cudf/column/column_factories.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/strings/detail/utilities.cuh> #include <cudf/utilities/default_stream.hpp> #include <cudf/utilities/error.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <thrust/fill.h> #include <algorithm> #include <cstdint> #include <fstream> #include <iostream> #include <vector> namespace nvtext { namespace detail { /** * @brief Retrieve the code point metadata table. * * Build the code point metadata table in device memory * using the vector pieces from codepoint_metadata.ah */ rmm::device_uvector<codepoint_metadata_type> get_codepoint_metadata(rmm::cuda_stream_view stream) { auto table_vector = rmm::device_uvector<codepoint_metadata_type>(codepoint_metadata_size, stream); auto table = table_vector.data(); thrust::fill(rmm::exec_policy(stream), table + cp_section1_end, table + codepoint_metadata_size, codepoint_metadata_default_value); CUDF_CUDA_TRY(hipMemcpyAsync(table, codepoint_metadata, cp_section1_end * sizeof(codepoint_metadata[0]), // 1st section hipMemcpyHostToDevice, stream.value())); CUDF_CUDA_TRY(hipMemcpyAsync( table + cp_section2_begin, cp_metadata_917505_917999, (cp_section2_end - cp_section2_begin + 1) * sizeof(codepoint_metadata[0]), // 2nd section hipMemcpyHostToDevice, stream.value())); return table_vector; } /** * @brief Retrieve the aux code point data table. 
* * Build the aux code point data table in device memory * using the vector pieces from codepoint_metadata.ah */ rmm::device_uvector<aux_codepoint_data_type> get_aux_codepoint_data(rmm::cuda_stream_view stream) { auto table_vector = rmm::device_uvector<aux_codepoint_data_type>(aux_codepoint_data_size, stream); auto table = table_vector.data(); thrust::fill(rmm::exec_policy(stream), table + aux_section1_end, table + aux_codepoint_data_size, aux_codepoint_default_value); CUDF_CUDA_TRY(hipMemcpyAsync(table, aux_codepoint_data, aux_section1_end * sizeof(aux_codepoint_data[0]), // 1st section hipMemcpyHostToDevice, stream.value())); CUDF_CUDA_TRY(hipMemcpyAsync( table + aux_section2_begin, aux_cp_data_44032_55203, (aux_section2_end - aux_section2_begin + 1) * sizeof(aux_codepoint_data[0]), // 2nd section hipMemcpyHostToDevice, stream.value())); CUDF_CUDA_TRY(hipMemcpyAsync( table + aux_section3_begin, aux_cp_data_70475_71099, (aux_section3_end - aux_section3_begin + 1) * sizeof(aux_codepoint_data[0]), // 3rd section hipMemcpyHostToDevice, stream.value())); CUDF_CUDA_TRY(hipMemcpyAsync( table + aux_section4_begin, aux_cp_data_119134_119232, (aux_section4_end - aux_section4_begin + 1) * sizeof(aux_codepoint_data[0]), // 4th section hipMemcpyHostToDevice, stream.value())); return table_vector; } namespace { /** * @brief Convert string to uint32. * * This just wraps the std::stoi but provides a nice error message * in case the hash file format is incorrect. */ uint32_t str_to_uint32(std::string const& str, uint64_t line_no) { try { return std::stoi(str); // there is no std::stoui } catch (std::exception const& exc) { std::string message("Line "); message += std::to_string(line_no) + ": "; message += "cannot convert integer from '"; message += str; message += "': "; message += exc.what(); std::cerr << message << std::endl; throw; } } /** * @brief Convert string to uint64. * * This just wraps the std::stoul but provides a nice error message * in case the hash file format is incorrect. */ uint64_t str_to_uint64(std::string const& str, uint64_t line_no) { try { return std::stoul(str); } catch (std::exception const& exc) { std::string message("Line "); message += std::to_string(line_no) + ": "; message += "cannot convert integer from '"; message += str; message += "': "; message += exc.what(); std::cerr << message << std::endl; throw; } } } // namespace /** * @brief Loads a text file representing the hashed vocabulary into hashed_vocabulary struct. * * @code{.pseudo} * Format of the file (ASCII text file with numbers): * First 3 lines have the following values: * outer_hash_a * outer_hash_b * number-of-bins * The next number-of-bins lines has two values in each line separated by a space * coefficient offset * ... * Next line has the size (number of lines) of the table followed * by the table values -- one value per line. 
* The last three lines: * unknown_token_id * first_token_id * separator_token_id * @endcode * * @param filename_hashed_vocabulary Path to text file containing hashed vocabulary * @return object containing hash table elements for the wordpiece tokenizer */ std::unique_ptr<hashed_vocabulary> load_vocabulary_file( std::string const& filename_hashed_vocabulary, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { hashed_vocabulary result; std::ifstream hash_file(filename_hashed_vocabulary); CUDF_EXPECTS(hash_file.good(), "Could not open " + filename_hashed_vocabulary); uint64_t line_no = 1; std::string line; std::getline(hash_file, line); result.outer_hash_a = str_to_uint32(line, line_no++); std::getline(hash_file, line); result.outer_hash_b = str_to_uint32(line, line_no++); std::getline(hash_file, line); result.num_bins = str_to_uint32(line, line_no++); std::vector<uint64_t> bin_coefficients(result.num_bins); std::vector<uint16_t> bin_offsets(result.num_bins); for (int i = 0; i < result.num_bins; ++i) { std::getline(hash_file, line); size_t loc_of_space = line.find(" "); CUDF_EXPECTS(loc_of_space != line.npos, "invalid hash file format"); std::string first_num = line.substr(0, loc_of_space); std::string second_num = line.substr(loc_of_space + 1, line.length()); bin_coefficients[i] = str_to_uint64(first_num, line_no); bin_offsets[i] = str_to_uint32(second_num, line_no); ++line_no; } std::getline(hash_file, line); uint64_t hash_table_length = str_to_uint64(line, line_no++); std::vector<uint64_t> table(hash_table_length); std::generate(table.begin(), table.end(), [&hash_file, &line_no]() { std::string line; std::getline(hash_file, line); return str_to_uint64(line, line_no++); }); std::getline(hash_file, line); result.unknown_token_id = str_to_uint32(line, line_no++); std::getline(hash_file, line); result.first_token_id = str_to_uint32(line, line_no++); std::getline(hash_file, line); result.separator_token_id = str_to_uint32(line, line_no++); // Transfer hash table to columns result.table = cudf::make_numeric_column(cudf::data_type{cudf::type_id::UINT64}, table.size(), cudf::mask_state::UNALLOCATED, stream, mr); CUDF_CUDA_TRY(hipMemcpyAsync(result.table->mutable_view().data<uint64_t>(), table.data(), table.size() * sizeof(uint64_t), hipMemcpyHostToDevice, stream.value())); result.bin_coefficients = cudf::make_numeric_column(cudf::data_type{cudf::type_id::UINT64}, bin_coefficients.size(), cudf::mask_state::UNALLOCATED, stream, mr); CUDF_CUDA_TRY(hipMemcpyAsync(result.bin_coefficients->mutable_view().data<uint64_t>(), bin_coefficients.data(), bin_coefficients.size() * sizeof(uint64_t), hipMemcpyHostToDevice, stream.value())); result.bin_offsets = cudf::make_numeric_column(cudf::data_type{cudf::type_id::UINT16}, bin_offsets.size(), cudf::mask_state::UNALLOCATED, stream, mr); CUDF_CUDA_TRY(hipMemcpyAsync(result.bin_offsets->mutable_view().data<uint16_t>(), bin_offsets.data(), bin_offsets.size() * sizeof(uint16_t), hipMemcpyHostToDevice, stream.value())); auto cp_metadata = detail::get_codepoint_metadata(stream); auto const cp_metadata_size = static_cast<cudf::size_type>(cp_metadata.size()); result.cp_metadata = std::make_unique<cudf::column>( cudf::data_type{cudf::type_id::UINT32}, cp_metadata_size, cp_metadata.release()); auto aux_cp_table = detail::get_aux_codepoint_data(stream); auto const aux_cp_table_size = static_cast<cudf::size_type>(aux_cp_table.size()); result.aux_cp_table = std::make_unique<cudf::column>( cudf::data_type{cudf::type_id::UINT64}, aux_cp_table_size, 
aux_cp_table.release()); return std::make_unique<hashed_vocabulary>(std::move(result)); } } // namespace detail std::unique_ptr<hashed_vocabulary> load_vocabulary_file( std::string const& filename_hashed_vocabulary, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::load_vocabulary_file(filename_hashed_vocabulary, cudf::default_stream_value, mr); } } // namespace nvtext
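The two table builders in the file above share one pattern: allocate the full device table, fill everything past the densely populated prefix with a default entry, then overlay each populated section with an async host-to-device copy on the caller's stream. A minimal sketch of that pattern with a single overlay section and hypothetical bounds (the real builders overlay two and four sections, and the hipified file above spells the copy as hipMemcpyAsync):

#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/fill.h>
#include <cstdint>

// Hypothetical helper illustrating the fill-then-overlay pattern used above.
rmm::device_uvector<uint32_t> build_section_table(uint32_t const* host_section,  // host data for [0, section_end)
                                                  std::size_t section_end,       // hypothetical prefix length
                                                  std::size_t table_size,        // hypothetical total size
                                                  uint32_t default_value,
                                                  rmm::cuda_stream_view stream)
{
  auto table = rmm::device_uvector<uint32_t>(table_size, stream);
  // default-initialize the sparse tail of the table
  thrust::fill(rmm::exec_policy(stream), table.begin() + section_end, table.end(), default_value);
  // overlay the densely populated prefix from host memory
  CUDF_CUDA_TRY(cudaMemcpyAsync(table.data(),
                                host_section,
                                section_end * sizeof(uint32_t),
                                cudaMemcpyHostToDevice,
                                stream.value()));
  return table;
}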
2d520c09392e3b140dab92d54aa6052336007a80.cu
/* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <text/subword/detail/codepoint_metadata.ah> #include <text/subword/detail/tokenizer_utils.cuh> #include <nvtext/detail/load_hash_file.hpp> #include <cudf/column/column_factories.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/strings/detail/utilities.cuh> #include <cudf/utilities/default_stream.hpp> #include <cudf/utilities/error.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <thrust/fill.h> #include <algorithm> #include <cstdint> #include <fstream> #include <iostream> #include <vector> namespace nvtext { namespace detail { /** * @brief Retrieve the code point metadata table. * * Build the code point metadata table in device memory * using the vector pieces from codepoint_metadata.ah */ rmm::device_uvector<codepoint_metadata_type> get_codepoint_metadata(rmm::cuda_stream_view stream) { auto table_vector = rmm::device_uvector<codepoint_metadata_type>(codepoint_metadata_size, stream); auto table = table_vector.data(); thrust::fill(rmm::exec_policy(stream), table + cp_section1_end, table + codepoint_metadata_size, codepoint_metadata_default_value); CUDF_CUDA_TRY(cudaMemcpyAsync(table, codepoint_metadata, cp_section1_end * sizeof(codepoint_metadata[0]), // 1st section cudaMemcpyHostToDevice, stream.value())); CUDF_CUDA_TRY(cudaMemcpyAsync( table + cp_section2_begin, cp_metadata_917505_917999, (cp_section2_end - cp_section2_begin + 1) * sizeof(codepoint_metadata[0]), // 2nd section cudaMemcpyHostToDevice, stream.value())); return table_vector; } /** * @brief Retrieve the aux code point data table. * * Build the aux code point data table in device memory * using the vector pieces from codepoint_metadata.ah */ rmm::device_uvector<aux_codepoint_data_type> get_aux_codepoint_data(rmm::cuda_stream_view stream) { auto table_vector = rmm::device_uvector<aux_codepoint_data_type>(aux_codepoint_data_size, stream); auto table = table_vector.data(); thrust::fill(rmm::exec_policy(stream), table + aux_section1_end, table + aux_codepoint_data_size, aux_codepoint_default_value); CUDF_CUDA_TRY(cudaMemcpyAsync(table, aux_codepoint_data, aux_section1_end * sizeof(aux_codepoint_data[0]), // 1st section cudaMemcpyHostToDevice, stream.value())); CUDF_CUDA_TRY(cudaMemcpyAsync( table + aux_section2_begin, aux_cp_data_44032_55203, (aux_section2_end - aux_section2_begin + 1) * sizeof(aux_codepoint_data[0]), // 2nd section cudaMemcpyHostToDevice, stream.value())); CUDF_CUDA_TRY(cudaMemcpyAsync( table + aux_section3_begin, aux_cp_data_70475_71099, (aux_section3_end - aux_section3_begin + 1) * sizeof(aux_codepoint_data[0]), // 3rd section cudaMemcpyHostToDevice, stream.value())); CUDF_CUDA_TRY(cudaMemcpyAsync( table + aux_section4_begin, aux_cp_data_119134_119232, (aux_section4_end - aux_section4_begin + 1) * sizeof(aux_codepoint_data[0]), // 4th section cudaMemcpyHostToDevice, stream.value())); return table_vector; } namespace { /** * @brief Convert string to uint32. 
* * This just wraps the std::stoi but provides a nice error message * in case the hash file format is incorrect. */ uint32_t str_to_uint32(std::string const& str, uint64_t line_no) { try { return std::stoi(str); // there is no std::stoui } catch (std::exception const& exc) { std::string message("Line "); message += std::to_string(line_no) + ": "; message += "cannot convert integer from '"; message += str; message += "': "; message += exc.what(); std::cerr << message << std::endl; throw; } } /** * @brief Convert string to uint64. * * This just wraps the std::stoul but provides a nice error message * in case the hash file format is incorrect. */ uint64_t str_to_uint64(std::string const& str, uint64_t line_no) { try { return std::stoul(str); } catch (std::exception const& exc) { std::string message("Line "); message += std::to_string(line_no) + ": "; message += "cannot convert integer from '"; message += str; message += "': "; message += exc.what(); std::cerr << message << std::endl; throw; } } } // namespace /** * @brief Loads a text file representing the hashed vocabulary into hashed_vocabulary struct. * * @code{.pseudo} * Format of the file (ASCII text file with numbers): * First 3 lines have the following values: * outer_hash_a * outer_hash_b * number-of-bins * The next number-of-bins lines has two values in each line separated by a space * coefficient offset * ... * Next line has the size (number of lines) of the table followed * by the table values -- one value per line. * The last three lines: * unknown_token_id * first_token_id * separator_token_id * @endcode * * @param filename_hashed_vocabulary Path to text file containing hashed vocabulary * @return object containing hash table elements for the wordpiece tokenizer */ std::unique_ptr<hashed_vocabulary> load_vocabulary_file( std::string const& filename_hashed_vocabulary, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { hashed_vocabulary result; std::ifstream hash_file(filename_hashed_vocabulary); CUDF_EXPECTS(hash_file.good(), "Could not open " + filename_hashed_vocabulary); uint64_t line_no = 1; std::string line; std::getline(hash_file, line); result.outer_hash_a = str_to_uint32(line, line_no++); std::getline(hash_file, line); result.outer_hash_b = str_to_uint32(line, line_no++); std::getline(hash_file, line); result.num_bins = str_to_uint32(line, line_no++); std::vector<uint64_t> bin_coefficients(result.num_bins); std::vector<uint16_t> bin_offsets(result.num_bins); for (int i = 0; i < result.num_bins; ++i) { std::getline(hash_file, line); size_t loc_of_space = line.find(" "); CUDF_EXPECTS(loc_of_space != line.npos, "invalid hash file format"); std::string first_num = line.substr(0, loc_of_space); std::string second_num = line.substr(loc_of_space + 1, line.length()); bin_coefficients[i] = str_to_uint64(first_num, line_no); bin_offsets[i] = str_to_uint32(second_num, line_no); ++line_no; } std::getline(hash_file, line); uint64_t hash_table_length = str_to_uint64(line, line_no++); std::vector<uint64_t> table(hash_table_length); std::generate(table.begin(), table.end(), [&hash_file, &line_no]() { std::string line; std::getline(hash_file, line); return str_to_uint64(line, line_no++); }); std::getline(hash_file, line); result.unknown_token_id = str_to_uint32(line, line_no++); std::getline(hash_file, line); result.first_token_id = str_to_uint32(line, line_no++); std::getline(hash_file, line); result.separator_token_id = str_to_uint32(line, line_no++); // Transfer hash table to columns result.table = 
cudf::make_numeric_column(cudf::data_type{cudf::type_id::UINT64}, table.size(), cudf::mask_state::UNALLOCATED, stream, mr); CUDF_CUDA_TRY(cudaMemcpyAsync(result.table->mutable_view().data<uint64_t>(), table.data(), table.size() * sizeof(uint64_t), cudaMemcpyHostToDevice, stream.value())); result.bin_coefficients = cudf::make_numeric_column(cudf::data_type{cudf::type_id::UINT64}, bin_coefficients.size(), cudf::mask_state::UNALLOCATED, stream, mr); CUDF_CUDA_TRY(cudaMemcpyAsync(result.bin_coefficients->mutable_view().data<uint64_t>(), bin_coefficients.data(), bin_coefficients.size() * sizeof(uint64_t), cudaMemcpyHostToDevice, stream.value())); result.bin_offsets = cudf::make_numeric_column(cudf::data_type{cudf::type_id::UINT16}, bin_offsets.size(), cudf::mask_state::UNALLOCATED, stream, mr); CUDF_CUDA_TRY(cudaMemcpyAsync(result.bin_offsets->mutable_view().data<uint16_t>(), bin_offsets.data(), bin_offsets.size() * sizeof(uint16_t), cudaMemcpyHostToDevice, stream.value())); auto cp_metadata = detail::get_codepoint_metadata(stream); auto const cp_metadata_size = static_cast<cudf::size_type>(cp_metadata.size()); result.cp_metadata = std::make_unique<cudf::column>( cudf::data_type{cudf::type_id::UINT32}, cp_metadata_size, cp_metadata.release()); auto aux_cp_table = detail::get_aux_codepoint_data(stream); auto const aux_cp_table_size = static_cast<cudf::size_type>(aux_cp_table.size()); result.aux_cp_table = std::make_unique<cudf::column>( cudf::data_type{cudf::type_id::UINT64}, aux_cp_table_size, aux_cp_table.release()); return std::make_unique<hashed_vocabulary>(std::move(result)); } } // namespace detail std::unique_ptr<hashed_vocabulary> load_vocabulary_file( std::string const& filename_hashed_vocabulary, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::load_vocabulary_file(filename_hashed_vocabulary, cudf::default_stream_value, mr); } } // namespace nvtext
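For reference, a hypothetical hash file matching the pseudo-format documented above (all values invented purely for illustration; real files come from the vocabulary hashing tool), together with a call through the public entry point defined at the end of this file. The header location is an assumption about where nvtext declares load_vocabulary_file:

#include <nvtext/subword_tokenize.hpp>            // assumed public declaration of load_vocabulary_file
#include <rmm/mr/device/per_device_resource.hpp>

// hashed_vocab.txt (hypothetical contents, parsed in the order shown above):
//   26899                       outer_hash_a
//   27424                       outer_hash_b
//   2                           number-of-bins
//   6205475701751155871 0       bin 0: coefficient offset
//   6205475701751159999 3       bin 1: coefficient offset
//   3                           hash table size
//   <3 uint64 table values, one per line>
//   100                         unknown_token_id
//   101                         first_token_id
//   102                         separator_token_id

void example_load()
{
  auto vocabulary = nvtext::load_vocabulary_file("hashed_vocab.txt",
                                                 rmm::mr::get_current_device_resource());
  // vocabulary->table, ->bin_coefficients and ->bin_offsets now live in device memory
}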
9a2715bf47a3d3d946b83c571e559e13d684b309.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include "cuda_util.h" #include "sampling.h" #include "grid-search.h" int TESTING_WITH_RANDOM_FORESTS = 0; int main() { printf("GPU Mem: %zu\n", get_global_memory_size_for_device(0)); printf("Samples Possible: %d\n", get_sample_size_for_device(0, 20, sizeof(float))); // Set the Random Seed to Time srand((unsigned int) time(NULL)); SamplingProperties props = make_properties( "test_data/xy.csv", // csv_file_path 11209389, // file_size 3000000, // line_buffer_size 1000, // random_chunk_size 21, // elements_per_line sizeof(float), // element_size_bytes 4 // cuda_device_count ); float** cuda_samples = load_devices(&props); // if(TESTING_WITH_RANDOM_FORESTS == 1) { // // run GridSearch on RandomForests // int* results = grid_search(cuda_samples, &props); // // printf("Optimal parameters chosen: [n, m, f]\n"); // printf("n: number of estimators (trees), m = minimum size, d = maximum depth\n"); // for(int i=0; i < sizeof(results) / sizeof(int); i++){ // printf("%d ", results[i]); // } // } // // free memory // for (int i = 0; i < props.cuda_device_count; i++) hipFree((void *) cuda_samples[i]); free(cuda_samples); return 0; }
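One small portability gap in the main() above (and in its CUDA twin below): srand(), time() and free() are called, but <stdlib.h> and <time.h> are never included directly, so the build leans on transitive includes from the project headers. Adding them explicitly would keep the file self-contained:

#include <stdlib.h>   // srand, free
#include <time.h>     // time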
9a2715bf47a3d3d946b83c571e559e13d684b309.cu
#include <stdio.h> #include "cuda_util.h" #include "sampling.h" #include "grid-search.h" int TESTING_WITH_RANDOM_FORESTS = 0; int main() { printf("GPU Mem: %zu\n", get_global_memory_size_for_device(0)); printf("Samples Possible: %d\n", get_sample_size_for_device(0, 20, sizeof(float))); // Set the Random Seed to Time srand((unsigned int) time(NULL)); SamplingProperties props = make_properties( "test_data/xy.csv", // csv_file_path 11209389, // file_size 3000000, // line_buffer_size 1000, // random_chunk_size 21, // elements_per_line sizeof(float), // element_size_bytes 4 // cuda_device_count ); float** cuda_samples = load_devices(&props); // if(TESTING_WITH_RANDOM_FORESTS == 1) { // // run GridSearch on RandomForests // int* results = grid_search(cuda_samples, &props); // // printf("Optimal parameters chosen: [n, m, f]\n"); // printf("n: number of estimators (trees), m = minimum size, d = maximum depth\n"); // for(int i=0; i < sizeof(results) / sizeof(int); i++){ // printf("%d ", results[i]); // } // } // // free memory // for (int i = 0; i < props.cuda_device_count; i++) cudaFree((void *) cuda_samples[i]); free(cuda_samples); return 0; }
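A note on the commented-out grid-search block above, should it ever be re-enabled: `sizeof(results) / sizeof(int)` does not recover the number of elements behind an `int*` returned from a function; it evaluates to sizeof(int*)/sizeof(int), typically 2 on a 64-bit build. One way around it, sketched as a fragment that would slot into main() and assuming a hypothetical variant of grid_search that also reports its result count:

int result_count = 0;
// hypothetical signature: int* grid_search_n(float** samples, SamplingProperties* props, int* out_count);
int* results = grid_search_n(cuda_samples, &props, &result_count);
for (int i = 0; i < result_count; i++) {
    printf("%d ", results[i]);
}
printf("\n");
free(results);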
644b691ebfd21dd312a15534a063bb7f71842793.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @generated from zmergecg.cu normal z -> s, Fri Jul 18 17:34:28 2014 @author Hartwig Anzt */ #include "common_magma.h" #include "../include/magmasparse.h" #define BLOCK_SIZE 512 #define PRECISION_s // These routines merge multiple kernels from smergecg into one // for a description see // "Reformulated Conjugate Gradient for the Energy-Aware // Solution of Linear Systems on GPUs (ICPP '13) // accelerated reduction for one vector __global__ void magma_scgreduce_kernel_spmv1( int Gs, int n, float *vtmp, float *vtmp2 ){ extern __shared__ float temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; temp[Idx] = MAGMA_S_MAKE( 0.0, 0.0); int i = blockIdx.x * ( blockSize * 2 ) + Idx; while (i < Gs ) { temp[ Idx ] += vtmp[ i ]; temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ] : MAGMA_S_MAKE( 0.0, 0.0); i += gridSize; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp2[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using CSR and the first step of the reduction __global__ void magma_scgmerge_spmvcsr_kernel( int n, float *d_val, magma_index_t *d_rowptr, magma_index_t *d_colind, float *d, float *z, float *vtmp ){ extern __shared__ float temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0); if( i<n ){ float dot = MAGMA_S_ZERO; int start = d_rowptr[ i ]; int end = d_rowptr[ i+1 ]; for( j=start; j<end; j++) dot += d_val[ j ] * d[ d_colind[j] ]; z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if 
defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELL and the first step of the reduction __global__ void magma_scgmerge_spmvellpackt_kernel( int n, int num_cols_per_row, float *d_val, magma_index_t *d_colind, float *d, float *z, float *vtmp ){ extern __shared__ float temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0); if(i < n ){ float dot = MAGMA_S_MAKE(0.0, 0.0); for ( int k = 0; k < num_cols_per_row ; k ++){ int col = d_colind [ n * k + i ]; float val = d_val [ n * k + i ]; if( val != 0) dot += val * d[ col ]; } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELLPACK and the first step of the reduction __global__ void magma_scgmerge_spmvellpack_kernel( int n, int num_cols_per_row, float *d_val, magma_index_t *d_colind, float *d, float *z, float *vtmp ){ extern __shared__ float temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0); if(i < n ){ float dot = MAGMA_S_MAKE(0.0, 0.0); for ( int k = 0; k < num_cols_per_row ; k ++){ int col = d_colind [ num_cols_per_row * i + k ]; float val = d_val [ num_cols_per_row * i + k ]; if( val != 0) dot += val * d[ col ]; } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx 
< 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_scgmerge_spmvellpackrt_kernel_8( int n, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowlength, float *d, float *z, float *vtmp, magma_int_t T, magma_int_t alignment ){ int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ float shared[]; if(i < n ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int max_ = (d_rowlength[i]+T-1)/T; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //float val = d_val[ k*(T*alignment)+(i*T)+idp ]; //int col = d_colind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) float val = d_val[ k*(T)+(i*alignment)+idp ]; int col = d_colind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 4 ){ shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = (shared[idb]+shared[idb+1]); } } } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_scgmerge_spmvellpackrt_kernel_16( int n, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowlength, float *d, float *z, float *vtmp, magma_int_t T, magma_int_t alignment ){ int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ float shared[]; if(i < n ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int max_ = (d_rowlength[i]+T-1)/T; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //float val = d_val[ k*(T*alignment)+(i*T)+idp ]; //int col = d_colind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) float val = d_val[ k*(T)+(i*alignment)+idp ]; int col = d_colind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 8 ){ shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = (shared[idb]+shared[idb+1]); } } } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_scgmerge_spmvellpackrt_kernel_32( int n, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowlength, float *d, float *z, float *vtmp, magma_int_t T, magma_int_t alignment ){ int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ float shared[]; if(i < n ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int max_ = (d_rowlength[i]+T-1)/T; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //float val = d_val[ k*(T*alignment)+(i*T)+idp ]; //int col = d_colind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) float val = d_val[ 
k*(T)+(i*alignment)+idp ]; int col = d_colind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 16 ){ shared[idb]+=shared[idb+16]; if( idp < 8 ) shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = (shared[idb]+shared[idb+1]); } } } } // additional kernel necessary to compute first reduction step __global__ void magma_scgmerge_spmvellpackrt_kernel2( int n, float *z, float *d, float *vtmp2 ){ extern __shared__ float temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = ( i < n ) ? z[i]*d[i] : MAGMA_S_MAKE(0.0, 0.0); __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp2[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using SELLC __global__ void magma_scgmerge_spmvsellc_kernel( int num_rows, int blocksize, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, float *d, float *z, float *vtmp){ extern __shared__ float temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int offset = d_rowptr[ blockIdx.x ]; int border = (d_rowptr[ blockIdx.x+1 ]-offset)/blocksize; temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0); if(i < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); for ( int n = 0; n < border; n ++){ int col = d_colind [offset+ blocksize * n + Idx ]; float val = d_val[offset+ blocksize * n + Idx]; if( val != 0){ dot=dot+val*d[col]; } } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] 
+= temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_scgmerge_spmvsellpt_kernel_8( int num_rows, int blocksize, int T, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, float *d, float *z) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ float val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ){ shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_scgmerge_spmvsellpt_kernel_16( int num_rows, int blocksize, int T, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, float *d, float *z) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ float val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 8 ){ shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_scgmerge_spmvsellpt_kernel_32( int num_rows, int blocksize, int T, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, float *d, float *z) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ float val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 16 ){ shared[ldx]+=shared[ldx+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // kernel to handle scalars __global__ void // rho = beta/tmp; gamma = beta; magma_scg_rhokernel( float *skp ){ int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ){ float tmp = skp[1]; skp[3] = tmp/skp[4]; skp[2] = tmp; } } /** Purpose ------- Merges the first SpmV using different formats with the dot product and the computation of rho Arguments --------- @param A magma_s_sparse_matrix input matrix @param d1 float* temporary vector @param d2 float* temporary vector @param d_d float* input vector d @param d_z float* input vector z @param skp float* array for parameters ( skp[3]=rho ) @ingroup magmasparse_ssygpuk ********************************************************************/ extern "C" magma_int_t magma_scgmerge_spmv1( magma_s_sparse_matrix A, float *d1, float *d2, float *d_d, float *d_z, float *skp ){ int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( (A.num_rows+local_block_size-1)/local_block_size ); dim3 Gs_next; int Ms = local_block_size * sizeof( float ); float *aux1 = d1, *aux2 = d2; int b = 1; if( A.storage_type == Magma_CSR ) hipLaunchKernelGGL(( magma_scgmerge_spmvcsr_kernel), dim3(Gs), dim3(Bs), Ms, magma_stream , A.num_rows, A.val, A.row, A.col, d_d, d_z, d1 ); else if( A.storage_type == Magma_ELLPACK ) hipLaunchKernelGGL(( magma_scgmerge_spmvellpack_kernel), dim3(Gs), dim3(Bs), Ms, magma_stream , A.num_rows, A.max_nnz_row, A.val, A.col, d_d, d_z, d1 ); else if( A.storage_type == Magma_ELL ) hipLaunchKernelGGL(( magma_scgmerge_spmvellpackt_kernel), dim3(Gs), dim3(Bs), Ms, magma_stream , A.num_rows, A.max_nnz_row, A.val, A.col, d_d, d_z, d1 ); else if( A.storage_type == Magma_SELLC || A.storage_type == Magma_SELLP ){ if( A.blocksize==256){ hipLaunchKernelGGL(( magma_scgmerge_spmvsellc_kernel), dim3(Gs), dim3(Bs), Ms, magma_stream , A.num_rows, A.blocksize, A. 
val, A.col, A.row, d_d, d_z, d1 ); } else printf("error: SELLC only for blocksize 256.\n"); } else if( A.storage_type == Magma_SELLP ){ int num_threadssellp = A.blocksize*A.alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threadssellp > 256 ) printf("error: too much shared memory requested.\n"); dim3 block( A.blocksize, A.alignment, 1); int dimgrid1 = sqrt(A.numblocks); int dimgrid2 = (A.numblocks + dimgrid1 -1 ) / dimgrid1; dim3 gridsellp( dimgrid1, dimgrid2, 1); int Mssellp = num_threadssellp * sizeof( float ); if( A.alignment == 8) hipLaunchKernelGGL(( magma_scgmerge_spmvsellpt_kernel_8) , dim3(gridsellp), dim3(block), Mssellp, magma_stream , A.num_rows, A.blocksize, A.alignment, A.val, A.col, A.row, d_d, d_z); else if( A.alignment == 16) hipLaunchKernelGGL(( magma_scgmerge_spmvsellpt_kernel_16) , dim3(gridsellp), dim3(block), Mssellp, magma_stream , A.num_rows, A.blocksize, A.alignment, A.val, A.col, A.row, d_d, d_z); else if( A.alignment == 32) hipLaunchKernelGGL(( magma_scgmerge_spmvsellpt_kernel_32) , dim3(gridsellp), dim3(block), Mssellp, magma_stream , A.num_rows, A.blocksize, A.alignment, A.val, A.col, A.row, d_d, d_z); else printf("error: alignment not supported.\n"); // in case of using SELLP, we can't efficiently merge the // dot product and the first reduction loop into the SpMV kernel // as the SpMV grid would result in low occupancy. hipLaunchKernelGGL(( magma_scgmerge_spmvellpackrt_kernel2), dim3(Gs), dim3(Bs), Ms, magma_stream , A.num_rows, d_z, d_d, d1 ); } else if( A.storage_type == Magma_ELLRT ){ // in case of using ELLRT, we need a different grid, assigning // threads_per_row processors to each row // the block size is num_threads // fixed values int num_blocks = ( (A.num_rows+A.blocksize-1)/A.blocksize); int num_threads = A.alignment*A.blocksize; int real_row_length = ((int)(A.max_nnz_row+A.alignment-1)/A.alignment) *A.alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threads > 256 ) printf("error: too much shared memory requested.\n"); int dimgrid1 = sqrt(num_blocks); int dimgrid2 = (num_blocks + dimgrid1 -1 ) / dimgrid1; dim3 gridellrt( dimgrid1, dimgrid2, 1); int Mellrt = A.alignment * A.blocksize * sizeof( float ); // printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms); if( A.alignment == 32 ){ hipLaunchKernelGGL(( magma_scgmerge_spmvellpackrt_kernel_32) , dim3(gridellrt), dim3(num_threads) , Mellrt, magma_stream , A.num_rows, A.val, A.col, A.row, d_d, d_z, d1, A.alignment, real_row_length ); } else if( A.alignment == 16 ){ hipLaunchKernelGGL(( magma_scgmerge_spmvellpackrt_kernel_16) , dim3(gridellrt), dim3(num_threads) , Mellrt, magma_stream , A.num_rows, A.val, A.col, A.row, d_d, d_z, d1, A.alignment, real_row_length ); } else if( A.alignment == 8 ){ hipLaunchKernelGGL(( magma_scgmerge_spmvellpackrt_kernel_8) , dim3(gridellrt), dim3(num_threads) , Mellrt, magma_stream , A.num_rows, A.val, A.col, A.row, d_d, d_z, d1, A.alignment, real_row_length ); } else{ printf("error: alignment %d not supported.\n", A.alignment); exit(-1); } // in case of using ELLRT, we can't efficiently merge the // dot product and the first reduction loop into the SpMV kernel // as the SpMV grid would result in low occupancy. 
hipLaunchKernelGGL(( magma_scgmerge_spmvellpackrt_kernel2), dim3(Gs), dim3(Bs), Ms, magma_stream , A.num_rows, d_z, d_d, d1 ); } while( Gs.x > 1 ){ Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ; if( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_scgreduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2 , 0, Gs.x, A.num_rows, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if( b ){ aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_scopyvector( 1, aux1, 1, skp+4, 1 ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); hipLaunchKernelGGL(( magma_scg_rhokernel), dim3(Gs2), dim3(Bs2), 0, 0, skp ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */ // updates x and r and computes the first part of the dot product r*r __global__ void magma_scgmerge_xrbeta_kernel( int n, float *x, float *r, float *d, float *z, float *skp, float *vtmp ){ extern __shared__ float temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; float rho = skp[3]; float mrho = MAGMA_S_MAKE( -1.0, 0.0)*rho; temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0); if( i<n ){ x[i] += rho * d[i] ; r[i] += mrho * z[i]; temp[ Idx ] = r[i] * r[i]; } __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // kernel to handle scalars __global__ void //alpha = beta / gamma magma_scg_alphabetakernel( float *skp ){ int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ){ float tmp1 = skp[1]; skp[0] = tmp1/skp[2]; //printf("beta=%e\n", MAGMA_S_REAL(tmp1)); } } // update search Krylov vector d __global__ void magma_scg_d_kernel( int n, float *skp, float *r, float *d ){ int i = blockIdx.x * blockDim.x + threadIdx.x; float alpha = skp[0]; if( i<n ){ d[i] = r[i] + alpha * d[i]; } } /** Purpose ------- Merges the update of r and x with the dot product and performs then the update for the Krylov vector d Arguments --------- @param n int dimension n @param d1 float* temporary vector @param d2 float* temporary vector @param d_x float* input vector x @param d_r float* input/output vector r @param d_d float* input vector d @param d_z float* input vector z @param skp float* array for parameters @ingroup magmasparse_ssygpuk ********************************************************************/ extern "C" magma_int_t magma_scgmerge_xrbeta( int n, float *d1, float *d2, float *d_x, float *d_r, float *d_d, float *d_z, float *skp ){ int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( 
(n+local_block_size-1)/local_block_size ); dim3 Gs_next; int Ms = 2*local_block_size * sizeof( float ); float *aux1 = d1, *aux2 = d2; int b = 1; hipLaunchKernelGGL(( magma_scgmerge_xrbeta_kernel), dim3(Gs), dim3(Bs), Ms, 0, n, d_x, d_r, d_d, d_z, skp, d1); while( Gs.x > 1 ){ Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ; if( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_scgreduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2 , 0, Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if( b ){ aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_scopyvector( 1, aux1, 1, skp+1, 1 ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); hipLaunchKernelGGL(( magma_scg_alphabetakernel), dim3(Gs2), dim3(Bs2), 0, 0, skp ); dim3 Bs3( local_block_size ); dim3 Gs3( (n+local_block_size-1)/local_block_size ); hipLaunchKernelGGL(( magma_scg_d_kernel), dim3(Gs3), dim3(Bs3), 0, 0, n, skp, d_r, d_d ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */
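The host-side reduction in both magma_scgmerge_spmv1 and magma_scgmerge_xrbeta above follows the same shape: launch a reduce kernel that folds two blocks of partial sums per thread block, halve the grid, and ping-pong the partial results between the two scratch buffers until a single value remains, which is then copied into the skp scalar array. A stripped-down sketch of that driver, written in CUDA launch syntax with a hypothetical wrapper name and reusing magma_scgreduce_kernel_spmv1 defined above:

#include <utility>  // std::swap

void reduce_partial_sums(int n, float* d1, float* d2, float* skp_entry)
{
  dim3 Bs(256);
  dim3 Gs((n + Bs.x - 1) / Bs.x);   // d1 holds Gs.x block-level partial sums
  float *in = d1, *out = d2;
  while (Gs.x > 1) {
    dim3 Gs_next((Gs.x + Bs.x - 1) / Bs.x);
    if (Gs_next.x == 1) Gs_next.x = 2;            // kernel folds two blocks per thread block
    magma_scgreduce_kernel_spmv1<<<Gs_next.x / 2, Bs.x / 2, Bs.x / 2 * sizeof(float)>>>(
        Gs.x, n, in, out);
    Gs.x = Gs_next.x / 2;
    std::swap(in, out);                           // next pass reads what this pass wrote
  }
  // one partial sum left in 'in'; store it next to the other CG scalars
  cudaMemcpy(skp_entry, in, sizeof(float), cudaMemcpyDeviceToDevice);
}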
644b691ebfd21dd312a15534a063bb7f71842793.cu
/* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @generated from zmergecg.cu normal z -> s, Fri Jul 18 17:34:28 2014 @author Hartwig Anzt */ #include "common_magma.h" #include "../include/magmasparse.h" #define BLOCK_SIZE 512 #define PRECISION_s // These routines merge multiple kernels from smergecg into one // for a description see // "Reformulated Conjugate Gradient for the Energy-Aware // Solution of Linear Systems on GPUs (ICPP '13) // accelerated reduction for one vector __global__ void magma_scgreduce_kernel_spmv1( int Gs, int n, float *vtmp, float *vtmp2 ){ extern __shared__ float temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; temp[Idx] = MAGMA_S_MAKE( 0.0, 0.0); int i = blockIdx.x * ( blockSize * 2 ) + Idx; while (i < Gs ) { temp[ Idx ] += vtmp[ i ]; temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ] : MAGMA_S_MAKE( 0.0, 0.0); i += gridSize; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp2[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using CSR and the first step of the reduction __global__ void magma_scgmerge_spmvcsr_kernel( int n, float *d_val, magma_index_t *d_rowptr, magma_index_t *d_colind, float *d, float *z, float *vtmp ){ extern __shared__ float temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0); if( i<n ){ float dot = MAGMA_S_ZERO; int start = d_rowptr[ i ]; int end = d_rowptr[ i+1 ]; for( j=start; j<end; j++) dot += d_val[ j ] * d[ d_colind[j] ]; z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 
32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELL and the first step of the reduction __global__ void magma_scgmerge_spmvellpackt_kernel( int n, int num_cols_per_row, float *d_val, magma_index_t *d_colind, float *d, float *z, float *vtmp ){ extern __shared__ float temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0); if(i < n ){ float dot = MAGMA_S_MAKE(0.0, 0.0); for ( int k = 0; k < num_cols_per_row ; k ++){ int col = d_colind [ n * k + i ]; float val = d_val [ n * k + i ]; if( val != 0) dot += val * d[ col ]; } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELLPACK and the first step of the reduction __global__ void magma_scgmerge_spmvellpack_kernel( int n, int num_cols_per_row, float *d_val, magma_index_t *d_colind, float *d, float *z, float *vtmp ){ extern __shared__ float temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0); if(i < n ){ float dot = MAGMA_S_MAKE(0.0, 0.0); for ( int k = 0; k < num_cols_per_row ; k ++){ int col = d_colind [ num_cols_per_row * i + k ]; float val = d_val [ num_cols_per_row * i + k ]; if( val != 0) dot += val * d[ col ]; } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ 
Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_scgmerge_spmvellpackrt_kernel_8( int n, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowlength, float *d, float *z, float *vtmp, magma_int_t T, magma_int_t alignment ){ int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ float shared[]; if(i < n ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int max_ = (d_rowlength[i]+T-1)/T; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //float val = d_val[ k*(T*alignment)+(i*T)+idp ]; //int col = d_colind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) float val = d_val[ k*(T)+(i*alignment)+idp ]; int col = d_colind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 4 ){ shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = (shared[idb]+shared[idb+1]); } } } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_scgmerge_spmvellpackrt_kernel_16( int n, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowlength, float *d, float *z, float *vtmp, magma_int_t T, magma_int_t alignment ){ int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ float shared[]; if(i < n ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int max_ = (d_rowlength[i]+T-1)/T; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //float val = d_val[ k*(T*alignment)+(i*T)+idp ]; //int col = d_colind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) float val = d_val[ k*(T)+(i*alignment)+idp ]; int col = d_colind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 8 ){ shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = (shared[idb]+shared[idb+1]); } } } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_scgmerge_spmvellpackrt_kernel_32( int n, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowlength, float *d, float *z, float *vtmp, magma_int_t T, magma_int_t alignment ){ int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ float shared[]; if(i < n ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int max_ = (d_rowlength[i]+T-1)/T; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //float val = d_val[ k*(T*alignment)+(i*T)+idp ]; //int col = d_colind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) float val = d_val[ k*(T)+(i*alignment)+idp ]; int col = d_colind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } 
shared[idb] = dot; if( idp < 16 ){ shared[idb]+=shared[idb+16]; if( idp < 8 ) shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = (shared[idb]+shared[idb+1]); } } } } // additional kernel necessary to compute first reduction step __global__ void magma_scgmerge_spmvellpackrt_kernel2( int n, float *z, float *d, float *vtmp2 ){ extern __shared__ float temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = ( i < n ) ? z[i]*d[i] : MAGMA_S_MAKE(0.0, 0.0); __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp2[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using SELLC __global__ void magma_scgmerge_spmvsellc_kernel( int num_rows, int blocksize, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, float *d, float *z, float *vtmp){ extern __shared__ float temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int offset = d_rowptr[ blockIdx.x ]; int border = (d_rowptr[ blockIdx.x+1 ]-offset)/blocksize; temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0); if(i < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); for ( int n = 0; n < border; n ++){ int col = d_colind [offset+ blocksize * n + Idx ]; float val = d_val[offset+ blocksize * n + Idx]; if( val != 0){ dot=dot+val*d[col]; } } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = 
temp[ 0 ]; } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_scgmerge_spmvsellpt_kernel_8( int num_rows, int blocksize, int T, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, float *d, float *z) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ float val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ){ shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_scgmerge_spmvsellpt_kernel_16( int num_rows, int blocksize, int T, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, float *d, float *z) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ float val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 8 ){ shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_scgmerge_spmvsellpt_kernel_32( int num_rows, int blocksize, int T, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, float *d, float *z) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ float val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 16 ){ shared[ldx]+=shared[ldx+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // kernel to handle scalars __global__ void // rho = beta/tmp; gamma = beta; magma_scg_rhokernel( float *skp ){ int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ){ float tmp = skp[1]; skp[3] = tmp/skp[4]; skp[2] = tmp; } } /** Purpose ------- Merges the first SpmV using different formats with the dot product and the computation of rho Arguments --------- @param A magma_s_sparse_matrix input matrix @param d1 float* temporary vector @param d2 float* temporary vector @param d_d float* input vector d @param d_z float* input vector z @param skp float* array for parameters ( skp[3]=rho ) @ingroup magmasparse_ssygpuk ********************************************************************/ extern "C" magma_int_t magma_scgmerge_spmv1( magma_s_sparse_matrix A, float *d1, float *d2, float *d_d, float *d_z, float *skp ){ int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( (A.num_rows+local_block_size-1)/local_block_size ); dim3 Gs_next; int Ms = local_block_size * sizeof( float ); float *aux1 = d1, *aux2 = d2; int b = 1; if( A.storage_type == Magma_CSR ) magma_scgmerge_spmvcsr_kernel<<<Gs, Bs, Ms, magma_stream >>> ( A.num_rows, A.val, A.row, A.col, d_d, d_z, d1 ); else if( A.storage_type == Magma_ELLPACK ) magma_scgmerge_spmvellpack_kernel<<<Gs, Bs, Ms, magma_stream >>> ( A.num_rows, A.max_nnz_row, A.val, A.col, d_d, d_z, d1 ); else if( A.storage_type == Magma_ELL ) magma_scgmerge_spmvellpackt_kernel<<<Gs, Bs, Ms, magma_stream >>> ( A.num_rows, A.max_nnz_row, A.val, A.col, d_d, d_z, d1 ); else if( A.storage_type == Magma_SELLC || A.storage_type == Magma_SELLP ){ if( A.blocksize==256){ magma_scgmerge_spmvsellc_kernel<<<Gs, Bs, Ms, magma_stream >>> ( A.num_rows, A.blocksize, A. 
val, A.col, A.row, d_d, d_z, d1 ); } else printf("error: SELLC only for blocksize 256.\n"); } else if( A.storage_type == Magma_SELLP ){ int num_threadssellp = A.blocksize*A.alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threadssellp > 256 ) printf("error: too much shared memory requested.\n"); dim3 block( A.blocksize, A.alignment, 1); int dimgrid1 = sqrt(A.numblocks); int dimgrid2 = (A.numblocks + dimgrid1 -1 ) / dimgrid1; dim3 gridsellp( dimgrid1, dimgrid2, 1); int Mssellp = num_threadssellp * sizeof( float ); if( A.alignment == 8) magma_scgmerge_spmvsellpt_kernel_8 <<< gridsellp, block, Mssellp, magma_stream >>> ( A.num_rows, A.blocksize, A.alignment, A.val, A.col, A.row, d_d, d_z); else if( A.alignment == 16) magma_scgmerge_spmvsellpt_kernel_16 <<< gridsellp, block, Mssellp, magma_stream >>> ( A.num_rows, A.blocksize, A.alignment, A.val, A.col, A.row, d_d, d_z); else if( A.alignment == 32) magma_scgmerge_spmvsellpt_kernel_32 <<< gridsellp, block, Mssellp, magma_stream >>> ( A.num_rows, A.blocksize, A.alignment, A.val, A.col, A.row, d_d, d_z); else printf("error: alignment not supported.\n"); // in case of using SELLP, we can't efficiently merge the // dot product and the first reduction loop into the SpMV kernel // as the SpMV grid would result in low occupancy. magma_scgmerge_spmvellpackrt_kernel2<<<Gs, Bs, Ms, magma_stream >>> ( A.num_rows, d_z, d_d, d1 ); } else if( A.storage_type == Magma_ELLRT ){ // in case of using ELLRT, we need a different grid, assigning // threads_per_row processors to each row // the block size is num_threads // fixed values int num_blocks = ( (A.num_rows+A.blocksize-1)/A.blocksize); int num_threads = A.alignment*A.blocksize; int real_row_length = ((int)(A.max_nnz_row+A.alignment-1)/A.alignment) *A.alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threads > 256 ) printf("error: too much shared memory requested.\n"); int dimgrid1 = sqrt(num_blocks); int dimgrid2 = (num_blocks + dimgrid1 -1 ) / dimgrid1; dim3 gridellrt( dimgrid1, dimgrid2, 1); int Mellrt = A.alignment * A.blocksize * sizeof( float ); // printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms); if( A.alignment == 32 ){ magma_scgmerge_spmvellpackrt_kernel_32 <<< gridellrt, num_threads , Mellrt, magma_stream >>> ( A.num_rows, A.val, A.col, A.row, d_d, d_z, d1, A.alignment, real_row_length ); } else if( A.alignment == 16 ){ magma_scgmerge_spmvellpackrt_kernel_16 <<< gridellrt, num_threads , Mellrt, magma_stream >>> ( A.num_rows, A.val, A.col, A.row, d_d, d_z, d1, A.alignment, real_row_length ); } else if( A.alignment == 8 ){ magma_scgmerge_spmvellpackrt_kernel_8 <<< gridellrt, num_threads , Mellrt, magma_stream >>> ( A.num_rows, A.val, A.col, A.row, d_d, d_z, d1, A.alignment, real_row_length ); } else{ printf("error: alignment %d not supported.\n", A.alignment); exit(-1); } // in case of using ELLRT, we can't efficiently merge the // dot product and the first reduction loop into the SpMV kernel // as the SpMV grid would result in low occupancy. 
magma_scgmerge_spmvellpackrt_kernel2<<<Gs, Bs, Ms, magma_stream >>> ( A.num_rows, d_z, d_d, d1 ); } while( Gs.x > 1 ){ Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ; if( Gs_next.x == 1 ) Gs_next.x = 2; magma_scgreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2 >>> ( Gs.x, A.num_rows, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if( b ){ aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_scopyvector( 1, aux1, 1, skp+4, 1 ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); magma_scg_rhokernel<<<Gs2, Bs2, 0>>>( skp ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */ // updates x and r and computes the first part of the dot product r*r __global__ void magma_scgmerge_xrbeta_kernel( int n, float *x, float *r, float *d, float *z, float *skp, float *vtmp ){ extern __shared__ float temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; float rho = skp[3]; float mrho = MAGMA_S_MAKE( -1.0, 0.0)*rho; temp[ Idx ] = MAGMA_S_MAKE( 0.0, 0.0); if( i<n ){ x[i] += rho * d[i] ; r[i] += mrho * z[i]; temp[ Idx ] = r[i] * r[i]; } __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // kernel to handle scalars __global__ void //alpha = beta / gamma magma_scg_alphabetakernel( float *skp ){ int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ){ float tmp1 = skp[1]; skp[0] = tmp1/skp[2]; //printf("beta=%e\n", MAGMA_S_REAL(tmp1)); } } // update search Krylov vector d __global__ void magma_scg_d_kernel( int n, float *skp, float *r, float *d ){ int i = blockIdx.x * blockDim.x + threadIdx.x; float alpha = skp[0]; if( i<n ){ d[i] = r[i] + alpha * d[i]; } } /** Purpose ------- Merges the update of r and x with the dot product and performs then the update for the Krylov vector d Arguments --------- @param n int dimension n @param d1 float* temporary vector @param d2 float* temporary vector @param d_x float* input vector x @param d_r float* input/output vector r @param d_d float* input vector d @param d_z float* input vector z @param skp float* array for parameters @ingroup magmasparse_ssygpuk ********************************************************************/ extern "C" magma_int_t magma_scgmerge_xrbeta( int n, float *d1, float *d2, float *d_x, float *d_r, float *d_d, float *d_z, float *skp ){ int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( (n+local_block_size-1)/local_block_size ); dim3 Gs_next; int Ms = 2*local_block_size * sizeof( float ); float *aux1 
= d1, *aux2 = d2; int b = 1; magma_scgmerge_xrbeta_kernel<<<Gs, Bs, Ms>>> ( n, d_x, d_r, d_d, d_z, skp, d1); while( Gs.x > 1 ){ Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ; if( Gs_next.x == 1 ) Gs_next.x = 2; magma_scgreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2 >>> ( Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if( b ){ aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_scopyvector( 1, aux1, 1, skp+1, 1 ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); magma_scg_alphabetakernel<<<Gs2, Bs2, 0>>>( skp ); dim3 Bs3( local_block_size ); dim3 Gs3( (n+local_block_size-1)/local_block_size ); magma_scg_d_kernel<<<Gs3, Bs3, 0>>>( n, skp, d_r, d_d ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */
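/*
 * Illustrative sketch (not a MAGMA routine): every kernel above ends with the same
 * two-stage sum pattern -- a shared-memory tree reduction inside a 256-thread block,
 * then a host loop that keeps re-launching a reduction over the per-block partial
 * sums until one value remains (the "while( Gs.x > 1 )" loops). The standalone CUDA
 * sketch below shows that pattern in isolation, assuming float data and a fixed
 * block size of 256; block_sum_kernel and reduce_to_scalar are hypothetical names.
 * The MAGMA kernels additionally hand-unroll the last warp through a volatile
 * pointer; this sketch keeps the plain __syncthreads() loop for clarity.
 */
#include <cuda_runtime.h>

__global__ void block_sum_kernel( int n, const float *in, float *out ){
    __shared__ float temp[256];
    int Idx = threadIdx.x;
    int i = blockIdx.x * blockDim.x + Idx;
    temp[ Idx ] = ( i < n ) ? in[i] : 0.0f;          // pad the tail with zeros
    __syncthreads();
    for ( int s = blockDim.x/2; s > 0; s >>= 1 ){    // tree reduction in shared memory
        if ( Idx < s ) temp[ Idx ] += temp[ Idx + s ];
        __syncthreads();
    }
    if ( Idx == 0 ) out[ blockIdx.x ] = temp[ 0 ];   // one partial sum per block
}

// Host side: ping-pong between two scratch buffers until a single value is left.
float reduce_to_scalar( const float *d_in, float *d_buf1, float *d_buf2, int n ){
    int threads = 256;
    int blocks = ( n + threads - 1 ) / threads;
    block_sum_kernel<<< blocks, threads >>>( n, d_in, d_buf1 );
    float *src = d_buf1, *dst = d_buf2;
    while ( blocks > 1 ){                            // analogous to "while( Gs.x > 1 )"
        int m = blocks;
        blocks = ( m + threads - 1 ) / threads;
        block_sum_kernel<<< blocks, threads >>>( m, src, dst );
        float *tmp = src; src = dst; dst = tmp;      // swap the roles of the buffers
    }
    float result;
    cudaMemcpy( &result, src, sizeof(float), cudaMemcpyDeviceToHost );
    return result;
}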
0f7ca080034d188a48bd71e0eaf1821a2e040000.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> typedef unsigned int uint; uint max_threads = 1024; uint max_blocks = 65535; __global__ void bitonicSortStep(float *cudaArr, uint i, uint j) { uint tid = threadIdx.x + blockDim.x * blockIdx.x; uint mate = tid ^ j; if (tid < mate) { if((tid & i) == 0) { if(cudaArr[tid] > cudaArr[mate]) { float temp = cudaArr[tid]; cudaArr[tid] = cudaArr[mate]; cudaArr[mate] = temp; } } else { if(cudaArr[tid] < cudaArr[mate]) { float temp = cudaArr[tid]; cudaArr[tid] = cudaArr[mate]; cudaArr[mate] = temp; } } } } //len must be a power of 2 extern "C" void bitonicSort(float *cudaArr, uint len) { uint threads = max_threads; uint blocks = len / threads; if(len % threads != 0) blocks++; if(blocks > max_blocks) throw 1; for(uint i = 2; i <= len; i <<= 1) { for(uint j = i>>1; j > 0; j >>= 1) { hipLaunchKernelGGL(( bitonicSortStep), dim3(blocks), dim3(threads), 0, 0, cudaArr, i, j); hipDeviceSynchronize(); } } }
0f7ca080034d188a48bd71e0eaf1821a2e040000.cu
#include <cuda_runtime.h> typedef unsigned int uint; uint max_threads = 1024; uint max_blocks = 65535; __global__ void bitonicSortStep(float *cudaArr, uint i, uint j) { uint tid = threadIdx.x + blockDim.x * blockIdx.x; uint mate = tid ^ j; if (tid < mate) { if((tid & i) == 0) { if(cudaArr[tid] > cudaArr[mate]) { float temp = cudaArr[tid]; cudaArr[tid] = cudaArr[mate]; cudaArr[mate] = temp; } } else { if(cudaArr[tid] < cudaArr[mate]) { float temp = cudaArr[tid]; cudaArr[tid] = cudaArr[mate]; cudaArr[mate] = temp; } } } } //len must be a power of 2 extern "C" void bitonicSort(float *cudaArr, uint len) { uint threads = max_threads; uint blocks = len / threads; if(len % threads != 0) blocks++; if(blocks > max_blocks) throw 1; for(uint i = 2; i <= len; i <<= 1) { for(uint j = i>>1; j > 0; j >>= 1) { bitonicSortStep<<<blocks, threads>>>(cudaArr, i, j); cudaThreadSynchronize(); } } }
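/*
 * Usage sketch (not part of the original file): bitonicSort above requires len to be
 * a power of two and a device pointer, and bitonicSortStep launches whole 1024-thread
 * blocks without a bounds check, so the buffer should span at least one full block.
 * One conventional way to sort an arbitrary-length host array with it is to pad up to
 * a suitable power of two with +FLT_MAX so the padding sorts to the end; sortOnDevice
 * is a hypothetical wrapper and the sentinel choice is an assumption of this sketch.
 */
#include <cuda_runtime.h>
#include <cfloat>
#include <algorithm>
#include <vector>

extern "C" void bitonicSort(float *cudaArr, unsigned int len);   // defined above

void sortOnDevice(std::vector<float> &host)
{
    unsigned int len = 1024;                      // at least one full block of threads
    while (len < host.size()) len <<= 1;          // next power of two >= host.size()

    std::vector<float> padded(host);
    padded.resize(len, FLT_MAX);                  // sentinel values sort to the end

    float *d_arr = 0;
    cudaMalloc(&d_arr, len * sizeof(float));
    cudaMemcpy(d_arr, padded.data(), len * sizeof(float), cudaMemcpyHostToDevice);

    bitonicSort(d_arr, len);                      // ascending sort on the GPU

    cudaMemcpy(padded.data(), d_arr, len * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_arr);

    std::copy(padded.begin(), padded.begin() + host.size(), host.begin());
}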
b6eccb4a05f4a4761b907375ffd14c1ead5868f0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. 
// You should wrap your allocation and copying statements like we've done in the // code we're supplying you. Here is an example of the unsafe way to allocate // memory on the GPU: // // hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "utils.h" #include <stdio.h> #include <iostream> #define DEBUG (1) #define BLOCKDIMX (16) #define BLOCKDIMY (16) __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // TODO // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. 
int w_i = blockIdx.x * blockDim.x + threadIdx.x; int h_i = blockIdx.y * blockDim.y + threadIdx.y; int input_offset = h_i * numCols + w_i; if ( w_i >= numCols || h_i >= numRows) { return; } // first threads in every block copy the filter into shared memory // dynamic alloc shared memory // <<<Grid, Block, SharedMemSize>>> extern __shared__ float sfilter[]; if (DEBUG && w_i==0 && h_i==0) { printf("filterWidth %d \n", filterWidth); } // use few threads to initialize the shared memory if ( threadIdx.x < filterWidth && threadIdx.y < filterWidth){ int lin_index = threadIdx.x * filterWidth + threadIdx.y; sfilter[lin_index] = filter[lin_index]; } __syncthreads(); // if ( DEBUG && w_i == 0 && h_i ==0) // { // for( int row_delta=0; row_delta<filterWidth; row_delta++){ // for(int col_delta=0; col_delta<filterWidth; col_delta++){ // int offset = row_delta * filterWidth + col_delta; // float value = sfilter[offset]; // printf("%f ", value); // } // printf("\n"); // } // } // The following loop walks over all coefficients in the filter // if filter_width=3, row:-1,0,1, col:-1,0,1 float result = 0.0f; for( int row_delta=-filterWidth/2; row_delta<=filterWidth/2; row_delta++){ for(int col_delta=-filterWidth/2; col_delta<=filterWidth/2; col_delta++){ // Compute the coordinates of the value this coefficient applies to // Apply clamping to image boundaries int value_row = min(max(h_i + row_delta, 0), numRows - 1); int value_col = min(max(w_i + col_delta, 0), numCols - 1); // Compute the partial sum this value adds to the result when scaled by the // appropriate coefficient. int img_offset = value_row * numCols + value_col; int filter_offset = (row_delta + filterWidth / 2) * filterWidth + (col_delta + filterWidth / 2); float channel_value = static_cast<float>(inputChannel[img_offset]); float filter_coefficient = sfilter[filter_offset]; result = result + channel_value * filter_coefficient; } } outputChannel[input_offset] = result; } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // TODO // // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // Compute the thread's row and col int col = blockIdx.x * blockDim.x + threadIdx.x; // w int row = blockIdx.y * blockDim.y + threadIdx.y; // h int offset = row * numCols + col; if ( col >= numCols || row >= numRows){ return; } uchar4 rgba_pixel = inputImageRGBA[offset]; redChannel[offset] = rgba_pixel.x; greenChannel[offset] = rgba_pixel.y; blueChannel[offset] = rgba_pixel.z; if ( DEBUG && col == 0 && row ==0) { printf("At offset %d, Red %u, Green %u, Blue %u", offset, redChannel[offset], greenChannel[offset], blueChannel[offset]); } } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. 
__global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //TODO: //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with hipMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc size_t filtersize = sizeof(float) * filterWidth * filterWidth; checkCudaErrors(hipMalloc(&d_filter, filtersize)); //TODO: //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice); //Remember to use checkCudaErrors! checkCudaErrors(hipMemcpy(d_filter, h_filter, filtersize, hipMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { //TODO: Set reasonable block size (i.e., number of threads per block) const dim3 blockSize(BLOCKDIMX, BLOCKDIMY, 1); //TODO: //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and block size. const dim3 gridSize(1 + (numCols / blockSize.x), 1 + (numRows / blockSize.y), 1); if (DEBUG) { size_t num_pixels = numCols * numRows; printf("Image Size, Width: %zu, Height %zu, Pixels %zu\n", numCols, numRows, num_pixels); printf("Block Size, x:%u, y:%u, z:%u\n", blockSize.x, blockSize.y, blockSize.z); printf("Grid Size, x:%u, y:%u, z:%u\n", gridSize.x, gridSize.y, gridSize.z); } //TODO: Launch a kernel for separating the RGBA image into different color channels GpuTimer timer; timer.Start(); hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //TODO: Call your convolution kernel here 3 times, once for each color channel. timer.Stop(); std::cout<< "Separate Channel elapsed: " << timer.Elapsed() << "ms\n"; timer.Start(); int shared_size = filterWidth * filterWidth * sizeof(*d_filter); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), shared_size, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), shared_size, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), shared_size, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); // Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); timer.Stop(); std::cout<< "Gaussian blur x3 elapsed: " << timer.Elapsed() << "ms\n"; timer.Start(); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); timer.Stop(); std::cout<< "Combine Channel elapsed: " << timer.Elapsed() << "ms\n"; } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(hipFree(d_red)); checkCudaErrors(hipFree(d_green)); checkCudaErrors(hipFree(d_blue)); checkCudaErrors(hipFree(d_filter)); }
b6eccb4a05f4a4761b907375ffd14c1ead5868f0.cu
// Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. // You should wrap your allocation and copying statements like we've done in the // code we're supplying you. 
Here is an example of the unsafe way to allocate // memory on the GPU: // // cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "utils.h" #include <stdio.h> #include <iostream> #define DEBUG (1) #define BLOCKDIMX (16) #define BLOCKDIMY (16) __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // TODO // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. 
int w_i = blockIdx.x * blockDim.x + threadIdx.x; int h_i = blockIdx.y * blockDim.y + threadIdx.y; int input_offset = h_i * numCols + w_i; if ( w_i >= numCols || h_i >= numRows) { return; } // first threads in every block copy the filter into shared memory // dynamic alloc shared memory // <<<Grid, Block, SharedMemSize>>> extern __shared__ float sfilter[]; if (DEBUG && w_i==0 && h_i==0) { printf("filterWidth %d \n", filterWidth); } // use few threads to initialize the shared memory if ( threadIdx.x < filterWidth && threadIdx.y < filterWidth){ int lin_index = threadIdx.x * filterWidth + threadIdx.y; sfilter[lin_index] = filter[lin_index]; } __syncthreads(); // if ( DEBUG && w_i == 0 && h_i ==0) // { // for( int row_delta=0; row_delta<filterWidth; row_delta++){ // for(int col_delta=0; col_delta<filterWidth; col_delta++){ // int offset = row_delta * filterWidth + col_delta; // float value = sfilter[offset]; // printf("%f ", value); // } // printf("\n"); // } // } // The following loop walks over all coefficients in the filter // if filter_width=3, row:-1,0,1, col:-1,0,1 float result = 0.0f; for( int row_delta=-filterWidth/2; row_delta<=filterWidth/2; row_delta++){ for(int col_delta=-filterWidth/2; col_delta<=filterWidth/2; col_delta++){ // Compute the coordinates of the value this coefficient applies to // Apply clamping to image boundaries int value_row = min(max(h_i + row_delta, 0), numRows - 1); int value_col = min(max(w_i + col_delta, 0), numCols - 1); // Compute the partial sum this value adds to the result when scaled by the // appropriate coefficient. int img_offset = value_row * numCols + value_col; int filter_offset = (row_delta + filterWidth / 2) * filterWidth + (col_delta + filterWidth / 2); float channel_value = static_cast<float>(inputChannel[img_offset]); float filter_coefficient = sfilter[filter_offset]; result = result + channel_value * filter_coefficient; } } outputChannel[input_offset] = result; } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // TODO // // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // Compute the thread's row and col int col = blockIdx.x * blockDim.x + threadIdx.x; // w int row = blockIdx.y * blockDim.y + threadIdx.y; // h int offset = row * numCols + col; if ( col >= numCols || row >= numRows){ return; } uchar4 rgba_pixel = inputImageRGBA[offset]; redChannel[offset] = rgba_pixel.x; greenChannel[offset] = rgba_pixel.y; blueChannel[offset] = rgba_pixel.z; if ( DEBUG && col == 0 && row ==0) { printf("At offset %d, Red %u, Green %u, Blue %u", offset, redChannel[offset], greenChannel[offset], blueChannel[offset]); } } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. 
__global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //TODO: //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with cudaMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc size_t filtersize = sizeof(float) * filterWidth * filterWidth; checkCudaErrors(cudaMalloc(&d_filter, filtersize)); //TODO: //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice); //Remember to use checkCudaErrors! checkCudaErrors(cudaMemcpy(d_filter, h_filter, filtersize, cudaMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { //TODO: Set reasonable block size (i.e., number of threads per block) const dim3 blockSize(BLOCKDIMX, BLOCKDIMY, 1); //TODO: //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and block size. const dim3 gridSize(1 + (numCols / blockSize.x), 1 + (numRows / blockSize.y), 1); if (DEBUG) { size_t num_pixels = numCols * numRows; printf("Image Size, Width: %zu, Height %zu, Pixels %zu\n", numCols, numRows, num_pixels); printf("Block Size, x:%u, y:%u, z:%u\n", blockSize.x, blockSize.y, blockSize.z); printf("Grid Size, x:%u, y:%u, z:%u\n", gridSize.x, gridSize.y, gridSize.z); } //TODO: Launch a kernel for separating the RGBA image into different color channels GpuTimer timer; timer.Start(); separateChannels<<<gridSize, blockSize>>>( d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); //TODO: Call your convolution kernel here 3 times, once for each color channel. timer.Stop(); std::cout<< "Separate Channel elapsed: " << timer.Elapsed() << "ms\n"; timer.Start(); int shared_size = filterWidth * filterWidth * sizeof(*d_filter); gaussian_blur<<<gridSize, blockSize, shared_size>>>( d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); gaussian_blur<<<gridSize, blockSize, shared_size>>>( d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); gaussian_blur<<<gridSize, blockSize, shared_size>>>( d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); // Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); timer.Stop(); std::cout<< "Gaussian blur x3 elapsed: " << timer.Elapsed() << "ms\n"; timer.Start(); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. recombineChannels<<<gridSize, blockSize>>>(d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); timer.Stop(); std::cout<< "Combine Channel elapsed: " << timer.Elapsed() << "ms\n"; } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(cudaFree(d_red)); checkCudaErrors(cudaFree(d_green)); checkCudaErrors(cudaFree(d_blue)); checkCudaErrors(cudaFree(d_filter)); }
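/*
 * Illustrative sketch: the h_filter passed to allocateMemoryAndCopyToGPU above is
 * built by the course's host code, which is not shown in this file. A typical
 * normalized 2D Gaussian filter of odd width can be generated as below; the width
 * and sigma in the usage comment are assumptions of this sketch, not values taken
 * from the assignment, and makeGaussianFilter is a hypothetical helper.
 */
#include <cmath>
#include <vector>

std::vector<float> makeGaussianFilter(int filterWidth, float sigma)
{
    std::vector<float> filter(filterWidth * filterWidth);
    float sum = 0.0f;
    int half = filterWidth / 2;
    for (int r = -half; r <= half; ++r) {
        for (int c = -half; c <= half; ++c) {
            float v = expf(-(r * r + c * c) / (2.0f * sigma * sigma));
            filter[(r + half) * filterWidth + (c + half)] = v;
            sum += v;
        }
    }
    for (size_t i = 0; i < filter.size(); ++i) {
        filter[i] /= sum;                         // normalize so the weights sum to 1
    }
    return filter;
}

// e.g. std::vector<float> h_filter = makeGaussianFilter(9, 2.0f);
//      allocateMemoryAndCopyToGPU(numRows, numCols, h_filter.data(), 9);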
6f3a16013b7e3e4681ffe1e13d1dfde600c0f7cc.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdlib> #include <cstdio> #include <string> #include <iostream> #include <color_spinor_field.h> #include <clover_field.h> // these control the Wilson-type actions #ifdef GPU_WILSON_DIRAC //#define DIRECT_ACCESS_LINK //#define DIRECT_ACCESS_WILSON_SPINOR //#define DIRECT_ACCESS_WILSON_ACCUM //#define DIRECT_ACCESS_WILSON_INTER //#define DIRECT_ACCESS_WILSON_PACK_SPINOR //#define DIRECT_ACCESS_CLOVER #endif // GPU_WILSON_DIRAC #include <quda_internal.h> #include <dslash_quda.h> #include <sys/time.h> #include <blas_quda.h> #include <face_quda.h> #include <inline_ptx.h> namespace quda { namespace domainwall { #undef GPU_STAGGERED_DIRAC #include <dslash_constants.h> #include <dslash_textures.h> #include <dslash_index.cuh> // Enable shared memory dslash for Fermi architecture //#define SHARED_WILSON_DSLASH //#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access #ifdef GPU_DOMAIN_WALL_DIRAC #include <dw_dslash_def.h> // Domain Wall kernels #endif #ifndef DSLASH_SHARED_FLOATS_PER_THREAD #define DSLASH_SHARED_FLOATS_PER_THREAD 0 #endif #include <dslash_quda.cuh> } // declare the dslash events #include <dslash_events.cuh> using namespace domainwall; #ifdef GPU_DOMAIN_WALL_DIRAC template <typename sFloat, typename gFloat> class DomainWallDslashCuda : public DslashCuda { private: const gFloat *gauge0, *gauge1; const double mferm; const double a; bool checkGrid(TuneParam &param) const { if (param.grid.x > deviceProp.maxGridSize[0] || param.grid.y > deviceProp.maxGridSize[1]) { warningQuda("Autotuner is skipping blockDim=(%u,%u,%u), gridDim=(%u,%u,%u) because lattice volume is too large", param.block.x, param.block.y, param.block.z, param.grid.x, param.grid.y, param.grid.z); return false; } else { return true; } } protected: bool advanceBlockDim(TuneParam &param) const { const unsigned int max_shared = 16384; // FIXME: use deviceProp.sharedMemPerBlock; const int step[2] = { deviceProp.warpSize, 1 }; bool advance[2] = { false, false }; // first try to advance block.x param.block.x += step[0]; if (param.block.x > deviceProp.maxThreadsDim[0] || sharedBytesPerThread()*param.block.x*param.block.y > max_shared) { advance[0] = false; param.block.x = step[0]; // reset block.x } else { advance[0] = true; // successfully advanced block.x } if (!advance[0]) { // if failed to advance block.x, now try block.y param.block.y += step[1]; if (param.block.y > in->X(4) || sharedBytesPerThread()*param.block.x*param.block.y > max_shared) { advance[1] = false; param.block.y = step[1]; // reset block.x } else { advance[1] = true; // successfully advanced block.y } } if (advance[0] || advance[1]) { param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x, (in->X(4)+param.block.y-1) / param.block.y, 1); bool advance = true; if (!checkGrid(param)) advance = advanceBlockDim(param); return advance; } else { return false; } } unsigned int sharedBytesPerThread() const { return 0; } public: DomainWallDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1, const QudaReconstructType reconstruct, const cudaColorSpinorField *in, const cudaColorSpinorField *x, const double mferm, const double a, const int dagger) : DslashCuda(out, in, x, reconstruct, dagger), gauge0(gauge0), gauge1(gauge1), mferm(mferm), a(a) { bindSpinorTex<sFloat>(in, out, x); } virtual ~DomainWallDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); } virtual void initTuneParam(TuneParam &param) const { Tunable::initTuneParam(param); param.grid = dim3( 
(dslashParam.threads+param.block.x-1) / param.block.x, (in->X(4)+param.block.y-1) / param.block.y, 1); bool ok = true; if (!checkGrid(param)) ok = advanceBlockDim(param); if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim"); } /** sets default values for when tuning is disabled */ virtual void defaultTuneParam(TuneParam &param) const { Tunable::defaultTuneParam(param); param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x, (in->X(4)+param.block.y-1) / param.block.y, 1); bool ok = true; if (!checkGrid(param)) ok = advanceBlockDim(param); if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim"); } void apply(const hipStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); DSLASH(domainWallDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam, (sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, (sFloat*)in->V(), (float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a); } long long flops() const { long long flops = DslashCuda::flops(); switch(dslashParam.kernel_type) { case EXTERIOR_KERNEL_X: case EXTERIOR_KERNEL_Y: case EXTERIOR_KERNEL_Z: case EXTERIOR_KERNEL_T: case EXTERIOR_KERNEL_ALL: break; case INTERIOR_KERNEL: int Ls = in->X(4); long long bulk = (Ls-2)*(in->VolumeCB()/Ls); long long wall = 2*(in->VolumeCB()/Ls); flops += 96ll*bulk + 120ll*wall; break; } return flops; } virtual long long bytes() const { bool isHalf = in->Precision() == sizeof(short) ? true : false; int spinor_bytes = 2 * in->Ncolor() * in->Nspin() * in->Precision() + (isHalf ? sizeof(float) : 0); long long bytes = DslashCuda::bytes(); switch(dslashParam.kernel_type) { case EXTERIOR_KERNEL_X: case EXTERIOR_KERNEL_Y: case EXTERIOR_KERNEL_Z: case EXTERIOR_KERNEL_T: case EXTERIOR_KERNEL_ALL: break; case INTERIOR_KERNEL: bytes += 2 * spinor_bytes * in->VolumeCB(); break; } return bytes; } }; #endif // GPU_DOMAIN_WALL_DIRAC #include <dslash_policy.cuh> void domainWallDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const cudaColorSpinorField *in, const int parity, const int dagger, const cudaColorSpinorField *x, const double &m_f, const double &k2, const int *commOverride, TimeProfile &profile, const QudaDslashPolicy &dslashPolicy) { inSpinor = (cudaColorSpinorField*)in; // EVIL dslashParam.parity = parity; #ifdef GPU_DOMAIN_WALL_DIRAC //currently splitting in space-time is impelemented: int dirs = 4; int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code for(int i = 0;i < dirs; i++){ dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride()); dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride(); dslashParam.commDim[i] = (!commOverride[i]) ? 
0 : commDimPartitioned(i); // switch off comms if override = 0 } void *gauge0, *gauge1; bindGaugeTex(gauge, parity, &gauge0, &gauge1); if (in->Precision() != gauge.Precision()) errorQuda("Mixing gauge and spinor precision not supported"); DslashCuda *dslash = 0; size_t regSize = sizeof(float); if (in->Precision() == QUDA_DOUBLE_PRECISION) { dslash = new DomainWallDslashCuda<double2,double2>(out, (double2*)gauge0, (double2*)gauge1, gauge.Reconstruct(), in, x, m_f, k2, dagger); regSize = sizeof(double); } else if (in->Precision() == QUDA_SINGLE_PRECISION) { dslash = new DomainWallDslashCuda<float4,float4>(out, (float4*)gauge0, (float4*)gauge1, gauge.Reconstruct(), in, x, m_f, k2, dagger); } else if (in->Precision() == QUDA_HALF_PRECISION) { dslash = new DomainWallDslashCuda<short4,short4>(out, (short4*)gauge0, (short4*)gauge1, gauge.Reconstruct(), in, x, m_f, k2, dagger); } // the parameters passed to dslashCuda must be 4-d volume and 3-d // faces because Ls is added as the y-dimension in thread space int ghostFace[QUDA_MAX_DIM]; for (int i=0; i<4; i++) ghostFace[i] = in->GhostFace()[i] / in->X(4); #ifndef GPU_COMMS DslashPolicyImp* dslashImp = DslashFactory::create(dslashPolicy); #else DslashPolicyImp* dslashImp = DslashFactory::create(QUDA_GPU_COMMS_DSLASH); #endif (*dslashImp)(*dslash, const_cast<cudaColorSpinorField*>(in), regSize, parity, dagger, in->Volume()/in->X(4), ghostFace, profile); delete dslashImp; delete dslash; unbindGaugeTex(gauge); checkCudaError(); #else errorQuda("Domain wall dslash has not been built"); #endif } }
6f3a16013b7e3e4681ffe1e13d1dfde600c0f7cc.cu
#include <cstdlib> #include <cstdio> #include <string> #include <iostream> #include <color_spinor_field.h> #include <clover_field.h> // these control the Wilson-type actions #ifdef GPU_WILSON_DIRAC //#define DIRECT_ACCESS_LINK //#define DIRECT_ACCESS_WILSON_SPINOR //#define DIRECT_ACCESS_WILSON_ACCUM //#define DIRECT_ACCESS_WILSON_INTER //#define DIRECT_ACCESS_WILSON_PACK_SPINOR //#define DIRECT_ACCESS_CLOVER #endif // GPU_WILSON_DIRAC #include <quda_internal.h> #include <dslash_quda.h> #include <sys/time.h> #include <blas_quda.h> #include <face_quda.h> #include <inline_ptx.h> namespace quda { namespace domainwall { #undef GPU_STAGGERED_DIRAC #include <dslash_constants.h> #include <dslash_textures.h> #include <dslash_index.cuh> // Enable shared memory dslash for Fermi architecture //#define SHARED_WILSON_DSLASH //#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access #ifdef GPU_DOMAIN_WALL_DIRAC #include <dw_dslash_def.h> // Domain Wall kernels #endif #ifndef DSLASH_SHARED_FLOATS_PER_THREAD #define DSLASH_SHARED_FLOATS_PER_THREAD 0 #endif #include <dslash_quda.cuh> } // declare the dslash events #include <dslash_events.cuh> using namespace domainwall; #ifdef GPU_DOMAIN_WALL_DIRAC template <typename sFloat, typename gFloat> class DomainWallDslashCuda : public DslashCuda { private: const gFloat *gauge0, *gauge1; const double mferm; const double a; bool checkGrid(TuneParam &param) const { if (param.grid.x > deviceProp.maxGridSize[0] || param.grid.y > deviceProp.maxGridSize[1]) { warningQuda("Autotuner is skipping blockDim=(%u,%u,%u), gridDim=(%u,%u,%u) because lattice volume is too large", param.block.x, param.block.y, param.block.z, param.grid.x, param.grid.y, param.grid.z); return false; } else { return true; } } protected: bool advanceBlockDim(TuneParam &param) const { const unsigned int max_shared = 16384; // FIXME: use deviceProp.sharedMemPerBlock; const int step[2] = { deviceProp.warpSize, 1 }; bool advance[2] = { false, false }; // first try to advance block.x param.block.x += step[0]; if (param.block.x > deviceProp.maxThreadsDim[0] || sharedBytesPerThread()*param.block.x*param.block.y > max_shared) { advance[0] = false; param.block.x = step[0]; // reset block.x } else { advance[0] = true; // successfully advanced block.x } if (!advance[0]) { // if failed to advance block.x, now try block.y param.block.y += step[1]; if (param.block.y > in->X(4) || sharedBytesPerThread()*param.block.x*param.block.y > max_shared) { advance[1] = false; param.block.y = step[1]; // reset block.x } else { advance[1] = true; // successfully advanced block.y } } if (advance[0] || advance[1]) { param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x, (in->X(4)+param.block.y-1) / param.block.y, 1); bool advance = true; if (!checkGrid(param)) advance = advanceBlockDim(param); return advance; } else { return false; } } unsigned int sharedBytesPerThread() const { return 0; } public: DomainWallDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1, const QudaReconstructType reconstruct, const cudaColorSpinorField *in, const cudaColorSpinorField *x, const double mferm, const double a, const int dagger) : DslashCuda(out, in, x, reconstruct, dagger), gauge0(gauge0), gauge1(gauge1), mferm(mferm), a(a) { bindSpinorTex<sFloat>(in, out, x); } virtual ~DomainWallDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); } virtual void initTuneParam(TuneParam &param) const { Tunable::initTuneParam(param); param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x, 
(in->X(4)+param.block.y-1) / param.block.y, 1); bool ok = true; if (!checkGrid(param)) ok = advanceBlockDim(param); if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim"); } /** sets default values for when tuning is disabled */ virtual void defaultTuneParam(TuneParam &param) const { Tunable::defaultTuneParam(param); param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x, (in->X(4)+param.block.y-1) / param.block.y, 1); bool ok = true; if (!checkGrid(param)) ok = advanceBlockDim(param); if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim"); } void apply(const cudaStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); DSLASH(domainWallDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam, (sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, (sFloat*)in->V(), (float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a); } long long flops() const { long long flops = DslashCuda::flops(); switch(dslashParam.kernel_type) { case EXTERIOR_KERNEL_X: case EXTERIOR_KERNEL_Y: case EXTERIOR_KERNEL_Z: case EXTERIOR_KERNEL_T: case EXTERIOR_KERNEL_ALL: break; case INTERIOR_KERNEL: int Ls = in->X(4); long long bulk = (Ls-2)*(in->VolumeCB()/Ls); long long wall = 2*(in->VolumeCB()/Ls); flops += 96ll*bulk + 120ll*wall; break; } return flops; } virtual long long bytes() const { bool isHalf = in->Precision() == sizeof(short) ? true : false; int spinor_bytes = 2 * in->Ncolor() * in->Nspin() * in->Precision() + (isHalf ? sizeof(float) : 0); long long bytes = DslashCuda::bytes(); switch(dslashParam.kernel_type) { case EXTERIOR_KERNEL_X: case EXTERIOR_KERNEL_Y: case EXTERIOR_KERNEL_Z: case EXTERIOR_KERNEL_T: case EXTERIOR_KERNEL_ALL: break; case INTERIOR_KERNEL: bytes += 2 * spinor_bytes * in->VolumeCB(); break; } return bytes; } }; #endif // GPU_DOMAIN_WALL_DIRAC #include <dslash_policy.cuh> void domainWallDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const cudaColorSpinorField *in, const int parity, const int dagger, const cudaColorSpinorField *x, const double &m_f, const double &k2, const int *commOverride, TimeProfile &profile, const QudaDslashPolicy &dslashPolicy) { inSpinor = (cudaColorSpinorField*)in; // EVIL dslashParam.parity = parity; #ifdef GPU_DOMAIN_WALL_DIRAC //currently splitting in space-time is impelemented: int dirs = 4; int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code for(int i = 0;i < dirs; i++){ dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride()); dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride(); dslashParam.commDim[i] = (!commOverride[i]) ? 
0 : commDimPartitioned(i); // switch off comms if override = 0 } void *gauge0, *gauge1; bindGaugeTex(gauge, parity, &gauge0, &gauge1); if (in->Precision() != gauge.Precision()) errorQuda("Mixing gauge and spinor precision not supported"); DslashCuda *dslash = 0; size_t regSize = sizeof(float); if (in->Precision() == QUDA_DOUBLE_PRECISION) { dslash = new DomainWallDslashCuda<double2,double2>(out, (double2*)gauge0, (double2*)gauge1, gauge.Reconstruct(), in, x, m_f, k2, dagger); regSize = sizeof(double); } else if (in->Precision() == QUDA_SINGLE_PRECISION) { dslash = new DomainWallDslashCuda<float4,float4>(out, (float4*)gauge0, (float4*)gauge1, gauge.Reconstruct(), in, x, m_f, k2, dagger); } else if (in->Precision() == QUDA_HALF_PRECISION) { dslash = new DomainWallDslashCuda<short4,short4>(out, (short4*)gauge0, (short4*)gauge1, gauge.Reconstruct(), in, x, m_f, k2, dagger); } // the parameters passed to dslashCuda must be 4-d volume and 3-d // faces because Ls is added as the y-dimension in thread space int ghostFace[QUDA_MAX_DIM]; for (int i=0; i<4; i++) ghostFace[i] = in->GhostFace()[i] / in->X(4); #ifndef GPU_COMMS DslashPolicyImp* dslashImp = DslashFactory::create(dslashPolicy); #else DslashPolicyImp* dslashImp = DslashFactory::create(QUDA_GPU_COMMS_DSLASH); #endif (*dslashImp)(*dslash, const_cast<cudaColorSpinorField*>(in), regSize, parity, dagger, in->Volume()/in->X(4), ghostFace, profile); delete dslashImp; delete dslash; unbindGaugeTex(gauge); checkCudaError(); #else errorQuda("Domain wall dslash has not been built"); #endif } }
15a2527ca269cb759d5f7e356a07235198519af6.hip
// !!! This is a file automatically generated by hipify!!! #include "stdio.h" #include "hip/hip_runtime.h" extern "C" { #include "fc_layer_kernels.h" } __global__ void fc_kernel(float *in, float *weights, float *out) { const int tx = threadIdx.x; float inputv = 0; for (int i = 0; i < 12; i += 1) for (int j = 0; j < 12; j += 1) for (int z = 0; z < 8; z += 1) { int in_idx = (z * 12 * 12) + (j * 12) + i; int weight_idx = (12 * 12 * 8 * tx) + in_idx; inputv += in[in_idx] * weights[weight_idx]; } float result = 1.0f / (1.0f + exp(-inputv)); // activator function out[tx] = result; __syncthreads(); } void activate_fc_gpu(float *in, float *weights, float *out) { float *device_in; float *device_weights; float *device_out; hipMalloc((void **) &device_in, 8 * 12 * 12 * sizeof(float)); hipMalloc((void **) &device_weights, 8 * 12 * 12 * 10 * sizeof(float)); hipMalloc((void **) &device_out, 10 * sizeof(float)); hipMemcpy(device_in, in, 8 * 12 * 12 * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(device_weights, weights, 8 * 12 * 12 * 10 * sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( fc_kernel), dim3(1), dim3(10), 0, 0, device_in, device_weights, device_out); hipDeviceSynchronize(); hipMemcpy(out, device_out, sizeof(float) * 10, hipMemcpyDeviceToHost); }
15a2527ca269cb759d5f7e356a07235198519af6.cu
#include "stdio.h" #include "cuda.h" extern "C" { #include "fc_layer_kernels.h" } __global__ void fc_kernel(float *in, float *weights, float *out) { const int tx = threadIdx.x; float inputv = 0; for (int i = 0; i < 12; i += 1) for (int j = 0; j < 12; j += 1) for (int z = 0; z < 8; z += 1) { int in_idx = (z * 12 * 12) + (j * 12) + i; int weight_idx = (12 * 12 * 8 * tx) + in_idx; inputv += in[in_idx] * weights[weight_idx]; } float result = 1.0f / (1.0f + exp(-inputv)); // activator function out[tx] = result; __syncthreads(); } void activate_fc_gpu(float *in, float *weights, float *out) { float *device_in; float *device_weights; float *device_out; cudaMalloc((void **) &device_in, 8 * 12 * 12 * sizeof(float)); cudaMalloc((void **) &device_weights, 8 * 12 * 12 * 10 * sizeof(float)); cudaMalloc((void **) &device_out, 10 * sizeof(float)); cudaMemcpy(device_in, in, 8 * 12 * 12 * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(device_weights, weights, 8 * 12 * 12 * 10 * sizeof(float), cudaMemcpyHostToDevice); fc_kernel<<<1, 10>>>(device_in, device_weights, device_out); cudaDeviceSynchronize(); cudaMemcpy(out, device_out, sizeof(float) * 10, cudaMemcpyDeviceToHost); }
7cf6b1185ecc9dcefb7d39952650e154fc6f6cfe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <iostream> //#include <hip/hip_cooperative_groups.h> #include <math.h> #include <string.h> #include <fstream> #include <sstream> //#include <bits/stdc++.h> //#include <stdlib.h> //# <time.h> using namespace std; //using namespace cooperative_groups; /***DEFINING THE DEFINES FOR THE ARRAY INDICES****************************/ //#define N 128 #define C 3 #define H 227 #define W 227 #define R 11 #define S 11 #define M 96 #define E 55 #define F 55 #define U 4 __global__ void red_ch(float* d_r, float* d_o, int num_ch, int num_img, int num_wt, int height, int width) { int row = threadIdx.y; int col = threadIdx.x; for (int y=0;y<2;y++){ for (int x=0;x<2;x++){ float red_sum = 0; for(int i=0; i<num_ch; i++) { //if((2*row+y<height)&&(2*col+x<width)) red_sum += d_o[i*(num_wt*num_img*height*width)+blockIdx.x*num_wt*height*width+blockIdx.y*height*width+(2*row+y)*width+(2*col+x)] ; } if((2*row+y<height)&&(2*col+x<width)) d_r[blockIdx.x*num_wt*height*width+blockIdx.y*height*width+(2*row+y)*width+(2*col+x)] = red_sum; } }} __global__ void ew_gpu_mmul(float* d_o, float* d_i, float* d_w, int width, int height, int stride, int ip_height, int wt_width, int num_wt,int num_img, int num_ch) {//float prod=0; int row = threadIdx.y; int col = threadIdx.x; __shared__ float s_w[R*S]; if(row*blockDim.x+col<R*S) { s_w[row*blockDim.x+col] = d_w[blockIdx.y*num_ch*wt_width*wt_width+blockIdx.z*wt_width*wt_width+(row*blockDim.x+col)]; } __syncthreads(); for (int y=0; y<2; y++){ for (int x=0; x<2; x++){ float prod = 0; for (int i=0; i<wt_width; i++){ for (int j=0; j<wt_width; j++){ float ip =d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+(stride*(2*row+y)+i)*ip_height+(stride*(2*col+x)+j)]; // float wt = d_w[blockIdx.y*num_ch*wt_width*wt_width+blockIdx.z*wt_width*wt_width+(i*wt_width+j)]; prod += ip*s_w[(i*wt_width+j)]; __syncthreads(); } } if((2*row+y<height)&&(2*col+x<width)) {if(prod >=0) d_o[blockIdx.z*(num_wt*num_img*height*width)+blockIdx.x*num_wt*height*width+blockIdx.y*height*width+(2*row+y)*width+(2*col+x)] = prod; } } } if(row*blockDim.x+col < R*S) { s_w[(row*blockDim.x+col)] = 0; } __syncthreads(); } void element_wise_mmul(float* output, float* input, float* weight, int batch_size) { int x,y,i,j,m,n,k; for(n=0; n<batch_size; n++){ for (m=0 ; m<M; m++){ for (x=0; x<F; x++){ for(y=0; y<E; y++){ // OP[x][y] = 0; // adding bias to output for (i=0; i<R; i++){ for (j=0; j<S; j++){ for(k=0; k<C; k++){ float ip = input[n*C*H*W+k*H*W+(U*x+i)*H+(U*y+j)]; float wt = weight[m*C*R*S+k*R*S+i*S+j]; float prod = ip*wt; if(prod>=0) output[n*E*F*M+m*E*F+x*E+y] += prod; //OP[x][y] += IP[U*x+i][U*y+j]*WT[i][j]; }} } } } } } } int main(int argc, char* argv[]) { int batch_size = atoi(argv[1]); /*************INITALIZING MATRICES*********************************/ float *IP = (float*) malloc(batch_size*C*H*W*sizeof(float)); //float IP[H][W]; float *OP = (float*) malloc(batch_size*M*F*E*sizeof(float)); //float OP[F][E]; float *OPG = (float*) malloc(batch_size*M*F*E*sizeof(float)); float *WT = (float*) malloc(M*C*R*S*sizeof(float)); //float WT[R][S]; float* d_o; float* d_i; float* d_w; float* d_r; //clock_t cpu_start, gpu_start, cpu_end, gpu_end; //int a,b,c,d; int c,d,m,n,k; /* WEIGHT MATRIX*/ for (m=0; m<M; m++){ for(k=0;k<C;k++){ for (c=0; c<R; c++){ for(d=0; d<S; d++){ //WT[c][d] = 2.0; //WT[m*C*R*S+k*R*S+c*S+d] = (int)k+1; WT[m*C*R*S+k*R*S+c*S+d] = (float)rand()/(float)(RAND_MAX+1.0); } } } } 
/*INITIALIZING OUTPUT MATRIX*/ for (n=0; n<batch_size;n++){ for (m=0; m<M; m++){ for (c=0; c<F; c++){ for(d=0; d<E; d++){ //OP[c][d] = 0; OP[n*M*F*E+m*F*E+c*E+d] = 0; } } } } /*INITIALIZING INPUT MATRIX*/ for (n=0; n<batch_size; n++){ for(k=0;k<C;k++){ for (c=0; c<H; c++){ for(d=0; d<W; d++){ // IP[c][d] = (a+b+c+d); //if ((c<=1) || (d<=1) || (c>=29) || (d>=29)) //IP[n*C*H*W+k*H*W+c*W+d] = 0; //else IP[n*C*H*W+k*H*W+c*W+d] = (float)rand()/(RAND_MAX+1.0); } } } } if(hipSuccess != hipMalloc((void**) &d_i,batch_size*C*H*W*sizeof(float))) { printf("error in d_i malloc\n"); } hipMemcpy(d_i, IP, batch_size*C*H*W*sizeof(float), hipMemcpyHostToDevice); if(hipSuccess != hipMalloc((void**) &d_w, M*C*R*S*sizeof(float))) { printf("error in d_w malloc\n"); } hipMemcpy(d_w, WT, M*C*R*S*sizeof(float), hipMemcpyHostToDevice); if(hipSuccess != hipMalloc((void**) &d_o,(long int)C*batch_size*M*E*F*sizeof(float))) { printf("error in d_o malloc\n"); } if(hipSuccess != hipMalloc((void**) &d_r,batch_size*M*E*F*sizeof(float))) { printf("error in d_r malloc\n"); } //cpu_start = clock(); //element_wise_mmul(OP, IP, WT, batch_size); //cpu_end = clock(); dim3 dimGrid(batch_size,96,3); dim3 dimBlock(28,28,1); dim3 dimGridRed(batch_size,96,1); dim3 dimBlockRed(28,28,1); hipLaunchKernelGGL((ew_gpu_mmul), dim3(dimGrid), dim3(dimBlock), 0, 0, d_o,d_i,d_w,55,55,4,227,11,96,batch_size,3); hipDeviceSynchronize(); hipLaunchKernelGGL((red_ch), dim3(dimGridRed), dim3(dimBlockRed), 0, 0, d_r,d_o,3,batch_size,96,55,55); hipMemcpy(OPG,d_r,(long int)batch_size*M*E*F*sizeof(float), hipMemcpyDeviceToHost); /**print outputs**/ //int e,f,g,h; int g,h,s,u,t; float max_error = 0; string filename = "layer_1_"+to_string(batch_size); ifstream fin(filename.c_str()); string line ; //for (t=0;t<C;t++){ for (u=0;u<batch_size;u++){ for (s=0;s<M;s++){ for (g=0; g<F; g++){ for(h=0; h<E; h++){ getline(fin,line); float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-atof(line.c_str())); //float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h]); if(error > max_error) max_error = error; // printf("the output is %f for index %d, %d,%d,%d.\n",OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h); // printf("diff CPU and GPU is %f for index %d,%d,%d,%d.\n", OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h); // printf("the output from GPU is %f for index,%d,%d,%d,%d.\n",OPG[u*M*F*E+s*E*F+g*E+h],u,s,g,h); } } } } fin.close(); printf("max error is %f\n", max_error); //} //cout<<"time taken by cpu call is "<<((double)(cpu_end-cpu_start))/CLOCKS_PER_SEC<<"secs"<<endl; //cout<<"time taken by gpu call is "<<((double)(gpu_end-gpu_start))/CLOCKS_PER_SEC<<"secs"<<endl; hipFree(d_o); hipFree(d_i); hipFree(d_w); hipFree(d_r); free(OPG); free(IP); free(WT); free(OP); return 0; }
7cf6b1185ecc9dcefb7d39952650e154fc6f6cfe.cu
#include <stdio.h> #include <iostream> //#include <cooperative_groups.h> #include <math.h> #include <string.h> #include <fstream> #include <sstream> //#include <bits/stdc++.h> //#include <stdlib.h> //# <time.h> using namespace std; //using namespace cooperative_groups; /***DEFINING THE DEFINES FOR THE ARRAY INDICES****************************/ //#define N 128 #define C 3 #define H 227 #define W 227 #define R 11 #define S 11 #define M 96 #define E 55 #define F 55 #define U 4 __global__ void red_ch(float* d_r, float* d_o, int num_ch, int num_img, int num_wt, int height, int width) { int row = threadIdx.y; int col = threadIdx.x; for (int y=0;y<2;y++){ for (int x=0;x<2;x++){ float red_sum = 0; for(int i=0; i<num_ch; i++) { //if((2*row+y<height)&&(2*col+x<width)) red_sum += d_o[i*(num_wt*num_img*height*width)+blockIdx.x*num_wt*height*width+blockIdx.y*height*width+(2*row+y)*width+(2*col+x)] ; } if((2*row+y<height)&&(2*col+x<width)) d_r[blockIdx.x*num_wt*height*width+blockIdx.y*height*width+(2*row+y)*width+(2*col+x)] = red_sum; } }} __global__ void ew_gpu_mmul(float* d_o, float* d_i, float* d_w, int width, int height, int stride, int ip_height, int wt_width, int num_wt,int num_img, int num_ch) {//float prod=0; int row = threadIdx.y; int col = threadIdx.x; __shared__ float s_w[R*S]; if(row*blockDim.x+col<R*S) { s_w[row*blockDim.x+col] = d_w[blockIdx.y*num_ch*wt_width*wt_width+blockIdx.z*wt_width*wt_width+(row*blockDim.x+col)]; } __syncthreads(); for (int y=0; y<2; y++){ for (int x=0; x<2; x++){ float prod = 0; for (int i=0; i<wt_width; i++){ for (int j=0; j<wt_width; j++){ float ip =d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+(stride*(2*row+y)+i)*ip_height+(stride*(2*col+x)+j)]; // float wt = d_w[blockIdx.y*num_ch*wt_width*wt_width+blockIdx.z*wt_width*wt_width+(i*wt_width+j)]; prod += ip*s_w[(i*wt_width+j)]; __syncthreads(); } } if((2*row+y<height)&&(2*col+x<width)) {if(prod >=0) d_o[blockIdx.z*(num_wt*num_img*height*width)+blockIdx.x*num_wt*height*width+blockIdx.y*height*width+(2*row+y)*width+(2*col+x)] = prod; } } } if(row*blockDim.x+col < R*S) { s_w[(row*blockDim.x+col)] = 0; } __syncthreads(); } void element_wise_mmul(float* output, float* input, float* weight, int batch_size) { int x,y,i,j,m,n,k; for(n=0; n<batch_size; n++){ for (m=0 ; m<M; m++){ for (x=0; x<F; x++){ for(y=0; y<E; y++){ // OP[x][y] = 0; // adding bias to output for (i=0; i<R; i++){ for (j=0; j<S; j++){ for(k=0; k<C; k++){ float ip = input[n*C*H*W+k*H*W+(U*x+i)*H+(U*y+j)]; float wt = weight[m*C*R*S+k*R*S+i*S+j]; float prod = ip*wt; if(prod>=0) output[n*E*F*M+m*E*F+x*E+y] += prod; //OP[x][y] += IP[U*x+i][U*y+j]*WT[i][j]; }} } } } } } } int main(int argc, char* argv[]) { int batch_size = atoi(argv[1]); /*************INITALIZING MATRICES*********************************/ float *IP = (float*) malloc(batch_size*C*H*W*sizeof(float)); //float IP[H][W]; float *OP = (float*) malloc(batch_size*M*F*E*sizeof(float)); //float OP[F][E]; float *OPG = (float*) malloc(batch_size*M*F*E*sizeof(float)); float *WT = (float*) malloc(M*C*R*S*sizeof(float)); //float WT[R][S]; float* d_o; float* d_i; float* d_w; float* d_r; //clock_t cpu_start, gpu_start, cpu_end, gpu_end; //int a,b,c,d; int c,d,m,n,k; /* WEIGHT MATRIX*/ for (m=0; m<M; m++){ for(k=0;k<C;k++){ for (c=0; c<R; c++){ for(d=0; d<S; d++){ //WT[c][d] = 2.0; //WT[m*C*R*S+k*R*S+c*S+d] = (int)k+1; WT[m*C*R*S+k*R*S+c*S+d] = (float)rand()/(float)(RAND_MAX+1.0); } } } } /*INITIALIZING OUTPUT MATRIX*/ for (n=0; n<batch_size;n++){ for (m=0; m<M; m++){ for (c=0; c<F; 
c++){ for(d=0; d<E; d++){ //OP[c][d] = 0; OP[n*M*F*E+m*F*E+c*E+d] = 0; } } } } /*INITIALIZING INPUT MATRIX*/ for (n=0; n<batch_size; n++){ for(k=0;k<C;k++){ for (c=0; c<H; c++){ for(d=0; d<W; d++){ // IP[c][d] = (a+b+c+d); //if ((c<=1) || (d<=1) || (c>=29) || (d>=29)) //IP[n*C*H*W+k*H*W+c*W+d] = 0; //else IP[n*C*H*W+k*H*W+c*W+d] = (float)rand()/(RAND_MAX+1.0); } } } } if(cudaSuccess != cudaMalloc((void**) &d_i,batch_size*C*H*W*sizeof(float))) { printf("error in d_i malloc\n"); } cudaMemcpy(d_i, IP, batch_size*C*H*W*sizeof(float), cudaMemcpyHostToDevice); if(cudaSuccess != cudaMalloc((void**) &d_w, M*C*R*S*sizeof(float))) { printf("error in d_w malloc\n"); } cudaMemcpy(d_w, WT, M*C*R*S*sizeof(float), cudaMemcpyHostToDevice); if(cudaSuccess != cudaMalloc((void**) &d_o,(long int)C*batch_size*M*E*F*sizeof(float))) { printf("error in d_o malloc\n"); } if(cudaSuccess != cudaMalloc((void**) &d_r,batch_size*M*E*F*sizeof(float))) { printf("error in d_r malloc\n"); } //cpu_start = clock(); //element_wise_mmul(OP, IP, WT, batch_size); //cpu_end = clock(); dim3 dimGrid(batch_size,96,3); dim3 dimBlock(28,28,1); dim3 dimGridRed(batch_size,96,1); dim3 dimBlockRed(28,28,1);ew_gpu_mmul<<<dimGrid, dimBlock>>>(d_o,d_i,d_w,55,55,4,227,11,96,batch_size,3); cudaDeviceSynchronize(); red_ch<<<dimGridRed, dimBlockRed>>>(d_r,d_o,3,batch_size,96,55,55); cudaMemcpy(OPG,d_r,(long int)batch_size*M*E*F*sizeof(float), cudaMemcpyDeviceToHost); /**print outputs**/ //int e,f,g,h; int g,h,s,u,t; float max_error = 0; string filename = "layer_1_"+to_string(batch_size); ifstream fin(filename.c_str()); string line ; //for (t=0;t<C;t++){ for (u=0;u<batch_size;u++){ for (s=0;s<M;s++){ for (g=0; g<F; g++){ for(h=0; h<E; h++){ getline(fin,line); float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-atof(line.c_str())); //float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h]); if(error > max_error) max_error = error; // printf("the output is %f for index %d, %d,%d,%d.\n",OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h); // printf("diff CPU and GPU is %f for index %d,%d,%d,%d.\n", OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h); // printf("the output from GPU is %f for index,%d,%d,%d,%d.\n",OPG[u*M*F*E+s*E*F+g*E+h],u,s,g,h); } } } } fin.close(); printf("max error is %f\n", max_error); //} //cout<<"time taken by cpu call is "<<((double)(cpu_end-cpu_start))/CLOCKS_PER_SEC<<"secs"<<endl; //cout<<"time taken by gpu call is "<<((double)(gpu_end-gpu_start))/CLOCKS_PER_SEC<<"secs"<<endl; cudaFree(d_o); cudaFree(d_i); cudaFree(d_w); cudaFree(d_r); free(OPG); free(IP); free(WT); free(OP); return 0; }
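For reference, the launch geometry used in this pair is consistent with the convolution dimensions defined at the top of the file: with input H = 227, filter R = 11 and stride U = 4, the output size is

E = (H - R) / U + 1 = (227 - 11) / 4 + 1 = 55

and since every thread of ew_gpu_mmul computes a 2x2 output tile (the y/x loops), a 28x28 thread block covers up to 56x56 outputs, with the (2*row+y < height) && (2*col+x < width) guard masking the one excess row and column.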
d563b847b4d25f3f952ac5c665af5f8cd7b3988d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <unistd.h> #include <sys/wait.h> #include <sys/time.h> /** 1.5[MB] div == 4, size = * 48000 2.0[MB] div == 8, size = * 32000 2.4[MB] div == 8, size = * 37000 **/ __global__ void __add(float* a,float* b,int size,int div){ int idx = blockDim.x * blockIdx.x + threadIdx.x; #pragma unroll for(int i = 0 ; i < div ; i ++){ a[idx + (size*i)/div] += b[idx + (size*i)/div]; } } static float elapsed(struct timeval tv0,struct timeval tv1){ return (float)(tv1.tv_sec - tv0.tv_sec) + (float)(tv1.tv_usec - tv0.tv_usec) * 0.000001f; } int main(){ struct timeval t0,t1; gettimeofday(&t0,NULL); float *h_a = NULL; float *h_b = NULL; float *d_a = NULL; float *d_b = NULL; int div = 8; int threadNum = 1024; unsigned int size = (threadNum*div) * 32000; int blockNum = size/(threadNum*div); printf("blockNum : %d\n",blockNum); printf("threadNum : %d\n",threadNum); printf("size : %d\n",size); printf("vector size : %d\n",sizeof(float)*size); int ite = 4000; hipMalloc((void**)&d_a,sizeof(float)*size); hipMalloc((void**)&d_b,sizeof(float)*size); h_a = (float*)malloc(sizeof(float)*size); h_b = (float*)malloc(sizeof(float)*size); for(int i = 0 ; i < size ; i ++){ h_a[i] = 0.0f; h_b[i] = 1.0f; } dim3 threads(threadNum,1,1); dim3 blocks(blockNum,1,1); hipMemcpy(d_a,h_a,sizeof(float)*size,hipMemcpyHostToDevice); hipMemcpy(d_b,h_b,sizeof(float)*size,hipMemcpyHostToDevice); for(int i = 0 ; i < ite ; i ++){ hipLaunchKernelGGL(( __add), dim3(blocks),dim3(threads), 0, 0, d_a,d_b,size,div); } hipMemcpy(h_a,d_a,sizeof(float)*size,hipMemcpyDeviceToHost); int pass = 1; int firstFailedIndex = 0; for(int i = 0 ; i < size ; i ++){ // printf("h_a[%d]:%f ",i,h_a[i]); if(h_a[i] != ite){ firstFailedIndex = i; pass = 0; break; } } if(pass){ printf("Result test PASS!\n"); }else{ printf("Result test Failed\n"); printf("h_a[%d] == %f\n",firstFailedIndex,h_a[firstFailedIndex]); } gettimeofday(&t1,NULL); printf("TIME RESULT : %f\n",elapsed(t0,t1)); return 0; }
d563b847b4d25f3f952ac5c665af5f8cd7b3988d.cu
#include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> #include <unistd.h> #include <sys/wait.h> #include <sys/time.h> /** 1.5[MB] div == 4, size = * 48000 2.0[MB] div == 8, size = * 32000 2.4[MB] div == 8, size = * 37000 **/ __global__ void __add(float* a,float* b,int size,int div){ int idx = blockDim.x * blockIdx.x + threadIdx.x; #pragma unroll for(int i = 0 ; i < div ; i ++){ a[idx + (size*i)/div] += b[idx + (size*i)/div]; } } static float elapsed(struct timeval tv0,struct timeval tv1){ return (float)(tv1.tv_sec - tv0.tv_sec) + (float)(tv1.tv_usec - tv0.tv_usec) * 0.000001f; } int main(){ struct timeval t0,t1; gettimeofday(&t0,NULL); float *h_a = NULL; float *h_b = NULL; float *d_a = NULL; float *d_b = NULL; int div = 8; int threadNum = 1024; unsigned int size = (threadNum*div) * 32000; int blockNum = size/(threadNum*div); printf("blockNum : %d\n",blockNum); printf("threadNum : %d\n",threadNum); printf("size : %d\n",size); printf("vector size : %d\n",sizeof(float)*size); int ite = 4000; cudaMalloc((void**)&d_a,sizeof(float)*size); cudaMalloc((void**)&d_b,sizeof(float)*size); h_a = (float*)malloc(sizeof(float)*size); h_b = (float*)malloc(sizeof(float)*size); for(int i = 0 ; i < size ; i ++){ h_a[i] = 0.0f; h_b[i] = 1.0f; } dim3 threads(threadNum,1,1); dim3 blocks(blockNum,1,1); cudaMemcpy(d_a,h_a,sizeof(float)*size,cudaMemcpyHostToDevice); cudaMemcpy(d_b,h_b,sizeof(float)*size,cudaMemcpyHostToDevice); for(int i = 0 ; i < ite ; i ++){ __add<<<blocks,threads>>>(d_a,d_b,size,div); } cudaMemcpy(h_a,d_a,sizeof(float)*size,cudaMemcpyDeviceToHost); int pass = 1; int firstFailedIndex = 0; for(int i = 0 ; i < size ; i ++){ // printf("h_a[%d]:%f ",i,h_a[i]); if(h_a[i] != ite){ firstFailedIndex = i; pass = 0; break; } } if(pass){ printf("Result test PASS!\n"); }else{ printf("Result test Failed\n"); printf("h_a[%d] == %f\n",firstFailedIndex,h_a[firstFailedIndex]); } gettimeofday(&t1,NULL); printf("TIME RESULT : %f\n",elapsed(t0,t1)); return 0; }
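Neither version of this pair checks API return codes or launch errors, so an allocation failure or a bad launch configuration would surface only as a wrong result; a common checking pattern that could wrap these calls is sketched below (the CHECK_CUDA macro name is illustrative, not from the source).

#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

#define CHECK_CUDA(call)                                               \
    do {                                                               \
        cudaError_t err_ = (call);                                     \
        if (err_ != cudaSuccess) {                                     \
            printf("CUDA error %s at %s:%d\n",                         \
                   cudaGetErrorString(err_), __FILE__, __LINE__);      \
            exit(1);                                                   \
        }                                                              \
    } while (0)

/* usage:
 *   CHECK_CUDA(cudaMalloc((void**)&d_a, sizeof(float) * size));
 *   __add<<<blocks, threads>>>(d_a, d_b, size, div);
 *   CHECK_CUDA(cudaGetLastError());       // catches launch-configuration errors
 *   CHECK_CUDA(cudaDeviceSynchronize());  // catches errors raised while the kernel runs
 */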
5ef486e1253a9e33fdc93347a382e9dae1cf9e0f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #pragma once #include <iostream> #include <vector> #include <string> #include <algorithm> #include "DevicePara.h" #include "APoint.h" #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #define PI 3.14159265358979 #define MAX_FITNESS 10000000.0 using namespace std; // // __device__ float createARandomNum(hiprandState_t* globalState, int index); //sort(APoint) static __device__ void StableSort_APoint(APoint** objArray, int start, int end, APoint** tempArray); static __device__ double getMax(double a, double b) { return a > b ? a : b; } static __device__ double getMin(double a, double b) { return a < b ? a : b; } // static __device__ bool IsRangeOverlap(double low1, double upper1, double low2, double upper2) { if (getMax(low1, low2) <= getMin(upper1, upper2)) { return true; } else { return false; } } __device__ int DeviceIDSize_Partition(DeviceIDSize* sizeArray, int start, int end) { DeviceIDSize& temp = sizeArray[start];// double tempSize = temp.size.x * temp.size.y; int i = start; int j = end; while (i < j) { while (i < j && sizeArray[j].size.x * sizeArray[j].size.y >= tempSize) --j; sizeArray[i] = sizeArray[j]; while (i < j && sizeArray[j].size.x * sizeArray[j].size.y <= tempSize) ++i; sizeArray[j] = sizeArray[i]; } sizeArray[i] = temp;// return i; } //DeviceIDSize static __device__ void DeviceIDSize_Sort(DeviceIDSize* sizeArray, int start, int end) { if (start < end) { int pivot = DeviceIDSize_Partition(sizeArray, start, end); DeviceIDSize_Sort(sizeArray, start, pivot - 1); DeviceIDSize_Sort(sizeArray, pivot + 1, end); } } //double static __device__ int Double_Partition(double* numArray, int start, int end) { double temp = numArray[start]; int i = start; int j = end; while (i < j) { while (i < j && numArray[j] >= temp) --j; numArray[i] = numArray[j]; while (i < j && numArray[i] <= temp) ++i; numArray[j] = numArray[i]; } numArray[i] = temp;// return i; } static __device__ void Double_Sort(double* numArray, int start, int end) { if (start < end) { int pivot = Double_Partition(numArray, start, end); Double_Sort(numArray, start, pivot - 1); Double_Sort(numArray, pivot + 1, end); } } //unique) static __device__ int Double_Unique(double* numArray, int start, int end) { int l = 0; int r = 1; while (r <= end) { if (numArray[r] != numArray[l]) { l++; numArray[l] = numArray[r]; } r++; } return l + 1; } // static __device__ int MyRound(double num) { return (num > 0.0) ? 
floor(num + 0.5) : ceil(num - 0.5); } //SegPath __device__ int SegPath_Partition(SegPath* objArray, int start, int end) { SegPath& temp = objArray[start]; int i = start; int j = end; while (i < j) { while (i < j && objArray[j].ABigEqualB(temp, -1)) --j; objArray[i] = objArray[j]; while (i < j && objArray[i].ASmallEqualB(temp, -1)) ++i; objArray[j] = objArray[i]; } objArray[i] = temp;// return i; } static __device__ void SegPath_Sort(SegPath* objArray, int start, int end) { if (start < end) { int pivot = SegPath_Partition(objArray, start, end); SegPath_Sort(objArray, start, pivot - 1); SegPath_Sort(objArray, pivot + 1, end); } } //unique) static __device__ int SegPath_Unique(SegPath* objArray, int start, int end) { int l = 0; int r = 1; while (r <= end) { if (!objArray[r].AEqualB(objArray[l], -1))// { l++; objArray[l] = objArray[r]; } r++; } return l + 1; } //PointInfo static __device__ int PointInfo_Partition(PointInfo* objArray, int start, int end) { PointInfo& temp = objArray[start]; int i = start; int j = end; while (i < j) { while (i < j && objArray[j].ABigEqualB(temp, -1)) --j; objArray[i] = objArray[j]; while (i < j && objArray[i].ASmallEqualB(temp, -1)) ++i; objArray[j] = objArray[i]; } objArray[i] = temp;// return i; } static __device__ void PointInfo_Sort(PointInfo* objArray, int start, int end) { if (start < end) { int pivot = PointInfo_Partition(objArray, start, end); PointInfo_Sort(objArray, start, pivot - 1); PointInfo_Sort(objArray, pivot + 1, end); } } //& static __device__ int PointInfo_CalcuAndUnique(PointInfo* objArray, int start, int end) { int l = 0; int r = 1; while (r <= end) { if (!objArray[r].AEqualB(objArray[l], -1))// { l++; objArray[l] = objArray[r]; } else //vertNumhoriNum { objArray[l].horiDirNum += objArray[r].horiDirNum; objArray[l].vertDirNum += objArray[r].vertDirNum; } r++;//unique } return l + 1; } // static __device__ PointInfo FindPointInfo(PointInfo* pointInfoList, int start, int end, Vector2Int point) { int left = start; int right = end; PointInfo res; while (start <= end) { int mid = (start + end) >> 1; if (pointInfoList[mid].pointAxis.AEqualB(point, -1)) { res = pointInfoList[mid]; break; } else if (pointInfoList[mid].pointAxis.ASmallB(point, -1)) { mid = left + 1; } else { mid = right - 1; } } return res; } //(APoint) //APointf __device__ void Merge(APoint** objArray, int start, int middle, int end, APoint** tempArray) { int i = start; int j = middle + 1; int index = 0; while (i <= middle && j <= end) { if (objArray[i]->f <= objArray[j]->f) {// tempArray[index++] = objArray[i++]; } else { tempArray[index++] = objArray[j++]; } } while (i <= middle) { tempArray[index++] = objArray[i++]; } while (j <= end) { tempArray[index++] = objArray[j++]; } for (int i = 0; i < index; i++) { objArray[start + i] = tempArray[i]; } } //sort(APoint) static __device__ void StableSort_APoint(APoint** objArray, int start, int end, APoint** tempArray) { if (start < end) { int middle = (start + end) / 2; StableSort_APoint(objArray, start, middle, tempArray); StableSort_APoint(objArray, middle + 1, end, tempArray); Merge(objArray, start, middle, end, tempArray); } } extern "C" void initRandomGenerator(int particle_num_, hiprandState_t* state, unsigned long seed) { hipLaunchKernelGGL(( initRandomGenerator_Kernal) , dim3(1), dim3(particle_num_) , 0, 0, state, seed); } // GPU // __global__ void initRandomGenerator_Kernal(hiprandState_t* state, unsigned long seed) { int id = threadIdx.x; hiprand_init(seed, id, 0, &state[id]); } // __device__ float 
createARandomNum(hiprandState_t* globalState, int index) { hiprandState_t localState = globalState[index]; float RANDOM = hiprand_uniform(&localState); globalState[index] = localState; return RANDOM; } // //__global__ void generateRandomNum(float* N, hiprandState_t* globalState) //{ // int i = blockIdx.x * blockDim.x + threadIdx.x;// // float k = generate(globalState, i); // N[i] = k; //}
5ef486e1253a9e33fdc93347a382e9dae1cf9e0f.cu
#pragma once #include <iostream> #include <vector> #include <string> #include <algorithm> #include "DevicePara.h" #include "APoint.h" #include <curand.h> #include <curand_kernel.h> #define PI 3.14159265358979 #define MAX_FITNESS 10000000.0 using namespace std; //所有函数都加上定义 //生成一个随机数 __device__ float createARandomNum(curandState* globalState, int index); //使用归并排序实现稳定的sort(APoint) static __device__ void StableSort_APoint(APoint** objArray, int start, int end, APoint** tempArray); static __device__ double getMax(double a, double b) { return a > b ? a : b; } static __device__ double getMin(double a, double b) { return a < b ? a : b; } //判断两个区间是否重叠 static __device__ bool IsRangeOverlap(double low1, double upper1, double low2, double upper2) { if (getMax(low1, low2) <= getMin(upper1, upper2)) { return true; } else { return false; } } __device__ int DeviceIDSize_Partition(DeviceIDSize* sizeArray, int start, int end) { DeviceIDSize& temp = sizeArray[start];//引用可以使用吗??? double tempSize = temp.size.x * temp.size.y; int i = start; int j = end; while (i < j) { while (i < j && sizeArray[j].size.x * sizeArray[j].size.y >= tempSize) --j; sizeArray[i] = sizeArray[j]; while (i < j && sizeArray[j].size.x * sizeArray[j].size.y <= tempSize) ++i; sizeArray[j] = sizeArray[i]; } sizeArray[i] = temp;//插入到正确位置 return i; } //快速排序(DeviceIDSize版本) static __device__ void DeviceIDSize_Sort(DeviceIDSize* sizeArray, int start, int end) { if (start < end) { int pivot = DeviceIDSize_Partition(sizeArray, start, end); DeviceIDSize_Sort(sizeArray, start, pivot - 1); DeviceIDSize_Sort(sizeArray, pivot + 1, end); } } //快速排序(double版本) static __device__ int Double_Partition(double* numArray, int start, int end) { double temp = numArray[start]; int i = start; int j = end; while (i < j) { while (i < j && numArray[j] >= temp) --j; numArray[i] = numArray[j]; while (i < j && numArray[i] <= temp) ++i; numArray[j] = numArray[i]; } numArray[i] = temp;//插入到正确位置 return i; } static __device__ void Double_Sort(double* numArray, int start, int end) { if (start < end) { int pivot = Double_Partition(numArray, start, end); Double_Sort(numArray, start, pivot - 1); Double_Sort(numArray, pivot + 1, end); } } //自定义unique函数(对于已经排好序的数组,去除其中的重复部分,返回新数组的大小) static __device__ int Double_Unique(double* numArray, int start, int end) { int l = 0; int r = 1; while (r <= end) { if (numArray[r] != numArray[l]) { l++; numArray[l] = numArray[r]; } r++; } return l + 1; } //自定义四舍五入算法 static __device__ int MyRound(double num) { return (num > 0.0) ? 
floor(num + 0.5) : ceil(num - 0.5); } //快速排序(SegPath版本) __device__ int SegPath_Partition(SegPath* objArray, int start, int end) { SegPath& temp = objArray[start]; int i = start; int j = end; while (i < j) { while (i < j && objArray[j].ABigEqualB(temp, -1)) --j; objArray[i] = objArray[j]; while (i < j && objArray[i].ASmallEqualB(temp, -1)) ++i; objArray[j] = objArray[i]; } objArray[i] = temp;//插入到正确位置 return i; } static __device__ void SegPath_Sort(SegPath* objArray, int start, int end) { if (start < end) { int pivot = SegPath_Partition(objArray, start, end); SegPath_Sort(objArray, start, pivot - 1); SegPath_Sort(objArray, pivot + 1, end); } } //自定义unique函数(对于已经排好序的数组,去除其中的重复部分,返回新数组的大小) static __device__ int SegPath_Unique(SegPath* objArray, int start, int end) { int l = 0; int r = 1; while (r <= end) { if (!objArray[r].AEqualB(objArray[l], -1))//两者不相等 { l++; objArray[l] = objArray[r]; } r++; } return l + 1; } //快速排序(PointInfo版本) static __device__ int PointInfo_Partition(PointInfo* objArray, int start, int end) { PointInfo& temp = objArray[start]; int i = start; int j = end; while (i < j) { while (i < j && objArray[j].ABigEqualB(temp, -1)) --j; objArray[i] = objArray[j]; while (i < j && objArray[i].ASmallEqualB(temp, -1)) ++i; objArray[j] = objArray[i]; } objArray[i] = temp;//插入到正确位置 return i; } static __device__ void PointInfo_Sort(PointInfo* objArray, int start, int end) { if (start < end) { int pivot = PointInfo_Partition(objArray, start, end); PointInfo_Sort(objArray, start, pivot - 1); PointInfo_Sort(objArray, pivot + 1, end); } } //计算每个点水平和垂直方向的连线数目&去重 static __device__ int PointInfo_CalcuAndUnique(PointInfo* objArray, int start, int end) { int l = 0; int r = 1; while (r <= end) { if (!objArray[r].AEqualB(objArray[l], -1))//两者不相等 { l++; objArray[l] = objArray[r]; } else //相等的话,需要累加vertNum和horiNum { objArray[l].horiDirNum += objArray[r].horiDirNum; objArray[l].vertDirNum += objArray[r].vertDirNum; } r++;//注意unique操作 } return l + 1; } //二分查找 static __device__ PointInfo FindPointInfo(PointInfo* pointInfoList, int start, int end, Vector2Int point) { int left = start; int right = end; PointInfo res; while (start <= end) { int mid = (start + end) >> 1; if (pointInfoList[mid].pointAxis.AEqualB(point, -1)) { res = pointInfoList[mid]; break; } else if (pointInfoList[mid].pointAxis.ASmallB(point, -1)) { mid = left + 1; } else { mid = right - 1; } } return res; } //归并排序(APoint版本) //注意排序规则,是按照APoint的f值进行比较 __device__ void Merge(APoint** objArray, int start, int middle, int end, APoint** tempArray) { int i = start; int j = middle + 1; int index = 0; while (i <= middle && j <= end) { if (objArray[i]->f <= objArray[j]->f) {//排序规则 tempArray[index++] = objArray[i++]; } else { tempArray[index++] = objArray[j++]; } } while (i <= middle) { tempArray[index++] = objArray[i++]; } while (j <= end) { tempArray[index++] = objArray[j++]; } for (int i = 0; i < index; i++) { objArray[start + i] = tempArray[i]; } } //使用归并排序实现稳定的sort(APoint) static __device__ void StableSort_APoint(APoint** objArray, int start, int end, APoint** tempArray) { if (start < end) { int middle = (start + end) / 2; StableSort_APoint(objArray, start, middle, tempArray); StableSort_APoint(objArray, middle + 1, end, tempArray); Merge(objArray, start, middle, end, tempArray); } } extern "C" void initRandomGenerator(int particle_num_, curandState* state, unsigned long seed) { initRandomGenerator_Kernal <<<1, particle_num_ >>> (state, seed); } //生成随机数相关代码 GPU //初始化随机数生成器 __global__ void initRandomGenerator_Kernal(curandState* state, unsigned 
long seed) { int id = threadIdx.x; curand_init(seed, id, 0, &state[id]); } //生成一个随机数 __device__ float createARandomNum(curandState* globalState, int index) { curandState localState = globalState[index]; float RANDOM = curand_uniform(&localState); globalState[index] = localState; return RANDOM; } //生成随机数的核函数 //__global__ void generateRandomNum(float* N, curandState* globalState) //{ // int i = blockIdx.x * blockDim.x + threadIdx.x;//当前序号 // float k = generate(globalState, i); // N[i] = k; //}
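To show how the device-side RNG helpers above are meant to be used, here is a small sketch (the fillRandom kernel and the d_out/d_states names are hypothetical, and it assumes compilation in the same translation unit as createARandomNum): one curandState per thread, initialized once through initRandomGenerator, then repeated draws with createARandomNum.

__global__ void fillRandom(float* out, curandState* states, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;    /* one RNG state per thread */
    if (i < n) out[i] = createARandomNum(states, i);  /* uniform float in (0, 1] */
}

/* host side:
 *   curandState* d_states;
 *   cudaMalloc((void**)&d_states, particle_num_ * sizeof(curandState));
 *   initRandomGenerator(particle_num_, d_states, (unsigned long)time(NULL));
 *   fillRandom<<<1, particle_num_>>>(d_out, d_states, particle_num_);
 */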
73fc7c9e228af4a1f2f6f8375bb17a2777f542d4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include <time.h> #include "rocblas.h" #define MAT_TYPE double #define MAT_SIZE 1024 #define N MAT_SIZE #define N2 MAT_SIZE*MAT_SIZE #define BLOCK 256 #define THREAD 512 void stopwatch(int); __global__ void cuda_mul(MAT_TYPE*,MAT_TYPE*,MAT_TYPE*,int); int main() { MAT_TYPE* host_A; MAT_TYPE* host_B; MAT_TYPE* host_C; MAT_TYPE* dev_A; MAT_TYPE* dev_B; MAT_TYPE* dev_C; double alpha = 1.0; double beta = 0.0; host_A = (MAT_TYPE*)malloc(sizeof(MAT_TYPE)*N2); host_B = (MAT_TYPE*)malloc(sizeof(MAT_TYPE)*N2); host_C = (MAT_TYPE*)malloc(sizeof(MAT_TYPE)*N2); for (int i=0;i < N2; i++) { host_A[i] = rand()/(MAT_TYPE)RAND_MAX; host_B[i] = rand()/(MAT_TYPE)RAND_MAX; host_C[i] = rand()/(MAT_TYPE)RAND_MAX; } hipMalloc((void**)&dev_A,N2 * sizeof(MAT_TYPE)); hipMalloc((void**)&dev_B,N2 * sizeof(MAT_TYPE)); hipMalloc((void**)&dev_C,N2 * sizeof(MAT_TYPE)); //cublas hipblasHandle_t handle; hipblasCreate(&handle); hipblasSetVector(N2,sizeof(MAT_TYPE),host_A,1,dev_A,1); hipblasSetVector(N2,sizeof(MAT_TYPE),host_B,1,dev_B,1); hipblasSetVector(N2,sizeof(MAT_TYPE),host_C,1,dev_C,1); printf("(1024 X 1024) * (1024 X 1024)\n"); printf("cublas dgemm : "); stopwatch(0); hipblasDgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_N,N,N,N,&alpha,dev_A,N,dev_B,N,&beta,dev_C,N); stopwatch(1); hipblasGetVector(N2,sizeof(MAT_TYPE),dev_C,1,host_C,1); hipblasDestroy(handle); hipDeviceSynchronize(); dim3 Dg(BLOCK,BLOCK,1); dim3 Db(THREAD,THREAD,1); printf("cuda matrix multiplication "); stopwatch(0); hipLaunchKernelGGL(( cuda_mul), dim3(Dg),dim3(Db), 0, 0, dev_A,dev_B,dev_C,N); stopwatch(1); free(host_A); free(host_B); free(host_C); hipFree(dev_A); hipFree(dev_B); hipFree(dev_C); return 0; } __global__ void cuda_mul(MAT_TYPE* A,MAT_TYPE* B,MAT_TYPE* C,int w) { int tid,tx,ty; tx = blockDim.x * blockIdx.x + threadIdx.x; ty = blockDim.y * blockIdx.y + threadIdx.y; tid = w*ty + tx; MAT_TYPE v = 0; MAT_TYPE a = 0; MAT_TYPE b = 0; for(int i=0;i< w;i++) { a = A[ty * w + i]; b = B[i * w + tx]; v += a+b; } C[tid]= v; } void stopwatch(int flag) { const long long NANOS = 1000000000LL; static struct timespec startTS,endTS; static long long diff = 0; //start if(flag == 0) { diff = 0; if(-1 == clock_gettime(CLOCK_MONOTONIC,&startTS)) printf("Failed to call clock_gettime\n"); } //end else if(flag == 1) { if(-1 == clock_gettime(CLOCK_MONOTONIC,&endTS)) printf("Failed to call clock_gettime\n"); diff = NANOS * (endTS.tv_sec - startTS.tv_sec) + (endTS.tv_nsec - startTS.tv_nsec); printf("elapsed time : % lld microsec\n",diff/1000); } else { printf("wrong flag | 0 : start, 1 : end\n"); } }
73fc7c9e228af4a1f2f6f8375bb17a2777f542d4.cu
#include <stdlib.h> #include <stdio.h> #include <time.h> #include "cublas_v2.h" #define MAT_TYPE double #define MAT_SIZE 1024 #define N MAT_SIZE #define N2 MAT_SIZE*MAT_SIZE #define BLOCK 256 #define THREAD 512 void stopwatch(int); __global__ void cuda_mul(MAT_TYPE*,MAT_TYPE*,MAT_TYPE*,int); int main() { MAT_TYPE* host_A; MAT_TYPE* host_B; MAT_TYPE* host_C; MAT_TYPE* dev_A; MAT_TYPE* dev_B; MAT_TYPE* dev_C; double alpha = 1.0; double beta = 0.0; host_A = (MAT_TYPE*)malloc(sizeof(MAT_TYPE)*N2); host_B = (MAT_TYPE*)malloc(sizeof(MAT_TYPE)*N2); host_C = (MAT_TYPE*)malloc(sizeof(MAT_TYPE)*N2); for (int i=0;i < N2; i++) { host_A[i] = rand()/(MAT_TYPE)RAND_MAX; host_B[i] = rand()/(MAT_TYPE)RAND_MAX; host_C[i] = rand()/(MAT_TYPE)RAND_MAX; } cudaMalloc((void**)&dev_A,N2 * sizeof(MAT_TYPE)); cudaMalloc((void**)&dev_B,N2 * sizeof(MAT_TYPE)); cudaMalloc((void**)&dev_C,N2 * sizeof(MAT_TYPE)); //cublas initialization cublasHandle_t handle; cublasCreate(&handle); cublasSetVector(N2,sizeof(MAT_TYPE),host_A,1,dev_A,1); cublasSetVector(N2,sizeof(MAT_TYPE),host_B,1,dev_B,1); cublasSetVector(N2,sizeof(MAT_TYPE),host_C,1,dev_C,1); printf("(1024 X 1024) * (1024 X 1024)\n"); printf("cublas dgemm : "); stopwatch(0); cublasDgemm(handle,CUBLAS_OP_N,CUBLAS_OP_N,N,N,N,&alpha,dev_A,N,dev_B,N,&beta,dev_C,N); stopwatch(1); cublasGetVector(N2,sizeof(MAT_TYPE),dev_C,1,host_C,1); cublasDestroy(handle); cudaThreadSynchronize(); dim3 Dg(BLOCK,BLOCK,1); dim3 Db(THREAD,THREAD,1); printf("cuda matrix multiplication "); stopwatch(0); cuda_mul<<<Dg,Db>>>(dev_A,dev_B,dev_C,N); stopwatch(1); free(host_A); free(host_B); free(host_C); cudaFree(dev_A); cudaFree(dev_B); cudaFree(dev_C); return 0; } __global__ void cuda_mul(MAT_TYPE* A,MAT_TYPE* B,MAT_TYPE* C,int w) { int tid,tx,ty; tx = blockDim.x * blockIdx.x + threadIdx.x; ty = blockDim.y * blockIdx.y + threadIdx.y; tid = w*ty + tx; MAT_TYPE v = 0; MAT_TYPE a = 0; MAT_TYPE b = 0; for(int i=0;i< w;i++) { a = A[ty * w + i]; b = B[i * w + tx]; v += a+b; } C[tid]= v; } void stopwatch(int flag) { const long long NANOS = 1000000000LL; static struct timespec startTS,endTS; static long long diff = 0; //start if(flag == 0) { diff = 0; if(-1 == clock_gettime(CLOCK_MONOTONIC,&startTS)) printf("Failed to call clock_gettime\n"); } //end else if(flag == 1) { if(-1 == clock_gettime(CLOCK_MONOTONIC,&endTS)) printf("Failed to call clock_gettime\n"); diff = NANOS * (endTS.tv_sec - startTS.tv_sec) + (endTS.tv_nsec - startTS.tv_nsec); printf("elapsed time : % lld microsec\n",diff/1000); } else { printf("wrong flag | 0 : start, 1 : end\n"); } }
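Two details in this pair are worth flagging: dim3 Db(THREAD,THREAD,1) requests 512x512 = 262,144 threads per block, far beyond the 1,024-thread hardware limit, so the cuda_mul launch fails with an invalid-configuration error that the program never checks, and the inner loop accumulates a+b where a matrix product needs a*b. A corrected sketch (the 32x32 launch shape is chosen here purely for illustration and reuses the MAT_TYPE/N defines above) would be:

/* launch as: dim3 Dg(N/32, N/32, 1); dim3 Db(32, 32, 1);  -> 1024 threads per block */
__global__ void cuda_mul_fixed(MAT_TYPE* A, MAT_TYPE* B, MAT_TYPE* C, int w)
{
    int tx = blockDim.x * blockIdx.x + threadIdx.x;   /* column */
    int ty = blockDim.y * blockIdx.y + threadIdx.y;   /* row    */
    if (tx >= w || ty >= w) return;                   /* guard partial tiles */
    MAT_TYPE v = 0;
    for (int i = 0; i < w; i++)
        v += A[ty * w + i] * B[i * w + tx];           /* multiply-accumulate */
    C[ty * w + tx] = v;
}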
0cb91d804fc507f2b9aaa25f9313303985d108d4.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright (c) 2020 Neka-Nat * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. **/ #include <Eigen/Dense> #include "cupoch/geometry/geometry_utils.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/geometry/trianglemesh.h" #include "cupoch/utility/console.h" namespace cupoch { namespace geometry { namespace { template <int Dim> struct transform_points_functor { transform_points_functor( const Eigen::Matrix<float, Dim + 1, Dim + 1> &transform) : transform_(transform){}; const Eigen::Matrix<float, Dim + 1, Dim + 1> transform_; __device__ void operator()(Eigen::Matrix<float, Dim, 1> &pt) { pt = transform_.template block<Dim, Dim>(0, 0) * pt + transform_.template block<Dim, 1>(0, Dim); } }; struct transform_normals_functor { transform_normals_functor(const Eigen::Matrix4f &transform) : transform_(transform){}; const Eigen::Matrix4f transform_; __device__ void operator()(Eigen::Vector3f &nl) { nl = transform_.block<3, 3>(0, 0) * nl; } }; } // namespace const utility::device_vector<Eigen::Vector3f>& ConvertVector3fVectorRef(const Geometry &geometry) { switch (geometry.GetGeometryType()) { case Geometry::GeometryType::PointCloud: return ((const PointCloud &)geometry).points_; case Geometry::GeometryType::TriangleMesh: return ((const TriangleMesh &)geometry).vertices_; case Geometry::GeometryType::Image: case Geometry::GeometryType::Unspecified: default: utility::LogWarning( "[KDTreeFlann::SetGeometry] Unsupported Geometry type."); throw std::runtime_error( "[KDTreeFlann::SetGeometry] Unsupported Geometry type."); } } void ResizeAndPaintUniformColor(utility::device_vector<Eigen::Vector3f> &colors, const size_t size, const Eigen::Vector3f &color) { colors.resize(size); Eigen::Vector3f clipped_color = color; if (color.minCoeff() < 0 || color.maxCoeff() > 1) { utility::LogWarning( "invalid color in PaintUniformColor, clipping to [0, 1]"); clipped_color = clipped_color.array() .max(Eigen::Vector3f(0, 0, 0).array()) .matrix(); clipped_color = clipped_color.array() .min(Eigen::Vector3f(1, 1, 1).array()) .matrix(); } thrust::fill(colors.begin(), colors.end(), clipped_color); } template <int Dim> void TransformPoints( const Eigen::Matrix<float, Dim + 1, Dim + 1> &transformation, utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) { TransformPoints<Dim>(0, transformation, points); } template <int Dim> void TransformPoints( hipStream_t stream, const Eigen::Matrix<float, Dim + 1, Dim + 1> &transformation, 
utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) { transform_points_functor<Dim> func(transformation); thrust::for_each(utility::exec_policy(stream), points.begin(), points.end(), func); } template void TransformPoints<2>( const Eigen::Matrix3f &transformation, utility::device_vector<Eigen::Vector2f> &points); template void TransformPoints<2>( hipStream_t stream, const Eigen::Matrix3f &transformation, utility::device_vector<Eigen::Vector2f> &points); template void TransformPoints<3>( const Eigen::Matrix4f &transformation, utility::device_vector<Eigen::Vector3f> &points); template void TransformPoints<3>( hipStream_t stream, const Eigen::Matrix4f &transformation, utility::device_vector<Eigen::Vector3f> &points); void TransformNormals(const Eigen::Matrix4f &transformation, utility::device_vector<Eigen::Vector3f> &normals) { TransformNormals(0, transformation, normals); } void TransformNormals(hipStream_t stream, const Eigen::Matrix4f &transformation, utility::device_vector<Eigen::Vector3f> &normals) { transform_normals_functor func(transformation); thrust::for_each(utility::exec_policy(stream), normals.begin(), normals.end(), func); } template <int Dim> void TranslatePoints( const Eigen::Matrix<float, Dim, 1> &translation, utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points, bool relative) { Eigen::Matrix<float, Dim, 1> transform = translation; if (!relative) { transform -= utility::ComputeCenter<Dim>(points); } thrust::for_each(points.begin(), points.end(), [=] __device__(Eigen::Matrix<float, Dim, 1> & pt) { pt += transform; }); } template <int Dim> void ScalePoints(const float scale, utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points, bool center) { Eigen::Matrix<float, Dim, 1> points_center = Eigen::Matrix<float, Dim, 1>::Zero(); if (center && !points.empty()) { points_center = utility::ComputeCenter<Dim>(points); } thrust::for_each(points.begin(), points.end(), [=] __device__(Eigen::Matrix<float, Dim, 1> & pt) { pt = (pt - points_center) * scale + points_center; }); } template void TranslatePoints<2>( const Eigen::Vector2f &translation, utility::device_vector<Eigen::Vector2f> &points, bool relative); template void TranslatePoints<3>( const Eigen::Vector3f &translation, utility::device_vector<Eigen::Vector3f> &points, bool relative); template void ScalePoints<2>(const float scale, utility::device_vector<Eigen::Vector2f> &points, bool center); template void ScalePoints<3>(const float scale, utility::device_vector<Eigen::Vector3f> &points, bool center); template <int Dim> void RotatePoints(const Eigen::Matrix<float, Dim, Dim> &R, utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points, bool center) { RotatePoints<Dim>(0, R, points, center); } template <int Dim> void RotatePoints(hipStream_t stream, const Eigen::Matrix<float, Dim, Dim> &R, utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points, bool center) { Eigen::Matrix<float, Dim, 1> points_center = Eigen::Matrix<float, Dim, 1>::Zero(); if (center && !points.empty()) { points_center = utility::ComputeCenter<Dim>(points); } thrust::for_each(utility::exec_policy(stream), points.begin(), points.end(), [=] __device__(Eigen::Matrix<float, Dim, 1> & pt) { pt = R * (pt - points_center) + points_center; }); } template void RotatePoints<2>(const Eigen::Matrix2f &R, utility::device_vector<Eigen::Vector2f> &points, bool center); template void RotatePoints<3>(const Eigen::Matrix3f &R, utility::device_vector<Eigen::Vector3f> &points, bool center); template void RotatePoints<2>(hipStream_t stream, const 
Eigen::Matrix2f &R, utility::device_vector<Eigen::Vector2f> &points, bool center); template void RotatePoints<3>(hipStream_t stream, const Eigen::Matrix3f &R, utility::device_vector<Eigen::Vector3f> &points, bool center); void RotateNormals(const Eigen::Matrix3f &R, utility::device_vector<Eigen::Vector3f> &normals) { RotateNormals(0, R, normals); } void RotateNormals(hipStream_t stream, const Eigen::Matrix3f &R, utility::device_vector<Eigen::Vector3f> &normals) { thrust::for_each(utility::exec_policy(stream), normals.begin(), normals.end(), [=] __device__(Eigen::Vector3f & normal) { normal = R * normal; }); } Eigen::Matrix3f GetRotationMatrixFromXYZ(const Eigen::Vector3f &rotation) { return cupoch::utility::RotationMatrixX(rotation(0)) * cupoch::utility::RotationMatrixY(rotation(1)) * cupoch::utility::RotationMatrixZ(rotation(2)); } Eigen::Matrix3f GetRotationMatrixFromYZX(const Eigen::Vector3f &rotation) { return cupoch::utility::RotationMatrixY(rotation(0)) * cupoch::utility::RotationMatrixZ(rotation(1)) * cupoch::utility::RotationMatrixX(rotation(2)); } Eigen::Matrix3f GetRotationMatrixFromZXY(const Eigen::Vector3f &rotation) { return cupoch::utility::RotationMatrixZ(rotation(0)) * cupoch::utility::RotationMatrixX(rotation(1)) * cupoch::utility::RotationMatrixY(rotation(2)); } Eigen::Matrix3f GetRotationMatrixFromXZY(const Eigen::Vector3f &rotation) { return cupoch::utility::RotationMatrixX(rotation(0)) * cupoch::utility::RotationMatrixZ(rotation(1)) * cupoch::utility::RotationMatrixY(rotation(2)); } Eigen::Matrix3f GetRotationMatrixFromZYX(const Eigen::Vector3f &rotation) { return cupoch::utility::RotationMatrixZ(rotation(0)) * cupoch::utility::RotationMatrixY(rotation(1)) * cupoch::utility::RotationMatrixX(rotation(2)); } Eigen::Matrix3f GetRotationMatrixFromYXZ(const Eigen::Vector3f &rotation) { return cupoch::utility::RotationMatrixY(rotation(0)) * cupoch::utility::RotationMatrixX(rotation(1)) * cupoch::utility::RotationMatrixZ(rotation(2)); } Eigen::Matrix3f GetRotationMatrixFromAxisAngle( const Eigen::Vector3f &rotation) { const float phi = rotation.norm(); return Eigen::AngleAxisf(phi, rotation / phi).toRotationMatrix(); } Eigen::Matrix3f GetRotationMatrixFromQuaternion( const Eigen::Vector4f &rotation) { return Eigen::Quaternionf(rotation(0), rotation(1), rotation(2), rotation(3)) .normalized() .toRotationMatrix(); } } // namespace geometry } // namespace cupoch
0cb91d804fc507f2b9aaa25f9313303985d108d4.cu
/** * Copyright (c) 2020 Neka-Nat * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. **/ #include <Eigen/Dense> #include "cupoch/geometry/geometry_utils.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/geometry/trianglemesh.h" #include "cupoch/utility/console.h" namespace cupoch { namespace geometry { namespace { template <int Dim> struct transform_points_functor { transform_points_functor( const Eigen::Matrix<float, Dim + 1, Dim + 1> &transform) : transform_(transform){}; const Eigen::Matrix<float, Dim + 1, Dim + 1> transform_; __device__ void operator()(Eigen::Matrix<float, Dim, 1> &pt) { pt = transform_.template block<Dim, Dim>(0, 0) * pt + transform_.template block<Dim, 1>(0, Dim); } }; struct transform_normals_functor { transform_normals_functor(const Eigen::Matrix4f &transform) : transform_(transform){}; const Eigen::Matrix4f transform_; __device__ void operator()(Eigen::Vector3f &nl) { nl = transform_.block<3, 3>(0, 0) * nl; } }; } // namespace const utility::device_vector<Eigen::Vector3f>& ConvertVector3fVectorRef(const Geometry &geometry) { switch (geometry.GetGeometryType()) { case Geometry::GeometryType::PointCloud: return ((const PointCloud &)geometry).points_; case Geometry::GeometryType::TriangleMesh: return ((const TriangleMesh &)geometry).vertices_; case Geometry::GeometryType::Image: case Geometry::GeometryType::Unspecified: default: utility::LogWarning( "[KDTreeFlann::SetGeometry] Unsupported Geometry type."); throw std::runtime_error( "[KDTreeFlann::SetGeometry] Unsupported Geometry type."); } } void ResizeAndPaintUniformColor(utility::device_vector<Eigen::Vector3f> &colors, const size_t size, const Eigen::Vector3f &color) { colors.resize(size); Eigen::Vector3f clipped_color = color; if (color.minCoeff() < 0 || color.maxCoeff() > 1) { utility::LogWarning( "invalid color in PaintUniformColor, clipping to [0, 1]"); clipped_color = clipped_color.array() .max(Eigen::Vector3f(0, 0, 0).array()) .matrix(); clipped_color = clipped_color.array() .min(Eigen::Vector3f(1, 1, 1).array()) .matrix(); } thrust::fill(colors.begin(), colors.end(), clipped_color); } template <int Dim> void TransformPoints( const Eigen::Matrix<float, Dim + 1, Dim + 1> &transformation, utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) { TransformPoints<Dim>(0, transformation, points); } template <int Dim> void TransformPoints( cudaStream_t stream, const Eigen::Matrix<float, Dim + 1, Dim + 1> &transformation, utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) { transform_points_functor<Dim> 
func(transformation); thrust::for_each(utility::exec_policy(stream), points.begin(), points.end(), func); } template void TransformPoints<2>( const Eigen::Matrix3f &transformation, utility::device_vector<Eigen::Vector2f> &points); template void TransformPoints<2>( cudaStream_t stream, const Eigen::Matrix3f &transformation, utility::device_vector<Eigen::Vector2f> &points); template void TransformPoints<3>( const Eigen::Matrix4f &transformation, utility::device_vector<Eigen::Vector3f> &points); template void TransformPoints<3>( cudaStream_t stream, const Eigen::Matrix4f &transformation, utility::device_vector<Eigen::Vector3f> &points); void TransformNormals(const Eigen::Matrix4f &transformation, utility::device_vector<Eigen::Vector3f> &normals) { TransformNormals(0, transformation, normals); } void TransformNormals(cudaStream_t stream, const Eigen::Matrix4f &transformation, utility::device_vector<Eigen::Vector3f> &normals) { transform_normals_functor func(transformation); thrust::for_each(utility::exec_policy(stream), normals.begin(), normals.end(), func); } template <int Dim> void TranslatePoints( const Eigen::Matrix<float, Dim, 1> &translation, utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points, bool relative) { Eigen::Matrix<float, Dim, 1> transform = translation; if (!relative) { transform -= utility::ComputeCenter<Dim>(points); } thrust::for_each(points.begin(), points.end(), [=] __device__(Eigen::Matrix<float, Dim, 1> & pt) { pt += transform; }); } template <int Dim> void ScalePoints(const float scale, utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points, bool center) { Eigen::Matrix<float, Dim, 1> points_center = Eigen::Matrix<float, Dim, 1>::Zero(); if (center && !points.empty()) { points_center = utility::ComputeCenter<Dim>(points); } thrust::for_each(points.begin(), points.end(), [=] __device__(Eigen::Matrix<float, Dim, 1> & pt) { pt = (pt - points_center) * scale + points_center; }); } template void TranslatePoints<2>( const Eigen::Vector2f &translation, utility::device_vector<Eigen::Vector2f> &points, bool relative); template void TranslatePoints<3>( const Eigen::Vector3f &translation, utility::device_vector<Eigen::Vector3f> &points, bool relative); template void ScalePoints<2>(const float scale, utility::device_vector<Eigen::Vector2f> &points, bool center); template void ScalePoints<3>(const float scale, utility::device_vector<Eigen::Vector3f> &points, bool center); template <int Dim> void RotatePoints(const Eigen::Matrix<float, Dim, Dim> &R, utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points, bool center) { RotatePoints<Dim>(0, R, points, center); } template <int Dim> void RotatePoints(cudaStream_t stream, const Eigen::Matrix<float, Dim, Dim> &R, utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points, bool center) { Eigen::Matrix<float, Dim, 1> points_center = Eigen::Matrix<float, Dim, 1>::Zero(); if (center && !points.empty()) { points_center = utility::ComputeCenter<Dim>(points); } thrust::for_each(utility::exec_policy(stream), points.begin(), points.end(), [=] __device__(Eigen::Matrix<float, Dim, 1> & pt) { pt = R * (pt - points_center) + points_center; }); } template void RotatePoints<2>(const Eigen::Matrix2f &R, utility::device_vector<Eigen::Vector2f> &points, bool center); template void RotatePoints<3>(const Eigen::Matrix3f &R, utility::device_vector<Eigen::Vector3f> &points, bool center); template void RotatePoints<2>(cudaStream_t stream, const Eigen::Matrix2f &R, utility::device_vector<Eigen::Vector2f> &points, bool center); template 
void RotatePoints<3>(cudaStream_t stream, const Eigen::Matrix3f &R, utility::device_vector<Eigen::Vector3f> &points, bool center); void RotateNormals(const Eigen::Matrix3f &R, utility::device_vector<Eigen::Vector3f> &normals) { RotateNormals(0, R, normals); } void RotateNormals(cudaStream_t stream, const Eigen::Matrix3f &R, utility::device_vector<Eigen::Vector3f> &normals) { thrust::for_each(utility::exec_policy(stream), normals.begin(), normals.end(), [=] __device__(Eigen::Vector3f & normal) { normal = R * normal; }); } Eigen::Matrix3f GetRotationMatrixFromXYZ(const Eigen::Vector3f &rotation) { return cupoch::utility::RotationMatrixX(rotation(0)) * cupoch::utility::RotationMatrixY(rotation(1)) * cupoch::utility::RotationMatrixZ(rotation(2)); } Eigen::Matrix3f GetRotationMatrixFromYZX(const Eigen::Vector3f &rotation) { return cupoch::utility::RotationMatrixY(rotation(0)) * cupoch::utility::RotationMatrixZ(rotation(1)) * cupoch::utility::RotationMatrixX(rotation(2)); } Eigen::Matrix3f GetRotationMatrixFromZXY(const Eigen::Vector3f &rotation) { return cupoch::utility::RotationMatrixZ(rotation(0)) * cupoch::utility::RotationMatrixX(rotation(1)) * cupoch::utility::RotationMatrixY(rotation(2)); } Eigen::Matrix3f GetRotationMatrixFromXZY(const Eigen::Vector3f &rotation) { return cupoch::utility::RotationMatrixX(rotation(0)) * cupoch::utility::RotationMatrixZ(rotation(1)) * cupoch::utility::RotationMatrixY(rotation(2)); } Eigen::Matrix3f GetRotationMatrixFromZYX(const Eigen::Vector3f &rotation) { return cupoch::utility::RotationMatrixZ(rotation(0)) * cupoch::utility::RotationMatrixY(rotation(1)) * cupoch::utility::RotationMatrixX(rotation(2)); } Eigen::Matrix3f GetRotationMatrixFromYXZ(const Eigen::Vector3f &rotation) { return cupoch::utility::RotationMatrixY(rotation(0)) * cupoch::utility::RotationMatrixX(rotation(1)) * cupoch::utility::RotationMatrixZ(rotation(2)); } Eigen::Matrix3f GetRotationMatrixFromAxisAngle( const Eigen::Vector3f &rotation) { const float phi = rotation.norm(); return Eigen::AngleAxisf(phi, rotation / phi).toRotationMatrix(); } Eigen::Matrix3f GetRotationMatrixFromQuaternion( const Eigen::Vector4f &rotation) { return Eigen::Quaternionf(rotation(0), rotation(1), rotation(2), rotation(3)) .normalized() .toRotationMatrix(); } } // namespace geometry } // namespace cupoch
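As a usage illustration of the transform helpers above (the translate_in_x wrapper is hypothetical; it assumes cupoch/geometry/geometry_utils.h declares the TransformPoints templates that are explicitly instantiated in this file), the stream-less overload simply forwards to the stream variant on the default stream 0:

#include <Eigen/Dense>
#include "cupoch/geometry/geometry_utils.h"

void translate_in_x(cupoch::utility::device_vector<Eigen::Vector3f>& pts) {
    Eigen::Matrix4f T = Eigen::Matrix4f::Identity();
    T.block<3, 1>(0, 3) = Eigen::Vector3f(1.0f, 0.0f, 0.0f);  /* shift every point by +1 in x */
    cupoch::geometry::TransformPoints<3>(T, pts);             /* runs on the default stream */
}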
14d9ffe3e24a785fd9e071294449ce9e1204826e.hip
// !!! This is a file automatically generated by hipify!!! //12560.0235297Gbps #include<stdio.h> #include<string.h> #include <stdint.h> #include <stdlib.h> #include <iostream> #include <fstream> #include <cstring> #include <hip/hip_runtime.h> #include <iomanip> #include <time.h> typedef uint64_t word_t; #define BYTE unsigned char #define BLOCK_SIZE 128 #define KEY_SCHEDULE_SIZE 176 #define WORD_SIZE 64 #define BS_BLOCK_SIZE (BLOCK_SIZE * WORD_SIZE / 8) #define WORDS_PER_BLOCK (BLOCK_SIZE / WORD_SIZE) #define ONE 1ULL #define MUL_SHIFT 6 #define bs2le(x) (x) #define bs2be(x) (x) using namespace std; class aes_block { public: BYTE block[16]; }; void printBytes(uint8_t b[],int len){ for(int i=0; i<len; i++) printf("%2x ",b[i]); printf("\n"); } __device__ void printByte(uint8_t b[],int len){ for(int i=0; i<len; i++) printf("%2x ",b[i]); printf("\n"); } void f1printBytes(BYTE b[], int len, FILE* fp) { int i; for (i=0; i<len; i++) fprintf(fp, "%02x ", b[i]); fprintf(fp, "\n"); } static const uint8_t sbox[256] = { //0 1 2 3 4 5 6 7 8 9 A B C D E F 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 }; static void rotate(unsigned char *in) { unsigned char a,c; a = in[0]; for(c=0;c<3;c++) in[c] = in[c + 1]; in[3] = a; return; } /* Calculate the rcon used in key expansion */ static unsigned char rcon(unsigned char in) { unsigned char c=1; if(in == 0) return 0; while(in != 1) { unsigned char b; b = c & 0x80; c <<= 1; if(b == 0x80) { c ^= 0x1b; } in--; } return c; } static void schedule_core(unsigned char *in, unsigned char i) { char a; /* Rotate the input 8 bits to the left */ rotate(in); /* Apply Rijndael's s-box on all 4 bytes */ for(a = 0; a < 4; a++) in[a] = sbox[in[a]]; /* On just the first byte, add 2^i to the byte */ in[0] ^= rcon(i); } void expand_key(unsigned char *in) { unsigned char t[4]; /* c is 16 because the first sub-key is the user-supplied key */ unsigned char c = 16; unsigned char i = 1; unsigned char a; /* We need 11 sets of sixteen bytes each for 128-bit mode */ while(c < 176) { /* Copy the temporary variable over from the 
last 4-byte * block */ for(a = 0; a < 4; a++) t[a] = in[a + c - 4]; /* Every four blocks (of four bytes), * do a complex calculation */ if(c % 16 == 0) { schedule_core(t,i); i++; } for(a = 0; a < 4; a++) { in[c] = in[c - 16] ^ t[a]; c++; } } } void key_transpose_dst(word_t * transpose, word_t * blocks) { int i,k; word_t w; for(k=0; k < WORD_SIZE; k++) { int bitpos = ONE << k; for (i=0; i < WORDS_PER_BLOCK; i++) { w = blocks[k * WORDS_PER_BLOCK + i]; int offset = i << MUL_SHIFT; transpose[(offset)+ 0 ] |= (w & (ONE << 0 )) ? (bitpos) : 0; transpose[(offset)+ 1 ] |= (w & (ONE << 1 )) ? (bitpos) : 0; transpose[(offset)+ 2 ] |= (w & (ONE << 2 )) ? (bitpos) : 0; transpose[(offset)+ 3 ] |= (w & (ONE << 3 )) ? (bitpos) : 0; transpose[(offset)+ 4 ] |= (w & (ONE << 4 )) ? (bitpos) : 0; transpose[(offset)+ 5 ] |= (w & (ONE << 5 )) ? (bitpos) : 0; transpose[(offset)+ 6 ] |= (w & (ONE << 6 )) ? (bitpos) : 0; transpose[(offset)+ 7 ] |= (w & (ONE << 7 )) ? (bitpos) : 0; transpose[(offset)+ 8 ] |= (w & (ONE << 8 )) ? (bitpos) : 0; transpose[(offset)+ 9 ] |= (w & (ONE << 9 )) ? (bitpos) : 0; transpose[(offset)+ 10] |= (w & (ONE << 10)) ? (bitpos) : 0; transpose[(offset)+ 11] |= (w & (ONE << 11)) ? (bitpos) : 0; transpose[(offset)+ 12] |= (w & (ONE << 12)) ? (bitpos) : 0; transpose[(offset)+ 13] |= (w & (ONE << 13)) ? (bitpos) : 0; transpose[(offset)+ 14] |= (w & (ONE << 14)) ? (bitpos) : 0; transpose[(offset)+ 15] |= (w & (ONE << 15)) ? (bitpos) : 0; transpose[(offset)+ 16] |= (w & (ONE << 16)) ? (bitpos) : 0; transpose[(offset)+ 17] |= (w & (ONE << 17)) ? (bitpos) : 0; transpose[(offset)+ 18] |= (w & (ONE << 18)) ? (bitpos) : 0; transpose[(offset)+ 19] |= (w & (ONE << 19)) ? (bitpos) : 0; transpose[(offset)+ 20] |= (w & (ONE << 20)) ? (bitpos) : 0; transpose[(offset)+ 21] |= (w & (ONE << 21)) ? (bitpos) : 0; transpose[(offset)+ 22] |= (w & (ONE << 22)) ? (bitpos) : 0; transpose[(offset)+ 23] |= (w & (ONE << 23)) ? (bitpos) : 0; transpose[(offset)+ 24] |= (w & (ONE << 24)) ? (bitpos) : 0; transpose[(offset)+ 25] |= (w & (ONE << 25)) ? (bitpos) : 0; transpose[(offset)+ 26] |= (w & (ONE << 26)) ? (bitpos) : 0; transpose[(offset)+ 27] |= (w & (ONE << 27)) ? (bitpos) : 0; transpose[(offset)+ 28] |= (w & (ONE << 28)) ? (bitpos) : 0; transpose[(offset)+ 29] |= (w & (ONE << 29)) ? (bitpos) : 0; transpose[(offset)+ 30] |= (w & (ONE << 30)) ? (bitpos) : 0; transpose[(offset)+ 31] |= (w & (ONE << 31)) ? (bitpos) : 0; transpose[(offset)+ 32] |= (w & (ONE << 32)) ? (bitpos) : 0; transpose[(offset)+ 33] |= (w & (ONE << 33)) ? (bitpos) : 0; transpose[(offset)+ 34] |= (w & (ONE << 34)) ? (bitpos) : 0; transpose[(offset)+ 35] |= (w & (ONE << 35)) ? (bitpos) : 0; transpose[(offset)+ 36] |= (w & (ONE << 36)) ? (bitpos) : 0; transpose[(offset)+ 37] |= (w & (ONE << 37)) ? (bitpos) : 0; transpose[(offset)+ 38] |= (w & (ONE << 38)) ? (bitpos) : 0; transpose[(offset)+ 39] |= (w & (ONE << 39)) ? (bitpos) : 0; transpose[(offset)+ 40] |= (w & (ONE << 40)) ? (bitpos) : 0; transpose[(offset)+ 41] |= (w & (ONE << 41)) ? (bitpos) : 0; transpose[(offset)+ 42] |= (w & (ONE << 42)) ? (bitpos) : 0; transpose[(offset)+ 43] |= (w & (ONE << 43)) ? (bitpos) : 0; transpose[(offset)+ 44] |= (w & (ONE << 44)) ? (bitpos) : 0; transpose[(offset)+ 45] |= (w & (ONE << 45)) ? (bitpos) : 0; transpose[(offset)+ 46] |= (w & (ONE << 46)) ? (bitpos) : 0; transpose[(offset)+ 47] |= (w & (ONE << 47)) ? (bitpos) : 0; transpose[(offset)+ 48] |= (w & (ONE << 48)) ? (bitpos) : 0; transpose[(offset)+ 49] |= (w & (ONE << 49)) ? 
(bitpos) : 0; transpose[(offset)+ 50] |= (w & (ONE << 50)) ? (bitpos) : 0; transpose[(offset)+ 51] |= (w & (ONE << 51)) ? (bitpos) : 0; transpose[(offset)+ 52] |= (w & (ONE << 52)) ? (bitpos) : 0; transpose[(offset)+ 53] |= (w & (ONE << 53)) ? (bitpos) : 0; transpose[(offset)+ 54] |= (w & (ONE << 54)) ? (bitpos) : 0; transpose[(offset)+ 55] |= (w & (ONE << 55)) ? (bitpos) : 0; transpose[(offset)+ 56] |= (w & (ONE << 56)) ? (bitpos) : 0; transpose[(offset)+ 57] |= (w & (ONE << 57)) ? (bitpos) : 0; transpose[(offset)+ 58] |= (w & (ONE << 58)) ? (bitpos) : 0; transpose[(offset)+ 59] |= (w & (ONE << 59)) ? (bitpos) : 0; transpose[(offset)+ 60] |= (w & (ONE << 60)) ? (bitpos) : 0; transpose[(offset)+ 61] |= (w & (ONE << 61)) ? (bitpos) : 0; transpose[(offset)+ 62] |= (w & (ONE << 62)) ? (bitpos) : 0; transpose[(offset)+ 63] |= (w & (ONE << 63)) ? (bitpos) : 0; } } } __device__ void bs_transpose_dst(word_t * transpose, word_t * blocks) { int i,k; word_t w; for(k=0; k < WORD_SIZE; k++) { int bitpos = ONE << k; for (i=0; i < WORDS_PER_BLOCK; i++) { w = bs2le(blocks[k * WORDS_PER_BLOCK + i]); int offset = i << MUL_SHIFT; transpose[(offset)+ 0 ] |= (w & (ONE << 0 )) ? (bitpos) : 0; transpose[(offset)+ 1 ] |= (w & (ONE << 1 )) ? (bitpos) : 0; transpose[(offset)+ 2 ] |= (w & (ONE << 2 )) ? (bitpos) : 0; transpose[(offset)+ 3 ] |= (w & (ONE << 3 )) ? (bitpos) : 0; transpose[(offset)+ 4 ] |= (w & (ONE << 4 )) ? (bitpos) : 0; transpose[(offset)+ 5 ] |= (w & (ONE << 5 )) ? (bitpos) : 0; transpose[(offset)+ 6 ] |= (w & (ONE << 6 )) ? (bitpos) : 0; transpose[(offset)+ 7 ] |= (w & (ONE << 7 )) ? (bitpos) : 0; transpose[(offset)+ 8 ] |= (w & (ONE << 8 )) ? (bitpos) : 0; transpose[(offset)+ 9 ] |= (w & (ONE << 9 )) ? (bitpos) : 0; transpose[(offset)+ 10] |= (w & (ONE << 10)) ? (bitpos) : 0; transpose[(offset)+ 11] |= (w & (ONE << 11)) ? (bitpos) : 0; transpose[(offset)+ 12] |= (w & (ONE << 12)) ? (bitpos) : 0; transpose[(offset)+ 13] |= (w & (ONE << 13)) ? (bitpos) : 0; transpose[(offset)+ 14] |= (w & (ONE << 14)) ? (bitpos) : 0; transpose[(offset)+ 15] |= (w & (ONE << 15)) ? (bitpos) : 0; transpose[(offset)+ 16] |= (w & (ONE << 16)) ? (bitpos) : 0; transpose[(offset)+ 17] |= (w & (ONE << 17)) ? (bitpos) : 0; transpose[(offset)+ 18] |= (w & (ONE << 18)) ? (bitpos) : 0; transpose[(offset)+ 19] |= (w & (ONE << 19)) ? (bitpos) : 0; transpose[(offset)+ 20] |= (w & (ONE << 20)) ? (bitpos) : 0; transpose[(offset)+ 21] |= (w & (ONE << 21)) ? (bitpos) : 0; transpose[(offset)+ 22] |= (w & (ONE << 22)) ? (bitpos) : 0; transpose[(offset)+ 23] |= (w & (ONE << 23)) ? (bitpos) : 0; transpose[(offset)+ 24] |= (w & (ONE << 24)) ? (bitpos) : 0; transpose[(offset)+ 25] |= (w & (ONE << 25)) ? (bitpos) : 0; transpose[(offset)+ 26] |= (w & (ONE << 26)) ? (bitpos) : 0; transpose[(offset)+ 27] |= (w & (ONE << 27)) ? (bitpos) : 0; transpose[(offset)+ 28] |= (w & (ONE << 28)) ? (bitpos) : 0; transpose[(offset)+ 29] |= (w & (ONE << 29)) ? (bitpos) : 0; transpose[(offset)+ 30] |= (w & (ONE << 30)) ? (bitpos) : 0; transpose[(offset)+ 31] |= (w & (ONE << 31)) ? (bitpos) : 0; transpose[(offset)+ 32] |= (w & (ONE << 32)) ? (bitpos) : 0; transpose[(offset)+ 33] |= (w & (ONE << 33)) ? (bitpos) : 0; transpose[(offset)+ 34] |= (w & (ONE << 34)) ? (bitpos) : 0; transpose[(offset)+ 35] |= (w & (ONE << 35)) ? (bitpos) : 0; transpose[(offset)+ 36] |= (w & (ONE << 36)) ? (bitpos) : 0; transpose[(offset)+ 37] |= (w & (ONE << 37)) ? (bitpos) : 0; transpose[(offset)+ 38] |= (w & (ONE << 38)) ? 
(bitpos) : 0; transpose[(offset)+ 39] |= (w & (ONE << 39)) ? (bitpos) : 0; transpose[(offset)+ 40] |= (w & (ONE << 40)) ? (bitpos) : 0; transpose[(offset)+ 41] |= (w & (ONE << 41)) ? (bitpos) : 0; transpose[(offset)+ 42] |= (w & (ONE << 42)) ? (bitpos) : 0; transpose[(offset)+ 43] |= (w & (ONE << 43)) ? (bitpos) : 0; transpose[(offset)+ 44] |= (w & (ONE << 44)) ? (bitpos) : 0; transpose[(offset)+ 45] |= (w & (ONE << 45)) ? (bitpos) : 0; transpose[(offset)+ 46] |= (w & (ONE << 46)) ? (bitpos) : 0; transpose[(offset)+ 47] |= (w & (ONE << 47)) ? (bitpos) : 0; transpose[(offset)+ 48] |= (w & (ONE << 48)) ? (bitpos) : 0; transpose[(offset)+ 49] |= (w & (ONE << 49)) ? (bitpos) : 0; transpose[(offset)+ 50] |= (w & (ONE << 50)) ? (bitpos) : 0; transpose[(offset)+ 51] |= (w & (ONE << 51)) ? (bitpos) : 0; transpose[(offset)+ 52] |= (w & (ONE << 52)) ? (bitpos) : 0; transpose[(offset)+ 53] |= (w & (ONE << 53)) ? (bitpos) : 0; transpose[(offset)+ 54] |= (w & (ONE << 54)) ? (bitpos) : 0; transpose[(offset)+ 55] |= (w & (ONE << 55)) ? (bitpos) : 0; transpose[(offset)+ 56] |= (w & (ONE << 56)) ? (bitpos) : 0; transpose[(offset)+ 57] |= (w & (ONE << 57)) ? (bitpos) : 0; transpose[(offset)+ 58] |= (w & (ONE << 58)) ? (bitpos) : 0; transpose[(offset)+ 59] |= (w & (ONE << 59)) ? (bitpos) : 0; transpose[(offset)+ 60] |= (w & (ONE << 60)) ? (bitpos) : 0; transpose[(offset)+ 61] |= (w & (ONE << 61)) ? (bitpos) : 0; transpose[(offset)+ 62] |= (w & (ONE << 62)) ? (bitpos) : 0; transpose[(offset)+ 63] |= (w & (ONE << 63)) ? (bitpos) : 0; } } } __device__ void bs_transpose_rev(word_t * blocks) { int k; word_t w; word_t transpose[BLOCK_SIZE]; memset(transpose, 0, sizeof(transpose)); for(k=0; k < BLOCK_SIZE; k++) { w = blocks[k]; word_t bitpos = ONE << (k % WORD_SIZE); word_t offset = k / WORD_SIZE; transpose[0 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 0 )) ? bitpos : 0; transpose[1 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 1 )) ? bitpos : 0; transpose[2 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 2 )) ? bitpos : 0; transpose[3 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 3 )) ? bitpos : 0; transpose[4 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 4 )) ? bitpos : 0; transpose[5 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 5 )) ? bitpos : 0; transpose[6 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 6 )) ? bitpos : 0; transpose[7 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 7 )) ? bitpos : 0; transpose[8 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 8 )) ? bitpos : 0; transpose[9 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 9 )) ? bitpos : 0; transpose[10 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 10)) ? bitpos : 0; transpose[11 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 11)) ? bitpos : 0; transpose[12 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 12)) ? bitpos : 0; transpose[13 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 13)) ? bitpos : 0; transpose[14 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 14)) ? bitpos : 0; transpose[15 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 15)) ? bitpos : 0; transpose[16 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 16)) ? bitpos : 0; transpose[17 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 17)) ? bitpos : 0; transpose[18 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 18)) ? bitpos : 0; transpose[19 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 19)) ? bitpos : 0; transpose[20 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 20)) ? bitpos : 0; transpose[21 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 21)) ? 
bitpos : 0; transpose[22 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 22)) ? bitpos : 0; transpose[23 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 23)) ? bitpos : 0; transpose[24 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 24)) ? bitpos : 0; transpose[25 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 25)) ? bitpos : 0; transpose[26 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 26)) ? bitpos : 0; transpose[27 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 27)) ? bitpos : 0; transpose[28 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 28)) ? bitpos : 0; transpose[29 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 29)) ? bitpos : 0; transpose[30 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 30)) ? bitpos : 0; transpose[31 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 31)) ? bitpos : 0; transpose[32 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 32)) ? bitpos : 0; transpose[33 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 33)) ? bitpos : 0; transpose[34 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 34)) ? bitpos : 0; transpose[35 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 35)) ? bitpos : 0; transpose[36 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 36)) ? bitpos : 0; transpose[37 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 37)) ? bitpos : 0; transpose[38 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 38)) ? bitpos : 0; transpose[39 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 39)) ? bitpos : 0; transpose[40 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 40)) ? bitpos : 0; transpose[41 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 41)) ? bitpos : 0; transpose[42 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 42)) ? bitpos : 0; transpose[43 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 43)) ? bitpos : 0; transpose[44 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 44)) ? bitpos : 0; transpose[45 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 45)) ? bitpos : 0; transpose[46 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 46)) ? bitpos : 0; transpose[47 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 47)) ? bitpos : 0; transpose[48 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 48)) ? bitpos : 0; transpose[49 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 49)) ? bitpos : 0; transpose[50 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 50)) ? bitpos : 0; transpose[51 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 51)) ? bitpos : 0; transpose[52 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 52)) ? bitpos : 0; transpose[53 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 53)) ? bitpos : 0; transpose[54 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 54)) ? bitpos : 0; transpose[55 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 55)) ? bitpos : 0; transpose[56 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 56)) ? bitpos : 0; transpose[57 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 57)) ? bitpos : 0; transpose[58 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 58)) ? bitpos : 0; transpose[59 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 59)) ? bitpos : 0; transpose[60 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 60)) ? bitpos : 0; transpose[61 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 61)) ? bitpos : 0; transpose[62 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 62)) ? bitpos : 0; transpose[63 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 63)) ? 
bitpos : 0; } memcpy(blocks,transpose,sizeof(transpose)); } void key_transpose(word_t * blocks) { word_t transpose[BLOCK_SIZE]; memset(transpose, 0, sizeof(transpose)); key_transpose_dst(transpose,blocks); memcpy(blocks,transpose,sizeof(transpose)); } __device__ void bs_transpose(word_t * blocks) { word_t transpose[BLOCK_SIZE]; memset(transpose, 0, sizeof(transpose)); bs_transpose_dst(transpose,blocks); memcpy(blocks,transpose,sizeof(transpose)); } void bs_expand_key(word_t (* rk)[BLOCK_SIZE], uint8_t * _key) { // TODO integrate this better uint8_t key[KEY_SCHEDULE_SIZE]; memmove(key,_key,BLOCK_SIZE/8); expand_key(key); int i, j = 0, k, l; for (i = 0; i < KEY_SCHEDULE_SIZE; i += (BLOCK_SIZE/8)) { memmove(rk[j], key + i, BLOCK_SIZE / 8); for (k = WORDS_PER_BLOCK; k < 128; k += WORDS_PER_BLOCK) { for (l = 0; l < WORDS_PER_BLOCK; l++) { rk[j][k + l] = rk[j][l]; } } key_transpose(rk[j]); j++; } } __device__ void bs_addroundkey(word_t * B, word_t * rk) { int i; for (i = 0; i < BLOCK_SIZE; i++) B[i] ^= rk[i]; } __device__ void bs_sbox(word_t U[8]) { word_t S[8]; word_t T1,T2,T3,T4,T5,T6,T7,T8, T9,T10,T11,T12,T13,T14,T15,T16, T17,T18,T19,T20,T21,T22,T23,T24, T25, T26, T27; word_t M1,M2,M3,M4,M5,M6,M7,M8, M9,M10,M11,M12,M13,M14,M15, M16,M17,M18,M19,M20,M21,M22, M23,M24,M25,M26,M27,M28,M29, M30,M31,M32,M33,M34,M35,M36, M37,M38,M39,M40,M41,M42,M43, M44,M45,M46,M47,M48,M49,M50, M51,M52,M53,M54,M55,M56,M57, M58,M59,M60,M61,M62,M63; word_t L0,L1,L2,L3,L4,L5,L6,L7,L8, L9,L10,L11,L12,L13,L14, L15,L16,L17,L18,L19,L20, L21,L22,L23,L24,L25,L26, L27,L28,L29; T1 = U[7] ^ U[4]; T2 = U[7] ^ U[2]; T3 = U[7] ^ U[1]; T4 = U[4] ^ U[2]; T5 = U[3] ^ U[1]; T6 = T1 ^ T5; T7 = U[6] ^ U[5]; T8 = U[0] ^ T6; T9 = U[0] ^ T7; T10 = T6 ^ T7; T11 = U[6] ^ U[2]; T12 = U[5] ^ U[2]; T13 = T3 ^ T4; T14 = T6 ^ T11; T15 = T5 ^ T11; T16 = T5 ^ T12; T17 = T9 ^ T16; T18 = U[4] ^ U[0]; T19 = T7 ^ T18; T20 = T1 ^ T19; T21 = U[1] ^ U[0]; T22 = T7 ^ T21; T23 = T2 ^ T22; T24 = T2 ^ T10; T25 = T20 ^ T17; T26 = T3 ^ T16; T27 = T1 ^ T12; M1 = T13 & T6; M2 = T23 & T8; M3 = T14 ^ M1; M4 = T19 & U[0]; M5 = M4 ^ M1; M6 = T3 & T16; M7 = T22 & T9; M8 = T26 ^ M6; M9 = T20 & T17; M10 = M9 ^ M6; M11 = T1 & T15; M12 = T4 & T27; M13 = M12 ^ M11; M14 = T2 & T10; M15 = M14 ^ M11; M16 = M3 ^ M2; M17 = M5 ^ T24; M18 = M8 ^ M7; M19 = M10 ^ M15; M20 = M16 ^ M13; M21 = M17 ^ M15; M22 = M18 ^ M13; M23 = M19 ^ T25; M24 = M22 ^ M23; M25 = M22 & M20; M26 = M21 ^ M25; M27 = M20 ^ M21; M28 = M23 ^ M25; M29 = M28 & M27; M30 = M26 & M24; M31 = M20 & M23; M32 = M27 & M31; M33 = M27 ^ M25; M34 = M21 & M22; M35 = M24 & M34; M36 = M24 ^ M25; M37 = M21 ^ M29; M38 = M32 ^ M33; M39 = M23 ^ M30; M40 = M35 ^ M36; M41 = M38 ^ M40; M42 = M37 ^ M39; M43 = M37 ^ M38; M44 = M39 ^ M40; M45 = M42 ^ M41; M46 = M44 & T6; M47 = M40 & T8; M48 = M39 & U[0]; M49 = M43 & T16; M50 = M38 & T9; M51 = M37 & T17; M52 = M42 & T15; M53 = M45 & T27; M54 = M41 & T10; M55 = M44 & T13; M56 = M40 & T23; M57 = M39 & T19; M58 = M43 & T3; M59 = M38 & T22; M60 = M37 & T20; M61 = M42 & T1; M62 = M45 & T4; M63 = M41 & T2; L0 = M61 ^ M62; L1 = M50 ^ M56; L2 = M46 ^ M48; L3 = M47 ^ M55; L4 = M54 ^ M58; L5 = M49 ^ M61; L6 = M62 ^ L5; L7 = M46 ^ L3; L8 = M51 ^ M59; L9 = M52 ^ M53; L10 = M53 ^ L4; L11 = M60 ^ L2; L12 = M48 ^ M51; L13 = M50 ^ L0; L14 = M52 ^ M61; L15 = M55 ^ L1; L16 = M56 ^ L0; L17 = M57 ^ L1; L18 = M58 ^ L8; L19 = M63 ^ L4; L20 = L0 ^ L1; L21 = L1 ^ L7; L22 = L3 ^ L12; L23 = L18 ^ L2; L24 = L15 ^ L9; L25 = L6 ^ L10; L26 = L7 ^ L9; L27 = L8 ^ L10; L28 = L11 ^ L14; L29 = L11 ^ L17; S[7] = L6 ^ L24; 
S[6] = ~(L16 ^ L26); S[5] = ~(L19 ^ L28); S[4] = L6 ^ L21; S[3] = L20 ^ L22; S[2] = L25 ^ L29; S[1] = ~(L13 ^ L27); S[0] = ~(L6 ^ L23); memcpy(U,S,sizeof(S)); } __device__ void bs_apply_sbox(word_t * input) { int i; for(i=0; i < BLOCK_SIZE; i+=8) { bs_sbox(input+i); } } #define A0 0 #define A1 8 #define A2 16 #define A3 24 #define R0 0 #define R1 8 #define R2 16 #define R3 24 #define B0 0 #define B1 32 #define B2 64 #define B3 96 // Does shift rows and mix columns in same step __device__ void bs_shiftmix(word_t * B) { word_t Bp_space[BLOCK_SIZE]; word_t * Bp = Bp_space; word_t * Br0 = B + 0; word_t * Br1 = B + 32; word_t * Br2 = B + 64; word_t * Br3 = B + 96; uint8_t offsetr0 = 0; uint8_t offsetr1 = 32; uint8_t offsetr2 = 64; uint8_t offsetr3 = 96; Br0 = B + offsetr0; Br1 = B + offsetr1; Br2 = B + offsetr2; Br3 = B + offsetr3; int i; for (i = 0; i < 4; i++) { // B0 // 2*A0 2*A1 A1 A2 A3 word_t of =Br0[R0+7]^ Br1[R1+7]; Bp[A0+0] = Br1[R1+0] ^ Br2[R2+0] ^ Br3[R3+0] ^ of; Bp[A0+1] = Br0[R0+0] ^ Br1[R1+0] ^ Br1[R1+1] ^ Br2[R2+1] ^ Br3[R3+1] ^ of; Bp[A0+2] = Br0[R0+1] ^ Br1[R1+1] ^ Br1[R1+2] ^ Br2[R2+2] ^ Br3[R3+2]; Bp[A0+3] = Br0[R0+2] ^ Br1[R1+2] ^ Br1[R1+3] ^ Br2[R2+3] ^ Br3[R3+3] ^ of; Bp[A0+4] = Br0[R0+3] ^ Br1[R1+3] ^ Br1[R1+4] ^ Br2[R2+4] ^ Br3[R3+4] ^ of; Bp[A0+5] = Br0[R0+4] ^ Br1[R1+4] ^ Br1[R1+5] ^ Br2[R2+5] ^ Br3[R3+5]; Bp[A0+6] = Br0[R0+5] ^ Br1[R1+5] ^ Br1[R1+6] ^ Br2[R2+6] ^ Br3[R3+6]; Bp[A0+7] = Br0[R0+6] ^ Br1[R1+6] ^ Br1[R1+7] ^ Br2[R2+7] ^ Br3[R3+7]; // A0 2*A1 2*A2 A2 A3 of = Br1[R1+7] ^ Br2[R2+7]; Bp[A1+0] = Br0[R0+0] ^ Br2[R2+0] ^ Br3[R3+0] ^ of; Bp[A1+1] = Br0[R0+1] ^ Br1[R1+0] ^ Br2[R2+0] ^ Br2[R2+1] ^ Br3[R3+1] ^ of; Bp[A1+2] = Br0[R0+2] ^ Br1[R1+1] ^ Br2[R2+1] ^ Br2[R2+2] ^ Br3[R3+2]; Bp[A1+3] = Br0[R0+3] ^ Br1[R1+2] ^ Br2[R2+2] ^ Br2[R2+3] ^ Br3[R3+3] ^ of; Bp[A1+4] = Br0[R0+4] ^ Br1[R1+3] ^ Br2[R2+3] ^ Br2[R2+4] ^ Br3[R3+4] ^ of; Bp[A1+5] = Br0[R0+5] ^ Br1[R1+4] ^ Br2[R2+4] ^ Br2[R2+5] ^ Br3[R3+5]; Bp[A1+6] = Br0[R0+6] ^ Br1[R1+5] ^ Br2[R2+5] ^ Br2[R2+6] ^ Br3[R3+6]; Bp[A1+7] = Br0[R0+7] ^ Br1[R1+6] ^ Br2[R2+6] ^ Br2[R2+7] ^ Br3[R3+7]; // A0 A1 2*A2 2*A3 A3 of = Br2[R2+7] ^ Br3[R3+7]; Bp[A2+0] = Br0[R0+0] ^ Br1[R1+0] ^ Br3[R3+0] ^ of; Bp[A2+1] = Br0[R0+1] ^ Br1[R1+1] ^ Br2[R2+0] ^ Br3[R3+0] ^ Br3[R3+1] ^ of; Bp[A2+2] = Br0[R0+2] ^ Br1[R1+2] ^ Br2[R2+1] ^ Br3[R3+1] ^ Br3[R3+2]; Bp[A2+3] = Br0[R0+3] ^ Br1[R1+3] ^ Br2[R2+2] ^ Br3[R3+2] ^ Br3[R3+3] ^ of; Bp[A2+4] = Br0[R0+4] ^ Br1[R1+4] ^ Br2[R2+3] ^ Br3[R3+3] ^ Br3[R3+4] ^ of; Bp[A2+5] = Br0[R0+5] ^ Br1[R1+5] ^ Br2[R2+4] ^ Br3[R3+4] ^ Br3[R3+5]; Bp[A2+6] = Br0[R0+6] ^ Br1[R1+6] ^ Br2[R2+5] ^ Br3[R3+5] ^ Br3[R3+6]; Bp[A2+7] = Br0[R0+7] ^ Br1[R1+7] ^ Br2[R2+6] ^ Br3[R3+6] ^ Br3[R3+7]; // A0 2*A0 A1 A2 2*A3 of = Br0[R0+7] ^ Br3[R3+7]; Bp[A3+0] = Br0[R0+0] ^ Br1[R1+0] ^ Br2[R2+0] ^ of; Bp[A3+1] = Br0[R0+1] ^ Br0[R0+0] ^ Br1[R1+1] ^ Br2[R2+1] ^ Br3[R3+0] ^ of; Bp[A3+2] = Br0[R0+2] ^ Br0[R0+1] ^ Br1[R1+2] ^ Br2[R2+2] ^ Br3[R3+1]; Bp[A3+3] = Br0[R0+3] ^ Br0[R0+2] ^ Br1[R1+3] ^ Br2[R2+3] ^ Br3[R3+2] ^ of; Bp[A3+4] = Br0[R0+4] ^ Br0[R0+3] ^ Br1[R1+4] ^ Br2[R2+4] ^ Br3[R3+3] ^ of; Bp[A3+5] = Br0[R0+5] ^ Br0[R0+4] ^ Br1[R1+5] ^ Br2[R2+5] ^ Br3[R3+4]; Bp[A3+6] = Br0[R0+6] ^ Br0[R0+5] ^ Br1[R1+6] ^ Br2[R2+6] ^ Br3[R3+5]; Bp[A3+7] = Br0[R0+7] ^ Br0[R0+6] ^ Br1[R1+7] ^ Br2[R2+7] ^ Br3[R3+6]; Bp += BLOCK_SIZE/4; offsetr0 = (offsetr0 + BLOCK_SIZE/4) & 0x7f; offsetr1 = (offsetr1 + BLOCK_SIZE/4) & 0x7f; offsetr2 = (offsetr2 + BLOCK_SIZE/4) & 0x7f; offsetr3 = (offsetr3 + BLOCK_SIZE/4) & 0x7f; Br0 = B + offsetr0; Br1 = B + offsetr1; Br2 = B 
+ offsetr2; Br3 = B + offsetr3; } memcpy(B,Bp_space,sizeof(Bp_space)); } __device__ void bs_shiftrows(word_t * B) { word_t Bp_space[BLOCK_SIZE]; word_t * Bp = Bp_space; word_t * Br0 = B + 0; word_t * Br1 = B + 32; word_t * Br2 = B + 64; word_t * Br3 = B + 96; uint8_t offsetr0 = 0; uint8_t offsetr1 = 32; uint8_t offsetr2 = 64; uint8_t offsetr3 = 96; int i; for(i=0; i<4; i++) { Bp[B0 + 0] = Br0[0]; Bp[B0 + 1] = Br0[1]; Bp[B0 + 2] = Br0[2]; Bp[B0 + 3] = Br0[3]; Bp[B0 + 4] = Br0[4]; Bp[B0 + 5] = Br0[5]; Bp[B0 + 6] = Br0[6]; Bp[B0 + 7] = Br0[7]; Bp[B1 + 0] = Br1[0]; Bp[B1 + 1] = Br1[1]; Bp[B1 + 2] = Br1[2]; Bp[B1 + 3] = Br1[3]; Bp[B1 + 4] = Br1[4]; Bp[B1 + 5] = Br1[5]; Bp[B1 + 6] = Br1[6]; Bp[B1 + 7] = Br1[7]; Bp[B2 + 0] = Br2[0]; Bp[B2 + 1] = Br2[1]; Bp[B2 + 2] = Br2[2]; Bp[B2 + 3] = Br2[3]; Bp[B2 + 4] = Br2[4]; Bp[B2 + 5] = Br2[5]; Bp[B2 + 6] = Br2[6]; Bp[B2 + 7] = Br2[7]; Bp[B3 + 0] = Br3[0]; Bp[B3 + 1] = Br3[1]; Bp[B3 + 2] = Br3[2]; Bp[B3 + 3] = Br3[3]; Bp[B3 + 4] = Br3[4]; Bp[B3 + 5] = Br3[5]; Bp[B3 + 6] = Br3[6]; Bp[B3 + 7] = Br3[7]; offsetr0 = (offsetr0 + BLOCK_SIZE/16 + BLOCK_SIZE/4) & 0x7f; offsetr1 = (offsetr1 + BLOCK_SIZE/16 + BLOCK_SIZE/4) & 0x7f; offsetr2 = (offsetr2 + BLOCK_SIZE/16 + BLOCK_SIZE/4) & 0x7f; offsetr3 = (offsetr3 + BLOCK_SIZE/16 + BLOCK_SIZE/4) & 0x7f; Br0 = B + offsetr0; Br1 = B + offsetr1; Br2 = B + offsetr2; Br3 = B + offsetr3; Bp += 8; } memcpy(B,Bp_space,sizeof(Bp_space)); } __device__ void bs_cipher(word_t state[BLOCK_SIZE], word_t (* rk)[BLOCK_SIZE]) { int round; bs_transpose(state); bs_addroundkey(state,rk[0]); for (round = 1; round < 10; round++) { bs_apply_sbox(state); bs_shiftmix(state); bs_addroundkey(state,rk[round]); } bs_apply_sbox(state); bs_shiftrows(state); bs_addroundkey(state,rk[10]); bs_transpose_rev(state); } __device__ void aes_ecb_encrypt(uint8_t * outputb, uint8_t * inputb, size_t size, word_t (* rk)[BLOCK_SIZE]) { word_t input_space[BLOCK_SIZE]; memset(outputb,0,size); word_t * state = (word_t *)outputb; while (size > 0) { if (size < BS_BLOCK_SIZE)//128*8 { memset(input_space,0,BS_BLOCK_SIZE); memcpy(input_space, inputb, size); bs_cipher(input_space,rk); memcpy(outputb, input_space, size); size = 0; state += size; } else { memcpy(state,inputb,BS_BLOCK_SIZE); bs_cipher(state,rk); size -= BS_BLOCK_SIZE; state += BS_BLOCK_SIZE; } } } __global__ void AES_Encrypt(aes_block aes_block_array[],word_t (* rk)[BLOCK_SIZE],int block_number){ int global_thread_index = blockDim.x*blockIdx.x + threadIdx.x; int stride=blockDim.x*gridDim.x; for(int real_thread=global_thread_index;real_thread < block_number;real_thread+=stride){ BYTE block[16]; BYTE output[16]; for(int i=0;i<16;i++){ block[i] =aes_block_array[real_thread].block[i]; } aes_ecb_encrypt(output,block,16,rk); for(int i=0;i<16;i++){ aes_block_array[real_thread].block[i]=output[i]; } } } int main(int argc, char* argv[]){ ifstream ifs; ifs.open(argv[1], ios::binary); if(!ifs){ cerr<<"Error: cannot open the input file"<<endl; exit(1); } ifs.seekg(0, ios::end); int infileLength = ifs.tellg(); infileLength-=1; ifs.seekg(0, ios::beg); cout<<"Input file length (bytes): "<<infileLength<<endl<<"Number of file blocks: "<<infileLength/16<<endl; int block_number = infileLength/16 ; int number_of_zero_pending = infileLength%16; aes_block* aes_block_array; BYTE key[16]; // the largest key needed by AES int keyLen = 0; // int blockLen = 16; ifstream key_fp; key_fp.open(argv[2]); while(key_fp.peek()!=EOF) { key_fp>>key[keyLen]; if(key_fp.eof()) break; keyLen++; } cout<<"Key length (bytes): "<<keyLen<<endl; word_t rk[11][BLOCK_SIZE]; bs_expand_key(rk, key); if(number_of_zero_pending != 0) aes_block_array = new aes_block [ block_number +
1]; else aes_block_array = new aes_block[ block_number ]; char temp[16]; // FILE* en_fp; // output file for the ciphertext // en_fp = fopen(argv[3], "wb"); for(int i=0; i<block_number; i++){ ifs.read(temp, 16); for(int j=0; j<16; j++){ aes_block_array[i].block[j] = (unsigned char)temp[j]; } } if(number_of_zero_pending != 0) { ifs.read(temp, number_of_zero_pending); for(int j=0; j<16; j++){ aes_block_array[block_number].block[j] = (unsigned char)temp[j]; } for(int j=1; j<=16-number_of_zero_pending; j++) aes_block_array[block_number].block[16-j] = '\0'; block_number++; } hipSetDevice(0); // select device hipDeviceProp_t prop; hipGetDeviceProperties(&prop, 0); int num_sm = prop.multiProcessorCount; aes_block *cuda_aes_block_array; word_t (*cuda_key)[BLOCK_SIZE]; // int thrdperblock = block_number/num_sm; // if(block_number%num_sm>0) // thrdperblock++; // // a thread block can hold at most 1024 threads // if(thrdperblock>1024){ // thrdperblock = 1024; // num_sm = block_number/1024; // if(block_number%1024>0){ // num_sm++; // } // } dim3 ThreadperBlock(256); dim3 BlockperGrid(num_sm); hipMalloc(&cuda_aes_block_array, block_number*sizeof(class aes_block)); hipMalloc(&cuda_key,11*BLOCK_SIZE*sizeof(word_t)); hipMemcpy(cuda_aes_block_array, aes_block_array, block_number*sizeof(class aes_block), hipMemcpyHostToDevice); hipMemcpy(cuda_key,rk,11*BLOCK_SIZE*sizeof(word_t),hipMemcpyHostToDevice); printf("Number of data blocks to encrypt: %d\n", block_number); hipEvent_t start1; hipEventCreate(&start1); hipEvent_t stop1; hipEventCreate(&stop1); hipEventRecord(start1, NULL); hipLaunchKernelGGL(( AES_Encrypt) , dim3(BlockperGrid),dim3(ThreadperBlock), 0, 0, cuda_aes_block_array,cuda_key,block_number); hipEventRecord(stop1, NULL); hipEventSynchronize(stop1); float msecTotal1 = 0.0f,total; hipEventElapsedTime(&msecTotal1, start1, stop1); total=msecTotal1/1000; cout<<"Encryption time: "<<total<<endl; long r=1<<23; // unit conversion constant cout<<"Throughput: "<<block_number/total/r<<" Gbps"<<endl; hipMemcpy(aes_block_array, cuda_aes_block_array, block_number*sizeof(class aes_block), hipMemcpyDeviceToHost); // // write the encrypted blocks to a file // for(int i=0; i<block_number; i++) // f1printBytes(aes_block_array[i].block, blockLen, en_fp); return 0; }
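/*
 * The timing code in main() above reports throughput as block_number/total/r with
 * r = 1 << 23. A minimal host-side sketch of that arithmetic, assuming "Gbps" here
 * means binary gigabits (2^30 bits) per second: each 16-byte block is 128 bits, and
 * 2^30 / 128 = 2^23, which is where the shift constant comes from. The helper name
 * throughput_gbps and the sample numbers below are illustrative, not taken from the file.
 */
#include <cstdint>
#include <cstdio>

// Converts a block count and an elapsed time into binary gigabits per second.
static double throughput_gbps(std::int64_t block_number, double seconds) {
    const double bits = static_cast<double>(block_number) * 128.0; // 16 bytes per block
    return bits / seconds / static_cast<double>(1ULL << 30);       // 2^30 bits = 1 Gibit
}

int main() {
    const std::int64_t block_number = 1000000; // example block count
    const double total = 0.5;                  // example elapsed seconds
    // Matches the original expression block_number / total / (1 << 23):
    std::printf("Throughput: %f Gbps\n", throughput_gbps(block_number, total));
    std::printf("Original formula: %f Gbps\n",
                block_number / total / static_cast<double>(1L << 23));
    return 0;
}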
14d9ffe3e24a785fd9e071294449ce9e1204826e.cu
//初始版本,每个线程加密1块数据,线程网格每块最多256个线程,自己电脑运行吞吐量为0.0235297Gbps #include<stdio.h> #include<string.h> #include <stdint.h> #include <stdlib.h> #include <iostream> #include <fstream> #include <cstring> #include <cuda.h> #include <iomanip> #include <time.h> typedef uint64_t word_t; #define BYTE unsigned char #define BLOCK_SIZE 128 #define KEY_SCHEDULE_SIZE 176 #define WORD_SIZE 64 #define BS_BLOCK_SIZE (BLOCK_SIZE * WORD_SIZE / 8) #define WORDS_PER_BLOCK (BLOCK_SIZE / WORD_SIZE) #define ONE 1ULL #define MUL_SHIFT 6 #define bs2le(x) (x) #define bs2be(x) (x) using namespace std; class aes_block { public: BYTE block[16]; }; void printBytes(uint8_t b[],int len){ for(int i=0; i<len; i++) printf("%2x ",b[i]); printf("\n"); } __device__ void printByte(uint8_t b[],int len){ for(int i=0; i<len; i++) printf("%2x ",b[i]); printf("\n"); } void f1printBytes(BYTE b[], int len, FILE* fp) { int i; for (i=0; i<len; i++) fprintf(fp, "%02x ", b[i]); fprintf(fp, "\n"); } static const uint8_t sbox[256] = { //0 1 2 3 4 5 6 7 8 9 A B C D E F 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 }; static void rotate(unsigned char *in) { unsigned char a,c; a = in[0]; for(c=0;c<3;c++) in[c] = in[c + 1]; in[3] = a; return; } /* Calculate the rcon used in key expansion */ static unsigned char rcon(unsigned char in) { unsigned char c=1; if(in == 0) return 0; while(in != 1) { unsigned char b; b = c & 0x80; c <<= 1; if(b == 0x80) { c ^= 0x1b; } in--; } return c; } static void schedule_core(unsigned char *in, unsigned char i) { char a; /* Rotate the input 8 bits to the left */ rotate(in); /* Apply Rijndael's s-box on all 4 bytes */ for(a = 0; a < 4; a++) in[a] = sbox[in[a]]; /* On just the first byte, add 2^i to the byte */ in[0] ^= rcon(i); } void expand_key(unsigned char *in) { unsigned char t[4]; /* c is 16 because the first sub-key is the user-supplied key */ unsigned char c = 16; unsigned char i = 1; unsigned char a; /* We need 11 sets of sixteen bytes each for 128-bit mode */ while(c < 176) { /* Copy the temporary variable over from the last 4-byte * block */ for(a = 0; a 
< 4; a++) t[a] = in[a + c - 4]; /* Every four blocks (of four bytes), * do a complex calculation */ if(c % 16 == 0) { schedule_core(t,i); i++; } for(a = 0; a < 4; a++) { in[c] = in[c - 16] ^ t[a]; c++; } } } void key_transpose_dst(word_t * transpose, word_t * blocks) { int i,k; word_t w; for(k=0; k < WORD_SIZE; k++) { int bitpos = ONE << k; for (i=0; i < WORDS_PER_BLOCK; i++) { w = blocks[k * WORDS_PER_BLOCK + i]; int offset = i << MUL_SHIFT; transpose[(offset)+ 0 ] |= (w & (ONE << 0 )) ? (bitpos) : 0; transpose[(offset)+ 1 ] |= (w & (ONE << 1 )) ? (bitpos) : 0; transpose[(offset)+ 2 ] |= (w & (ONE << 2 )) ? (bitpos) : 0; transpose[(offset)+ 3 ] |= (w & (ONE << 3 )) ? (bitpos) : 0; transpose[(offset)+ 4 ] |= (w & (ONE << 4 )) ? (bitpos) : 0; transpose[(offset)+ 5 ] |= (w & (ONE << 5 )) ? (bitpos) : 0; transpose[(offset)+ 6 ] |= (w & (ONE << 6 )) ? (bitpos) : 0; transpose[(offset)+ 7 ] |= (w & (ONE << 7 )) ? (bitpos) : 0; transpose[(offset)+ 8 ] |= (w & (ONE << 8 )) ? (bitpos) : 0; transpose[(offset)+ 9 ] |= (w & (ONE << 9 )) ? (bitpos) : 0; transpose[(offset)+ 10] |= (w & (ONE << 10)) ? (bitpos) : 0; transpose[(offset)+ 11] |= (w & (ONE << 11)) ? (bitpos) : 0; transpose[(offset)+ 12] |= (w & (ONE << 12)) ? (bitpos) : 0; transpose[(offset)+ 13] |= (w & (ONE << 13)) ? (bitpos) : 0; transpose[(offset)+ 14] |= (w & (ONE << 14)) ? (bitpos) : 0; transpose[(offset)+ 15] |= (w & (ONE << 15)) ? (bitpos) : 0; transpose[(offset)+ 16] |= (w & (ONE << 16)) ? (bitpos) : 0; transpose[(offset)+ 17] |= (w & (ONE << 17)) ? (bitpos) : 0; transpose[(offset)+ 18] |= (w & (ONE << 18)) ? (bitpos) : 0; transpose[(offset)+ 19] |= (w & (ONE << 19)) ? (bitpos) : 0; transpose[(offset)+ 20] |= (w & (ONE << 20)) ? (bitpos) : 0; transpose[(offset)+ 21] |= (w & (ONE << 21)) ? (bitpos) : 0; transpose[(offset)+ 22] |= (w & (ONE << 22)) ? (bitpos) : 0; transpose[(offset)+ 23] |= (w & (ONE << 23)) ? (bitpos) : 0; transpose[(offset)+ 24] |= (w & (ONE << 24)) ? (bitpos) : 0; transpose[(offset)+ 25] |= (w & (ONE << 25)) ? (bitpos) : 0; transpose[(offset)+ 26] |= (w & (ONE << 26)) ? (bitpos) : 0; transpose[(offset)+ 27] |= (w & (ONE << 27)) ? (bitpos) : 0; transpose[(offset)+ 28] |= (w & (ONE << 28)) ? (bitpos) : 0; transpose[(offset)+ 29] |= (w & (ONE << 29)) ? (bitpos) : 0; transpose[(offset)+ 30] |= (w & (ONE << 30)) ? (bitpos) : 0; transpose[(offset)+ 31] |= (w & (ONE << 31)) ? (bitpos) : 0; transpose[(offset)+ 32] |= (w & (ONE << 32)) ? (bitpos) : 0; transpose[(offset)+ 33] |= (w & (ONE << 33)) ? (bitpos) : 0; transpose[(offset)+ 34] |= (w & (ONE << 34)) ? (bitpos) : 0; transpose[(offset)+ 35] |= (w & (ONE << 35)) ? (bitpos) : 0; transpose[(offset)+ 36] |= (w & (ONE << 36)) ? (bitpos) : 0; transpose[(offset)+ 37] |= (w & (ONE << 37)) ? (bitpos) : 0; transpose[(offset)+ 38] |= (w & (ONE << 38)) ? (bitpos) : 0; transpose[(offset)+ 39] |= (w & (ONE << 39)) ? (bitpos) : 0; transpose[(offset)+ 40] |= (w & (ONE << 40)) ? (bitpos) : 0; transpose[(offset)+ 41] |= (w & (ONE << 41)) ? (bitpos) : 0; transpose[(offset)+ 42] |= (w & (ONE << 42)) ? (bitpos) : 0; transpose[(offset)+ 43] |= (w & (ONE << 43)) ? (bitpos) : 0; transpose[(offset)+ 44] |= (w & (ONE << 44)) ? (bitpos) : 0; transpose[(offset)+ 45] |= (w & (ONE << 45)) ? (bitpos) : 0; transpose[(offset)+ 46] |= (w & (ONE << 46)) ? (bitpos) : 0; transpose[(offset)+ 47] |= (w & (ONE << 47)) ? (bitpos) : 0; transpose[(offset)+ 48] |= (w & (ONE << 48)) ? (bitpos) : 0; transpose[(offset)+ 49] |= (w & (ONE << 49)) ? (bitpos) : 0; transpose[(offset)+ 50] |= (w & (ONE << 50)) ? 
(bitpos) : 0; transpose[(offset)+ 51] |= (w & (ONE << 51)) ? (bitpos) : 0; transpose[(offset)+ 52] |= (w & (ONE << 52)) ? (bitpos) : 0; transpose[(offset)+ 53] |= (w & (ONE << 53)) ? (bitpos) : 0; transpose[(offset)+ 54] |= (w & (ONE << 54)) ? (bitpos) : 0; transpose[(offset)+ 55] |= (w & (ONE << 55)) ? (bitpos) : 0; transpose[(offset)+ 56] |= (w & (ONE << 56)) ? (bitpos) : 0; transpose[(offset)+ 57] |= (w & (ONE << 57)) ? (bitpos) : 0; transpose[(offset)+ 58] |= (w & (ONE << 58)) ? (bitpos) : 0; transpose[(offset)+ 59] |= (w & (ONE << 59)) ? (bitpos) : 0; transpose[(offset)+ 60] |= (w & (ONE << 60)) ? (bitpos) : 0; transpose[(offset)+ 61] |= (w & (ONE << 61)) ? (bitpos) : 0; transpose[(offset)+ 62] |= (w & (ONE << 62)) ? (bitpos) : 0; transpose[(offset)+ 63] |= (w & (ONE << 63)) ? (bitpos) : 0; } } } __device__ void bs_transpose_dst(word_t * transpose, word_t * blocks) { int i,k; word_t w; for(k=0; k < WORD_SIZE; k++) { int bitpos = ONE << k; for (i=0; i < WORDS_PER_BLOCK; i++) { w = bs2le(blocks[k * WORDS_PER_BLOCK + i]); int offset = i << MUL_SHIFT; transpose[(offset)+ 0 ] |= (w & (ONE << 0 )) ? (bitpos) : 0; transpose[(offset)+ 1 ] |= (w & (ONE << 1 )) ? (bitpos) : 0; transpose[(offset)+ 2 ] |= (w & (ONE << 2 )) ? (bitpos) : 0; transpose[(offset)+ 3 ] |= (w & (ONE << 3 )) ? (bitpos) : 0; transpose[(offset)+ 4 ] |= (w & (ONE << 4 )) ? (bitpos) : 0; transpose[(offset)+ 5 ] |= (w & (ONE << 5 )) ? (bitpos) : 0; transpose[(offset)+ 6 ] |= (w & (ONE << 6 )) ? (bitpos) : 0; transpose[(offset)+ 7 ] |= (w & (ONE << 7 )) ? (bitpos) : 0; transpose[(offset)+ 8 ] |= (w & (ONE << 8 )) ? (bitpos) : 0; transpose[(offset)+ 9 ] |= (w & (ONE << 9 )) ? (bitpos) : 0; transpose[(offset)+ 10] |= (w & (ONE << 10)) ? (bitpos) : 0; transpose[(offset)+ 11] |= (w & (ONE << 11)) ? (bitpos) : 0; transpose[(offset)+ 12] |= (w & (ONE << 12)) ? (bitpos) : 0; transpose[(offset)+ 13] |= (w & (ONE << 13)) ? (bitpos) : 0; transpose[(offset)+ 14] |= (w & (ONE << 14)) ? (bitpos) : 0; transpose[(offset)+ 15] |= (w & (ONE << 15)) ? (bitpos) : 0; transpose[(offset)+ 16] |= (w & (ONE << 16)) ? (bitpos) : 0; transpose[(offset)+ 17] |= (w & (ONE << 17)) ? (bitpos) : 0; transpose[(offset)+ 18] |= (w & (ONE << 18)) ? (bitpos) : 0; transpose[(offset)+ 19] |= (w & (ONE << 19)) ? (bitpos) : 0; transpose[(offset)+ 20] |= (w & (ONE << 20)) ? (bitpos) : 0; transpose[(offset)+ 21] |= (w & (ONE << 21)) ? (bitpos) : 0; transpose[(offset)+ 22] |= (w & (ONE << 22)) ? (bitpos) : 0; transpose[(offset)+ 23] |= (w & (ONE << 23)) ? (bitpos) : 0; transpose[(offset)+ 24] |= (w & (ONE << 24)) ? (bitpos) : 0; transpose[(offset)+ 25] |= (w & (ONE << 25)) ? (bitpos) : 0; transpose[(offset)+ 26] |= (w & (ONE << 26)) ? (bitpos) : 0; transpose[(offset)+ 27] |= (w & (ONE << 27)) ? (bitpos) : 0; transpose[(offset)+ 28] |= (w & (ONE << 28)) ? (bitpos) : 0; transpose[(offset)+ 29] |= (w & (ONE << 29)) ? (bitpos) : 0; transpose[(offset)+ 30] |= (w & (ONE << 30)) ? (bitpos) : 0; transpose[(offset)+ 31] |= (w & (ONE << 31)) ? (bitpos) : 0; transpose[(offset)+ 32] |= (w & (ONE << 32)) ? (bitpos) : 0; transpose[(offset)+ 33] |= (w & (ONE << 33)) ? (bitpos) : 0; transpose[(offset)+ 34] |= (w & (ONE << 34)) ? (bitpos) : 0; transpose[(offset)+ 35] |= (w & (ONE << 35)) ? (bitpos) : 0; transpose[(offset)+ 36] |= (w & (ONE << 36)) ? (bitpos) : 0; transpose[(offset)+ 37] |= (w & (ONE << 37)) ? (bitpos) : 0; transpose[(offset)+ 38] |= (w & (ONE << 38)) ? (bitpos) : 0; transpose[(offset)+ 39] |= (w & (ONE << 39)) ? 
(bitpos) : 0; transpose[(offset)+ 40] |= (w & (ONE << 40)) ? (bitpos) : 0; transpose[(offset)+ 41] |= (w & (ONE << 41)) ? (bitpos) : 0; transpose[(offset)+ 42] |= (w & (ONE << 42)) ? (bitpos) : 0; transpose[(offset)+ 43] |= (w & (ONE << 43)) ? (bitpos) : 0; transpose[(offset)+ 44] |= (w & (ONE << 44)) ? (bitpos) : 0; transpose[(offset)+ 45] |= (w & (ONE << 45)) ? (bitpos) : 0; transpose[(offset)+ 46] |= (w & (ONE << 46)) ? (bitpos) : 0; transpose[(offset)+ 47] |= (w & (ONE << 47)) ? (bitpos) : 0; transpose[(offset)+ 48] |= (w & (ONE << 48)) ? (bitpos) : 0; transpose[(offset)+ 49] |= (w & (ONE << 49)) ? (bitpos) : 0; transpose[(offset)+ 50] |= (w & (ONE << 50)) ? (bitpos) : 0; transpose[(offset)+ 51] |= (w & (ONE << 51)) ? (bitpos) : 0; transpose[(offset)+ 52] |= (w & (ONE << 52)) ? (bitpos) : 0; transpose[(offset)+ 53] |= (w & (ONE << 53)) ? (bitpos) : 0; transpose[(offset)+ 54] |= (w & (ONE << 54)) ? (bitpos) : 0; transpose[(offset)+ 55] |= (w & (ONE << 55)) ? (bitpos) : 0; transpose[(offset)+ 56] |= (w & (ONE << 56)) ? (bitpos) : 0; transpose[(offset)+ 57] |= (w & (ONE << 57)) ? (bitpos) : 0; transpose[(offset)+ 58] |= (w & (ONE << 58)) ? (bitpos) : 0; transpose[(offset)+ 59] |= (w & (ONE << 59)) ? (bitpos) : 0; transpose[(offset)+ 60] |= (w & (ONE << 60)) ? (bitpos) : 0; transpose[(offset)+ 61] |= (w & (ONE << 61)) ? (bitpos) : 0; transpose[(offset)+ 62] |= (w & (ONE << 62)) ? (bitpos) : 0; transpose[(offset)+ 63] |= (w & (ONE << 63)) ? (bitpos) : 0; } } } __device__ void bs_transpose_rev(word_t * blocks) { int k; word_t w; word_t transpose[BLOCK_SIZE]; memset(transpose, 0, sizeof(transpose)); for(k=0; k < BLOCK_SIZE; k++) { w = blocks[k]; word_t bitpos = ONE << (k % WORD_SIZE); word_t offset = k / WORD_SIZE; transpose[0 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 0 )) ? bitpos : 0; transpose[1 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 1 )) ? bitpos : 0; transpose[2 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 2 )) ? bitpos : 0; transpose[3 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 3 )) ? bitpos : 0; transpose[4 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 4 )) ? bitpos : 0; transpose[5 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 5 )) ? bitpos : 0; transpose[6 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 6 )) ? bitpos : 0; transpose[7 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 7 )) ? bitpos : 0; transpose[8 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 8 )) ? bitpos : 0; transpose[9 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 9 )) ? bitpos : 0; transpose[10 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 10)) ? bitpos : 0; transpose[11 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 11)) ? bitpos : 0; transpose[12 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 12)) ? bitpos : 0; transpose[13 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 13)) ? bitpos : 0; transpose[14 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 14)) ? bitpos : 0; transpose[15 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 15)) ? bitpos : 0; transpose[16 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 16)) ? bitpos : 0; transpose[17 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 17)) ? bitpos : 0; transpose[18 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 18)) ? bitpos : 0; transpose[19 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 19)) ? bitpos : 0; transpose[20 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 20)) ? bitpos : 0; transpose[21 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 21)) ? bitpos : 0; transpose[22 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 22)) ? 
bitpos : 0; transpose[23 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 23)) ? bitpos : 0; transpose[24 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 24)) ? bitpos : 0; transpose[25 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 25)) ? bitpos : 0; transpose[26 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 26)) ? bitpos : 0; transpose[27 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 27)) ? bitpos : 0; transpose[28 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 28)) ? bitpos : 0; transpose[29 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 29)) ? bitpos : 0; transpose[30 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 30)) ? bitpos : 0; transpose[31 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 31)) ? bitpos : 0; transpose[32 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 32)) ? bitpos : 0; transpose[33 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 33)) ? bitpos : 0; transpose[34 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 34)) ? bitpos : 0; transpose[35 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 35)) ? bitpos : 0; transpose[36 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 36)) ? bitpos : 0; transpose[37 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 37)) ? bitpos : 0; transpose[38 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 38)) ? bitpos : 0; transpose[39 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 39)) ? bitpos : 0; transpose[40 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 40)) ? bitpos : 0; transpose[41 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 41)) ? bitpos : 0; transpose[42 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 42)) ? bitpos : 0; transpose[43 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 43)) ? bitpos : 0; transpose[44 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 44)) ? bitpos : 0; transpose[45 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 45)) ? bitpos : 0; transpose[46 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 46)) ? bitpos : 0; transpose[47 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 47)) ? bitpos : 0; transpose[48 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 48)) ? bitpos : 0; transpose[49 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 49)) ? bitpos : 0; transpose[50 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 50)) ? bitpos : 0; transpose[51 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 51)) ? bitpos : 0; transpose[52 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 52)) ? bitpos : 0; transpose[53 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 53)) ? bitpos : 0; transpose[54 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 54)) ? bitpos : 0; transpose[55 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 55)) ? bitpos : 0; transpose[56 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 56)) ? bitpos : 0; transpose[57 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 57)) ? bitpos : 0; transpose[58 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 58)) ? bitpos : 0; transpose[59 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 59)) ? bitpos : 0; transpose[60 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 60)) ? bitpos : 0; transpose[61 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 61)) ? bitpos : 0; transpose[62 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 62)) ? bitpos : 0; transpose[63 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 63)) ? 
bitpos : 0; } memcpy(blocks,transpose,sizeof(transpose)); } void key_transpose(word_t * blocks) { word_t transpose[BLOCK_SIZE]; memset(transpose, 0, sizeof(transpose)); key_transpose_dst(transpose,blocks); memcpy(blocks,transpose,sizeof(transpose)); } __device__ void bs_transpose(word_t * blocks) { word_t transpose[BLOCK_SIZE]; memset(transpose, 0, sizeof(transpose)); bs_transpose_dst(transpose,blocks); memcpy(blocks,transpose,sizeof(transpose)); } void bs_expand_key(word_t (* rk)[BLOCK_SIZE], uint8_t * _key) { // TODO integrate this better uint8_t key[KEY_SCHEDULE_SIZE]; memmove(key,_key,BLOCK_SIZE/8); expand_key(key); int i, j = 0, k, l; for (i = 0; i < KEY_SCHEDULE_SIZE; i += (BLOCK_SIZE/8)) { memmove(rk[j], key + i, BLOCK_SIZE / 8); for (k = WORDS_PER_BLOCK; k < 128; k += WORDS_PER_BLOCK) { for (l = 0; l < WORDS_PER_BLOCK; l++) { rk[j][k + l] = rk[j][l]; } } key_transpose(rk[j]); j++; } } __device__ void bs_addroundkey(word_t * B, word_t * rk) { int i; for (i = 0; i < BLOCK_SIZE; i++) B[i] ^= rk[i]; } __device__ void bs_sbox(word_t U[8]) { word_t S[8]; word_t T1,T2,T3,T4,T5,T6,T7,T8, T9,T10,T11,T12,T13,T14,T15,T16, T17,T18,T19,T20,T21,T22,T23,T24, T25, T26, T27; word_t M1,M2,M3,M4,M5,M6,M7,M8, M9,M10,M11,M12,M13,M14,M15, M16,M17,M18,M19,M20,M21,M22, M23,M24,M25,M26,M27,M28,M29, M30,M31,M32,M33,M34,M35,M36, M37,M38,M39,M40,M41,M42,M43, M44,M45,M46,M47,M48,M49,M50, M51,M52,M53,M54,M55,M56,M57, M58,M59,M60,M61,M62,M63; word_t L0,L1,L2,L3,L4,L5,L6,L7,L8, L9,L10,L11,L12,L13,L14, L15,L16,L17,L18,L19,L20, L21,L22,L23,L24,L25,L26, L27,L28,L29; T1 = U[7] ^ U[4]; T2 = U[7] ^ U[2]; T3 = U[7] ^ U[1]; T4 = U[4] ^ U[2]; T5 = U[3] ^ U[1]; T6 = T1 ^ T5; T7 = U[6] ^ U[5]; T8 = U[0] ^ T6; T9 = U[0] ^ T7; T10 = T6 ^ T7; T11 = U[6] ^ U[2]; T12 = U[5] ^ U[2]; T13 = T3 ^ T4; T14 = T6 ^ T11; T15 = T5 ^ T11; T16 = T5 ^ T12; T17 = T9 ^ T16; T18 = U[4] ^ U[0]; T19 = T7 ^ T18; T20 = T1 ^ T19; T21 = U[1] ^ U[0]; T22 = T7 ^ T21; T23 = T2 ^ T22; T24 = T2 ^ T10; T25 = T20 ^ T17; T26 = T3 ^ T16; T27 = T1 ^ T12; M1 = T13 & T6; M2 = T23 & T8; M3 = T14 ^ M1; M4 = T19 & U[0]; M5 = M4 ^ M1; M6 = T3 & T16; M7 = T22 & T9; M8 = T26 ^ M6; M9 = T20 & T17; M10 = M9 ^ M6; M11 = T1 & T15; M12 = T4 & T27; M13 = M12 ^ M11; M14 = T2 & T10; M15 = M14 ^ M11; M16 = M3 ^ M2; M17 = M5 ^ T24; M18 = M8 ^ M7; M19 = M10 ^ M15; M20 = M16 ^ M13; M21 = M17 ^ M15; M22 = M18 ^ M13; M23 = M19 ^ T25; M24 = M22 ^ M23; M25 = M22 & M20; M26 = M21 ^ M25; M27 = M20 ^ M21; M28 = M23 ^ M25; M29 = M28 & M27; M30 = M26 & M24; M31 = M20 & M23; M32 = M27 & M31; M33 = M27 ^ M25; M34 = M21 & M22; M35 = M24 & M34; M36 = M24 ^ M25; M37 = M21 ^ M29; M38 = M32 ^ M33; M39 = M23 ^ M30; M40 = M35 ^ M36; M41 = M38 ^ M40; M42 = M37 ^ M39; M43 = M37 ^ M38; M44 = M39 ^ M40; M45 = M42 ^ M41; M46 = M44 & T6; M47 = M40 & T8; M48 = M39 & U[0]; M49 = M43 & T16; M50 = M38 & T9; M51 = M37 & T17; M52 = M42 & T15; M53 = M45 & T27; M54 = M41 & T10; M55 = M44 & T13; M56 = M40 & T23; M57 = M39 & T19; M58 = M43 & T3; M59 = M38 & T22; M60 = M37 & T20; M61 = M42 & T1; M62 = M45 & T4; M63 = M41 & T2; L0 = M61 ^ M62; L1 = M50 ^ M56; L2 = M46 ^ M48; L3 = M47 ^ M55; L4 = M54 ^ M58; L5 = M49 ^ M61; L6 = M62 ^ L5; L7 = M46 ^ L3; L8 = M51 ^ M59; L9 = M52 ^ M53; L10 = M53 ^ L4; L11 = M60 ^ L2; L12 = M48 ^ M51; L13 = M50 ^ L0; L14 = M52 ^ M61; L15 = M55 ^ L1; L16 = M56 ^ L0; L17 = M57 ^ L1; L18 = M58 ^ L8; L19 = M63 ^ L4; L20 = L0 ^ L1; L21 = L1 ^ L7; L22 = L3 ^ L12; L23 = L18 ^ L2; L24 = L15 ^ L9; L25 = L6 ^ L10; L26 = L7 ^ L9; L27 = L8 ^ L10; L28 = L11 ^ L14; L29 = L11 ^ L17; S[7] = L6 ^ L24; 
S[6] = ~(L16 ^ L26); S[5] = ~(L19 ^ L28); S[4] = L6 ^ L21; S[3] = L20 ^ L22; S[2] = L25 ^ L29; S[1] = ~(L13 ^ L27); S[0] = ~(L6 ^ L23); memcpy(U,S,sizeof(S)); } __device__ void bs_apply_sbox(word_t * input) { int i; for(i=0; i < BLOCK_SIZE; i+=8) { bs_sbox(input+i); } } #define A0 0 #define A1 8 #define A2 16 #define A3 24 #define R0 0 #define R1 8 #define R2 16 #define R3 24 #define B0 0 #define B1 32 #define B2 64 #define B3 96 // Does shift rows and mix columns in same step __device__ void bs_shiftmix(word_t * B) { word_t Bp_space[BLOCK_SIZE]; word_t * Bp = Bp_space; word_t * Br0 = B + 0; word_t * Br1 = B + 32; word_t * Br2 = B + 64; word_t * Br3 = B + 96; uint8_t offsetr0 = 0; uint8_t offsetr1 = 32; uint8_t offsetr2 = 64; uint8_t offsetr3 = 96; Br0 = B + offsetr0; Br1 = B + offsetr1; Br2 = B + offsetr2; Br3 = B + offsetr3; int i; for (i = 0; i < 4; i++) { // B0 // 2*A0 2*A1 A1 A2 A3 word_t of =Br0[R0+7]^ Br1[R1+7]; Bp[A0+0] = Br1[R1+0] ^ Br2[R2+0] ^ Br3[R3+0] ^ of; Bp[A0+1] = Br0[R0+0] ^ Br1[R1+0] ^ Br1[R1+1] ^ Br2[R2+1] ^ Br3[R3+1] ^ of; Bp[A0+2] = Br0[R0+1] ^ Br1[R1+1] ^ Br1[R1+2] ^ Br2[R2+2] ^ Br3[R3+2]; Bp[A0+3] = Br0[R0+2] ^ Br1[R1+2] ^ Br1[R1+3] ^ Br2[R2+3] ^ Br3[R3+3] ^ of; Bp[A0+4] = Br0[R0+3] ^ Br1[R1+3] ^ Br1[R1+4] ^ Br2[R2+4] ^ Br3[R3+4] ^ of; Bp[A0+5] = Br0[R0+4] ^ Br1[R1+4] ^ Br1[R1+5] ^ Br2[R2+5] ^ Br3[R3+5]; Bp[A0+6] = Br0[R0+5] ^ Br1[R1+5] ^ Br1[R1+6] ^ Br2[R2+6] ^ Br3[R3+6]; Bp[A0+7] = Br0[R0+6] ^ Br1[R1+6] ^ Br1[R1+7] ^ Br2[R2+7] ^ Br3[R3+7]; // A0 2*A1 2*A2 A2 A3 of = Br1[R1+7] ^ Br2[R2+7]; Bp[A1+0] = Br0[R0+0] ^ Br2[R2+0] ^ Br3[R3+0] ^ of; Bp[A1+1] = Br0[R0+1] ^ Br1[R1+0] ^ Br2[R2+0] ^ Br2[R2+1] ^ Br3[R3+1] ^ of; Bp[A1+2] = Br0[R0+2] ^ Br1[R1+1] ^ Br2[R2+1] ^ Br2[R2+2] ^ Br3[R3+2]; Bp[A1+3] = Br0[R0+3] ^ Br1[R1+2] ^ Br2[R2+2] ^ Br2[R2+3] ^ Br3[R3+3] ^ of; Bp[A1+4] = Br0[R0+4] ^ Br1[R1+3] ^ Br2[R2+3] ^ Br2[R2+4] ^ Br3[R3+4] ^ of; Bp[A1+5] = Br0[R0+5] ^ Br1[R1+4] ^ Br2[R2+4] ^ Br2[R2+5] ^ Br3[R3+5]; Bp[A1+6] = Br0[R0+6] ^ Br1[R1+5] ^ Br2[R2+5] ^ Br2[R2+6] ^ Br3[R3+6]; Bp[A1+7] = Br0[R0+7] ^ Br1[R1+6] ^ Br2[R2+6] ^ Br2[R2+7] ^ Br3[R3+7]; // A0 A1 2*A2 2*A3 A3 of = Br2[R2+7] ^ Br3[R3+7]; Bp[A2+0] = Br0[R0+0] ^ Br1[R1+0] ^ Br3[R3+0] ^ of; Bp[A2+1] = Br0[R0+1] ^ Br1[R1+1] ^ Br2[R2+0] ^ Br3[R3+0] ^ Br3[R3+1] ^ of; Bp[A2+2] = Br0[R0+2] ^ Br1[R1+2] ^ Br2[R2+1] ^ Br3[R3+1] ^ Br3[R3+2]; Bp[A2+3] = Br0[R0+3] ^ Br1[R1+3] ^ Br2[R2+2] ^ Br3[R3+2] ^ Br3[R3+3] ^ of; Bp[A2+4] = Br0[R0+4] ^ Br1[R1+4] ^ Br2[R2+3] ^ Br3[R3+3] ^ Br3[R3+4] ^ of; Bp[A2+5] = Br0[R0+5] ^ Br1[R1+5] ^ Br2[R2+4] ^ Br3[R3+4] ^ Br3[R3+5]; Bp[A2+6] = Br0[R0+6] ^ Br1[R1+6] ^ Br2[R2+5] ^ Br3[R3+5] ^ Br3[R3+6]; Bp[A2+7] = Br0[R0+7] ^ Br1[R1+7] ^ Br2[R2+6] ^ Br3[R3+6] ^ Br3[R3+7]; // A0 2*A0 A1 A2 2*A3 of = Br0[R0+7] ^ Br3[R3+7]; Bp[A3+0] = Br0[R0+0] ^ Br1[R1+0] ^ Br2[R2+0] ^ of; Bp[A3+1] = Br0[R0+1] ^ Br0[R0+0] ^ Br1[R1+1] ^ Br2[R2+1] ^ Br3[R3+0] ^ of; Bp[A3+2] = Br0[R0+2] ^ Br0[R0+1] ^ Br1[R1+2] ^ Br2[R2+2] ^ Br3[R3+1]; Bp[A3+3] = Br0[R0+3] ^ Br0[R0+2] ^ Br1[R1+3] ^ Br2[R2+3] ^ Br3[R3+2] ^ of; Bp[A3+4] = Br0[R0+4] ^ Br0[R0+3] ^ Br1[R1+4] ^ Br2[R2+4] ^ Br3[R3+3] ^ of; Bp[A3+5] = Br0[R0+5] ^ Br0[R0+4] ^ Br1[R1+5] ^ Br2[R2+5] ^ Br3[R3+4]; Bp[A3+6] = Br0[R0+6] ^ Br0[R0+5] ^ Br1[R1+6] ^ Br2[R2+6] ^ Br3[R3+5]; Bp[A3+7] = Br0[R0+7] ^ Br0[R0+6] ^ Br1[R1+7] ^ Br2[R2+7] ^ Br3[R3+6]; Bp += BLOCK_SIZE/4; offsetr0 = (offsetr0 + BLOCK_SIZE/4) & 0x7f; offsetr1 = (offsetr1 + BLOCK_SIZE/4) & 0x7f; offsetr2 = (offsetr2 + BLOCK_SIZE/4) & 0x7f; offsetr3 = (offsetr3 + BLOCK_SIZE/4) & 0x7f; Br0 = B + offsetr0; Br1 = B + offsetr1; Br2 = B 
+ offsetr2; Br3 = B + offsetr3; } memcpy(B,Bp_space,sizeof(Bp_space)); } __device__ void bs_shiftrows(word_t * B) { word_t Bp_space[BLOCK_SIZE]; word_t * Bp = Bp_space; word_t * Br0 = B + 0; word_t * Br1 = B + 32; word_t * Br2 = B + 64; word_t * Br3 = B + 96; uint8_t offsetr0 = 0; uint8_t offsetr1 = 32; uint8_t offsetr2 = 64; uint8_t offsetr3 = 96; int i; for(i=0; i<4; i++) { Bp[B0 + 0] = Br0[0]; Bp[B0 + 1] = Br0[1]; Bp[B0 + 2] = Br0[2]; Bp[B0 + 3] = Br0[3]; Bp[B0 + 4] = Br0[4]; Bp[B0 + 5] = Br0[5]; Bp[B0 + 6] = Br0[6]; Bp[B0 + 7] = Br0[7]; Bp[B1 + 0] = Br1[0]; Bp[B1 + 1] = Br1[1]; Bp[B1 + 2] = Br1[2]; Bp[B1 + 3] = Br1[3]; Bp[B1 + 4] = Br1[4]; Bp[B1 + 5] = Br1[5]; Bp[B1 + 6] = Br1[6]; Bp[B1 + 7] = Br1[7]; Bp[B2 + 0] = Br2[0]; Bp[B2 + 1] = Br2[1]; Bp[B2 + 2] = Br2[2]; Bp[B2 + 3] = Br2[3]; Bp[B2 + 4] = Br2[4]; Bp[B2 + 5] = Br2[5]; Bp[B2 + 6] = Br2[6]; Bp[B2 + 7] = Br2[7]; Bp[B3 + 0] = Br3[0]; Bp[B3 + 1] = Br3[1]; Bp[B3 + 2] = Br3[2]; Bp[B3 + 3] = Br3[3]; Bp[B3 + 4] = Br3[4]; Bp[B3 + 5] = Br3[5]; Bp[B3 + 6] = Br3[6]; Bp[B3 + 7] = Br3[7]; offsetr0 = (offsetr0 + BLOCK_SIZE/16 + BLOCK_SIZE/4) & 0x7f; offsetr1 = (offsetr1 + BLOCK_SIZE/16 + BLOCK_SIZE/4) & 0x7f; offsetr2 = (offsetr2 + BLOCK_SIZE/16 + BLOCK_SIZE/4) & 0x7f; offsetr3 = (offsetr3 + BLOCK_SIZE/16 + BLOCK_SIZE/4) & 0x7f; Br0 = B + offsetr0; Br1 = B + offsetr1; Br2 = B + offsetr2; Br3 = B + offsetr3; Bp += 8; } memcpy(B,Bp_space,sizeof(Bp_space)); } __device__ void bs_cipher(word_t state[BLOCK_SIZE], word_t (* rk)[BLOCK_SIZE]) { int round; bs_transpose(state); bs_addroundkey(state,rk[0]); for (round = 1; round < 10; round++) { bs_apply_sbox(state); bs_shiftmix(state); bs_addroundkey(state,rk[round]); } bs_apply_sbox(state); bs_shiftrows(state); bs_addroundkey(state,rk[10]); bs_transpose_rev(state); } __device__ void aes_ecb_encrypt(uint8_t * outputb, uint8_t * inputb, size_t size, word_t (* rk)[BLOCK_SIZE]) { word_t input_space[BLOCK_SIZE]; memset(outputb,0,size); word_t * state = (word_t *)outputb; while (size > 0) { if (size < BS_BLOCK_SIZE)//128*8 { memset(input_space,0,BS_BLOCK_SIZE); memcpy(input_space, inputb, size); bs_cipher(input_space,rk); memcpy(outputb, input_space, size); size = 0; state += size; } else { memcpy(state,inputb,BS_BLOCK_SIZE); bs_cipher(state,rk); size -= BS_BLOCK_SIZE; state += BS_BLOCK_SIZE; } } } __global__ void AES_Encrypt(aes_block aes_block_array[],word_t (* rk)[BLOCK_SIZE],int block_number){ int global_thread_index = blockDim.x*blockIdx.x + threadIdx.x; int stride=blockDim.x*gridDim.x; for(int real_thread=global_thread_index;real_thread < block_number;real_thread+=stride){ BYTE block[16]; BYTE output[16]; for(int i=0;i<16;i++){ block[i] =aes_block_array[real_thread].block[i]; } aes_ecb_encrypt(output,block,16,rk); for(int i=0;i<16;i++){ aes_block_array[real_thread].block[i]=output[i]; } } } int main(int argc, char* argv[]){ ifstream ifs; ifs.open(argv[1], ios::binary); if(!ifs){ cerr<<"Error: cannot open the input file"<<endl; exit(1); } ifs.seekg(0, ios::end); int infileLength = ifs.tellg(); infileLength-=1; ifs.seekg(0, ios::beg); cout<<"Input file length (bytes): "<<infileLength<<endl<<"Number of file blocks: "<<infileLength/16<<endl; int block_number = infileLength/16 ; int number_of_zero_pending = infileLength%16; aes_block* aes_block_array; BYTE key[16]; // the largest key needed by AES int keyLen = 0; // int blockLen = 16; ifstream key_fp; key_fp.open(argv[2]); while(key_fp.peek()!=EOF) { key_fp>>key[keyLen]; if(key_fp.eof()) break; keyLen++; } cout<<"Key length (bytes): "<<keyLen<<endl; word_t rk[11][BLOCK_SIZE]; bs_expand_key(rk, key); if(number_of_zero_pending != 0)
aes_block_array = new aes_block [ block_number + 1]; else aes_block_array = new aes_block[ block_number ]; char temp[16]; // FILE* en_fp; // output file for the ciphertext // en_fp = fopen(argv[3], "wb"); for(int i=0; i<block_number; i++){ ifs.read(temp, 16); for(int j=0; j<16; j++){ aes_block_array[i].block[j] = (unsigned char)temp[j]; } } if(number_of_zero_pending != 0) { ifs.read(temp, number_of_zero_pending); for(int j=0; j<16; j++){ aes_block_array[block_number].block[j] = (unsigned char)temp[j]; } for(int j=1; j<=16-number_of_zero_pending; j++) aes_block_array[block_number].block[16-j] = '\0'; block_number++; } cudaSetDevice(0); // select device cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); int num_sm = prop.multiProcessorCount; aes_block *cuda_aes_block_array; word_t (*cuda_key)[BLOCK_SIZE]; // int thrdperblock = block_number/num_sm; // if(block_number%num_sm>0) // thrdperblock++; // // a thread block can hold at most 1024 threads // if(thrdperblock>1024){ // thrdperblock = 1024; // num_sm = block_number/1024; // if(block_number%1024>0){ // num_sm++; // } // } dim3 ThreadperBlock(256); dim3 BlockperGrid(num_sm); cudaMalloc(&cuda_aes_block_array, block_number*sizeof(class aes_block)); cudaMalloc(&cuda_key,11*BLOCK_SIZE*sizeof(word_t)); cudaMemcpy(cuda_aes_block_array, aes_block_array, block_number*sizeof(class aes_block), cudaMemcpyHostToDevice); cudaMemcpy(cuda_key,rk,11*BLOCK_SIZE*sizeof(word_t),cudaMemcpyHostToDevice); printf("Number of data blocks to encrypt: %d\n", block_number); cudaEvent_t start1; cudaEventCreate(&start1); cudaEvent_t stop1; cudaEventCreate(&stop1); cudaEventRecord(start1, NULL); AES_Encrypt <<<BlockperGrid,ThreadperBlock>>>(cuda_aes_block_array,cuda_key,block_number); cudaEventRecord(stop1, NULL); cudaEventSynchronize(stop1); float msecTotal1 = 0.0f,total; cudaEventElapsedTime(&msecTotal1, start1, stop1); total=msecTotal1/1000; cout<<"Encryption time: "<<total<<endl; long r=1<<23; // unit conversion constant cout<<"Throughput: "<<block_number/total/r<<" Gbps"<<endl; cudaMemcpy(aes_block_array, cuda_aes_block_array, block_number*sizeof(class aes_block), cudaMemcpyDeviceToHost); // // write the encrypted blocks to a file // for(int i=0; i<block_number; i++) // f1printBytes(aes_block_array[i].block, blockLen, en_fp); return 0; }
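/*
 * Both versions above launch AES_Encrypt with a fixed dim3(256) threads per block and
 * one block per SM, and rely on the grid-stride loop inside the kernel to cover every
 * 16-byte data block even when block_number exceeds the total number of launched threads.
 * A self-contained sketch of that mapping; the kernel and variable names here, such as
 * tag_owner, are illustrative and not taken from the files above.
 */
#include <cstdio>
#include <cuda_runtime.h>

// Each thread records its global index into every element it handles, walking the
// array with the grid-wide stride exactly like AES_Encrypt walks its block array.
__global__ void tag_owner(int *owner, int n) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x; // global thread index
    int stride = blockDim.x * gridDim.x;             // total threads in the grid
    for (int i = idx; i < n; i += stride)
        owner[i] = idx;
}

int main() {
    const int n = 100000; // stand-in for block_number
    int *d_owner = nullptr;
    cudaMalloc(&d_owner, n * sizeof(int));

    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    dim3 ThreadperBlock(256);                    // same block size as above
    dim3 BlockperGrid(prop.multiProcessorCount); // one block per SM, as above

    tag_owner<<<BlockperGrid, ThreadperBlock>>>(d_owner, n);
    cudaDeviceSynchronize();

    int last = -1;
    cudaMemcpy(&last, d_owner + (n - 1), sizeof(int), cudaMemcpyDeviceToHost);
    std::printf("element %d was handled by thread %d\n", n - 1, last);

    cudaFree(d_owner);
    return 0;
}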
51fb4c2dd053ce4ff9cad9ed854ee4ee739d6d3d.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ #include "NativeOpExecutioner.h" #include "../NativeOps.h" #include <hip/hip_runtime.h> #include <buffer.h> #include <loops/transform_any.h> #include <loops/reduce_bool.h> #include <loops/reduce_long.h> #include <loops/scalar.h> #include <helpers/threshold.h> #include <ops/specials_cuda.h> #include <helpers/DebugHelper.h> #include <AffinityManager.h> #include <exceptions/datatype_exception.h> #include <exceptions/cuda_exception.h> #include <helpers/CudaLaunchHelper.h> #include <GraphExecutioner.h> #include <helpers/BlasHelper.h> #include <graph/GraphHolder.h> #include <ops/declarable/CustomOperations.h> #include <PointersManager.h> //#include <sys/time.h> #include <hiprand/hiprand.h> #include <Status.h> #include <helpers/DebugHelper.h> using namespace nd4j; #include <loops/special_kernels.h> #include <performance/benchmarking/FullBenchmarkSuit.h> #include <performance/benchmarking/LightBenchmarkSuit.h> hipDeviceProp_t *deviceProperties; hipFuncAttributes *funcAttributes = new hipFuncAttributes[64]; int blockLimit = 128; int maxThreads = 512; bool allowedP2P = false; bool supportedP2P = false; #ifdef __ND4J_EXPERIMENTAL__ bool experimentalSupport = true; #else bool experimentalSupport = false; #endif int minThreads = 32; __constant__ char deviceConstantMemory[49152]; // this method just does type conversion in fancy way int getDeviceId(Nd4jPointer ptrToDeviceId) { return (int)(Nd4jLong)ptrToDeviceId; } /* * Basic CUDA constants here: number of blocks per MP */ int getDeviceBlockThreshold(int deviceId) { int ccMinor = deviceProperties[deviceId].minor; int ccMajor = deviceProperties[deviceId].major; int blockThreshold = 8; if (ccMajor >= 5) blockThreshold = 32; else if (ccMajor == 3) blockThreshold = 16; else if (ccMajor < 3) blockThreshold = 8; return blockThreshold; } /* * This message returns shared memory threshold value. 
default overflow ratio is 0.3 */ int getDeviceSharedThreshold(int deviceId) { int ccMinor = deviceProperties[deviceId].minor; int ccMajor = deviceProperties[deviceId].major; // please note threshold isn't multiple of 32, and that's NOT a mistake int shmemThreshold; if (ccMajor == 6 && ccMinor == 0) shmemThreshold = 65536; else if (ccMajor == 6 && ccMinor == 1) shmemThreshold = 49152; else if (ccMajor == 5 && ccMinor == 2) shmemThreshold = 98304; else if (ccMajor == 5) shmemThreshold = 65536; else if (ccMajor == 3 && ccMinor == 7) shmemThreshold = 114688; else shmemThreshold = 49152; return shmemThreshold / 0.3; } nd4j::buffer::Buffer<Nd4jLong> * createScalarBuffer(hipStream_t stream) { Nd4jLong *scalarShapeInfo = shape::createScalarShapeInfo(); nd4j::buffer::Buffer<Nd4jLong> *buff = nd4j::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream); nd4j::buffer::copyDataToGpu(&buff, stream); return buff; } class ScalarShapeInformation { private: nd4j::buffer::Buffer<Nd4jLong> *scalarDimension; nd4j::buffer::Buffer<Nd4jLong> *scalarShapeInfo; // std::thread::id threadId; public: ScalarShapeInformation(hipStream_t stream) { auto scalarDimensionBuff = reinterpret_cast<Nd4jLong *>(malloc(sizeof(Nd4jLong))); CHECK_ALLOC(scalarDimensionBuff, "Failed to allocate ShapeInfoBuffer", sizeof(Nd4jLong)); scalarDimensionBuff[0] = MAX_DIMENSION; scalarDimension = nd4j::buffer::createBuffer(scalarDimensionBuff,1, stream); scalarShapeInfo = createScalarBuffer(stream); // threadId = std::this_thread::get_id(); } ~ScalarShapeInformation() { nd4j::buffer::freeBuffer(&scalarShapeInfo); nd4j::buffer::freeBuffer(&scalarDimension); } Nd4jLong *getShapeInfoHostPointer() { return scalarShapeInfo->data; } Nd4jLong * getShapeInfoGpuPointer() { return scalarShapeInfo->gData; } Nd4jLong * getDimensionHostPointer() { return scalarDimension->data; } Nd4jLong * getDimensionGpuPointer() { return scalarDimension->gData; } }; template <typename T> class ScalarInfo { nd4j::buffer::Buffer<T> *scalarData; ScalarShapeInformation *shapeInfo; T finalResult; hipStream_t streamRef; public: ScalarInfo(hipStream_t stream) { T *scalarResult = reinterpret_cast<T*>(malloc(sizeof(T))); CHECK_ALLOC(scalarResult, "Failed to allocate new scalar buffer", sizeof(T)); shapeInfo = new ScalarShapeInformation(stream); scalarData = nd4j::buffer::createBuffer(scalarResult,1, stream); streamRef = stream; nd4j::buffer::copyDataToGpu(&scalarData, stream); } T getFinalResultFromDevice() { nd4j::buffer::copyDataFromGpu(&scalarData, streamRef); return scalarData->data[0]; } /** * Get the device shape information * representing a scalar */ Nd4jLong *getDeviceShapeInfo() { return shapeInfo->getShapeInfoGpuPointer(); } /** * Get the dZ pointers */ T *getDevicePointer() { return scalarData->gData; } /** * Get the infinite dimension device pointer */ Nd4jLong *getDimensionDevicePointer() { return shapeInfo->getDimensionGpuPointer(); } ~ScalarInfo() { nd4j::buffer::freeBuffer(&scalarData); delete shapeInfo; } }; void execPairwiseTransform( Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execPairwiseTransform(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, 
extraParams); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execPairwiseTransformBool(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execPairwiseBoolTransform(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execSummaryStatsScalar(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, bool biasCorrected) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execSummaryStatsScalar(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo, dZ, dZShapeInfo, biasCorrected); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execBroadcastBool(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape) { try { //Nd4jLong *tadOnlyShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]); //Nd4jLong *tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[1]); //Nd4jLong *tadOnlyShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[2]); //Nd4jLong *tadOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[3]); auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]); auto tadOnlyShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]); auto tadOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execBroadcastBool(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } /** * * @param opNum * @param 
dX * @param dXShapeInfo * @param dY * @param dYShapeInfo * @param dZ * @param dZShapeInfo * @param dimension * @param dimensionLength */ void execBroadcast( Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape) { try { auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]); auto tadOnlyShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]); auto tadOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F3 opNum:[%i]\n", opNum); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execBroadcast(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } /** * * @param opNum * @param dX * @param dXShapeInfo * @param extraParams * @param dZ * @param dZShapeInfo */ //////////////////////////////////////////////////////////////////////// void execReduceFloat(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduceFloatScalar(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo, dZ, dZShapeInfo); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduceSame(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduceSameScalar(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo, dZ, dZShapeInfo); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduceSame2(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, 
void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape) { try { auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo, reinterpret_cast<int *>(hDimension), shape::length(hDimensionShape)); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduceSame(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadPack.specialShapeInfo(), tadPack.specialOffsets()); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduceLong2(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape) { try { auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo, reinterpret_cast<int *>(hDimension), shape::length(hDimensionShape)); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduceLong(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadPack.specialShapeInfo(), tadPack.specialOffsets()); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduceLong(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo) { try { auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("LF7 opNum:[%i]\n", opNum); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (zType != nd4j::DataType::INT64) throw datatype_exception::build("execReduceLong wrong Z data type", nd4j::DataType::INT64, zType); auto xLength = shape::length(hXShapeInfo); auto blockWidth = 256; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks, blockWidth, 32768); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hXShapeInfo, nullptr, 0, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, LONG_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "execReduceLong(...) 
failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduceBool2(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape) { try { auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo, reinterpret_cast<int *>(hDimension), shape::length(hDimensionShape)); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduceBool(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadPack.specialShapeInfo(), tadPack.specialOffsets()); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduceBool(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo) { try { auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("BF7 opNum:[%i]\n", opNum); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (zType != nd4j::DataType::BOOL) throw std::runtime_error("execReduceBool requires Z operand to have BOOL type"); auto xLength = shape::length(hXShapeInfo); auto blockWidth = 256; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks, blockWidth, 32768); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, nullptr, 0, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, BOOL_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "execReduceBool(...) 
failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } /** * * @param opNum * @param dX * @param dXShapeInfo * @param extraParams * @param dZ * @param dZShapeInfo * @param dimension * @param dimensionLength */ //////////////////////////////////////////////////////////////////////// void execIndexReduce(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape) { try { auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo, reinterpret_cast<int *>(hDimension), shape::length(hDimensionShape)); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execIndexReduce(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadPack.specialShapeInfo(), tadPack.specialOffsets()); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } /** * * @param opNum * @param dX * @param dXShapeInfo * @param extraParams * @param dZ * @param dZShapeInfo */ //////////////////////////////////////////////////////////////////////// void execReduceFloat2(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape) { try { auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo, reinterpret_cast<int *>(hDimension), shape::length(hDimensionShape)); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduceFloat(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadPack.specialShapeInfo(), tadPack.specialOffsets()); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } /** * * @param opNum * @param dX * @param dXShapeInfo * @param extraParams */ //////////////////////////////////////////////////////////////////////// void execIndexReduceScalar( Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo){ try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execIndexReduceScalar(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo, dZ, dZShapeInfo); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); 
nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execTransformSame(Nd4jPointer *extraPointers,int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { try { auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[0] : nullptr); auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[1] : nullptr); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execTransformSame(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams, tadShapeInfo, tadOffsets); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execTransformBool(Nd4jPointer *extraPointers,int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { try { auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[0] : nullptr); auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[1] : nullptr); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execTransformBool(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams, tadShapeInfo, tadOffsets); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execTransformAny(Nd4jPointer *extraPointers,int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { try { auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); auto streamSpecial = reinterpret_cast<hipStream_t &>(extraPointers[4]); LaunchContext lc(stream, streamSpecial, extraPointers[5], extraPointers[3], reinterpret_cast<int *>(extraPointers[6])); NativeOpExecutioner::execTransformAny(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams, nullptr, nullptr); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execTransformStrict(Nd4jPointer *extraPointers,int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { try { auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[10] : nullptr); auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? 
extraPointers[11] : nullptr); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execTransformStrict(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams, tadShapeInfo, tadOffsets); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execTransformFloat(Nd4jPointer *extraPointers,int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { try { auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[10] : nullptr); auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[11] : nullptr); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execTransformFloat(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams, tadShapeInfo, tadOffsets); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void checkP2P() { int curDevice = 0; hipGetDevice(&curDevice); int devCnt = 0; hipGetDeviceCount(&devCnt); if (curDevice < 0 && curDevice > devCnt) curDevice = 0; bool tempSupport = true; if (devCnt > 1) { for (int dX = 0; dX < devCnt; dX++) { for (int dY = 0; dY < devCnt; dY++) { if (dX == dY) continue; int canAccess = 0; hipSetDevice(dX); hipDeviceCanAccessPeer(&canAccess, dX , dY); if (!canAccess) { tempSupport = false; break; } } } supportedP2P = tempSupport; hipSetDevice(curDevice); } else { // if we have only 1 device - we say that we support P2P, since all data will be on 1 device supportedP2P = true; } } void enableP2P(bool enable) { if (enable == allowedP2P) return; int curDevice = 0; hipGetDevice(&curDevice); int devCnt = 0; hipGetDeviceCount(&devCnt); if (curDevice < 0 && curDevice > devCnt) curDevice = 0; if (devCnt > 1) { for (int dX = 0; dX < devCnt; dX++) { for (int dY = 0; dY < devCnt; dY++) { if (dX == dY) continue; int canAccess = 0; hipSetDevice(dX); hipDeviceCanAccessPeer(&canAccess, dX , dY); if (canAccess) { if (enable) { hipDeviceEnablePeerAccess(dY, 0); } else { hipDeviceDisablePeerAccess(dY); } } else { if (nd4j::Environment::getInstance()->isVerbose()) printf("Peer access [%i] -> [%i] isn't possible\n", dX, dY); } } } hipSetDevice(curDevice); } allowedP2P = enable; hipSetDevice(curDevice); } bool isP2PAvailable() { return supportedP2P; } void initializeDevicesAndFunctions() { try { int devCnt = 0; hipGetDeviceCount(&devCnt); deviceProperties = new hipDeviceProp_t[devCnt]; for (int i = 0; i < devCnt; i++) { hipSetDevice(i); hipGetDeviceProperties(&deviceProperties[i], i); hipDeviceSetLimit(hipLimitStackSize, 4096); } hipSetDevice(0); checkP2P(); // enabling p2p gpu access if it's supported if (supportedP2P && devCnt > 1) enableP2P(allowedP2P); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void initializeFunctions(Nd4jPointer *functions) { nd4j::BlasHelper::getInstance()->initializeDeviceFunctions(functions); /* hipblasSgemv = 
(CublasSgemv)functions[0]; hipblasDgemv = (CublasDgemv)functions[1]; hipblasHgemm = (CublasHgemm)functions[2]; hipblasSgemm = (CublasSgemm)functions[3]; hipblasDgemm = (CublasDgemm)functions[4]; cublasSgemmEx = (CublasSgemmEx)functions[5]; hipblasHgemmBatched = (CublasHgemmBatched)functions[6]; hipblasSgemmBatched = (CublasSgemmBatched)functions[7]; hipblasDgemmBatched = (CublasDgemmBatched)functions[8]; */ } /** * This method acquires memory chunk of requested size on host side * * @param pointer pointer that'll be used for allocation * @param memorySize memory size, in bytes * @param flags optional parameter */ Nd4jPointer mallocHost(Nd4jLong memorySize, int flags) { Nd4jPointer pointer; // hipHostMallocMapped |hipHostMallocPortable auto res = hipHostMalloc(reinterpret_cast<void **>(&pointer), memorySize + 8, hipHostMallocDefault); if (res != 0) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(res); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipHostMalloc failed"); } return reinterpret_cast<int8_t*>(pointer); } /** * This method acquires memory chunk of requested size on specified device * * @param pointer pointer that'll be used for allocation * @param memorySize memory size, in bytes * @param ptrToDeviceId pointer to deviceId. For cuda that's just and int, for OpenCL that's pointer to device_id, etc * @param flags optional parameter */ Nd4jPointer mallocDevice(Nd4jLong memorySize, int deviceId, int flags) { Nd4jPointer pointer; auto res = hipMalloc(reinterpret_cast<void **>(&pointer), memorySize + 8); if (res != 0) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(res); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMalloc failed"); } return reinterpret_cast<int8_t*>(pointer); } /** * This method releases previously allocated host memory space * * @param pointer pointer that'll be freed */ int freeHost(Nd4jPointer pointer) { auto res = hipHostFree(reinterpret_cast<void *>(pointer)); if (res != 0) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(res); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipHostFree failed"); } return 1L; } /** * This method releases previously allocated memory space on device * * @param pointer pointer that'll be freed * @param ptrToDeviceId pointer to deviceId. */ int freeDevice(Nd4jPointer pointer, int deviceId) { auto res = hipFree(reinterpret_cast<void *>(pointer)); // we're intentionally skipping if (res != 0 && res != 1) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(res); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipFree failed"); } return res == 0 ? 
1L : 0L; } Nd4jPointer createContext() { return 0L; } Nd4jPointer createStream() { auto stream = new hipStream_t(); auto dZ = hipStreamCreate(stream); if (dZ != 0) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipStreamCreate failed"); } return stream; } Nd4jPointer createEvent() { Nd4jPointer nativeEvent= (Nd4jPointer) malloc(sizeof(hipEvent_t)); CHECK_ALLOC(nativeEvent, "Failed to allocate new CUDA event buffer", sizeof(hipEvent_t)); auto dZ = hipEventCreateWithFlags(reinterpret_cast<hipEvent_t *>(&nativeEvent), hipEventDisableTiming); if (dZ != 0) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipEventCreateWithFlags failed"); } return nativeEvent; } int registerEvent(Nd4jPointer event, Nd4jPointer stream) { auto pEvent = reinterpret_cast<hipEvent_t *>(&event); auto pStream = reinterpret_cast<hipStream_t *>(stream); auto dZ = hipEventRecord(*pEvent, *pStream); if (dZ != 0) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipEventRecord failed"); } return 1; } int setDevice(int deviceId) { AffinityManager::setCurrentDevice(deviceId); return 1; } Nd4jLong getDeviceFreeMemoryDefault() { size_t memFree = 0; size_t memTotal = 0; hipMemGetInfo(&memFree, &memTotal); return (Nd4jLong) memFree; } Nd4jLong getDeviceFreeMemory(int device) { int orig = -1; hipGetDevice(&orig); if (device >= 0 && device != orig) { hipSetDevice(device); } size_t memFree = 0; size_t memTotal = 0; hipMemGetInfo(&memFree, &memTotal); if (device >= 0 && device != orig) { hipSetDevice(orig); } return (Nd4jLong) memFree; } Nd4jLong getDeviceTotalMemory(int device) { int orig = -1; hipGetDevice(&orig); if (device >= 0 && device != orig) { hipSetDevice(device); } size_t memFree = 0; size_t memTotal = 0; hipMemGetInfo(&memFree, &memTotal); if (device >= 0 && device != orig) { hipSetDevice(orig); } return (Nd4jLong) memTotal; } int memcpySync(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) { return memcpyAsync(dst, src, size, flags, reserved); } int memcpyAsync(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) { auto pStream = reinterpret_cast<hipStream_t *>(reserved); hipMemcpyKind kind; //nd4j::DebugHelper::checkErrorCode(pStream, "Preliminary sync failed"); switch (flags) { case 0: { kind = hipMemcpyHostToHost; } break; case 1: { kind = hipMemcpyHostToDevice; } break; case 2: { kind = hipMemcpyDeviceToHost; } break; case 3: { kind = hipMemcpyDeviceToDevice; } break; default: { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("UNDEFNED MEMCPY"); return 0; } } auto dZ = hipMemcpyAsync(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind, *pStream); //auto dZ = hipMemcpy(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind); if (dZ != 0) { printf("Failed on [%lu] -> [%lu], size: [%i], direction: [%i], dZ: [%i]\n", src, dst, size, flags, static_cast<int>(dZ)); fflush(stdout); fflush(stderr); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); 
nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMemcpyAsync failed"); } return 1; } int memsetSync(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) { auto dZ = hipMemset(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size)); if (dZ != 0) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMemset failed"); } return 1; } int memsetAsync(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) { auto pStream = reinterpret_cast<hipStream_t *>(reserved); auto dZ = hipMemsetAsync(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size), *pStream); if (dZ != 0) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMemsetAsync failed"); } return 1; } int destroyEvent(Nd4jPointer event) { auto pEvent = reinterpret_cast<hipEvent_t *>(&event); auto dZ = hipEventDestroy(*pEvent); if (dZ != 0) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipEventDestroy failed"); } return 1; } int streamSynchronize(Nd4jPointer stream) { auto pStream = reinterpret_cast<hipStream_t *>(stream); auto dZ = hipStreamSynchronize(*pStream); if (dZ != 0) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipStreamSynchronize failed"); } return 1L; } int eventSynchronize(Nd4jPointer event) { auto pEvent = reinterpret_cast<hipEvent_t *>(&event); auto dZ = hipEventSynchronize(*pEvent); if (dZ != 0) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipEventSynchronize failed"); } return 1L; } int getAvailableDevices() { int devCnt = 0; hipGetDeviceCount(&devCnt); return devCnt; } void enableDebugMode(bool reallyEnable) { nd4j::Environment::getInstance()->setDebug(reallyEnable); } void setGridLimit(int gridSize) { if (gridSize > 8192) gridSize = 8192; if (gridSize < 1) gridSize = 1; blockLimit = gridSize; } int ompGetMaxThreads() { return maxThreads; } int ompGetNumThreads() { return maxThreads; } void setOmpNumThreads(int threads) { if (threads > 1024) threads = 1024; if (threads < 32) threads = 32; maxThreads = threads; } void enableVerboseMode(bool reallyEnable) { nd4j::Environment::getInstance()->setVerbose(reallyEnable); } int getDeviceMajor(int device) { return deviceProperties[device].major; } int getDeviceMinor(int device) { return deviceProperties[device].minor; } const char * getDeviceName(int device) { return deviceProperties[device].name; } void specialConcat( Nd4jPointer *extraPointers, int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) { try { BUILD_SINGLE_SELECTOR(ArrayOptions::dataType(dZShapeInfo), nd4j::SpecialMethods, ::concatCpuGeneric(dimension, numArrays, data, inputShapeInfo, dZ, dZShapeInfo), LIBND4J_TYPES); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } /** * This method saves */ nd4j::TadPack* tadOnlyShapeInfo(Nd4jLong *dXShapeInfo, int *dimension, int 
dimensionLength) { try { auto pack = new TadPack(); *pack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(dXShapeInfo, dimension, dimensionLength); return pack; } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } Nd4jLong* getPrimaryShapeInfo(nd4j::TadPack* pack) { return pack->primaryShapeInfo(); } Nd4jLong* getPrimaryOffsets(nd4j::TadPack* pack) { return pack->primaryOffsets(); } Nd4jLong* getSpecialShapeInfo(nd4j::TadPack* pack) { return pack->specialShapeInfo(); } Nd4jLong* getSpecialOffsets(nd4j::TadPack* pack) { return pack->specialOffsets(); } Nd4jLong getNumberOfTads(nd4j::TadPack* pack) { return pack->numberOfTads(); } int getShapeInfoLength(nd4j::TadPack* pack) { return pack->shapeInfoLength(); } int memcpyConstantAsync(Nd4jLong dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) { hipStream_t *pStream = reinterpret_cast<hipStream_t *>(reserved); hipMemcpyKind kind; DEBUG_KERNEL(pStream, -1); switch (flags) { case 0: { kind = hipMemcpyHostToHost; } break; case 1: { kind = hipMemcpyHostToDevice; } break; case 2: { kind = hipMemcpyDeviceToHost; } case 3: { kind = hipMemcpyDeviceToDevice; } break; } auto dZ = hipMemcpyToSymbolAsync(deviceConstantMemory, const_cast<const void *>(src), size, dst, kind, *pStream); if (dZ != 0) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMemcpyToSymbolAsync failed"); } return 1; } Nd4jPointer getConstantSpace() { Nd4jPointer dConstAddr; hipError_t dZ = hipGetSymbolAddress(reinterpret_cast<void **>(&dConstAddr), deviceConstantMemory); if (dZ != 0) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipGetSymbolAddress failed"); } return dConstAddr; } void pullRows(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *z, Nd4jLong *zShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, Nd4jLong n, Nd4jLong *indexes, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *zTadShapeInfo, Nd4jLong *zTadOffsets) { try { hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); dim3 launchDims(64, 256, 1024); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); BUILD_SINGLE_SELECTOR(xType, pullRowsKernelGeneric, (launchDims, stream, dX, dZ, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets), LIBND4J_TYPES); DEBUG_KERNEL(stream, -1); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void average(Nd4jPointer *extras, Nd4jPointer *x, Nd4jLong *xShapeInfo, Nd4jPointer *dx, Nd4jLong *dXShapeInfo, void *z, Nd4jLong *zShapeInfo, void *dz, Nd4jLong *dzShapeInfo, int n, Nd4jLong length, bool propagate) { try { hipStream_t *stream = reinterpret_cast<hipStream_t *>(extras[1]); int mode = getDeviceId(extras[3]); auto dX = reinterpret_cast<void **>(dx); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("averageFloat called\n"); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); // launching on gpu if (mode == 0) { dim3 launchDims(256, 256, 4096); BUILD_SINGLE_SELECTOR(xType, averagingKernelGeneric, (launchDims, stream, dX, dz, n, length, propagate), 
LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "AverageFloat(...) failed"); } else { // launching on host memory BUILD_SINGLE_SELECTOR(xType, nd4j::SpecialMethods, ::averageGeneric(x, z, zShapeInfo, n, length, propagate), LIBND4J_TYPES); } } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void accumulate(Nd4jPointer *extras, Nd4jPointer *x, Nd4jLong *xShapeInfo, Nd4jPointer *dx, Nd4jLong *dXShapeInfo, void *z, Nd4jLong *zShapeInfo, void *dz, Nd4jLong *dzShapeInfo, int n, Nd4jLong length) { try { auto stream = reinterpret_cast<hipStream_t *>(extras[1]); int mode = getDeviceId(extras[3]); auto dX = reinterpret_cast<void **>(dx); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("accumulateFloat called\n"); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); // launching on gpu if (mode == 0) { dim3 launchDims(n, 256, 16384); BUILD_SINGLE_SELECTOR(xType, accumulateKernelGeneric, (launchDims, stream, dX, dz, n, length), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "AccumulateFloat(...) failed"); } else { // launching on host memory BUILD_SINGLE_SELECTOR(xType, nd4j::SpecialMethods, ::accumulateGeneric(x, z, zShapeInfo, n, length), LIBND4J_TYPES); } } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void shuffle(Nd4jPointer *extras, Nd4jPointer *x, Nd4jPointer *xShapeInfo, Nd4jPointer *dx, Nd4jPointer *dXShapeInfo, Nd4jPointer *z, Nd4jPointer *zShapeInfo, Nd4jPointer *dz, Nd4jPointer *dZShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) { try { hipStream_t *stream = reinterpret_cast<hipStream_t *>(extras[1]); auto dX = reinterpret_cast<void **>(dx); auto dZ = reinterpret_cast<void **>(dz); auto xShape = reinterpret_cast<Nd4jLong **>(xShapeInfo); auto dxShape = reinterpret_cast<Nd4jLong **>(dXShapeInfo); auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong **>(tadShapeInfo); auto tadOffset = reinterpret_cast<Nd4jLong **>(tadOffsets); auto xType = nd4j::ArrayOptions::dataType(xShape[0]); dim3 launchDims(256, 512, 8192); BUILD_SINGLE_SELECTOR(xType, shuffleKernelGeneric, (launchDims, stream, dX, dxShape, dZ, N, shuffleMap, tadOnlyShapeInfo, tadOffset), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "shuffle(...) 
failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } bool isExperimentalEnabled() { return nd4j::Environment::getInstance()->isExperimentalBuild(); } void setOmpMinThreads(int threads) { minThreads = nd4j::math::nd4j_max<int>(32, threads); minThreads = nd4j::math::nd4j_min<int>(maxThreads, minThreads); } int getDevice() { return nd4j::AffinityManager::currentDeviceId(); } void setElementThreshold(int num) { // this is no-op for CUDA } void setTADThreshold(int num) { // this is no-op for CUDA } //////////////////////////////////////////////////////////////////////// void execSummaryStats(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, bool biasCorrected) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execSummaryStats(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo, dZ, dZShapeInfo, biasCorrected); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execSummaryStatsTad(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape, bool biasCorrected, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { try { auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execSummaryStats(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, biasCorrected); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduce3(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduce3(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduce3Tad(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong 
*hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) { try { auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo, reinterpret_cast<int *>(hDimension), shape::length(hDimensionShape)); auto tadLength = shape::length(tadPack.primaryShapeInfo()); auto yLength = shape::length(hYShapeInfo); auto xLength = shape::length(hXShapeInfo); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); if (tadLength == yLength || tadLength == xLength) { // nd4j_printf("== way\n",""); NativeOpExecutioner::execReduce3(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets); } else NativeOpExecutioner::execReduce3TAD(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, yTadOffsets, yTadOnlyShapeInfo, yTadOffsets); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduce3Scalar(Nd4jPointer *extraPointers,int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduce3Scalar(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execScalarBool(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hScalar, Nd4jLong *hScalarShapeInfo, void *dScalar, Nd4jLong *dScalarShapeInfo, void *extraParams) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execScalarBool(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, hScalar, hScalarShapeInfo, dScalar, dScalarShapeInfo, extraParams); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execScalarBoolTad(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hScalars, Nd4jLong *hScalarShapeInfo, void *dScalars, Nd4jLong 
*dScalarShapeInfo, void *extraParams, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) { try { auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execScalarBool(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo, dZ, dZShapeInfo, hScalars, hScalarShapeInfo, dScalars, dScalarShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execScalar(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hScalar, Nd4jLong *hScalarShapeInfo, void *dScalar, Nd4jLong *dScalarShapeInfo, void *extraParams) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execScalar(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, hScalar, hScalarShapeInfo, dScalar, dScalarShapeInfo, extraParams); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execScalarTad(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hScalars, Nd4jLong *hScalarShapeInfo, void *dScalars, Nd4jLong *dScalarShapeInfo, void *extraParams, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) { try { auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (yType != xType && yType != nd4j::DataType::BOOL && !isExperimentalEnabled()) throw nd4j::datatype_exception::build("execScalar both operands must have same data type", xType, yType); dim3 launchDims(256, 256, 16384); #ifdef __ND4J_EXPERIMENTAL__ BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES, LIBND4J_TYPES); #else BUILD_SINGLE_SELECTOR_THRICE(xType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES); #endif DEBUG_KERNEL(stream, 
opNum); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void execAggregate(Nd4jPointer *extraPointers, int opNum, void **arguments, int numArguments, Nd4jLong **shapes, int numShapes, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, void *realArguments, int numRealArguments, nd4j::DataType dtype) { } void batchExecutor(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments, nd4j::DataType dtype) { } void execAggregateBatch(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments, nd4j::DataType dtype) { } //////////////////////////////////////////////////////////////////////// void execRandom(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraArguments) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execRandom(&lc, opNum, stateHost, hZ, hZShapeInfo, dZ, dZShapeInfo, extraArguments); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execRandom2(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraArguments) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execRandom(&lc, opNum, stateHost, hX, hXShapeInfo, dX, dXShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraArguments); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execRandom3(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraArguments) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execRandom(&lc, opNum, stateHost, hX, hXShapeInfo, dX, dXShapeInfo, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraArguments); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } Nd4jPointer initRandom(Nd4jPointer *extraPointers, long seed, long bufferSize, Nd4jPointer ptrToBuffer) { unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]); hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); // we don't synchronize at random initialization, it's safe to go unsync here // hipStreamSynchronize(*stream); auto ptrDev = reinterpret_cast<unsigned long long 
*>(ptrToBuffer); auto buffer = new nd4j::random::RandomBuffer(seed, bufferSize, reinterpret_cast<uint64_t *>(ptrHost), reinterpret_cast<uint64_t *>(ptrDev)); buffer->propagateToDevice(buffer, *stream); nd4j::DebugHelper::checkErrorCode(stream, "initRandom(...) failed A"); // we generate sequence in the host memory nd4j::random::Xoroshiro128 generator(buffer); generator.refreshBuffer(); // and copy it to gpu hipMemcpyAsync(ptrDev, ptrHost, bufferSize * 8, hipMemcpyHostToDevice, *stream); nd4j::DebugHelper::checkErrorCode(stream, "initRandom(...) failed B"); return buffer; } void destroyRandom(Nd4jPointer ptrBuffer) { nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrBuffer); // FIXME: it's bad thing, but we can't know in advance, which stream(s) where using this generator in practice hipDeviceSynchronize(); delete buffer; } void refreshBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) { nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom); unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]); hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); hipStreamSynchronize(*stream); uint64_t *ptrDev = buffer->getDeviceBuffer(); // update rng state buffer->setSeed(seed); buffer->setOffset(0); buffer->propagateToDevice(buffer, *stream); // refresh buffer on host size nd4j::random::Xoroshiro128 generator(buffer); generator.refreshBuffer(); // copy back to gpu hipMemcpyAsync(ptrDev, ptrHost, buffer->getSize() * 8, hipMemcpyHostToDevice, *stream); } void reSeedBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) { nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom); hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); hipStreamSynchronize(*stream); // update rng state buffer->reSeed(seed); buffer->setOffset(0); buffer->propagateToDevice(buffer, *stream); } /** * Return the length of a shape buffer * based on the pointer * @param buffer the buffer pointer to check * @return */ int lengthForShapeBufferPointer(Nd4jPointer buffer) { auto shapeBuffer = reinterpret_cast<Nd4jLong *>(buffer); return shape::shapeInfoLength(shape::rank(shapeBuffer)); } /** * The pointer to get the address for * * @param address the address to get the pointer * @return the pointer for the given address */ Nd4jPointer pointerForAddress(Nd4jLong address) { return reinterpret_cast<Nd4jPointer >(address); } void tear(Nd4jPointer *extras, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, Nd4jPointer *targets, Nd4jLong *zShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { try { hipStream_t *stream = reinterpret_cast<hipStream_t *>(extras[1]); dim3 launchDims(512, 512, 512); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); BUILD_SINGLE_SELECTOR(xType, tearKernelGeneric, (launchDims, stream, dX, dXShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "tearFloat(...) 
failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void prescanArrayRecursive(Nd4jPointer *extras, int *dZ, int *dX, int numElements, int level) { auto stream = reinterpret_cast<hipStream_t *>(extras[1]); auto g_scanBlockSums = reinterpret_cast<int **>(extras[2]); int blockSize = 512; // max size of the thread blocks int numBlocks = nd4j::math::nd4j_max<int>(1, static_cast<int>(ceil(static_cast<float>(numElements) / (2.f * blockSize)))); int numThreads; if (numBlocks > 1) numThreads = blockSize; else if (nd4j::isPowerOfTwo(numElements)) numThreads = numElements / 2; else numThreads = nd4j::floorPow2(numElements); int numEltsPerBlock = numThreads * 2; // if this is a non-power-of-2 array, the last block will be non-full // compute the smallest power of 2 able to compute its scan. int numEltsLastBlock = numElements - (numBlocks-1) * numEltsPerBlock; int numThreadsLastBlock = nd4j::math::nd4j_max<int>(1, numEltsLastBlock / 2); int np2LastBlock = 0; int sharedMemLastBlock = 0; if (numEltsLastBlock != numEltsPerBlock) { np2LastBlock = 1; if(!isPowerOfTwo(numEltsLastBlock)) numThreadsLastBlock = floorPow2(numEltsLastBlock); unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS; sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace); } // padding space is used to avoid shared memory bank conflicts int extraSpace = numEltsPerBlock / NUM_BANKS; int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace); // setup execution parameters // if NP2, we process the last block separately dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1); dim3 threads(numThreads, 1, 1); dim3 gridOnes(1, 1, 1); dim3 threadsOnes(numThreadsLastBlock, 1, 1); if (sharedMemSize < 2048) sharedMemSize = 2048; if (sharedMemLastBlock < 2048) sharedMemLastBlock = 2048; // execute the scan if (numBlocks > 1) { nd4j::prescanLauncher<true, false>(grid, threads, sharedMemSize, stream, dZ, dX, g_scanBlockSums[level], numThreads * 2, 0, 0); if (np2LastBlock) { nd4j::prescanLauncher<true, true>(gridOnes, threadsOnes, sharedMemLastBlock, stream, dZ, dX, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock); } // After scanning all the sub-blocks, we are mostly done. But now we // need to take all of the last values of the sub-blocks and scan those. // This will give us a new value that must be sdded to each block to // get the final results. // recursive (CPU) call prescanArrayRecursive(extras, g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level+1); hipLaunchKernelGGL(( nd4j::uniformAdd), dim3(grid), dim3(threads), 1024, *stream, dZ, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0); if (np2LastBlock) { hipLaunchKernelGGL(( nd4j::uniformAdd), dim3(1), dim3(numThreadsLastBlock), 1024, *stream, dZ, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock); } } else if (isPowerOfTwo(numElements)) { nd4j::prescanLauncher<false, false>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numThreads * 2, 0, 0); } else { nd4j::prescanLauncher<false, true>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numElements, 0, 0); } nd4j::DebugHelper::checkErrorCode(stream, "prescanArray(...) 
failed"); } void encodeThresholdP1(Nd4jPointer *extras, void *dx, Nd4jLong *hXShapeInfo, Nd4jLong N, int *dz, float threshold) { try { hipStream_t *stream = reinterpret_cast<hipStream_t *>(extras[1]); int blockSize = 1024; int numBlocks = N / blockSize + (N % blockSize ? 1 : 0); dim3 launchDims(numBlocks, blockSize, 1024); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); BUILD_SINGLE_SELECTOR(xType, encoderKernelP1Generic, (launchDims, stream, dx, N, dz, threshold), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP1Float(...) failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void encodeThresholdP2Int(Nd4jPointer *extraPointers, int *dx, Nd4jLong N, int *dz) { try { hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); //encoderKernelP2Float<<<numBlocks, blockSize , 1024 * sizeof(float), *stream>>>(dx, N, dz); prescanArrayRecursive(extraPointers, dz, dx + 1, (int) N, 0); nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP2Int(...) failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void encodeThresholdP3(Nd4jPointer *extraPointers, void *dx, Nd4jLong *hXShapeInfo, int *offsets, Nd4jLong N, int *dz){ try { hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); int blockSize = 1024; int numBlocks = N / blockSize + (N % blockSize ? 1 : 0); dim3 launchDims(numBlocks, blockSize, 4096); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); BUILD_SINGLE_SELECTOR(xType, encoderKernelP3Generic, (launchDims, stream, dx, offsets, N, dz), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP3Float(...) failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void decodeThreshold(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, void *dz, Nd4jLong *zShapeInfo){ try { hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); // we probably want to have smaller blocks here, memory writes are misaligned anyway int blockSize = 128; int numBlocks = N / blockSize + (N % blockSize ? 1 : 0); dim3 launchDims(numBlocks, blockSize, 1024); auto zType = nd4j::ArrayOptions::dataType(zShapeInfo); BUILD_SINGLE_SELECTOR(zType, decoderKernelGeneric, (launchDims, stream, dx, N, dz), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "decodeThresholdFloat(...) 
failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduce3All(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParamsVals, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape, Nd4jLong *xTadShapeInfo, Nd4jLong *xOffsets, Nd4jLong *yTadShapeInfo, Nd4jLong *yOffsets) { try { auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduce3All(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParamsVals, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sort(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, bool descending) { try { hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); auto xLength = shape::length(xShapeInfo); auto xEWS = shape::elementWiseStride(xShapeInfo); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); // check if xLength is a power of 2, and use bitonic sort, if that's the case if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; dim3 launchDims(numBlocks, numThreads, 32768); for (int k = 2; k <= xLength; k = 2 * k) { for (int j = k >> 1; j > 0; j = j >> 1) { BUILD_SINGLE_SELECTOR(xType, bitonicSortStepGeneric, (launchDims, stream, dX, dXShapeInfo, j, k, xLength, descending), LIBND4J_TYPES); } } } else { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks); dim3 launchDims(numBlocks, numThreads, 32768); int max = 2, dg = 0; while (max < xLength) { max <<= 1; dg++; } max <<= 1; for (int window = 2; window < max; window <<= 1) { int n = window; int rev = 0; do { int half = n >> 1; BUILD_SINGLE_SELECTOR(xType, bitonicArbitraryStepGeneric, (launchDims, stream, dX, dXShapeInfo, n, xLength, rev, descending), LIBND4J_TYPES); n >>= 1; rev = 1; } while (n > 1); } } nd4j::DebugHelper::checkErrorCode(stream, "sort(...) 
failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortByKey(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *y, Nd4jLong *yShapeInfo, void *dy, Nd4jLong *dyShapeInfo, bool descending) { try { auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); auto xLength = shape::length(xShapeInfo); auto xEWS = shape::elementWiseStride(xShapeInfo); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); auto yType = nd4j::ArrayOptions::dataType(yShapeInfo); // check if xLength is a power of 2, and use bitonic sort, if that's the case if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; dim3 launchDims(numBlocks, numThreads, 32768); for (int k = 2; k <= xLength; k = 2 * k) { for (int j = k >> 1; j > 0; j = j >> 1) { BUILD_DOUBLE_SELECTOR(xType, yType, bitonicSortStepGenericKey, (launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, j, k, xLength, descending), LIBND4J_TYPES, LIBND4J_TYPES); } } } else { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks); dim3 launchDims(numBlocks, numThreads, 32768); int max = 2, dg = 0; while (max < xLength) { max <<= 1; dg++; } max <<= 1; for (int window = 2; window < max; window <<= 1) { int n = window; int rev = 0; do { int half = n >> 1; BUILD_DOUBLE_SELECTOR(xType, yType, bitonicArbitraryStepGenericKey, (launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, n, xLength, rev, descending), LIBND4J_TYPES, LIBND4J_TYPES); n >>= 1; rev = 1; } while (n > 1); } } } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortByValue(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *y, Nd4jLong *yShapeInfo, void *dy, Nd4jLong *dyShapeInfo, bool descending) { try { auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); auto xLength = shape::length(xShapeInfo); auto xEWS = shape::elementWiseStride(xShapeInfo); auto xType = nd4j::ArrayOptions::dataType(yShapeInfo); auto yType = nd4j::ArrayOptions::dataType(xShapeInfo); // check if xLength is a power of 2, and use bitonic sort, if that's the case if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; dim3 launchDims(numBlocks, numThreads, 32768); for (int k = 2; k <= xLength; k = 2 * k) { for (int j = k >> 1; j > 0; j = j >> 1) { BUILD_DOUBLE_SELECTOR(xType, yType, bitonicSortStepGenericKey, (launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, j, k, xLength, descending), LIBND4J_TYPES, LIBND4J_TYPES); } } } else { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks); dim3 launchDims(numBlocks, numThreads, 32768); int max = 2, dg 
= 0; while (max < xLength) { max <<= 1; dg++; } max <<= 1; for (int window = 2; window < max; window <<= 1) { int n = window; int rev = 0; do { int half = n >> 1; BUILD_DOUBLE_SELECTOR(xType, yType, bitonicArbitraryStepGenericKey, (launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, n, xLength, rev, descending), LIBND4J_TYPES, LIBND4J_TYPES); n >>= 1; rev = 1; } while (n > 1); } } } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortTadByKey(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *y, Nd4jLong *yShapeInfo, void *dy, Nd4jLong *dyShapeInfo, int *dimension, int dimensionLength, bool descending) { try { auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); auto context = extraPointers[0] == 0 ? LaunchContext::defaultContext() : reinterpret_cast<LaunchContext *>(extraPointers[0]); auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(xShapeInfo, dimension, dimensionLength); dim3 launchDims((int) tadPack.numberOfTads(), 256, 2048); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); auto yType = nd4j::ArrayOptions::dataType(yShapeInfo); BUILD_DOUBLE_SELECTOR(xType, yType, oesTadGenericKey, (launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, nullptr, dimensionLength, tadPack.platformShapeInfo(), tadPack.platformOffsets(), descending), LIBND4J_TYPES, LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "sortTadKey(...) failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortTadByValue(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *y, Nd4jLong *yShapeInfo, void *dy, Nd4jLong *dyShapeInfo, int *dimension, int dimensionLength, bool descending) { try { auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); auto context = extraPointers[0] == 0 ? LaunchContext::defaultContext() : reinterpret_cast<LaunchContext *>(extraPointers[0]); auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(xShapeInfo, dimension, dimensionLength); dim3 launchDims((int) tadPack.numberOfTads(), 256, 2048); auto xType = nd4j::ArrayOptions::dataType(yShapeInfo); auto yType = nd4j::ArrayOptions::dataType(xShapeInfo); BUILD_DOUBLE_SELECTOR(xType, yType, oesTadGenericKey, (launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, nullptr, dimensionLength, tadPack.platformShapeInfo(), tadPack.platformOffsets(), descending), LIBND4J_TYPES, LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "sortTadValue(...) failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortTad(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool descending) { try { // to be implemented auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); auto context = extraPointers[0] == 0 ? 
LaunchContext::defaultContext() : reinterpret_cast<LaunchContext *>(extraPointers[0]); auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(xShapeInfo, dimension, dimensionLength); dim3 launchDims((int) tadPack.numberOfTads(), 512, 33768); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); BUILD_SINGLE_SELECTOR(xType, oesTadGeneric, (launchDims, stream, dX, dXShapeInfo, nullptr, dimensionLength, tadShapeInfo, tadOffsets, descending), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "sortTad(...) failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortCooIndices(Nd4jPointer *extraPointers, Nd4jLong *indices, void *values, Nd4jLong length, int rank) { throw std::runtime_error("sortCooIndices:: Not implemented yet"); } Nd4jLong encodeBitmap(Nd4jPointer *extraPointers, void *dx, Nd4jLong *hXShapeInfo, Nd4jLong N, int *dz, float threshold) { try { hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); int *resultPointer = reinterpret_cast<int *>(extraPointers[2]); int *reductionPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims(512, 512, 32768); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); BUILD_SINGLE_SELECTOR(xType, cudaEncodeBitmapGeneric, (launchDims, stream, dx, N, dz, resultPointer, reductionPointer, threshold), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "encodeBitmapFloat(...) failed"); Nd4jLong dZ = (Nd4jLong) resultPointer[0]; resultPointer[0] = 0; return dZ; } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return 0; } } void decodeBitmap(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, void *dz, Nd4jLong *zShapeInfo) { try { hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); dim3 launchDims(512, 512, 16384); auto xType = nd4j::ArrayOptions::dataType(zShapeInfo); BUILD_SINGLE_SELECTOR(xType, cudaDecodeBitmapGeneric, (launchDims, stream, dx, N, dz), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "decodeBitmapFloat(...) 
failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } Nd4jLong* mmapFile(Nd4jPointer *extraPointers, const char *fileName, Nd4jLong length) { return nullptr; } void munmapFile(Nd4jPointer *extraPointers, Nd4jLong* ptrMap, Nd4jLong length) { } nd4j::graph::ResultWrapper* executeFlatGraph(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) { try { return nd4j::graph::GraphExecutioner::executeFlatBuffer(flatBufferPointer); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } Nd4jLong getResultWrapperSize(nd4j::graph::ResultWrapper* ptr) { return ptr->size(); } Nd4jPointer getResultWrapperPointer(nd4j::graph::ResultWrapper* ptr) { return ptr->pointer(); } const char* getAllCustomOps() { return nd4j::ops::OpRegistrator::getInstance()->getAllCustomOperations(); } nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp* op, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool *bArgs, int numBArgs) { nd4j::graph::VariableSpace varSpace; Context block(2, &varSpace); nd4j::ShapeList inShapes; for (int e = 0; e < numIArgs; e++) block.getIArguments()->push_back(iArgs[e]); for (int e = 0; e < numTArgs; e++) block.getTArguments()->push_back(tArgs[e]); for (int e = 0; e < numBArgs; e++) block.getBArguments()->push_back(bArgs[e]); for (int e = 0; e < numInputShapes; e++) { auto shape_ = reinterpret_cast<Nd4jLong *>(inputShapes[e]); // we shouldn't copy buffer if that's empty array void *buffer_ = nd4j::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? nullptr : inputBuffers[e]; void *bufferD_ = nd4j::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? 
nullptr : inputBuffers[e + numInputShapes]; auto array = new nd4j::NDArray(buffer_, bufferD_, shape_); // block should contain references to proper variable varSpace.putVariable(1, e, array); block.pickInput(1, e); inShapes.push_back(shape_); } auto shapeList = op->calculateOutputShape(&inShapes, block); if (varSpace.launchContext()->getWorkspace() != nullptr) shapeList->detach(); return shapeList; } nd4j::ShapeList* calculateOutputShapes2(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool *bArgs, int numBArgs) { try { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash); return _calculateOutputShapes(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs, bArgs, numBArgs); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp* op, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) { Context block(1); nd4j::ShapeList inShapes; for (int e = 0; e < numIArgs; e++) block.getIArguments()->push_back(iArgs[e]); for (int e = 0; e < numTArgs; e++) block.getTArguments()->push_back(tArgs[e]); for (int e = 0; e < numInputShapes; e++) inShapes.push_back(reinterpret_cast<Nd4jLong *>(inputShapes[e])); auto shapeList = op->calculateOutputShape(&inShapes, block); return shapeList; } nd4j::ShapeList* calculateOutputShapes(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) { try { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash); return _calculateOutputShapes(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } Nd4jLong getShapeListSize(nd4j::ShapeList* list) { return list->size(); } Nd4jLong* getShape(nd4j::ShapeList* list, Nd4jLong i) { return list->at(i); } static FORCEINLINE Nd4jStatus realExec(nd4j::ops::DeclarableOp* op, Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) { if (op == nullptr) nd4j_printf("Can't find requested operation: [%lld]\n", hash); // we're using the same fake nodeId everywhere here std::vector<nd4j::NDArray*> inputs(numInputs); std::vector<nd4j::NDArray*> outputs(numOutputs); std::vector<double> ttArgs(numTArgs); std::vector<bool> bbArgs(numBArgs); std::vector<Nd4jLong> iiArgs(numIArgs); // filling block now with inputs for (int e = 0; e < numInputs; e++) { auto shape = reinterpret_cast<Nd4jLong *>(inputShapes[e]); void *buffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[e]; void *bufferD = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? 
nullptr : inputBuffers[e + numInputs]; inputs[e] = new nd4j::NDArray(buffer, bufferD, shape); } // if not inplace - transferring output arrays if (!isInplace) for (int e = 0; e < numOutputs; e++) { // we want to keep original output shape intact auto shape = shape::copyShape(reinterpret_cast<Nd4jLong *>(outputShapes[e])); void *buffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e]; void *bufferD = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e + numOutputs]; // FIXME: revisit this. bool canNullify = true; for (int i = 0; i < numInputs; i++) { void *ibuffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[i]; if (ibuffer == buffer) { canNullify = false; break; } } if (canNullify && buffer != nullptr) memset((uint8_t *) buffer, '\0', shape::length(shape) * DataTypeUtils::sizeOfElement(ArrayOptions::dataType(shape))); auto array = new nd4j::NDArray(buffer, bufferD, shape); outputs[e] = array; } for (int e = 0; e < numIArgs; e++) iiArgs[e] = iArgs[e]; for (int e = 0; e < numTArgs; e++) ttArgs[e] = tArgs[e]; for (int e = 0; e < numBArgs; e++) bbArgs[e] = bArgs[e]; // hypothetically at this point we have everything filled auto dZ = op->execute(inputs, outputs, ttArgs, iiArgs, bbArgs, isInplace); //auto dZ = op->execute(inputs, ttArgs, iiArgs, isInplace); if (!isInplace) for (int e = 0; e < numOutputs; e++) { //shape::printShapeInfoLinear("JVM output shape", (int *) outputShapes[e]); //shape::printShapeInfoLinear("C++ output shape", (int *) outputs[e]->shapeInfo()); //outputs[e]->printIndexedBuffer("C++ raw output"); //outputs[e]->printBuffer("C++ indexed output"); if (outputs[e]->ordering() != shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e]))) outputs[e]->streamline(shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e]))); } for (auto v: inputs) delete v; for (auto v: outputs) delete v; return Status::OK(); } int execCustomOp(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) { try { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash); return realExec(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, bArgs, numBArgs, isInplace); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return 1; } } int execCustomOp2(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer opContext) { try { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash); auto context = reinterpret_cast<Context *>(opContext); auto result = op->execute(context); auto res = hipStreamSynchronize(*context->launchContext()->getCudaStream()); if (res != 0) throw nd4j::cuda_exception::build("customOp execution failed", res); for (auto v:context->fastpath_in()) { if (!v->isEmpty()) v->syncToDevice(); } for (auto v:context->fastpath_out()) { if (!v->isEmpty()) v->syncToDevice(); } return result; } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return 1; } } int registerGraph(Nd4jPointer 
*extraPointers, Nd4jLong graphId, Nd4jPointer flatBufferPointer) { try { auto graph = nd4j::graph::GraphExecutioner::importFromFlatPointer(flatBufferPointer); nd4j::graph::GraphHolder::getInstance()->registerGraph(graphId, graph); return ND4J_STATUS_OK; } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return 1; } } static VariablesSet* executeStoredGraphT(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) { auto graph = nd4j::graph::GraphHolder::getInstance()->pullGraph(graphId); auto varSpace = graph->getVariableSpace()->clone(); std::vector<nd4j::NDArray*> handles; for (int e = 0; e < numInputs; e++) { auto idx = inputIndices[e]; // we'll delete this array later, together with cloned VariableSpace auto array = new nd4j::NDArray(inputBuffers[e], reinterpret_cast<Nd4jLong *>(inputShapes[e])); handles.emplace_back(array); if (varSpace->hasVariable(idx)) { auto var = varSpace->getVariable(idx); if (var->hasNDArray()) delete var->getNDArray(); var->setNDArray(array); } else varSpace->putVariable(idx, array); } auto dZ = nd4j::graph::GraphExecutioner::execute(graph, varSpace); auto varSet = new nd4j::graph::VariablesSet(dZ); if (dZ == ND4J_STATUS_OK) { // pull back results, and provide them auto outputs = graph->fetchOutputs(); for (int e = 0; e < outputs->size(); e++) { // we're only getting variable ID/Index from original grap. values will be taken from cloned workspace std::pair<int, int> varId(outputs->at(e)->id(), outputs->at(e)->index()); auto var = varSpace->getVariable(varId); varSet->push_back(var->clone()); } delete outputs; } delete varSpace; return varSet; } VariablesSet* executeStoredGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) { try { return executeStoredGraphT(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } Nd4jLong getVariablesSetSize(nd4j::graph::VariablesSet* set) { return set->size(); } Nd4jStatus getVariablesSetStatus(nd4j::graph::VariablesSet* set) { return set->status(); } nd4j::graph::Variable* getVariable(nd4j::graph::VariablesSet* set, Nd4jLong i) { return set->at(i); } int getVariableId(nd4j::graph::Variable* variable) { return variable->id(); } int getVariableIndex(nd4j::graph::Variable* variable) { return variable->index(); } const char* getVariableName(nd4j::graph::Variable* variable) { return variable->getName()->c_str(); } Nd4jLong* getVariableShape(nd4j::graph::Variable* variable) { return variable->getNDArray()->shapeInfo(); } void* getVariableBuffer(nd4j::graph::Variable* variable) { return variable->getNDArray()->buffer(); } int unregisterGraph(Nd4jPointer *extraPointers, Nd4jLong graphId) { try { nd4j::graph::GraphHolder::getInstance()->dropGraphAny(graphId); return ND4J_STATUS_OK; } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return 1; } } void deletePointerArray(Nd4jPointer pointer) { Nd4jPointer *ptr = reinterpret_cast<Nd4jPointer *>(pointer); delete[] ptr; } void 
deleteCharArray(Nd4jPointer pointer) { auto ptr = reinterpret_cast<char *>(pointer); delete[] ptr; } void deleteIntArray(Nd4jPointer pointer) { auto ptr = reinterpret_cast<int *>(pointer); delete[] ptr; } void deleteLongArray(Nd4jPointer pointer) { auto ptr = reinterpret_cast<Nd4jLong *>(pointer); delete[] ptr; } void deleteVariablesSet(nd4j::graph::VariablesSet* pointer) { delete pointer; } void deleteShapeList(Nd4jPointer shapeList) { nd4j::ShapeList* list = reinterpret_cast<nd4j::ShapeList*>(shapeList); //list->destroy(); delete list; } const char* getAllOperations() { return nd4j::OpTracker::getInstance()->exportOperations(); } Nd4jPointer getGraphState(Nd4jLong id) { return (Nd4jPointer) new nd4j::graph::GraphState(id); } void deleteGraphState(Nd4jPointer state) { auto stateP = reinterpret_cast<nd4j::graph::GraphState*>(state); delete stateP; } Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, nd4j::graph::GraphState *state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) { /** * That's basically exec, with VariableSpace provided in GraphState: * depending on operation (i.e. while of if), different logic executors could be used */ auto graph = state->graph(); auto varSpace = state->variableSpace(); // Node is dynamically created, and has nothing beyond it: only inputs and outputs // this node has id of 0, and inputs are Node node(OpType_LOGIC, opHash, 0); // mapping inputs for (int e = 0; e < numInputs; e++) { auto buffer = inputBuffers[e]; auto shapeInfo = reinterpret_cast<Nd4jLong *>(inputShapes[e]); auto array = new nd4j::NDArray(buffer, shapeInfo, varSpace->launchContext()); // now we just put array to VarSpace varSpace->putVariable(0, e, array); node.pickInput(0, e); } // mapping scopes for (int e = 0; e < numScopes; e++) { // we should check scope existence in GraphState/Graph int scopeId = (int) scopes[e]; if (!state->hasScope(scopeId)) { // nd4j_printf("execCustomOpWithScope: referenced scope [%i] doesn't exist\n", scopeId); return Status::THROW(); } node.pickInput(scopeId, 0); } auto dZ = LogicExecutor::processNode(graph, &node); if (dZ != Status::OK()) return dZ; // mapping outputs for (int e = 0; e < numOutputs; e++) { auto buffer = outputBuffers[e]; auto shapeInfo = reinterpret_cast<Nd4jLong *>(outputShapes[e]); NDArray array(buffer, shapeInfo, varSpace->launchContext()); // now we just put array to VarSpace to the same ID //varSpace->putVariable(0, e, array); auto t = varSpace->getVariable(0, e)->getNDArray(); array.assign(t); } // removing input variables for (int e = 0; e < numInputs; e++) { varSpace->dropVariable(0, e); } // after some bla-bla-bla we should have Graph and Node for current op return Status::OK(); } Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, Nd4jPointer state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) { try { return execCustomOpWithScope(extraPointers, reinterpret_cast<nd4j::graph::GraphState *>(state), opHash, scopes, numScopes, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return 1; } } void deleteResultWrapper(Nd4jPointer 
ptr) { // just 0 room for compiler s@!t auto p = reinterpret_cast<nd4j::graph::ResultWrapper *>(ptr); delete p; } int estimateThreshold(Nd4jPointer *extraPointers, Nd4jPointer dX, Nd4jLong *dXShapeInfo, int N, float threshold) { throw std::runtime_error("estimateThreshold: Not implemented yet"); } /* * TypeDef: * void convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, long N, int dstType, Nd4jPointer dZ); */ void convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, Nd4jLong N, int dstType, Nd4jPointer dZ) { try { auto dx = reinterpret_cast<void *>(dX); auto dz = reinterpret_cast<void *>(dZ); if (srcType == ND4J_FLOAT8) { if (dstType == ND4J_FLOAT8) { // convertKernel<double, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::int8>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::uint8>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::int16>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::uint16>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { } else if (dstType == ND4J_FLOAT32) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, double>(extras, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_INT8) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<nd4j::int8, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { //convertKernel<nd4j::int8, nd4j::int8>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<int8_t, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<int8_t, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<int8_t, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<int8_t, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO: eventually we might want to add it } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<int8_t, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<int8_t, double>(extras, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_UINT8) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<uint8_t, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<uint8_t, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<uint8_t, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<uint8_t, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<uint8_t, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<uint8_t, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO: still might want to add } else if (dstType == 
ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<uint8_t, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<uint8_t, double>(extras, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_FLOAT16) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<float16, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<float16, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<float16, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<float16, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<float16, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<float16, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO: .... ^^^ } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<float16, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<float16, double>(extras, dx, N, dz); } else if (dstType == ND4J_THRESHOLD) { //nd4j::convertToThreshold<float16>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_INT16) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<int16_t, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<int16_t, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<int16_t, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<int16_t, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<int16_t, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<int16_t, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO... 
} else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<int16_t, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<int16_t, double>(extras, dx, N, dz); } else { printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_FLOAT24) { } else if (srcType == ND4J_FLOAT32) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<float, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<float, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<float, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<float, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<float, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<float, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<float, double>(extras, dx, N, dz); } else if (dstType == ND4J_THRESHOLD) { //nd4j::convertToThreshold<float>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_DOUBLE) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<double, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<double, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<double, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<double, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<double, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<double, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<double, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { // } else if (dstType == ND4J_THRESHOLD) { //nd4j::convertToThreshold<double>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_THRESHOLD) { if (dstType == ND4J_FLOAT16) { //nd4j::convertFromThreshold<float16>(nullptr, dx, N, dz); } else if (dstType == ND4J_FLOAT32) { //nd4j::convertFromThreshold<float>(nullptr, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { //nd4j::convertFromThreshold<double>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } Nd4jPointer createUtf8String(Nd4jPointer *extraPointers, const char *string, int length) { auto u = new nd4j::utf8string(string, length); return reinterpret_cast<Nd4jPointer>(u); } Nd4jLong getUtf8StringLength(Nd4jPointer *extraPointers, Nd4jPointer ptr) { return reinterpret_cast<nd4j::utf8string*>(ptr)->_length; } char* getUtf8StringBuffer(Nd4jPointer *extraPointers, Nd4jPointer ptr) { return reinterpret_cast<nd4j::utf8string*>(ptr)->_buffer; 
} void deleteUtf8String(Nd4jPointer *extraPointers, Nd4jPointer ptr) { delete(reinterpret_cast<nd4j::utf8string*>(ptr)); } /////////////////////////////////////////////////////////////////// template<typename T, typename I> __global__ static void scatterUpdateCuda(const int opCode, const int numOfSubArrs, void* vx, const Nd4jLong *xShapeInfo, const Nd4jLong *xOffsets, void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets, const void* vindexes) { __shared__ T *x, *y; __shared__ Nd4jLong arrLenX, arrLenY; auto indexes = reinterpret_cast<const I*>(vindexes); for (int e = 0; e < numOfSubArrs; e++ ) { const auto xIndex = indexes[e]; const bool isOwner = xIndex < gridDim.x ? blockIdx.x == xIndex : blockIdx.x == xIndex % gridDim.x; if (!isOwner) continue; if (threadIdx.x == 0) { x = reinterpret_cast<T*>(vx) + xOffsets[xIndex]; y = reinterpret_cast<T*>(vy) + yOffsets[e]; arrLenX = shape::length(xShapeInfo); arrLenY = shape::length(yShapeInfo); } __syncthreads(); if (arrLenX != arrLenY) return; for (Nd4jLong i = threadIdx.x; i < arrLenX; i += blockDim.x) { const auto xOffset = shape::getIndexOffset(i, xShapeInfo); const auto yOffset = shape::getIndexOffset(i, yShapeInfo); switch (opCode) { case 0: x[xOffset] += y[yOffset]; break; case 1: x[xOffset] -= y[yOffset]; break; case 2: x[xOffset] *= y[yOffset]; break; case 3: x[xOffset] /= y[yOffset]; break; case 4: x[xOffset] = y[yOffset] - x[xOffset]; break; case 5: x[xOffset] = y[yOffset] / x[xOffset]; break; case 6: x[xOffset] = y[yOffset]; break; default: continue; } } __syncthreads(); } } template<typename T, typename I> __host__ static void scatterUpdateCudaLauncher(const hipStream_t* stream, const int opCode, const int numOfSubArrs, void* vx, const Nd4jLong *xShapeInfo, const Nd4jLong *xOffsets, void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets, const void* indexes) { hipLaunchKernelGGL(( scatterUpdateCuda<T, I>), dim3(512), dim3(256), MAX_NUM_THREADS, *stream, opCode, numOfSubArrs, vx, xShapeInfo, xOffsets, vy, yShapeInfo, yOffsets, indexes); } ////////////////////////////////////////////////////////////////////////// void scatterUpdate(Nd4jPointer *extraPointers, int opCode, int numOfSubArrs, void* hX, Nd4jLong* hXShapeInfo, Nd4jLong* hXOffsets, void* dX, Nd4jLong* dXShapeInfo, Nd4jLong* dXOffsets, void* hY, Nd4jLong* hYShapeInfo, Nd4jLong* hYOffsets, void* dY, Nd4jLong* dYShapeInfo, Nd4jLong* dYOffsets, void* hIindexes, Nd4jLong* hIndicesShapeInfo, void* dIindexes, Nd4jLong* dIndicesShapeInfo) { try { auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); auto type = ArrayOptions::dataType(hXShapeInfo); auto iType = ArrayOptions::dataType(hIndicesShapeInfo); BUILD_DOUBLE_SELECTOR(type, iType, scatterUpdateCudaLauncher, (stream, opCode, numOfSubArrs, dX, dXShapeInfo, dXOffsets, dY, dYShapeInfo, dYOffsets, dIindexes), LIBND4J_TYPES, INDEXING_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "scatterUpdate(...) 
failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void inspectArray(Nd4jPointer *extraPointers, Nd4jPointer buffer, Nd4jLong *shapeInfo, Nd4jPointer specialBuffer, Nd4jLong *specialShapeInfo, Nd4jPointer debugInfo) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); auto p = reinterpret_cast<nd4j::DebugInfo *>(debugInfo); NDArray array(buffer, specialBuffer, shapeInfo, &lc); nd4j::DebugHelper::retrieveDebugStatistics(p, &array); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void __global__ tryPointerKernel(void* p, int len) { auto buf = reinterpret_cast<int8_t*>(p); auto tid = threadIdx.x + blockIdx.x * blockDim.x; __shared__ int b; if (tid < len) atomicAdd(&b, buf[tid]); __syncthreads(); if (threadIdx.x ==0 && blockIdx.x == 0) printf("Pointer check complete: %i\n", b); } void tryPointer(Nd4jPointer extra, Nd4jPointer p, int len) { try { hipStream_t stream; hipStreamCreate(&stream); tryPointerKernel << < 256, 512, len + 64, stream >> > (p, len); auto e = hipStreamSynchronize(stream); if (e != 0) throw nd4j::cuda_exception::build("tryPointer failed", e); hipStreamDestroy(stream); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } int dataTypeFromNpyHeader(void *header) { return (int) cnpy::dataTypeFromHeader(reinterpret_cast<char *>(header)); } nd4j::ConstantDataBuffer* shapeBuffer(int rank, Nd4jLong *shape, Nd4jLong *strides, nd4j::DataType dtype, char order, Nd4jLong ews, bool empty) { try { auto buffer = new ConstantDataBuffer(); *buffer = nd4j::ConstantShapeHelper::getInstance()->bufferForShapeInfo( ShapeDescriptor(dtype, order, shape, strides, rank, ews, empty)); return buffer; } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } void deleteShapeBuffer(nd4j::ConstantDataBuffer* ptr) { delete ptr; } void deleteTadPack(nd4j::TadPack* ptr) { delete ptr; } bool isBlasVersionMatches(int major, int minor, int build) { auto result = major == Environment::getInstance()->_blasMajorVersion && minor == Environment::getInstance()->_blasMinorVersion && build == Environment::getInstance()->_blasPatchVersion; if (!result) { nd4j_printf("CUDA/cuBLAS version mismatch. 
Expected: %i.%i.%i but got %i.%i.%i instead\n", Environment::getInstance()->_blasMajorVersion, Environment::getInstance()->_blasMinorVersion, Environment::getInstance()->_blasPatchVersion, major, minor, build); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(152); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("CUDA/cuBLAS version mismatch"); } return result; } nd4j::ConstantDataBuffer* constantBufferLong(nd4j::DataType dtype, Nd4jLong *data, int length) { return nd4j::ConstantHelper::getInstance()->constantBuffer(ConstantDescriptor(data, length), dtype); } nd4j::ConstantDataBuffer* constantBufferDouble(nd4j::DataType dtype, double *data, int length) { return nd4j::ConstantHelper::getInstance()->constantBuffer(ConstantDescriptor(data, length), dtype); } nd4j::ConstantDataBuffer* constantBuffer(nd4j::DataType dtype, nd4j::ConstantDescriptor *descriptor) { return nd4j::ConstantHelper::getInstance()->constantBuffer(*descriptor, dtype); } Nd4jPointer getConstantDataBufferPrimary(nd4j::ConstantDataBuffer* dbf) { return dbf->primary(); } Nd4jPointer getConstantDataBufferSpecial(nd4j::ConstantDataBuffer* dbf) { return dbf->special(); } Nd4jLong getConstantDataBufferLength(nd4j::ConstantDataBuffer* dbf) { return dbf->length(); } Nd4jLong getConstantDataBufferSizeOf(nd4j::ConstantDataBuffer* dbf) { return dbf->sizeOf(); } nd4j::graph::Context* createGraphContext(int nodeId) { return new nd4j::graph::Context(nodeId); } nd4j::graph::RandomGenerator* getGraphContextRandomGenerator(nd4j::graph::Context* ptr) { return &ptr->randomGenerator(); } void markGraphContextInplace(nd4j::graph::Context* ptr, bool reallyInplace) { ptr->markInplace(reallyInplace); } void setGraphContextCudaContext(nd4j::graph::Context* ptr, void *stream, void *reductionPointer, void *allocationPointer) { ptr->setCudaContext(stream, reductionPointer, allocationPointer); } void setGraphContextInputArray(nd4j::graph::Context* ptr, int index, void *buffer, void *shapeInfo, void *specialBuffer, void *specialShapeInfo) { ptr->setInputArray(index, buffer, shapeInfo, specialBuffer, specialShapeInfo); } void setGraphContextOutputArray(nd4j::graph::Context* ptr, int index, void *buffer, void *shapeInfo, void *specialBuffer, void *specialShapeInfo) { ptr->setOutputArray(index, buffer, shapeInfo, specialBuffer, specialShapeInfo); } void setGraphContextTArguments(nd4j::graph::Context* ptr, double *arguments, int numberOfArguments) { ptr->setTArguments(arguments, numberOfArguments); } void setGraphContextIArguments(nd4j::graph::Context* ptr, Nd4jLong *arguments, int numberOfArguments) { ptr->setIArguments(arguments, numberOfArguments); } void setGraphContextBArguments(nd4j::graph::Context* ptr, bool *arguments, int numberOfArguments) { ptr->setBArguments(arguments, numberOfArguments); } void deleteGraphContext(nd4j::graph::Context* ptr) { delete ptr; } nd4j::graph::RandomGenerator* createRandomGenerator(Nd4jLong rootSeed, Nd4jLong nodeSeed) { return new nd4j::graph::RandomGenerator(rootSeed, nodeSeed); } Nd4jLong getRandomGeneratorRootState(nd4j::graph::RandomGenerator* ptr) { return ptr->rootState(); } Nd4jLong getRandomGeneratorNodeState(nd4j::graph::RandomGenerator* ptr) { return ptr->nodeState(); } void setRandomGeneratorStates(nd4j::graph::RandomGenerator* ptr, Nd4jLong rootSeed, Nd4jLong nodeSeed) { ptr->setStates(rootSeed, nodeSeed); } int getRandomGeneratorRelativeInt(nd4j::graph::RandomGenerator* ptr, Nd4jLong index) { return ptr->relativeInt(index); } Nd4jLong 
getRandomGeneratorRelativeLong(nd4j::graph::RandomGenerator* ptr, Nd4jLong index) { return ptr->relativeLong(index); } void deleteRandomGenerator(nd4j::graph::RandomGenerator* ptr) { delete ptr; } Nd4jPointer shapeBufferForNumpy(Nd4jPointer npyArray) { try { cnpy::NpyArray arr = cnpy::loadNpyFromPointer(reinterpret_cast<char *>(npyArray)); unsigned int shapeSize = arr.shape.size(); std::vector<Nd4jLong> shape(shapeSize); bool _empty = false; for (unsigned int i = 0; i < shapeSize; i++) { shape[i] = arr.shape[i]; if (arr.shape[i] == 0) _empty = true; } auto dtype = cnpy::dataTypeFromHeader(reinterpret_cast<char *>(npyArray)); Nd4jLong *shapeBuffer; if (shape.size() == 1 && shape[0] == 0) { // scalar case shapeBuffer = nd4j::ShapeBuilders::createScalarShapeInfo(dtype); } else if (_empty) { if (shapeSize > 0) shapeBuffer = nd4j::ShapeBuilders::emptyShapeInfo(dtype, arr.fortranOrder ? 'f' : 'c', shape); else shapeBuffer = nd4j::ShapeBuilders::emptyShapeInfo(dtype); } else { shapeBuffer = nd4j::ShapeBuilders::createShapeInfo(dtype, arr.fortranOrder ? 'f' : 'c', shape); } return reinterpret_cast<Nd4jPointer>(nd4j::ConstantShapeHelper::getInstance()->createFromExisting(shapeBuffer, true)); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } const char* runLightBenchmarkSuit(bool printOut) { try { nd4j::LightBenchmarkSuit suit; auto result = suit.runSuit(); if (printOut) nd4j_printf("%s\n", result.data()); auto chars = new char[result.length() + 1]; std::memcpy(chars, result.data(), result.length()); chars[result.length()] = (char) 0x0; return chars; } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } const char* runFullBenchmarkSuit(bool printOut) { try { nd4j::FullBenchmarkSuit suit; auto result = suit.runSuit(); if (printOut) nd4j_printf("%s\n", result.data()); auto chars = new char[result.length() + 1]; std::memcpy(chars, result.data(), result.length()); chars[result.length()] = (char) 0x0; return chars; } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } Nd4jLong getCachedMemory(int deviceId) { return nd4j::ConstantHelper::getInstance()->getCachedAmount(deviceId); } nd4j::LaunchContext* defaultLaunchContext() { return LaunchContext::defaultContext(); } Nd4jPointer lcScalarPointer(OpaqueLaunchContext* lc) { return lc->getScalarPointer(); } Nd4jPointer lcReductionPointer(OpaqueLaunchContext* lc) { return lc->getReductionPointer(); } Nd4jPointer lcAllocationPointer(OpaqueLaunchContext* lc) { return lc->getAllocationPointer(); } Nd4jPointer lcExecutionStream(OpaqueLaunchContext* lc) { return lc->getCudaStream(); } Nd4jPointer lcCopyStream(OpaqueLaunchContext* lc) { return lc->getCudaSpecialStream(); } Nd4jPointer lcBlasHandle(OpaqueLaunchContext* lc) { return lc->getCublasHandle(); } Nd4jPointer lcSolverHandle(OpaqueLaunchContext* lc) { return lc->getCusolverHandle(); } int lastErrorCode() { return nd4j::LaunchContext::defaultContext()->errorReference()->errorCode(); } const char* lastErrorMessage() { return nd4j::LaunchContext::defaultContext()->errorReference()->errorMessage(); } int binaryLevel() { return 0; } int optimalLevel() { 
return 0; }

bool isMinimalRequirementsMet() { return true; }

bool isOptimalRequirementsMet() { return true; }

void ctxAllowHelpers(OpaqueContext* ptr, bool reallyAllow) { ptr->allowHelpers(reallyAllow); }
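
// A minimal illustrative sketch, not part of the exported API (the helper name is made up):
// it shows how a caller is expected to poll the error reference that the try/catch blocks in
// this file populate. createGraphContext merely stands in for an arbitrary native entry point.
static void pollNativeErrorSketch() {
    auto ctx = createGraphContext(1);   // placeholder node id
    deleteGraphContext(ctx);

    if (lastErrorCode() != 0) {
        nd4j_printf("native call failed: %s\n", lastErrorMessage());
    }
}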
51fb4c2dd053ce4ff9cad9ed854ee4ee739d6d3d.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ #include "NativeOpExecutioner.h" #include "../NativeOps.h" #include <cuda.h> #include <buffer.h> #include <loops/transform_any.h> #include <loops/reduce_bool.h> #include <loops/reduce_long.h> #include <loops/scalar.h> #include <helpers/threshold.h> #include <ops/specials_cuda.h> #include <helpers/DebugHelper.h> #include <AffinityManager.h> #include <exceptions/datatype_exception.h> #include <exceptions/cuda_exception.h> #include <helpers/CudaLaunchHelper.h> #include <GraphExecutioner.h> #include <helpers/BlasHelper.h> #include <graph/GraphHolder.h> #include <ops/declarable/CustomOperations.h> #include <PointersManager.h> //#include <sys/time.h> #include <curand.h> #include <Status.h> #include <helpers/DebugHelper.h> using namespace nd4j; #include <loops/special_kernels.h> #include <performance/benchmarking/FullBenchmarkSuit.h> #include <performance/benchmarking/LightBenchmarkSuit.h> cudaDeviceProp *deviceProperties; cudaFuncAttributes *funcAttributes = new cudaFuncAttributes[64]; int blockLimit = 128; int maxThreads = 512; bool allowedP2P = false; bool supportedP2P = false; #ifdef __ND4J_EXPERIMENTAL__ bool experimentalSupport = true; #else bool experimentalSupport = false; #endif int minThreads = 32; __constant__ char deviceConstantMemory[49152]; // this method just does type conversion in fancy way int getDeviceId(Nd4jPointer ptrToDeviceId) { return (int)(Nd4jLong)ptrToDeviceId; } /* * Basic CUDA constants here: number of blocks per MP */ int getDeviceBlockThreshold(int deviceId) { int ccMinor = deviceProperties[deviceId].minor; int ccMajor = deviceProperties[deviceId].major; int blockThreshold = 8; if (ccMajor >= 5) blockThreshold = 32; else if (ccMajor == 3) blockThreshold = 16; else if (ccMajor < 3) blockThreshold = 8; return blockThreshold; } /* * This message returns shared memory threshold value. 
default overflow ratio is 0.3 */ int getDeviceSharedThreshold(int deviceId) { int ccMinor = deviceProperties[deviceId].minor; int ccMajor = deviceProperties[deviceId].major; // please note threshold isn't multiple of 32, and that's NOT a mistake int shmemThreshold; if (ccMajor == 6 && ccMinor == 0) shmemThreshold = 65536; else if (ccMajor == 6 && ccMinor == 1) shmemThreshold = 49152; else if (ccMajor == 5 && ccMinor == 2) shmemThreshold = 98304; else if (ccMajor == 5) shmemThreshold = 65536; else if (ccMajor == 3 && ccMinor == 7) shmemThreshold = 114688; else shmemThreshold = 49152; return shmemThreshold / 0.3; } nd4j::buffer::Buffer<Nd4jLong> * createScalarBuffer(cudaStream_t stream) { Nd4jLong *scalarShapeInfo = shape::createScalarShapeInfo(); nd4j::buffer::Buffer<Nd4jLong> *buff = nd4j::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream); nd4j::buffer::copyDataToGpu(&buff, stream); return buff; } class ScalarShapeInformation { private: nd4j::buffer::Buffer<Nd4jLong> *scalarDimension; nd4j::buffer::Buffer<Nd4jLong> *scalarShapeInfo; // std::thread::id threadId; public: ScalarShapeInformation(cudaStream_t stream) { auto scalarDimensionBuff = reinterpret_cast<Nd4jLong *>(malloc(sizeof(Nd4jLong))); CHECK_ALLOC(scalarDimensionBuff, "Failed to allocate ShapeInfoBuffer", sizeof(Nd4jLong)); scalarDimensionBuff[0] = MAX_DIMENSION; scalarDimension = nd4j::buffer::createBuffer(scalarDimensionBuff,1, stream); scalarShapeInfo = createScalarBuffer(stream); // threadId = std::this_thread::get_id(); } ~ScalarShapeInformation() { nd4j::buffer::freeBuffer(&scalarShapeInfo); nd4j::buffer::freeBuffer(&scalarDimension); } Nd4jLong *getShapeInfoHostPointer() { return scalarShapeInfo->data; } Nd4jLong * getShapeInfoGpuPointer() { return scalarShapeInfo->gData; } Nd4jLong * getDimensionHostPointer() { return scalarDimension->data; } Nd4jLong * getDimensionGpuPointer() { return scalarDimension->gData; } }; template <typename T> class ScalarInfo { nd4j::buffer::Buffer<T> *scalarData; ScalarShapeInformation *shapeInfo; T finalResult; cudaStream_t streamRef; public: ScalarInfo(cudaStream_t stream) { T *scalarResult = reinterpret_cast<T*>(malloc(sizeof(T))); CHECK_ALLOC(scalarResult, "Failed to allocate new scalar buffer", sizeof(T)); shapeInfo = new ScalarShapeInformation(stream); scalarData = nd4j::buffer::createBuffer(scalarResult,1, stream); streamRef = stream; nd4j::buffer::copyDataToGpu(&scalarData, stream); } T getFinalResultFromDevice() { nd4j::buffer::copyDataFromGpu(&scalarData, streamRef); return scalarData->data[0]; } /** * Get the device shape information * representing a scalar */ Nd4jLong *getDeviceShapeInfo() { return shapeInfo->getShapeInfoGpuPointer(); } /** * Get the dZ pointers */ T *getDevicePointer() { return scalarData->gData; } /** * Get the infinite dimension device pointer */ Nd4jLong *getDimensionDevicePointer() { return shapeInfo->getDimensionGpuPointer(); } ~ScalarInfo() { nd4j::buffer::freeBuffer(&scalarData); delete shapeInfo; } }; void execPairwiseTransform( Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execPairwiseTransform(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, 
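                    // the LaunchContext is rebuilt from extraPointers the same way throughout this file:
                    // slot [1] is the cudaStream_t* and slot [4] the device reduction buffer (both verifiable
                    // from reads below); slots [5] and [3] are simply forwarded as the remaining constructor
                    // arguments, and [9]..[13] carry TAD shape info and offsets for the broadcast and *Tad wrappers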
extraParams); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execPairwiseTransformBool(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execPairwiseBoolTransform(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execSummaryStatsScalar(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, bool biasCorrected) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execSummaryStatsScalar(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo, dZ, dZShapeInfo, biasCorrected); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execBroadcastBool(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape) { try { //Nd4jLong *tadOnlyShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]); //Nd4jLong *tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[1]); //Nd4jLong *tadOnlyShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[2]); //Nd4jLong *tadOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[3]); auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]); auto tadOnlyShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]); auto tadOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execBroadcastBool(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } /** * * @param opNum * @param 
dX * @param dXShapeInfo * @param dY * @param dYShapeInfo * @param dZ * @param dZShapeInfo * @param dimension * @param dimensionLength */ void execBroadcast( Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape) { try { auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]); auto tadOnlyShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]); auto tadOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = nd4j::ArrayOptions::dataType(hYShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F3 opNum:[%i]\n", opNum); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execBroadcast(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } /** * * @param opNum * @param dX * @param dXShapeInfo * @param extraParams * @param dZ * @param dZShapeInfo */ //////////////////////////////////////////////////////////////////////// void execReduceFloat(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduceFloatScalar(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo, dZ, dZShapeInfo); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduceSame(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduceSameScalar(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo, dZ, dZShapeInfo); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduceSame2(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong 
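// Naming convention, inferred from the implementations below and stated here only as a reading
// aid: execReduceX(...) collapses the whole array to a scalar, while execReduceX2(...) reduces
// along the supplied dimensions, building a TAD pack via ConstantTadHelper and handing its
// device shape info and offsets to NativeOpExecutioner.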
*dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape) { try { auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo, reinterpret_cast<int *>(hDimension), shape::length(hDimensionShape)); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduceSame(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadPack.specialShapeInfo(), tadPack.specialOffsets()); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduceLong2(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape) { try { auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo, reinterpret_cast<int *>(hDimension), shape::length(hDimensionShape)); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduceLong(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadPack.specialShapeInfo(), tadPack.specialOffsets()); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduceLong(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo) { try { auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("LF7 opNum:[%i]\n", opNum); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (zType != nd4j::DataType::INT64) throw datatype_exception::build("execReduceLong wrong Z data type", nd4j::DataType::INT64, zType); auto xLength = shape::length(hXShapeInfo); auto blockWidth = 256; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks, blockWidth, 32768); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hXShapeInfo, nullptr, 0, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, LONG_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "execReduceLong(...) 
failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduceBool2(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape) { try { auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo, reinterpret_cast<int *>(hDimension), shape::length(hDimensionShape)); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduceBool(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadPack.specialShapeInfo(), tadPack.specialOffsets()); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduceBool(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo) { try { auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("BF7 opNum:[%i]\n", opNum); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (zType != nd4j::DataType::BOOL) throw std::runtime_error("execReduceBool requires Z operand to have BOOL type"); auto xLength = shape::length(hXShapeInfo); auto blockWidth = 256; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks, blockWidth, 32768); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction, ::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo, hZShapeInfo, nullptr, 0, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, BOOL_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "execReduceBool(...) 
failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } /** * * @param opNum * @param dX * @param dXShapeInfo * @param extraParams * @param dZ * @param dZShapeInfo * @param dimension * @param dimensionLength */ //////////////////////////////////////////////////////////////////////// void execIndexReduce(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape) { try { auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo, reinterpret_cast<int *>(hDimension), shape::length(hDimensionShape)); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execIndexReduce(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadPack.specialShapeInfo(), tadPack.specialOffsets()); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } /** * * @param opNum * @param dX * @param dXShapeInfo * @param extraParams * @param dZ * @param dZShapeInfo */ //////////////////////////////////////////////////////////////////////// void execReduceFloat2(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape) { try { auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo, reinterpret_cast<int *>(hDimension), shape::length(hDimensionShape)); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduceFloat(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadPack.specialShapeInfo(), tadPack.specialOffsets()); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } /** * * @param opNum * @param dX * @param dXShapeInfo * @param extraParams */ //////////////////////////////////////////////////////////////////////// void execIndexReduceScalar( Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo){ try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execIndexReduceScalar(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo, dZ, dZShapeInfo); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); 
nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execTransformSame(Nd4jPointer *extraPointers,int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { try { auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[0] : nullptr); auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[1] : nullptr); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execTransformSame(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams, tadShapeInfo, tadOffsets); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execTransformBool(Nd4jPointer *extraPointers,int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { try { auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[0] : nullptr); auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[1] : nullptr); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execTransformBool(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams, tadShapeInfo, tadOffsets); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execTransformAny(Nd4jPointer *extraPointers,int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { try { auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); auto streamSpecial = reinterpret_cast<cudaStream_t &>(extraPointers[4]); LaunchContext lc(stream, streamSpecial, extraPointers[5], extraPointers[3], reinterpret_cast<int *>(extraPointers[6])); NativeOpExecutioner::execTransformAny(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams, nullptr, nullptr); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execTransformStrict(Nd4jPointer *extraPointers,int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { try { auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[10] : nullptr); auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? 
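                // slot difference taken from the code as written (not documented elsewhere):
                // TransformSame/TransformBool read their TAD info from extraPointers[0]/[1],
                // whereas TransformStrict/TransformFloat read from extraPointers[10]/[11]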
extraPointers[11] : nullptr); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execTransformStrict(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams, tadShapeInfo, tadOffsets); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execTransformFloat(Nd4jPointer *extraPointers,int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraParams) { try { auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[10] : nullptr); auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[11] : nullptr); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execTransformFloat(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraParams, tadShapeInfo, tadOffsets); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void checkP2P() { int curDevice = 0; cudaGetDevice(&curDevice); int devCnt = 0; cudaGetDeviceCount(&devCnt); if (curDevice < 0 && curDevice > devCnt) curDevice = 0; bool tempSupport = true; if (devCnt > 1) { for (int dX = 0; dX < devCnt; dX++) { for (int dY = 0; dY < devCnt; dY++) { if (dX == dY) continue; int canAccess = 0; cudaSetDevice(dX); cudaDeviceCanAccessPeer(&canAccess, dX , dY); if (!canAccess) { tempSupport = false; break; } } } supportedP2P = tempSupport; cudaSetDevice(curDevice); } else { // if we have only 1 device - we say that we support P2P, since all data will be on 1 device supportedP2P = true; } } void enableP2P(bool enable) { if (enable == allowedP2P) return; int curDevice = 0; cudaGetDevice(&curDevice); int devCnt = 0; cudaGetDeviceCount(&devCnt); if (curDevice < 0 && curDevice > devCnt) curDevice = 0; if (devCnt > 1) { for (int dX = 0; dX < devCnt; dX++) { for (int dY = 0; dY < devCnt; dY++) { if (dX == dY) continue; int canAccess = 0; cudaSetDevice(dX); cudaDeviceCanAccessPeer(&canAccess, dX , dY); if (canAccess) { if (enable) { cudaDeviceEnablePeerAccess(dY, 0); } else { cudaDeviceDisablePeerAccess(dY); } } else { if (nd4j::Environment::getInstance()->isVerbose()) printf("Peer access [%i] -> [%i] isn't possible\n", dX, dY); } } } cudaSetDevice(curDevice); } allowedP2P = enable; cudaSetDevice(curDevice); } bool isP2PAvailable() { return supportedP2P; } void initializeDevicesAndFunctions() { try { int devCnt = 0; cudaGetDeviceCount(&devCnt); deviceProperties = new cudaDeviceProp[devCnt]; for (int i = 0; i < devCnt; i++) { cudaSetDevice(i); cudaGetDeviceProperties(&deviceProperties[i], i); cudaDeviceSetLimit(cudaLimitStackSize, 4096); } cudaSetDevice(0); checkP2P(); // enabling p2p gpu access if it's supported if (supportedP2P && devCnt > 1) enableP2P(allowedP2P); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void initializeFunctions(Nd4jPointer *functions) { nd4j::BlasHelper::getInstance()->initializeDeviceFunctions(functions); /* 
cublasSgemv = (CublasSgemv)functions[0]; cublasDgemv = (CublasDgemv)functions[1]; cublasHgemm = (CublasHgemm)functions[2]; cublasSgemm = (CublasSgemm)functions[3]; cublasDgemm = (CublasDgemm)functions[4]; cublasSgemmEx = (CublasSgemmEx)functions[5]; cublasHgemmBatched = (CublasHgemmBatched)functions[6]; cublasSgemmBatched = (CublasSgemmBatched)functions[7]; cublasDgemmBatched = (CublasDgemmBatched)functions[8]; */ } /** * This method acquires memory chunk of requested size on host side * * @param pointer pointer that'll be used for allocation * @param memorySize memory size, in bytes * @param flags optional parameter */ Nd4jPointer mallocHost(Nd4jLong memorySize, int flags) { Nd4jPointer pointer; // cudaHostAllocMapped |cudaHostAllocPortable auto res = cudaHostAlloc(reinterpret_cast<void **>(&pointer), memorySize + 8, cudaHostAllocDefault); if (res != 0) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(res); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaHostAlloc failed"); } return reinterpret_cast<int8_t*>(pointer); } /** * This method acquires memory chunk of requested size on specified device * * @param pointer pointer that'll be used for allocation * @param memorySize memory size, in bytes * @param ptrToDeviceId pointer to deviceId. For cuda that's just and int, for OpenCL that's pointer to device_id, etc * @param flags optional parameter */ Nd4jPointer mallocDevice(Nd4jLong memorySize, int deviceId, int flags) { Nd4jPointer pointer; auto res = cudaMalloc(reinterpret_cast<void **>(&pointer), memorySize + 8); if (res != 0) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(res); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMalloc failed"); } return reinterpret_cast<int8_t*>(pointer); } /** * This method releases previously allocated host memory space * * @param pointer pointer that'll be freed */ int freeHost(Nd4jPointer pointer) { auto res = cudaFreeHost(reinterpret_cast<void *>(pointer)); if (res != 0) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(res); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaFreeHost failed"); } return 1L; } /** * This method releases previously allocated memory space on device * * @param pointer pointer that'll be freed * @param ptrToDeviceId pointer to deviceId. */ int freeDevice(Nd4jPointer pointer, int deviceId) { auto res = cudaFree(reinterpret_cast<void *>(pointer)); // we're intentionally skipping if (res != 0 && res != 1) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(res); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaFree failed"); } return res == 0 ? 
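           // cudaSuccess maps to 1 (freed), anything else to 0; the raw CUDA error code, except
           // the intentionally skipped code 1, has already been stored on the error reference
           // above. Note the allocation wrappers above over-allocate by 8 bytes internally.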
1L : 0L; } Nd4jPointer createContext() { return 0L; } Nd4jPointer createStream() { auto stream = new cudaStream_t(); auto dZ = cudaStreamCreate(stream); if (dZ != 0) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaStreamCreate failed"); } return stream; } Nd4jPointer createEvent() { Nd4jPointer nativeEvent= (Nd4jPointer) malloc(sizeof(cudaEvent_t)); CHECK_ALLOC(nativeEvent, "Failed to allocate new CUDA event buffer", sizeof(cudaEvent_t)); auto dZ = cudaEventCreateWithFlags(reinterpret_cast<cudaEvent_t *>(&nativeEvent), cudaEventDisableTiming); if (dZ != 0) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaEventCreateWithFlags failed"); } return nativeEvent; } int registerEvent(Nd4jPointer event, Nd4jPointer stream) { auto pEvent = reinterpret_cast<cudaEvent_t *>(&event); auto pStream = reinterpret_cast<cudaStream_t *>(stream); auto dZ = cudaEventRecord(*pEvent, *pStream); if (dZ != 0) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaEventRecord failed"); } return 1; } int setDevice(int deviceId) { AffinityManager::setCurrentDevice(deviceId); return 1; } Nd4jLong getDeviceFreeMemoryDefault() { size_t memFree = 0; size_t memTotal = 0; cudaMemGetInfo(&memFree, &memTotal); return (Nd4jLong) memFree; } Nd4jLong getDeviceFreeMemory(int device) { int orig = -1; cudaGetDevice(&orig); if (device >= 0 && device != orig) { cudaSetDevice(device); } size_t memFree = 0; size_t memTotal = 0; cudaMemGetInfo(&memFree, &memTotal); if (device >= 0 && device != orig) { cudaSetDevice(orig); } return (Nd4jLong) memFree; } Nd4jLong getDeviceTotalMemory(int device) { int orig = -1; cudaGetDevice(&orig); if (device >= 0 && device != orig) { cudaSetDevice(device); } size_t memFree = 0; size_t memTotal = 0; cudaMemGetInfo(&memFree, &memTotal); if (device >= 0 && device != orig) { cudaSetDevice(orig); } return (Nd4jLong) memTotal; } int memcpySync(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) { return memcpyAsync(dst, src, size, flags, reserved); } int memcpyAsync(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) { auto pStream = reinterpret_cast<cudaStream_t *>(reserved); cudaMemcpyKind kind; //nd4j::DebugHelper::checkErrorCode(pStream, "Preliminary sync failed"); switch (flags) { case 0: { kind = cudaMemcpyHostToHost; } break; case 1: { kind = cudaMemcpyHostToDevice; } break; case 2: { kind = cudaMemcpyDeviceToHost; } break; case 3: { kind = cudaMemcpyDeviceToDevice; } break; default: { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("UNDEFNED MEMCPY"); return 0; } } auto dZ = cudaMemcpyAsync(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind, *pStream); //auto dZ = cudaMemcpy(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind); if (dZ != 0) { printf("Failed on [%lu] -> [%lu], size: [%i], direction: [%i], dZ: [%i]\n", src, dst, size, flags, static_cast<int>(dZ)); fflush(stdout); fflush(stderr); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); 
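        // the failing direction is one of the four cudaMemcpyKind values selected above
        // (flags: 0 = host->host, 1 = host->device, 2 = device->host, 3 = device->device);
        // both the numeric CUDA error and a readable message end up on the default
        // LaunchContext's error reference for the caller to inspect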
nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMemcpyAsync failed"); } return 1; } int memsetSync(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) { auto dZ = cudaMemset(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size)); if (dZ != 0) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMemset failed"); } return 1; } int memsetAsync(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) { auto pStream = reinterpret_cast<cudaStream_t *>(reserved); auto dZ = cudaMemsetAsync(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size), *pStream); if (dZ != 0) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMemsetAsync failed"); } return 1; } int destroyEvent(Nd4jPointer event) { auto pEvent = reinterpret_cast<cudaEvent_t *>(&event); auto dZ = cudaEventDestroy(*pEvent); if (dZ != 0) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaEventDestroy failed"); } return 1; } int streamSynchronize(Nd4jPointer stream) { auto pStream = reinterpret_cast<cudaStream_t *>(stream); auto dZ = cudaStreamSynchronize(*pStream); if (dZ != 0) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaStreamSynchronize failed"); } return 1L; } int eventSynchronize(Nd4jPointer event) { auto pEvent = reinterpret_cast<cudaEvent_t *>(&event); auto dZ = cudaEventSynchronize(*pEvent); if (dZ != 0) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaEventSynchronize failed"); } return 1L; } int getAvailableDevices() { int devCnt = 0; cudaGetDeviceCount(&devCnt); return devCnt; } void enableDebugMode(bool reallyEnable) { nd4j::Environment::getInstance()->setDebug(reallyEnable); } void setGridLimit(int gridSize) { if (gridSize > 8192) gridSize = 8192; if (gridSize < 1) gridSize = 1; blockLimit = gridSize; } int ompGetMaxThreads() { return maxThreads; } int ompGetNumThreads() { return maxThreads; } void setOmpNumThreads(int threads) { if (threads > 1024) threads = 1024; if (threads < 32) threads = 32; maxThreads = threads; } void enableVerboseMode(bool reallyEnable) { nd4j::Environment::getInstance()->setVerbose(reallyEnable); } int getDeviceMajor(int device) { return deviceProperties[device].major; } int getDeviceMinor(int device) { return deviceProperties[device].minor; } const char * getDeviceName(int device) { return deviceProperties[device].name; } void specialConcat( Nd4jPointer *extraPointers, int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) { try { BUILD_SINGLE_SELECTOR(ArrayOptions::dataType(dZShapeInfo), nd4j::SpecialMethods, ::concatCpuGeneric(dimension, numArrays, data, inputShapeInfo, dZ, dZShapeInfo), LIBND4J_TYPES); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } /** * This method saves */ nd4j::TadPack* tadOnlyShapeInfo(Nd4jLong *dXShapeInfo, int 
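// TAD ("tensor along dimension") packs: this wrapper asks ConstantTadHelper for the cached shape
// info and offsets describing every sub-array along the requested dimensions; the getters below
// expose the host ("primary") and device ("special") views. A hedged usage sketch, with all
// pointer arguments as placeholders:
//
//   auto pack  = tadOnlyShapeInfo(hXShapeInfo, dims, numDims);
//   auto dTads = getSpecialShapeInfo(pack);   // device-side shape info
//   auto dOffs = getSpecialOffsets(pack);     // device-side offsets, one per TAD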
*dimension, int dimensionLength) { try { auto pack = new TadPack(); *pack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(dXShapeInfo, dimension, dimensionLength); return pack; } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } Nd4jLong* getPrimaryShapeInfo(nd4j::TadPack* pack) { return pack->primaryShapeInfo(); } Nd4jLong* getPrimaryOffsets(nd4j::TadPack* pack) { return pack->primaryOffsets(); } Nd4jLong* getSpecialShapeInfo(nd4j::TadPack* pack) { return pack->specialShapeInfo(); } Nd4jLong* getSpecialOffsets(nd4j::TadPack* pack) { return pack->specialOffsets(); } Nd4jLong getNumberOfTads(nd4j::TadPack* pack) { return pack->numberOfTads(); } int getShapeInfoLength(nd4j::TadPack* pack) { return pack->shapeInfoLength(); } int memcpyConstantAsync(Nd4jLong dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) { cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(reserved); cudaMemcpyKind kind; DEBUG_KERNEL(pStream, -1); switch (flags) { case 0: { kind = cudaMemcpyHostToHost; } break; case 1: { kind = cudaMemcpyHostToDevice; } break; case 2: { kind = cudaMemcpyDeviceToHost; } case 3: { kind = cudaMemcpyDeviceToDevice; } break; } auto dZ = cudaMemcpyToSymbolAsync(deviceConstantMemory, const_cast<const void *>(src), size, dst, kind, *pStream); if (dZ != 0) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMemcpyToSymbolAsync failed"); } return 1; } Nd4jPointer getConstantSpace() { Nd4jPointer dConstAddr; cudaError_t dZ = cudaGetSymbolAddress(reinterpret_cast<void **>(&dConstAddr), deviceConstantMemory); if (dZ != 0) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaGetSymbolAddress failed"); } return dConstAddr; } void pullRows(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *z, Nd4jLong *zShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, Nd4jLong n, Nd4jLong *indexes, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *zTadShapeInfo, Nd4jLong *zTadOffsets) { try { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); dim3 launchDims(64, 256, 1024); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); BUILD_SINGLE_SELECTOR(xType, pullRowsKernelGeneric, (launchDims, stream, dX, dZ, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets), LIBND4J_TYPES); DEBUG_KERNEL(stream, -1); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void average(Nd4jPointer *extras, Nd4jPointer *x, Nd4jLong *xShapeInfo, Nd4jPointer *dx, Nd4jLong *dXShapeInfo, void *z, Nd4jLong *zShapeInfo, void *dz, Nd4jLong *dzShapeInfo, int n, Nd4jLong length, bool propagate) { try { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extras[1]); int mode = getDeviceId(extras[3]); auto dX = reinterpret_cast<void **>(dx); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("averageFloat called\n"); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); // launching on gpu if (mode == 0) { dim3 launchDims(256, 256, 4096); BUILD_SINGLE_SELECTOR(xType, averagingKernelGeneric, (launchDims, stream, dX, dz, n, 
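                // average() and accumulate() pick their execution side from extras[3]: mode 0 launches
                // the GPU kernel through this selector, any other value falls through to the host-side
                // SpecialMethods implementation in the else branch below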
length, propagate), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "AverageFloat(...) failed"); } else { // launching on host memory BUILD_SINGLE_SELECTOR(xType, nd4j::SpecialMethods, ::averageGeneric(x, z, zShapeInfo, n, length, propagate), LIBND4J_TYPES); } } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void accumulate(Nd4jPointer *extras, Nd4jPointer *x, Nd4jLong *xShapeInfo, Nd4jPointer *dx, Nd4jLong *dXShapeInfo, void *z, Nd4jLong *zShapeInfo, void *dz, Nd4jLong *dzShapeInfo, int n, Nd4jLong length) { try { auto stream = reinterpret_cast<cudaStream_t *>(extras[1]); int mode = getDeviceId(extras[3]); auto dX = reinterpret_cast<void **>(dx); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("accumulateFloat called\n"); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); // launching on gpu if (mode == 0) { dim3 launchDims(n, 256, 16384); BUILD_SINGLE_SELECTOR(xType, accumulateKernelGeneric, (launchDims, stream, dX, dz, n, length), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "AccumulateFloat(...) failed"); } else { // launching on host memory BUILD_SINGLE_SELECTOR(xType, nd4j::SpecialMethods, ::accumulateGeneric(x, z, zShapeInfo, n, length), LIBND4J_TYPES); } } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void shuffle(Nd4jPointer *extras, Nd4jPointer *x, Nd4jPointer *xShapeInfo, Nd4jPointer *dx, Nd4jPointer *dXShapeInfo, Nd4jPointer *z, Nd4jPointer *zShapeInfo, Nd4jPointer *dz, Nd4jPointer *dZShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) { try { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extras[1]); auto dX = reinterpret_cast<void **>(dx); auto dZ = reinterpret_cast<void **>(dz); auto xShape = reinterpret_cast<Nd4jLong **>(xShapeInfo); auto dxShape = reinterpret_cast<Nd4jLong **>(dXShapeInfo); auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong **>(tadShapeInfo); auto tadOffset = reinterpret_cast<Nd4jLong **>(tadOffsets); auto xType = nd4j::ArrayOptions::dataType(xShape[0]); dim3 launchDims(256, 512, 8192); BUILD_SINGLE_SELECTOR(xType, shuffleKernelGeneric, (launchDims, stream, dX, dxShape, dZ, N, shuffleMap, tadOnlyShapeInfo, tadOffset), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "shuffle(...) 
failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } bool isExperimentalEnabled() { return nd4j::Environment::getInstance()->isExperimentalBuild(); } void setOmpMinThreads(int threads) { minThreads = nd4j::math::nd4j_max<int>(32, threads); minThreads = nd4j::math::nd4j_min<int>(maxThreads, minThreads); } int getDevice() { return nd4j::AffinityManager::currentDeviceId(); } void setElementThreshold(int num) { // this is no-op for CUDA } void setTADThreshold(int num) { // this is no-op for CUDA } //////////////////////////////////////////////////////////////////////// void execSummaryStats(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, bool biasCorrected) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execSummaryStats(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo, dZ, dZShapeInfo, biasCorrected); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execSummaryStatsTad(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape, bool biasCorrected, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { try { auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execSummaryStats(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, biasCorrected); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduce3(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduce3(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduce3Tad(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong 
*hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) { try { auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo, reinterpret_cast<int *>(hDimension), shape::length(hDimensionShape)); auto tadLength = shape::length(tadPack.primaryShapeInfo()); auto yLength = shape::length(hYShapeInfo); auto xLength = shape::length(hXShapeInfo); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); if (tadLength == yLength || tadLength == xLength) { // nd4j_printf("== way\n",""); NativeOpExecutioner::execReduce3(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets); } else NativeOpExecutioner::execReduce3TAD(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, yTadOffsets, yTadOnlyShapeInfo, yTadOffsets); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduce3Scalar(Nd4jPointer *extraPointers,int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParams, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduce3Scalar(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execScalarBool(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hScalar, Nd4jLong *hScalarShapeInfo, void *dScalar, Nd4jLong *dScalarShapeInfo, void *extraParams) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execScalarBool(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, hScalar, hScalarShapeInfo, dScalar, dScalarShapeInfo, extraParams); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execScalarBoolTad(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hScalars, Nd4jLong *hScalarShapeInfo, void *dScalars, Nd4jLong 
*dScalarShapeInfo, void *extraParams, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) { try { auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execScalarBool(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hZ, hZShapeInfo, dZ, dZShapeInfo, hScalars, hScalarShapeInfo, dScalars, dScalarShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execScalar(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hScalar, Nd4jLong *hScalarShapeInfo, void *dScalar, Nd4jLong *dScalarShapeInfo, void *extraParams) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execScalar(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, hScalar, hScalarShapeInfo, dScalar, dScalarShapeInfo, extraParams); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execScalarTad(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hScalars, Nd4jLong *hScalarShapeInfo, void *dScalars, Nd4jLong *dScalarShapeInfo, void *extraParams, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) { try { auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); auto yType = nd4j::ArrayOptions::dataType(hScalarShapeInfo); auto zType = nd4j::ArrayOptions::dataType(hZShapeInfo); if (yType != xType && yType != nd4j::DataType::BOOL && !isExperimentalEnabled()) throw nd4j::datatype_exception::build("execScalar both operands must have same data type", xType, yType); dim3 launchDims(256, 256, 16384); #ifdef __ND4J_EXPERIMENTAL__ BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES, LIBND4J_TYPES); #else BUILD_SINGLE_SELECTOR_THRICE(xType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES); #endif DEBUG_KERNEL(stream, 
opNum); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void execAggregate(Nd4jPointer *extraPointers, int opNum, void **arguments, int numArguments, Nd4jLong **shapes, int numShapes, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, void *realArguments, int numRealArguments, nd4j::DataType dtype) { } void batchExecutor(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments, nd4j::DataType dtype) { } void execAggregateBatch(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments, nd4j::DataType dtype) { } //////////////////////////////////////////////////////////////////////// void execRandom(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraArguments) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execRandom(&lc, opNum, stateHost, hZ, hZShapeInfo, dZ, dZShapeInfo, extraArguments); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execRandom2(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraArguments) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execRandom(&lc, opNum, stateHost, hX, hXShapeInfo, dX, dXShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraArguments); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execRandom3(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *extraArguments) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execRandom(&lc, opNum, stateHost, hX, hXShapeInfo, dX, dXShapeInfo, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, extraArguments); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } Nd4jPointer initRandom(Nd4jPointer *extraPointers, long seed, long bufferSize, Nd4jPointer ptrToBuffer) { unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); // we don't synchronize at random initialization, it's safe to go unsync here // cudaStreamSynchronize(*stream); auto ptrDev = reinterpret_cast<unsigned long 
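                               // (the random sequence is produced on the host by Xoroshiro128 and then copied into
                               // this device buffer asynchronously a few lines below)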
long *>(ptrToBuffer); auto buffer = new nd4j::random::RandomBuffer(seed, bufferSize, reinterpret_cast<uint64_t *>(ptrHost), reinterpret_cast<uint64_t *>(ptrDev)); buffer->propagateToDevice(buffer, *stream); nd4j::DebugHelper::checkErrorCode(stream, "initRandom(...) failed A"); // we generate sequence in the host memory nd4j::random::Xoroshiro128 generator(buffer); generator.refreshBuffer(); // and copy it to gpu cudaMemcpyAsync(ptrDev, ptrHost, bufferSize * 8, cudaMemcpyHostToDevice, *stream); nd4j::DebugHelper::checkErrorCode(stream, "initRandom(...) failed B"); return buffer; } void destroyRandom(Nd4jPointer ptrBuffer) { nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrBuffer); // FIXME: it's bad thing, but we can't know in advance, which stream(s) where using this generator in practice cudaDeviceSynchronize(); delete buffer; } void refreshBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) { nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom); unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); cudaStreamSynchronize(*stream); uint64_t *ptrDev = buffer->getDeviceBuffer(); // update rng state buffer->setSeed(seed); buffer->setOffset(0); buffer->propagateToDevice(buffer, *stream); // refresh buffer on host size nd4j::random::Xoroshiro128 generator(buffer); generator.refreshBuffer(); // copy back to gpu cudaMemcpyAsync(ptrDev, ptrHost, buffer->getSize() * 8, cudaMemcpyHostToDevice, *stream); } void reSeedBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) { nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); cudaStreamSynchronize(*stream); // update rng state buffer->reSeed(seed); buffer->setOffset(0); buffer->propagateToDevice(buffer, *stream); } /** * Return the length of a shape buffer * based on the pointer * @param buffer the buffer pointer to check * @return */ int lengthForShapeBufferPointer(Nd4jPointer buffer) { auto shapeBuffer = reinterpret_cast<Nd4jLong *>(buffer); return shape::shapeInfoLength(shape::rank(shapeBuffer)); } /** * The pointer to get the address for * * @param address the address to get the pointer * @return the pointer for the given address */ Nd4jPointer pointerForAddress(Nd4jLong address) { return reinterpret_cast<Nd4jPointer >(address); } void tear(Nd4jPointer *extras, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, Nd4jPointer *targets, Nd4jLong *zShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { try { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extras[1]); dim3 launchDims(512, 512, 512); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); BUILD_SINGLE_SELECTOR(xType, tearKernelGeneric, (launchDims, stream, dX, dXShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "tearFloat(...) 
failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void prescanArrayRecursive(Nd4jPointer *extras, int *dZ, int *dX, int numElements, int level) { auto stream = reinterpret_cast<cudaStream_t *>(extras[1]); auto g_scanBlockSums = reinterpret_cast<int **>(extras[2]); int blockSize = 512; // max size of the thread blocks int numBlocks = nd4j::math::nd4j_max<int>(1, static_cast<int>(ceil(static_cast<float>(numElements) / (2.f * blockSize)))); int numThreads; if (numBlocks > 1) numThreads = blockSize; else if (nd4j::isPowerOfTwo(numElements)) numThreads = numElements / 2; else numThreads = nd4j::floorPow2(numElements); int numEltsPerBlock = numThreads * 2; // if this is a non-power-of-2 array, the last block will be non-full // compute the smallest power of 2 able to compute its scan. int numEltsLastBlock = numElements - (numBlocks-1) * numEltsPerBlock; int numThreadsLastBlock = nd4j::math::nd4j_max<int>(1, numEltsLastBlock / 2); int np2LastBlock = 0; int sharedMemLastBlock = 0; if (numEltsLastBlock != numEltsPerBlock) { np2LastBlock = 1; if(!isPowerOfTwo(numEltsLastBlock)) numThreadsLastBlock = floorPow2(numEltsLastBlock); unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS; sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace); } // padding space is used to avoid shared memory bank conflicts int extraSpace = numEltsPerBlock / NUM_BANKS; int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace); // setup execution parameters // if NP2, we process the last block separately dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1); dim3 threads(numThreads, 1, 1); dim3 gridOnes(1, 1, 1); dim3 threadsOnes(numThreadsLastBlock, 1, 1); if (sharedMemSize < 2048) sharedMemSize = 2048; if (sharedMemLastBlock < 2048) sharedMemLastBlock = 2048; // execute the scan if (numBlocks > 1) { nd4j::prescanLauncher<true, false>(grid, threads, sharedMemSize, stream, dZ, dX, g_scanBlockSums[level], numThreads * 2, 0, 0); if (np2LastBlock) { nd4j::prescanLauncher<true, true>(gridOnes, threadsOnes, sharedMemLastBlock, stream, dZ, dX, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock); } // After scanning all the sub-blocks, we are mostly done. But now we // need to take all of the last values of the sub-blocks and scan those. // This will give us a new value that must be sdded to each block to // get the final results. // recursive (CPU) call prescanArrayRecursive(extras, g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level+1); nd4j::uniformAdd<<<grid, threads, 1024, *stream>>>(dZ, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0); if (np2LastBlock) { nd4j::uniformAdd<<<1, numThreadsLastBlock, 1024, *stream>>>(dZ, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock); } } else if (isPowerOfTwo(numElements)) { nd4j::prescanLauncher<false, false>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numThreads * 2, 0, 0); } else { nd4j::prescanLauncher<false, true>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numElements, 0, 0); } nd4j::DebugHelper::checkErrorCode(stream, "prescanArray(...) 
failed"); } void encodeThresholdP1(Nd4jPointer *extras, void *dx, Nd4jLong *hXShapeInfo, Nd4jLong N, int *dz, float threshold) { try { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extras[1]); int blockSize = 1024; int numBlocks = N / blockSize + (N % blockSize ? 1 : 0); dim3 launchDims(numBlocks, blockSize, 1024); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); BUILD_SINGLE_SELECTOR(xType, encoderKernelP1Generic, (launchDims, stream, dx, N, dz, threshold), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP1Float(...) failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void encodeThresholdP2Int(Nd4jPointer *extraPointers, int *dx, Nd4jLong N, int *dz) { try { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); //encoderKernelP2Float<<<numBlocks, blockSize , 1024 * sizeof(float), *stream>>>(dx, N, dz); prescanArrayRecursive(extraPointers, dz, dx + 1, (int) N, 0); nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP2Int(...) failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void encodeThresholdP3(Nd4jPointer *extraPointers, void *dx, Nd4jLong *hXShapeInfo, int *offsets, Nd4jLong N, int *dz){ try { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); int blockSize = 1024; int numBlocks = N / blockSize + (N % blockSize ? 1 : 0); dim3 launchDims(numBlocks, blockSize, 4096); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); BUILD_SINGLE_SELECTOR(xType, encoderKernelP3Generic, (launchDims, stream, dx, offsets, N, dz), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "encodeThresholdP3Float(...) failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void decodeThreshold(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, void *dz, Nd4jLong *zShapeInfo){ try { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); // we probably want to have smaller blocks here, memory writes are misaligned anyway int blockSize = 128; int numBlocks = N / blockSize + (N % blockSize ? 1 : 0); dim3 launchDims(numBlocks, blockSize, 1024); auto zType = nd4j::ArrayOptions::dataType(zShapeInfo); BUILD_SINGLE_SELECTOR(zType, decoderKernelGeneric, (launchDims, stream, dx, N, dz), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "decodeThresholdFloat(...) 
failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduce3All(Nd4jPointer *extraPointers, int opNum, void *hX, Nd4jLong *hXShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *extraParamsVals, void *hY, Nd4jLong *hYShapeInfo, void *dY, Nd4jLong *dYShapeInfo, void *hZ, Nd4jLong *hZShapeInfo, void *dZ, Nd4jLong *dZShapeInfo, void *hDimension, Nd4jLong *hDimensionShape, void *dDimension, Nd4jLong *dDimensionShape, Nd4jLong *xTadShapeInfo, Nd4jLong *xOffsets, Nd4jLong *yTadShapeInfo, Nd4jLong *yOffsets) { try { auto dimension = reinterpret_cast<int *>(dDimension); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduce3All(&lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParamsVals, hY, hYShapeInfo, dY, dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo, dimension, dimensionLength, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sort(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, bool descending) { try { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); auto xLength = shape::length(xShapeInfo); auto xEWS = shape::elementWiseStride(xShapeInfo); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); // check if xLength is a power of 2, and use bitonic sort, if that's the case if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; dim3 launchDims(numBlocks, numThreads, 32768); for (int k = 2; k <= xLength; k = 2 * k) { for (int j = k >> 1; j > 0; j = j >> 1) { BUILD_SINGLE_SELECTOR(xType, bitonicSortStepGeneric, (launchDims, stream, dX, dXShapeInfo, j, k, xLength, descending), LIBND4J_TYPES); } } } else { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks); dim3 launchDims(numBlocks, numThreads, 32768); int max = 2, dg = 0; while (max < xLength) { max <<= 1; dg++; } max <<= 1; for (int window = 2; window < max; window <<= 1) { int n = window; int rev = 0; do { int half = n >> 1; BUILD_SINGLE_SELECTOR(xType, bitonicArbitraryStepGeneric, (launchDims, stream, dX, dXShapeInfo, n, xLength, rev, descending), LIBND4J_TYPES); n >>= 1; rev = 1; } while (n > 1); } } nd4j::DebugHelper::checkErrorCode(stream, "sort(...) 
failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortByKey(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *y, Nd4jLong *yShapeInfo, void *dy, Nd4jLong *dyShapeInfo, bool descending) { try { auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); auto xLength = shape::length(xShapeInfo); auto xEWS = shape::elementWiseStride(xShapeInfo); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); auto yType = nd4j::ArrayOptions::dataType(yShapeInfo); // check if xLength is a power of 2, and use bitonic sort, if that's the case if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; dim3 launchDims(numBlocks, numThreads, 32768); for (int k = 2; k <= xLength; k = 2 * k) { for (int j = k >> 1; j > 0; j = j >> 1) { BUILD_DOUBLE_SELECTOR(xType, yType, bitonicSortStepGenericKey, (launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, j, k, xLength, descending), LIBND4J_TYPES, LIBND4J_TYPES); } } } else { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks); dim3 launchDims(numBlocks, numThreads, 32768); int max = 2, dg = 0; while (max < xLength) { max <<= 1; dg++; } max <<= 1; for (int window = 2; window < max; window <<= 1) { int n = window; int rev = 0; do { int half = n >> 1; BUILD_DOUBLE_SELECTOR(xType, yType, bitonicArbitraryStepGenericKey, (launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, n, xLength, rev, descending), LIBND4J_TYPES, LIBND4J_TYPES); n >>= 1; rev = 1; } while (n > 1); } } } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortByValue(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *y, Nd4jLong *yShapeInfo, void *dy, Nd4jLong *dyShapeInfo, bool descending) { try { auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); auto xLength = shape::length(xShapeInfo); auto xEWS = shape::elementWiseStride(xShapeInfo); auto xType = nd4j::ArrayOptions::dataType(yShapeInfo); auto yType = nd4j::ArrayOptions::dataType(xShapeInfo); // check if xLength is a power of 2, and use bitonic sort, if that's the case if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; dim3 launchDims(numBlocks, numThreads, 32768); for (int k = 2; k <= xLength; k = 2 * k) { for (int j = k >> 1; j > 0; j = j >> 1) { BUILD_DOUBLE_SELECTOR(xType, yType, bitonicSortStepGenericKey, (launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, j, k, xLength, descending), LIBND4J_TYPES, LIBND4J_TYPES); } } } else { int numThreads = nd4j::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; numBlocks = nd4j::math::nd4j_min<int>(512, numBlocks); dim3 launchDims(numBlocks, numThreads, 32768); int max = 2, 
dg = 0; while (max < xLength) { max <<= 1; dg++; } max <<= 1; for (int window = 2; window < max; window <<= 1) { int n = window; int rev = 0; do { int half = n >> 1; BUILD_DOUBLE_SELECTOR(xType, yType, bitonicArbitraryStepGenericKey, (launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, n, xLength, rev, descending), LIBND4J_TYPES, LIBND4J_TYPES); n >>= 1; rev = 1; } while (n > 1); } } } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortTadByKey(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *y, Nd4jLong *yShapeInfo, void *dy, Nd4jLong *dyShapeInfo, int *dimension, int dimensionLength, bool descending) { try { auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); auto context = extraPointers[0] == 0 ? LaunchContext::defaultContext() : reinterpret_cast<LaunchContext *>(extraPointers[0]); auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(xShapeInfo, dimension, dimensionLength); dim3 launchDims((int) tadPack.numberOfTads(), 256, 2048); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); auto yType = nd4j::ArrayOptions::dataType(yShapeInfo); BUILD_DOUBLE_SELECTOR(xType, yType, oesTadGenericKey, (launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, nullptr, dimensionLength, tadPack.platformShapeInfo(), tadPack.platformOffsets(), descending), LIBND4J_TYPES, LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "sortTadKey(...) failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortTadByValue(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, void *y, Nd4jLong *yShapeInfo, void *dy, Nd4jLong *dyShapeInfo, int *dimension, int dimensionLength, bool descending) { try { auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); auto context = extraPointers[0] == 0 ? LaunchContext::defaultContext() : reinterpret_cast<LaunchContext *>(extraPointers[0]); auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(xShapeInfo, dimension, dimensionLength); dim3 launchDims((int) tadPack.numberOfTads(), 256, 2048); auto xType = nd4j::ArrayOptions::dataType(yShapeInfo); auto yType = nd4j::ArrayOptions::dataType(xShapeInfo); BUILD_DOUBLE_SELECTOR(xType, yType, oesTadGenericKey, (launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, nullptr, dimensionLength, tadPack.platformShapeInfo(), tadPack.platformOffsets(), descending), LIBND4J_TYPES, LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "sortTadValue(...) failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortTad(Nd4jPointer *extraPointers, void *x, Nd4jLong *xShapeInfo, void *dX, Nd4jLong *dXShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool descending) { try { // to be implemented auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); auto context = extraPointers[0] == 0 ? 
LaunchContext::defaultContext() : reinterpret_cast<LaunchContext *>(extraPointers[0]); auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(xShapeInfo, dimension, dimensionLength); dim3 launchDims((int) tadPack.numberOfTads(), 512, 33768); auto xType = nd4j::ArrayOptions::dataType(xShapeInfo); BUILD_SINGLE_SELECTOR(xType, oesTadGeneric, (launchDims, stream, dX, dXShapeInfo, nullptr, dimensionLength, tadShapeInfo, tadOffsets, descending), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "sortTad(...) failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortCooIndices(Nd4jPointer *extraPointers, Nd4jLong *indices, void *values, Nd4jLong length, int rank) { throw std::runtime_error("sortCooIndices:: Not implemented yet"); } Nd4jLong encodeBitmap(Nd4jPointer *extraPointers, void *dx, Nd4jLong *hXShapeInfo, Nd4jLong N, int *dz, float threshold) { try { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); int *resultPointer = reinterpret_cast<int *>(extraPointers[2]); int *reductionPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims(512, 512, 32768); auto xType = nd4j::ArrayOptions::dataType(hXShapeInfo); BUILD_SINGLE_SELECTOR(xType, cudaEncodeBitmapGeneric, (launchDims, stream, dx, N, dz, resultPointer, reductionPointer, threshold), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "encodeBitmapFloat(...) failed"); Nd4jLong dZ = (Nd4jLong) resultPointer[0]; resultPointer[0] = 0; return dZ; } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return 0; } } void decodeBitmap(Nd4jPointer *extraPointers, void *dx, Nd4jLong N, void *dz, Nd4jLong *zShapeInfo) { try { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); dim3 launchDims(512, 512, 16384); auto xType = nd4j::ArrayOptions::dataType(zShapeInfo); BUILD_SINGLE_SELECTOR(xType, cudaDecodeBitmapGeneric, (launchDims, stream, dx, N, dz), LIBND4J_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "decodeBitmapFloat(...) 
failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } Nd4jLong* mmapFile(Nd4jPointer *extraPointers, const char *fileName, Nd4jLong length) { return nullptr; } void munmapFile(Nd4jPointer *extraPointers, Nd4jLong* ptrMap, Nd4jLong length) { } nd4j::graph::ResultWrapper* executeFlatGraph(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) { try { return nd4j::graph::GraphExecutioner::executeFlatBuffer(flatBufferPointer); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } Nd4jLong getResultWrapperSize(nd4j::graph::ResultWrapper* ptr) { return ptr->size(); } Nd4jPointer getResultWrapperPointer(nd4j::graph::ResultWrapper* ptr) { return ptr->pointer(); } const char* getAllCustomOps() { return nd4j::ops::OpRegistrator::getInstance()->getAllCustomOperations(); } nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp* op, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool *bArgs, int numBArgs) { nd4j::graph::VariableSpace varSpace; Context block(2, &varSpace); nd4j::ShapeList inShapes; for (int e = 0; e < numIArgs; e++) block.getIArguments()->push_back(iArgs[e]); for (int e = 0; e < numTArgs; e++) block.getTArguments()->push_back(tArgs[e]); for (int e = 0; e < numBArgs; e++) block.getBArguments()->push_back(bArgs[e]); for (int e = 0; e < numInputShapes; e++) { auto shape_ = reinterpret_cast<Nd4jLong *>(inputShapes[e]); // we shouldn't copy buffer if that's empty array void *buffer_ = nd4j::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? nullptr : inputBuffers[e]; void *bufferD_ = nd4j::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? 
nullptr : inputBuffers[e + numInputShapes]; auto array = new nd4j::NDArray(buffer_, bufferD_, shape_); // block should contain references to proper variable varSpace.putVariable(1, e, array); block.pickInput(1, e); inShapes.push_back(shape_); } auto shapeList = op->calculateOutputShape(&inShapes, block); if (varSpace.launchContext()->getWorkspace() != nullptr) shapeList->detach(); return shapeList; } nd4j::ShapeList* calculateOutputShapes2(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool *bArgs, int numBArgs) { try { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash); return _calculateOutputShapes(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs, bArgs, numBArgs); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } nd4j::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, nd4j::ops::DeclarableOp* op, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) { Context block(1); nd4j::ShapeList inShapes; for (int e = 0; e < numIArgs; e++) block.getIArguments()->push_back(iArgs[e]); for (int e = 0; e < numTArgs; e++) block.getTArguments()->push_back(tArgs[e]); for (int e = 0; e < numInputShapes; e++) inShapes.push_back(reinterpret_cast<Nd4jLong *>(inputShapes[e])); auto shapeList = op->calculateOutputShape(&inShapes, block); return shapeList; } nd4j::ShapeList* calculateOutputShapes(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) { try { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash); return _calculateOutputShapes(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } Nd4jLong getShapeListSize(nd4j::ShapeList* list) { return list->size(); } Nd4jLong* getShape(nd4j::ShapeList* list, Nd4jLong i) { return list->at(i); } static FORCEINLINE Nd4jStatus realExec(nd4j::ops::DeclarableOp* op, Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) { if (op == nullptr) nd4j_printf("Can't find requested operation: [%lld]\n", hash); // we're using the same fake nodeId everywhere here std::vector<nd4j::NDArray*> inputs(numInputs); std::vector<nd4j::NDArray*> outputs(numOutputs); std::vector<double> ttArgs(numTArgs); std::vector<bool> bbArgs(numBArgs); std::vector<Nd4jLong> iiArgs(numIArgs); // filling block now with inputs for (int e = 0; e < numInputs; e++) { auto shape = reinterpret_cast<Nd4jLong *>(inputShapes[e]); void *buffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[e]; void *bufferD = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? 
nullptr : inputBuffers[e + numInputs]; inputs[e] = new nd4j::NDArray(buffer, bufferD, shape); } // if not inplace - transferring output arrays if (!isInplace) for (int e = 0; e < numOutputs; e++) { // we want to keep original output shape intact auto shape = shape::copyShape(reinterpret_cast<Nd4jLong *>(outputShapes[e])); void *buffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e]; void *bufferD = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e + numOutputs]; // FIXME: revisit this. bool canNullify = true; for (int i = 0; i < numInputs; i++) { void *ibuffer = nd4j::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[i]; if (ibuffer == buffer) { canNullify = false; break; } } if (canNullify && buffer != nullptr) memset((uint8_t *) buffer, '\0', shape::length(shape) * DataTypeUtils::sizeOfElement(ArrayOptions::dataType(shape))); auto array = new nd4j::NDArray(buffer, bufferD, shape); outputs[e] = array; } for (int e = 0; e < numIArgs; e++) iiArgs[e] = iArgs[e]; for (int e = 0; e < numTArgs; e++) ttArgs[e] = tArgs[e]; for (int e = 0; e < numBArgs; e++) bbArgs[e] = bArgs[e]; // hypothetically at this point we have everything filled auto dZ = op->execute(inputs, outputs, ttArgs, iiArgs, bbArgs, isInplace); //auto dZ = op->execute(inputs, ttArgs, iiArgs, isInplace); if (!isInplace) for (int e = 0; e < numOutputs; e++) { //shape::printShapeInfoLinear("JVM output shape", (int *) outputShapes[e]); //shape::printShapeInfoLinear("C++ output shape", (int *) outputs[e]->shapeInfo()); //outputs[e]->printIndexedBuffer("C++ raw output"); //outputs[e]->printBuffer("C++ indexed output"); if (outputs[e]->ordering() != shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e]))) outputs[e]->streamline(shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e]))); } for (auto v: inputs) delete v; for (auto v: outputs) delete v; return Status::OK(); } int execCustomOp(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) { try { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash); return realExec(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, bArgs, numBArgs, isInplace); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return 1; } } int execCustomOp2(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer opContext) { try { auto op = nd4j::ops::OpRegistrator::getInstance()->getOperation(hash); auto context = reinterpret_cast<Context *>(opContext); auto result = op->execute(context); auto res = cudaStreamSynchronize(*context->launchContext()->getCudaStream()); if (res != 0) throw nd4j::cuda_exception::build("customOp execution failed", res); for (auto v:context->fastpath_in()) { if (!v->isEmpty()) v->syncToDevice(); } for (auto v:context->fastpath_out()) { if (!v->isEmpty()) v->syncToDevice(); } return result; } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return 1; } } int registerGraph(Nd4jPointer 
*extraPointers, Nd4jLong graphId, Nd4jPointer flatBufferPointer) { try { auto graph = nd4j::graph::GraphExecutioner::importFromFlatPointer(flatBufferPointer); nd4j::graph::GraphHolder::getInstance()->registerGraph(graphId, graph); return ND4J_STATUS_OK; } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return 1; } } static VariablesSet* executeStoredGraphT(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) { auto graph = nd4j::graph::GraphHolder::getInstance()->pullGraph(graphId); auto varSpace = graph->getVariableSpace()->clone(); std::vector<nd4j::NDArray*> handles; for (int e = 0; e < numInputs; e++) { auto idx = inputIndices[e]; // we'll delete this array later, together with cloned VariableSpace auto array = new nd4j::NDArray(inputBuffers[e], reinterpret_cast<Nd4jLong *>(inputShapes[e])); handles.emplace_back(array); if (varSpace->hasVariable(idx)) { auto var = varSpace->getVariable(idx); if (var->hasNDArray()) delete var->getNDArray(); var->setNDArray(array); } else varSpace->putVariable(idx, array); } auto dZ = nd4j::graph::GraphExecutioner::execute(graph, varSpace); auto varSet = new nd4j::graph::VariablesSet(dZ); if (dZ == ND4J_STATUS_OK) { // pull back results, and provide them auto outputs = graph->fetchOutputs(); for (int e = 0; e < outputs->size(); e++) { // we're only getting variable ID/Index from original grap. values will be taken from cloned workspace std::pair<int, int> varId(outputs->at(e)->id(), outputs->at(e)->index()); auto var = varSpace->getVariable(varId); varSet->push_back(var->clone()); } delete outputs; } delete varSpace; return varSet; } VariablesSet* executeStoredGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) { try { return executeStoredGraphT(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } Nd4jLong getVariablesSetSize(nd4j::graph::VariablesSet* set) { return set->size(); } Nd4jStatus getVariablesSetStatus(nd4j::graph::VariablesSet* set) { return set->status(); } nd4j::graph::Variable* getVariable(nd4j::graph::VariablesSet* set, Nd4jLong i) { return set->at(i); } int getVariableId(nd4j::graph::Variable* variable) { return variable->id(); } int getVariableIndex(nd4j::graph::Variable* variable) { return variable->index(); } const char* getVariableName(nd4j::graph::Variable* variable) { return variable->getName()->c_str(); } Nd4jLong* getVariableShape(nd4j::graph::Variable* variable) { return variable->getNDArray()->shapeInfo(); } void* getVariableBuffer(nd4j::graph::Variable* variable) { return variable->getNDArray()->buffer(); } int unregisterGraph(Nd4jPointer *extraPointers, Nd4jLong graphId) { try { nd4j::graph::GraphHolder::getInstance()->dropGraphAny(graphId); return ND4J_STATUS_OK; } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return 1; } } void deletePointerArray(Nd4jPointer pointer) { Nd4jPointer *ptr = reinterpret_cast<Nd4jPointer *>(pointer); delete[] ptr; } void 
deleteCharArray(Nd4jPointer pointer) { auto ptr = reinterpret_cast<char *>(pointer); delete[] ptr; } void deleteIntArray(Nd4jPointer pointer) { auto ptr = reinterpret_cast<int *>(pointer); delete[] ptr; } void deleteLongArray(Nd4jPointer pointer) { auto ptr = reinterpret_cast<Nd4jLong *>(pointer); delete[] ptr; } void deleteVariablesSet(nd4j::graph::VariablesSet* pointer) { delete pointer; } void deleteShapeList(Nd4jPointer shapeList) { nd4j::ShapeList* list = reinterpret_cast<nd4j::ShapeList*>(shapeList); //list->destroy(); delete list; } const char* getAllOperations() { return nd4j::OpTracker::getInstance()->exportOperations(); } Nd4jPointer getGraphState(Nd4jLong id) { return (Nd4jPointer) new nd4j::graph::GraphState(id); } void deleteGraphState(Nd4jPointer state) { auto stateP = reinterpret_cast<nd4j::graph::GraphState*>(state); delete stateP; } Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, nd4j::graph::GraphState *state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) { /** * That's basically exec, with VariableSpace provided in GraphState: * depending on operation (i.e. while of if), different logic executors could be used */ auto graph = state->graph(); auto varSpace = state->variableSpace(); // Node is dynamically created, and has nothing beyond it: only inputs and outputs // this node has id of 0, and inputs are Node node(OpType_LOGIC, opHash, 0); // mapping inputs for (int e = 0; e < numInputs; e++) { auto buffer = inputBuffers[e]; auto shapeInfo = reinterpret_cast<Nd4jLong *>(inputShapes[e]); auto array = new nd4j::NDArray(buffer, shapeInfo, varSpace->launchContext()); // now we just put array to VarSpace varSpace->putVariable(0, e, array); node.pickInput(0, e); } // mapping scopes for (int e = 0; e < numScopes; e++) { // we should check scope existence in GraphState/Graph int scopeId = (int) scopes[e]; if (!state->hasScope(scopeId)) { // nd4j_printf("execCustomOpWithScope: referenced scope [%i] doesn't exist\n", scopeId); return Status::THROW(); } node.pickInput(scopeId, 0); } auto dZ = LogicExecutor::processNode(graph, &node); if (dZ != Status::OK()) return dZ; // mapping outputs for (int e = 0; e < numOutputs; e++) { auto buffer = outputBuffers[e]; auto shapeInfo = reinterpret_cast<Nd4jLong *>(outputShapes[e]); NDArray array(buffer, shapeInfo, varSpace->launchContext()); // now we just put array to VarSpace to the same ID //varSpace->putVariable(0, e, array); auto t = varSpace->getVariable(0, e)->getNDArray(); array.assign(t); } // removing input variables for (int e = 0; e < numInputs; e++) { varSpace->dropVariable(0, e); } // after some bla-bla-bla we should have Graph and Node for current op return Status::OK(); } Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, Nd4jPointer state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) { try { return execCustomOpWithScope(extraPointers, reinterpret_cast<nd4j::graph::GraphState *>(state), opHash, scopes, numScopes, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return 1; } } void deleteResultWrapper(Nd4jPointer 
ptr) { // just 0 room for compiler s@!t auto p = reinterpret_cast<nd4j::graph::ResultWrapper *>(ptr); delete p; } int estimateThreshold(Nd4jPointer *extraPointers, Nd4jPointer dX, Nd4jLong *dXShapeInfo, int N, float threshold) { throw std::runtime_error("estimateThreshold: Not implemented yet"); } /* * TypeDef: * void convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, long N, int dstType, Nd4jPointer dZ); */ void convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, Nd4jLong N, int dstType, Nd4jPointer dZ) { try { auto dx = reinterpret_cast<void *>(dX); auto dz = reinterpret_cast<void *>(dZ); if (srcType == ND4J_FLOAT8) { if (dstType == ND4J_FLOAT8) { // convertKernel<double, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::int8>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::uint8>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::int16>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, nd4j::uint16>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { } else if (dstType == ND4J_FLOAT32) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { //nd4j::TypeCast::convertGenericCuda<nd4j::float8, double>(extras, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_INT8) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<nd4j::int8, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { //convertKernel<nd4j::int8, nd4j::int8>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<int8_t, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<int8_t, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<int8_t, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<int8_t, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO: eventually we might want to add it } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<int8_t, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<int8_t, double>(extras, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_UINT8) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<uint8_t, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<uint8_t, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<uint8_t, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<uint8_t, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<uint8_t, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<uint8_t, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO: still might want to add } else if (dstType == 
ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<uint8_t, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<uint8_t, double>(extras, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_FLOAT16) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<float16, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<float16, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<float16, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<float16, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<float16, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<float16, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO: .... ^^^ } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<float16, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<float16, double>(extras, dx, N, dz); } else if (dstType == ND4J_THRESHOLD) { //nd4j::convertToThreshold<float16>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_INT16) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<int16_t, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<int16_t, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<int16_t, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<int16_t, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<int16_t, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<int16_t, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO... 
} else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<int16_t, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<int16_t, double>(extras, dx, N, dz); } else { printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_FLOAT24) { } else if (srcType == ND4J_FLOAT32) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<float, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<float, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<float, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<float, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<float, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<float, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { } else if (dstType == ND4J_DOUBLE) { nd4j::TypeCast::convertGenericCuda<float, double>(extras, dx, N, dz); } else if (dstType == ND4J_THRESHOLD) { //nd4j::convertToThreshold<float>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_DOUBLE) { if (dstType == ND4J_FLOAT8) { //nd4j::TypeCast::convertGenericCuda<double, nd4j::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { nd4j::TypeCast::convertGenericCuda<double, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { nd4j::TypeCast::convertGenericCuda<double, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { nd4j::TypeCast::convertGenericCuda<double, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { nd4j::TypeCast::convertGenericCuda<double, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { nd4j::TypeCast::convertGenericCuda<double, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { } else if (dstType == ND4J_FLOAT32) { nd4j::TypeCast::convertGenericCuda<double, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { // } else if (dstType == ND4J_THRESHOLD) { //nd4j::convertToThreshold<double>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_THRESHOLD) { if (dstType == ND4J_FLOAT16) { //nd4j::convertFromThreshold<float16>(nullptr, dx, N, dz); } else if (dstType == ND4J_FLOAT32) { //nd4j::convertFromThreshold<float>(nullptr, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { //nd4j::convertFromThreshold<double>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } Nd4jPointer createUtf8String(Nd4jPointer *extraPointers, const char *string, int length) { auto u = new nd4j::utf8string(string, length); return reinterpret_cast<Nd4jPointer>(u); } Nd4jLong getUtf8StringLength(Nd4jPointer *extraPointers, Nd4jPointer ptr) { return reinterpret_cast<nd4j::utf8string*>(ptr)->_length; } char* getUtf8StringBuffer(Nd4jPointer *extraPointers, Nd4jPointer ptr) { return reinterpret_cast<nd4j::utf8string*>(ptr)->_buffer; 
} void deleteUtf8String(Nd4jPointer *extraPointers, Nd4jPointer ptr) { delete(reinterpret_cast<nd4j::utf8string*>(ptr)); } /////////////////////////////////////////////////////////////////// template<typename T, typename I> __global__ static void scatterUpdateCuda(const int opCode, const int numOfSubArrs, void* vx, const Nd4jLong *xShapeInfo, const Nd4jLong *xOffsets, void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets, const void* vindexes) { __shared__ T *x, *y; __shared__ Nd4jLong arrLenX, arrLenY; auto indexes = reinterpret_cast<const I*>(vindexes); for (int e = 0; e < numOfSubArrs; e++ ) { const auto xIndex = indexes[e]; const bool isOwner = xIndex < gridDim.x ? blockIdx.x == xIndex : blockIdx.x == xIndex % gridDim.x; if (!isOwner) continue; if (threadIdx.x == 0) { x = reinterpret_cast<T*>(vx) + xOffsets[xIndex]; y = reinterpret_cast<T*>(vy) + yOffsets[e]; arrLenX = shape::length(xShapeInfo); arrLenY = shape::length(yShapeInfo); } __syncthreads(); if (arrLenX != arrLenY) return; for (Nd4jLong i = threadIdx.x; i < arrLenX; i += blockDim.x) { const auto xOffset = shape::getIndexOffset(i, xShapeInfo); const auto yOffset = shape::getIndexOffset(i, yShapeInfo); switch (opCode) { case 0: x[xOffset] += y[yOffset]; break; case 1: x[xOffset] -= y[yOffset]; break; case 2: x[xOffset] *= y[yOffset]; break; case 3: x[xOffset] /= y[yOffset]; break; case 4: x[xOffset] = y[yOffset] - x[xOffset]; break; case 5: x[xOffset] = y[yOffset] / x[xOffset]; break; case 6: x[xOffset] = y[yOffset]; break; default: continue; } } __syncthreads(); } } template<typename T, typename I> __host__ static void scatterUpdateCudaLauncher(const cudaStream_t* stream, const int opCode, const int numOfSubArrs, void* vx, const Nd4jLong *xShapeInfo, const Nd4jLong *xOffsets, void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets, const void* indexes) { scatterUpdateCuda<T, I><<<512, 256, MAX_NUM_THREADS, *stream>>>(opCode, numOfSubArrs, vx, xShapeInfo, xOffsets, vy, yShapeInfo, yOffsets, indexes); } ////////////////////////////////////////////////////////////////////////// void scatterUpdate(Nd4jPointer *extraPointers, int opCode, int numOfSubArrs, void* hX, Nd4jLong* hXShapeInfo, Nd4jLong* hXOffsets, void* dX, Nd4jLong* dXShapeInfo, Nd4jLong* dXOffsets, void* hY, Nd4jLong* hYShapeInfo, Nd4jLong* hYOffsets, void* dY, Nd4jLong* dYShapeInfo, Nd4jLong* dYOffsets, void* hIindexes, Nd4jLong* hIndicesShapeInfo, void* dIindexes, Nd4jLong* dIndicesShapeInfo) { try { auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); auto type = ArrayOptions::dataType(hXShapeInfo); auto iType = ArrayOptions::dataType(hIndicesShapeInfo); BUILD_DOUBLE_SELECTOR(type, iType, scatterUpdateCudaLauncher, (stream, opCode, numOfSubArrs, dX, dXShapeInfo, dXOffsets, dY, dYShapeInfo, dYOffsets, dIindexes), LIBND4J_TYPES, INDEXING_TYPES); nd4j::DebugHelper::checkErrorCode(stream, "scatterUpdate(...) 
failed"); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void inspectArray(Nd4jPointer *extraPointers, Nd4jPointer buffer, Nd4jLong *shapeInfo, Nd4jPointer specialBuffer, Nd4jLong *specialShapeInfo, Nd4jPointer debugInfo) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); auto p = reinterpret_cast<nd4j::DebugInfo *>(debugInfo); NDArray array(buffer, specialBuffer, shapeInfo, &lc); nd4j::DebugHelper::retrieveDebugStatistics(p, &array); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void __global__ tryPointerKernel(void* p, int len) { auto buf = reinterpret_cast<int8_t*>(p); auto tid = threadIdx.x + blockIdx.x * blockDim.x; __shared__ int b; if (tid < len) atomicAdd(&b, buf[tid]); __syncthreads(); if (threadIdx.x ==0 && blockIdx.x == 0) printf("Pointer check complete: %i\n", b); } void tryPointer(Nd4jPointer extra, Nd4jPointer p, int len) { try { cudaStream_t stream; cudaStreamCreate(&stream); tryPointerKernel << < 256, 512, len + 64, stream >> > (p, len); auto e = cudaStreamSynchronize(stream); if (e != 0) throw nd4j::cuda_exception::build("tryPointer failed", e); cudaStreamDestroy(stream); } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } int dataTypeFromNpyHeader(void *header) { return (int) cnpy::dataTypeFromHeader(reinterpret_cast<char *>(header)); } nd4j::ConstantDataBuffer* shapeBuffer(int rank, Nd4jLong *shape, Nd4jLong *strides, nd4j::DataType dtype, char order, Nd4jLong ews, bool empty) { try { auto buffer = new ConstantDataBuffer(); *buffer = nd4j::ConstantShapeHelper::getInstance()->bufferForShapeInfo( ShapeDescriptor(dtype, order, shape, strides, rank, ews, empty)); return buffer; } catch (std::exception &e) { nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } void deleteShapeBuffer(nd4j::ConstantDataBuffer* ptr) { delete ptr; } void deleteTadPack(nd4j::TadPack* ptr) { delete ptr; } bool isBlasVersionMatches(int major, int minor, int build) { auto result = major == Environment::getInstance()->_blasMajorVersion && minor == Environment::getInstance()->_blasMinorVersion && build == Environment::getInstance()->_blasPatchVersion; if (!result) { nd4j_printf("CUDA/cuBLAS version mismatch. 
                           Expected: %i.%i.%i but got %i.%i.%i instead\n",
                    Environment::getInstance()->_blasMajorVersion, Environment::getInstance()->_blasMinorVersion,
                    Environment::getInstance()->_blasPatchVersion, major, minor, build);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(152);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage("CUDA/cuBLAS version mismatch");
    }

    return result;
}

nd4j::ConstantDataBuffer* constantBufferLong(nd4j::DataType dtype, Nd4jLong *data, int length) { return nd4j::ConstantHelper::getInstance()->constantBuffer(ConstantDescriptor(data, length), dtype); }
nd4j::ConstantDataBuffer* constantBufferDouble(nd4j::DataType dtype, double *data, int length) { return nd4j::ConstantHelper::getInstance()->constantBuffer(ConstantDescriptor(data, length), dtype); }
nd4j::ConstantDataBuffer* constantBuffer(nd4j::DataType dtype, nd4j::ConstantDescriptor *descriptor) { return nd4j::ConstantHelper::getInstance()->constantBuffer(*descriptor, dtype); }

Nd4jPointer getConstantDataBufferPrimary(nd4j::ConstantDataBuffer* dbf) { return dbf->primary(); }
Nd4jPointer getConstantDataBufferSpecial(nd4j::ConstantDataBuffer* dbf) { return dbf->special(); }
Nd4jLong getConstantDataBufferLength(nd4j::ConstantDataBuffer* dbf) { return dbf->length(); }
Nd4jLong getConstantDataBufferSizeOf(nd4j::ConstantDataBuffer* dbf) { return dbf->sizeOf(); }

nd4j::graph::Context* createGraphContext(int nodeId) { return new nd4j::graph::Context(nodeId); }
nd4j::graph::RandomGenerator* getGraphContextRandomGenerator(nd4j::graph::Context* ptr) { return &ptr->randomGenerator(); }
void markGraphContextInplace(nd4j::graph::Context* ptr, bool reallyInplace) { ptr->markInplace(reallyInplace); }
void setGraphContextCudaContext(nd4j::graph::Context* ptr, void *stream, void *reductionPointer, void *allocationPointer) { ptr->setCudaContext(stream, reductionPointer, allocationPointer); }
void setGraphContextInputArray(nd4j::graph::Context* ptr, int index, void *buffer, void *shapeInfo, void *specialBuffer, void *specialShapeInfo) { ptr->setInputArray(index, buffer, shapeInfo, specialBuffer, specialShapeInfo); }
void setGraphContextOutputArray(nd4j::graph::Context* ptr, int index, void *buffer, void *shapeInfo, void *specialBuffer, void *specialShapeInfo) { ptr->setOutputArray(index, buffer, shapeInfo, specialBuffer, specialShapeInfo); }
void setGraphContextTArguments(nd4j::graph::Context* ptr, double *arguments, int numberOfArguments) { ptr->setTArguments(arguments, numberOfArguments); }
void setGraphContextIArguments(nd4j::graph::Context* ptr, Nd4jLong *arguments, int numberOfArguments) { ptr->setIArguments(arguments, numberOfArguments); }
void setGraphContextBArguments(nd4j::graph::Context* ptr, bool *arguments, int numberOfArguments) { ptr->setBArguments(arguments, numberOfArguments); }
void deleteGraphContext(nd4j::graph::Context* ptr) { delete ptr; }

nd4j::graph::RandomGenerator* createRandomGenerator(Nd4jLong rootSeed, Nd4jLong nodeSeed) { return new nd4j::graph::RandomGenerator(rootSeed, nodeSeed); }
Nd4jLong getRandomGeneratorRootState(nd4j::graph::RandomGenerator* ptr) { return ptr->rootState(); }
Nd4jLong getRandomGeneratorNodeState(nd4j::graph::RandomGenerator* ptr) { return ptr->nodeState(); }
void setRandomGeneratorStates(nd4j::graph::RandomGenerator* ptr, Nd4jLong rootSeed, Nd4jLong nodeSeed) { ptr->setStates(rootSeed, nodeSeed); }
int getRandomGeneratorRelativeInt(nd4j::graph::RandomGenerator* ptr, Nd4jLong index) { return ptr->relativeInt(index); }
Nd4jLong getRandomGeneratorRelativeLong(nd4j::graph::RandomGenerator* ptr, Nd4jLong index) { return ptr->relativeLong(index); }
void deleteRandomGenerator(nd4j::graph::RandomGenerator* ptr) { delete ptr; }

Nd4jPointer shapeBufferForNumpy(Nd4jPointer npyArray) {
    try {
        cnpy::NpyArray arr = cnpy::loadNpyFromPointer(reinterpret_cast<char *>(npyArray));
        unsigned int shapeSize = arr.shape.size();
        std::vector<Nd4jLong> shape(shapeSize);
        bool _empty = false;
        for (unsigned int i = 0; i < shapeSize; i++) {
            shape[i] = arr.shape[i];
            if (arr.shape[i] == 0)
                _empty = true;
        }

        auto dtype = cnpy::dataTypeFromHeader(reinterpret_cast<char *>(npyArray));

        Nd4jLong *shapeBuffer;
        if (shape.size() == 1 && shape[0] == 0) {
            // scalar case
            shapeBuffer = nd4j::ShapeBuilders::createScalarShapeInfo(dtype);
        } else if (_empty) {
            if (shapeSize > 0)
                shapeBuffer = nd4j::ShapeBuilders::emptyShapeInfo(dtype, arr.fortranOrder ? 'f' : 'c', shape);
            else
                shapeBuffer = nd4j::ShapeBuilders::emptyShapeInfo(dtype);
        } else {
            shapeBuffer = nd4j::ShapeBuilders::createShapeInfo(dtype, arr.fortranOrder ? 'f' : 'c', shape);
        }
        return reinterpret_cast<Nd4jPointer>(nd4j::ConstantShapeHelper::getInstance()->createFromExisting(shapeBuffer, true));
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
        return nullptr;
    }
}

const char* runLightBenchmarkSuit(bool printOut) {
    try {
        nd4j::LightBenchmarkSuit suit;
        auto result = suit.runSuit();

        if (printOut)
            nd4j_printf("%s\n", result.data());

        auto chars = new char[result.length() + 1];
        std::memcpy(chars, result.data(), result.length());
        chars[result.length()] = (char) 0x0;
        return chars;
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
        return nullptr;
    }
}

const char* runFullBenchmarkSuit(bool printOut) {
    try {
        nd4j::FullBenchmarkSuit suit;
        auto result = suit.runSuit();

        if (printOut)
            nd4j_printf("%s\n", result.data());

        auto chars = new char[result.length() + 1];
        std::memcpy(chars, result.data(), result.length());
        chars[result.length()] = (char) 0x0;
        return chars;
    } catch (std::exception &e) {
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
        nd4j::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
        return nullptr;
    }
}

Nd4jLong getCachedMemory(int deviceId) { return nd4j::ConstantHelper::getInstance()->getCachedAmount(deviceId); }

nd4j::LaunchContext* defaultLaunchContext() { return LaunchContext::defaultContext(); }

Nd4jPointer lcScalarPointer(OpaqueLaunchContext* lc) { return lc->getScalarPointer(); }
Nd4jPointer lcReductionPointer(OpaqueLaunchContext* lc) { return lc->getReductionPointer(); }
Nd4jPointer lcAllocationPointer(OpaqueLaunchContext* lc) { return lc->getAllocationPointer(); }
Nd4jPointer lcExecutionStream(OpaqueLaunchContext* lc) { return lc->getCudaStream(); }
Nd4jPointer lcCopyStream(OpaqueLaunchContext* lc) { return lc->getCudaSpecialStream(); }
Nd4jPointer lcBlasHandle(OpaqueLaunchContext* lc) { return lc->getCublasHandle(); }
Nd4jPointer lcSolverHandle(OpaqueLaunchContext* lc) { return lc->getCusolverHandle(); }

int lastErrorCode() { return nd4j::LaunchContext::defaultContext()->errorReference()->errorCode(); }
const char* lastErrorMessage() { return nd4j::LaunchContext::defaultContext()->errorReference()->errorMessage(); }

int binaryLevel() { return 0; }
int optimalLevel() { return 0; }
bool isMinimalRequirementsMet() { return true; }
bool isOptimalRequirementsMet() { return true; }

void ctxAllowHelpers(OpaqueContext* ptr, bool reallyAllow) { ptr->allowHelpers(reallyAllow); }
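The file above ends with thin C-style wrappers around nd4j's graph RandomGenerator. As a rough usage sketch only (my own illustration, not part of the dataset row; it assumes the nd4j headers that declare these wrappers and Nd4jLong are available, and the usual semantics that a "relative" draw depends only on the seeds and the index):

// Hypothetical usage of the RandomGenerator wrappers shown above.
// Requires the nd4j headers that declare createRandomGenerator() and friends.
#include <cstdio>

void randomGeneratorSketch() {
    // Build a generator from a root seed and a per-node seed.
    auto *rng = createRandomGenerator(119, 17);

    // Both seeds can be replaced later without reallocating the object.
    setRandomGeneratorStates(rng, 123, 456);

    // Index-relative draws: the index selects an element of the random stream.
    int i0 = getRandomGeneratorRelativeInt(rng, 0);
    Nd4jLong l1 = getRandomGeneratorRelativeLong(rng, 1);
    printf("relative draws: %d, %lld\n", i0, static_cast<long long>(l1));

    // Ownership stays with the caller.
    deleteRandomGenerator(rng);
}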
2a011587a1eb0b7672805a57368aca8257163268.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <float.h>
#include "cuda_auxiliary.h"

typedef struct {
    double *x;
    double *y;
    double *z;
} points;

__global__ void gpu_norm(points array, double *norm, int N)
{
    for (int ix = blockIdx.x * blockDim.x + threadIdx.x; ix < N; ix += blockDim.x * gridDim.x)
        norm[ix] = sqrt(array.x[ix] * array.x[ix] + array.y[ix] * array.y[ix] + array.z[ix] * array.z[ix]);
}

void cpu_norm(points array, double *norm, int N)
{
    for (int ix = 0; ix < N; ++ix)
        norm[ix] = sqrt(array.x[ix] * array.x[ix] + array.y[ix] * array.y[ix] + array.z[ix] * array.z[ix]);
}

void check_result(double *cpu_c, double *gpu_c, int N)
{
    for (int i = 0; i < N; ++i)
        if (abs(cpu_c[i] - gpu_c[i]) >= 3 * DBL_EPSILON) {
            printf("CPU and GPU results differ at position %d\n", i);
            printf("CPU value: %lg\n", cpu_c[i]);
            printf("GPU value: %lg\n", gpu_c[i]);
            return;
        }

    printf("GPU result is correct\n");
}

int main(int argc, char **argv)
{
    int N = 1 << 24;

    points hst_point;
    points dev_point;

    double *hst_n = NULL;
    double *hst_r = NULL;
    double *dev_n = NULL;

    dim3 block;
    dim3 grid;

    double cpu_time = 0.0;
    double gpu_time = 0.0;

    if (argc != 2) {
        fprintf(stderr, "usage: %s dimx\n", argv[0]);
        goto die;
    }

    host_alloc(hst_point.x, double, 3 * N);
    host_alloc(hst_n, double, N);
    host_alloc(hst_r, double, N);

    cuda_exec(hipMalloc(&dev_point.x, 3 * N * sizeof(double)));
    cuda_exec(hipMalloc(&dev_n, N * sizeof(double)));

    hst_point.y = hst_point.x + N;
    hst_point.z = hst_point.y + N;

    dev_point.y = dev_point.x + N;
    dev_point.z = dev_point.y + N;

    init_matrix(hst_point.x, 3 * N, 1, 3 * N);

    cuda_exec(hipMemcpy(dev_point.x, hst_point.x, 3 * N * sizeof(double), hipMemcpyHostToDevice));

    block.x = atoi(argv[1]);
    grid.x = min((N + block.x - 1) / block.x, 65535);

    cpu_time -= timer();
    cpu_norm(hst_point, hst_n, N);
    cpu_time += timer();

    gpu_time -= timer();
    hipLaunchKernelGGL((gpu_norm), dim3(grid), dim3(block), 0, 0, dev_point, dev_n, N);
    cuda_exec(hipDeviceSynchronize());
    gpu_time += timer();

    cuda_exec(hipMemcpy(hst_r, dev_n, N * sizeof(double), hipMemcpyDeviceToHost));
    cuda_exec(hipDeviceSynchronize());

    printf("Execution configuration: %d blocks, %d threads\n", grid.x, block.x);
    printf("CPU time: %dms\n", (int) (1000 * cpu_time));
    printf("GPU time: %dms\n", (int) (1000 * gpu_time));

    check_result(hst_n, hst_r, N);

die:
    free(hst_point.x);
    free(hst_n);
    free(hst_r);

    cuda_exec(hipFree(dev_point.x));
    cuda_exec(hipFree(dev_n));

    return 0;
}
2a011587a1eb0b7672805a57368aca8257163268.cu
#include <stdio.h>
#include <float.h>
#include "cuda_auxiliary.h"

typedef struct {
    double *x;
    double *y;
    double *z;
} points;

__global__ void gpu_norm(points array, double *norm, int N)
{
    for (int ix = blockIdx.x * blockDim.x + threadIdx.x; ix < N; ix += blockDim.x * gridDim.x)
        norm[ix] = sqrt(array.x[ix] * array.x[ix] + array.y[ix] * array.y[ix] + array.z[ix] * array.z[ix]);
}

void cpu_norm(points array, double *norm, int N)
{
    for (int ix = 0; ix < N; ++ix)
        norm[ix] = sqrt(array.x[ix] * array.x[ix] + array.y[ix] * array.y[ix] + array.z[ix] * array.z[ix]);
}

void check_result(double *cpu_c, double *gpu_c, int N)
{
    for (int i = 0; i < N; ++i)
        if (abs(cpu_c[i] - gpu_c[i]) >= 3 * DBL_EPSILON) {
            printf("CPU and GPU results differ at position %d\n", i);
            printf("CPU value: %lg\n", cpu_c[i]);
            printf("GPU value: %lg\n", gpu_c[i]);
            return;
        }

    printf("GPU result is correct\n");
}

int main(int argc, char **argv)
{
    int N = 1 << 24;

    points hst_point;
    points dev_point;

    double *hst_n = NULL;
    double *hst_r = NULL;
    double *dev_n = NULL;

    dim3 block;
    dim3 grid;

    double cpu_time = 0.0;
    double gpu_time = 0.0;

    if (argc != 2) {
        fprintf(stderr, "usage: %s dimx\n", argv[0]);
        goto die;
    }

    host_alloc(hst_point.x, double, 3 * N);
    host_alloc(hst_n, double, N);
    host_alloc(hst_r, double, N);

    cuda_exec(cudaMalloc(&dev_point.x, 3 * N * sizeof(double)));
    cuda_exec(cudaMalloc(&dev_n, N * sizeof(double)));

    hst_point.y = hst_point.x + N;
    hst_point.z = hst_point.y + N;

    dev_point.y = dev_point.x + N;
    dev_point.z = dev_point.y + N;

    init_matrix(hst_point.x, 3 * N, 1, 3 * N);

    cuda_exec(cudaMemcpy(dev_point.x, hst_point.x, 3 * N * sizeof(double), cudaMemcpyHostToDevice));

    block.x = atoi(argv[1]);
    grid.x = min((N + block.x - 1) / block.x, 65535);

    cpu_time -= timer();
    cpu_norm(hst_point, hst_n, N);
    cpu_time += timer();

    gpu_time -= timer();
    gpu_norm<<<grid, block>>>(dev_point, dev_n, N);
    cuda_exec(cudaDeviceSynchronize());
    gpu_time += timer();

    cuda_exec(cudaMemcpy(hst_r, dev_n, N * sizeof(double), cudaMemcpyDeviceToHost));
    cuda_exec(cudaDeviceSynchronize());

    printf("Execution configuration: %d blocks, %d threads\n", grid.x, block.x);
    printf("CPU time: %dms\n", (int) (1000 * cpu_time));
    printf("GPU time: %dms\n", (int) (1000 * gpu_time));

    check_result(hst_n, hst_r, N);

die:
    free(hst_point.x);
    free(hst_n);
    free(hst_r);

    cuda_exec(cudaFree(dev_point.x));
    cuda_exec(cudaFree(dev_n));

    return 0;
}
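The .hip and .cu versions of this pair differ almost only in API spelling (hipMalloc/cudaMalloc, hipMemcpy/cudaMemcpy, hipDeviceSynchronize/cudaDeviceSynchronize, hipFree/cudaFree) and in the kernel-launch form. The following minimal stand-alone sketch of that launch mapping is my own example, not part of either file; the scale2 kernel is a stand-in, and it assumes a working HIP toolchain (compile with hipcc):

#include <hip/hip_runtime.h>
#include <cstdio>

// Stand-in kernel: doubles each element in place.
__global__ void scale2(double *x, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= 2.0;
}

int main()
{
    const int n = 1 << 10;
    double *d = NULL;
    hipMalloc(&d, n * sizeof(double));        // CUDA: cudaMalloc(&d, ...)
    hipMemset(d, 0, n * sizeof(double));      // CUDA: cudaMemset(d, 0, ...)

    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);

    // CUDA launch form (as in the .cu file above):  scale2<<<grid, block>>>(d, n);
    // HIP equivalent (hipify additionally parenthesizes the kernel name):
    hipLaunchKernelGGL(scale2, grid, block, 0, 0, d, n);

    hipDeviceSynchronize();                   // CUDA: cudaDeviceSynchronize()
    hipFree(d);                               // CUDA: cudaFree(d)
    printf("launched %u block(s) of %u threads\n", grid.x, block.x);
    return 0;
}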
0eaa08bc819107ed0564b5f81ac042c2f4fa8694.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2019, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************<GINKGO LICENSE>*******************************/ #include "core/matrix/dense_kernels.hpp" #include <ginkgo/core/base/math.hpp> #include <ginkgo/core/base/range_accessors.hpp> #include <ginkgo/core/matrix/coo.hpp> #include <ginkgo/core/matrix/csr.hpp> #include <ginkgo/core/matrix/ell.hpp> #include <ginkgo/core/matrix/sellp.hpp> #include <ginkgo/core/matrix/sparsity_csr.hpp> #include "cuda/base/cublas_bindings.hpp" #include "cuda/base/pointer_mode_guard.hpp" #include "cuda/components/cooperative_groups.cuh" #include "cuda/components/prefix_sum.cuh" #include "cuda/components/reduction.cuh" #include "cuda/components/uninitialized_array.hpp" namespace gko { namespace kernels { namespace cuda { /** * @brief The Dense matrix format namespace. 
* * @ingroup dense */ namespace dense { constexpr auto default_block_size = 512; template <typename ValueType> void simple_apply(std::shared_ptr<const CudaExecutor> exec, const matrix::Dense<ValueType> *a, const matrix::Dense<ValueType> *b, matrix::Dense<ValueType> *c) { if (cublas::is_supported<ValueType>::value) { auto handle = exec->get_cublas_handle(); { cublas::pointer_mode_guard pm_guard(handle); auto alpha = one<ValueType>(); auto beta = zero<ValueType>(); cublas::gemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, c->get_size()[1], c->get_size()[0], a->get_size()[1], &alpha, b->get_const_values(), b->get_stride(), a->get_const_values(), a->get_stride(), &beta, c->get_values(), c->get_stride()); } } else { GKO_NOT_IMPLEMENTED; } } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_DENSE_SIMPLE_APPLY_KERNEL); template <typename ValueType> void apply(std::shared_ptr<const CudaExecutor> exec, const matrix::Dense<ValueType> *alpha, const matrix::Dense<ValueType> *a, const matrix::Dense<ValueType> *b, const matrix::Dense<ValueType> *beta, matrix::Dense<ValueType> *c) { if (cublas::is_supported<ValueType>::value) { cublas::gemm(exec->get_cublas_handle(), HIPBLAS_OP_N, HIPBLAS_OP_N, c->get_size()[1], c->get_size()[0], a->get_size()[1], alpha->get_const_values(), b->get_const_values(), b->get_stride(), a->get_const_values(), a->get_stride(), beta->get_const_values(), c->get_values(), c->get_stride()); } else { GKO_NOT_IMPLEMENTED; } } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_DENSE_APPLY_KERNEL); namespace kernel { template <size_type block_size, typename ValueType> __global__ __launch_bounds__(block_size) void scale( size_type num_rows, size_type num_cols, size_type num_alpha_cols, const ValueType *__restrict__ alpha, ValueType *__restrict__ x, size_type stride_x) { constexpr auto warps_per_block = block_size / cuda_config::warp_size; const auto global_id = thread::get_thread_id<cuda_config::warp_size, warps_per_block>(); const auto row_id = global_id / num_cols; const auto col_id = global_id % num_cols; const auto alpha_id = num_alpha_cols == 1 ? 0 : col_id; if (row_id < num_rows) { x[row_id * stride_x + col_id] = alpha[alpha_id] == zero<ValueType>() ? 
zero<ValueType>() : x[row_id * stride_x + col_id] * alpha[alpha_id]; } } } // namespace kernel template <typename ValueType> void scale(std::shared_ptr<const CudaExecutor> exec, const matrix::Dense<ValueType> *alpha, matrix::Dense<ValueType> *x) { if (cublas::is_supported<ValueType>::value && x->get_size()[1] == 1) { cublas::scal(exec->get_cublas_handle(), x->get_size()[0], alpha->get_const_values(), x->get_values(), x->get_stride()); } else { // TODO: tune this parameter constexpr auto block_size = default_block_size; const dim3 grid_dim = ceildiv(x->get_size()[0] * x->get_size()[1], block_size); const dim3 block_dim{cuda_config::warp_size, 1, block_size / cuda_config::warp_size}; hipLaunchKernelGGL(( kernel::scale<block_size>), dim3(grid_dim), dim3(block_dim), 0, 0, x->get_size()[0], x->get_size()[1], alpha->get_size()[1], as_cuda_type(alpha->get_const_values()), as_cuda_type(x->get_values()), x->get_stride()); } } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_DENSE_SCALE_KERNEL); namespace kernel { template <size_type block_size, typename ValueType> __global__ __launch_bounds__(block_size) void add_scaled( size_type num_rows, size_type num_cols, size_type num_alpha_cols, const ValueType *__restrict__ alpha, const ValueType *__restrict__ x, size_type stride_x, ValueType *__restrict__ y, size_type stride_y) { constexpr auto warps_per_block = block_size / cuda_config::warp_size; const auto global_id = thread::get_thread_id<cuda_config::warp_size, warps_per_block>(); const auto row_id = global_id / num_cols; const auto col_id = global_id % num_cols; const auto alpha_id = num_alpha_cols == 1 ? 0 : col_id; if (row_id < num_rows && alpha[alpha_id] != zero<ValueType>()) { y[row_id * stride_y + col_id] += x[row_id * stride_x + col_id] * alpha[alpha_id]; } } } // namespace kernel template <typename ValueType> void add_scaled(std::shared_ptr<const CudaExecutor> exec, const matrix::Dense<ValueType> *alpha, const matrix::Dense<ValueType> *x, matrix::Dense<ValueType> *y) { if (cublas::is_supported<ValueType>::value && x->get_size()[1] == 1) { cublas::axpy(exec->get_cublas_handle(), x->get_size()[0], alpha->get_const_values(), x->get_const_values(), x->get_stride(), y->get_values(), y->get_stride()); } else { // TODO: tune this parameter constexpr auto block_size = default_block_size; const dim3 grid_dim = ceildiv(x->get_size()[0] * x->get_size()[1], block_size); const dim3 block_dim{cuda_config::warp_size, 1, block_size / cuda_config::warp_size}; hipLaunchKernelGGL(( kernel::add_scaled<block_size>), dim3(grid_dim), dim3(block_dim), 0, 0, x->get_size()[0], x->get_size()[1], alpha->get_size()[1], as_cuda_type(alpha->get_const_values()), as_cuda_type(x->get_const_values()), x->get_stride(), as_cuda_type(y->get_values()), y->get_stride()); } } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_DENSE_ADD_SCALED_KERNEL); namespace kernel { template <size_type block_size, typename ValueType> __global__ __launch_bounds__(block_size) void compute_partial_dot( size_type num_rows, const ValueType *__restrict__ x, size_type stride_x, const ValueType *__restrict__ y, size_type stride_y, ValueType *__restrict__ work) { constexpr auto warps_per_block = block_size / cuda_config::warp_size; const auto num_blocks = gridDim.x; const auto local_id = thread::get_local_thread_id<cuda_config::warp_size>(); const auto global_id = thread::get_thread_id<cuda_config::warp_size, warps_per_block>(); auto tmp = zero<ValueType>(); for (auto i = global_id; i < num_rows; i += block_size * num_blocks) { tmp += x[i * stride_x] * y[i * 
stride_y]; } __shared__ UninitializedArray<ValueType, block_size> tmp_work; tmp_work[local_id] = tmp; reduce(group::this_thread_block(), static_cast<ValueType *>(tmp_work), [](const ValueType &x, const ValueType &y) { return x + y; }); if (local_id == 0) { work[thread::get_block_id()] = tmp_work[0]; } } template <size_type block_size, typename ValueType> __global__ __launch_bounds__(block_size) void finalize_dot_computation( size_type size, const ValueType *work, ValueType *result) { const auto local_id = thread::get_local_thread_id<cuda_config::warp_size>(); ValueType tmp = zero<ValueType>(); for (auto i = local_id; i < size; i += block_size) { tmp += work[i]; } __shared__ UninitializedArray<ValueType, block_size> tmp_work; tmp_work[local_id] = tmp; reduce(group::this_thread_block(), static_cast<ValueType *>(tmp_work), [](const ValueType &x, const ValueType &y) { return x + y; }); if (local_id == 0) { *result = tmp_work[0]; } } } // namespace kernel template <typename ValueType> void compute_dot(std::shared_ptr<const CudaExecutor> exec, const matrix::Dense<ValueType> *x, const matrix::Dense<ValueType> *y, matrix::Dense<ValueType> *result) { if (cublas::is_supported<ValueType>::value) { // TODO: write a custom kernel which does this more efficiently for (size_type col = 0; col < x->get_size()[1]; ++col) { cublas::dot(exec->get_cublas_handle(), x->get_size()[0], x->get_const_values() + col, x->get_stride(), y->get_const_values() + col, y->get_stride(), result->get_values() + col); } } else { // TODO: these are tuning parameters obtained experimentally, once // we decide how to handle this uniformly, they should be modified // appropriately constexpr auto work_per_thread = 32; constexpr auto block_size = 1024; constexpr auto work_per_block = work_per_thread * block_size; const dim3 grid_dim = ceildiv(x->get_size()[0], work_per_block); const dim3 block_dim{cuda_config::warp_size, 1, block_size / cuda_config::warp_size}; Array<ValueType> work(exec, grid_dim.x); // TODO: write a kernel which does this more efficiently for (size_type col = 0; col < x->get_size()[1]; ++col) { hipLaunchKernelGGL(( kernel::compute_partial_dot<block_size>), dim3(grid_dim), dim3(block_dim), 0, 0, x->get_size()[0], as_cuda_type(x->get_const_values() + col), x->get_stride(), as_cuda_type(y->get_const_values() + col), y->get_stride(), as_cuda_type(work.get_data())); hipLaunchKernelGGL(( kernel::finalize_dot_computation<block_size>), dim3(1), dim3(block_dim), 0, 0, grid_dim.x, as_cuda_type(work.get_const_data()), as_cuda_type(result->get_values() + col)); } } } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_DENSE_COMPUTE_DOT_KERNEL); namespace kernel { template <typename ValueType> __global__ __launch_bounds__(default_block_size) void compute_sqrt( size_type num_cols, ValueType *__restrict__ work) { const auto tidx = static_cast<size_type>(blockDim.x) * blockIdx.x + threadIdx.x; if (tidx < num_cols) { work[tidx] = sqrt(abs(work[tidx])); } } } // namespace kernel template <typename ValueType> void compute_norm2(std::shared_ptr<const CudaExecutor> exec, const matrix::Dense<ValueType> *x, matrix::Dense<ValueType> *result) { if (cublas::is_supported<ValueType>::value) { for (size_type col = 0; col < x->get_size()[1]; ++col) { cublas::norm2(exec->get_cublas_handle(), x->get_size()[0], x->get_const_values() + col, x->get_stride(), result->get_values() + col); } } else { compute_dot(exec, x, x, result); const dim3 block_size(default_block_size, 1, 1); const dim3 grid_size(ceildiv(result->get_size()[1], block_size.x), 1, 1); 
hipLaunchKernelGGL(( kernel::compute_sqrt), dim3(grid_size), dim3(block_size), 0, 0, result->get_size()[1], as_cuda_type(result->get_values())); } } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_DENSE_COMPUTE_NORM2_KERNEL); namespace kernel { template <typename ValueType, typename IndexType> __global__ __launch_bounds__(default_block_size) void fill_in_coo( size_type num_rows, size_type num_cols, size_type stride, const size_type *__restrict__ row_ptrs, const ValueType *__restrict__ source, IndexType *__restrict__ row_idxs, IndexType *__restrict__ col_idxs, ValueType *__restrict__ values) { const auto tidx = threadIdx.x + blockDim.x * blockIdx.x; if (tidx < num_rows) { size_type write_to = row_ptrs[tidx]; for (size_type i = 0; i < num_cols; i++) { if (source[stride * tidx + i] != zero<ValueType>()) { values[write_to] = source[stride * tidx + i]; col_idxs[write_to] = i; row_idxs[write_to] = tidx; write_to++; } } } } } // namespace kernel template <typename ValueType, typename IndexType> void convert_to_coo(std::shared_ptr<const CudaExecutor> exec, matrix::Coo<ValueType, IndexType> *result, const matrix::Dense<ValueType> *source) { auto num_rows = result->get_size()[0]; auto num_cols = result->get_size()[1]; auto row_idxs = result->get_row_idxs(); auto col_idxs = result->get_col_idxs(); auto values = result->get_values(); auto stride = source->get_stride(); auto nnz_prefix_sum = Array<size_type>(exec, num_rows); calculate_nonzeros_per_row(exec, source, &nnz_prefix_sum); const size_type grid_dim = ceildiv(num_rows, default_block_size); auto add_values = Array<size_type>(exec, grid_dim); hipLaunchKernelGGL(( start_prefix_sum<default_block_size>), dim3(grid_dim), dim3(default_block_size), 0, 0, num_rows, as_cuda_type(nnz_prefix_sum.get_data()), as_cuda_type(add_values.get_data())); hipLaunchKernelGGL(( finalize_prefix_sum<default_block_size>), dim3(grid_dim), dim3(default_block_size), 0, 0, num_rows, as_cuda_type(nnz_prefix_sum.get_data()), as_cuda_type(add_values.get_data())); hipLaunchKernelGGL(( kernel::fill_in_coo), dim3(grid_dim), dim3(default_block_size), 0, 0, num_rows, num_cols, stride, as_cuda_type(nnz_prefix_sum.get_const_data()), as_cuda_type(source->get_const_values()), as_cuda_type(row_idxs), as_cuda_type(col_idxs), as_cuda_type(values)); nnz_prefix_sum.clear(); add_values.clear(); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_DENSE_CONVERT_TO_COO_KERNEL); namespace kernel { template <typename ValueType, typename IndexType> __global__ __launch_bounds__(default_block_size) void count_nnz_per_row( size_type num_rows, size_type num_cols, size_type stride, const ValueType *__restrict__ work, IndexType *__restrict__ result) { constexpr auto warp_size = cuda_config::warp_size; const auto tidx = threadIdx.x + blockIdx.x * blockDim.x; const auto row_idx = tidx / warp_size; if (row_idx < num_rows) { IndexType part_result{}; for (auto i = threadIdx.x % warp_size; i < num_cols; i += warp_size) { if (work[stride * row_idx + i] != zero<ValueType>()) { part_result += 1; } } auto warp_tile = group::tiled_partition<warp_size>(group::this_thread_block()); result[row_idx] = reduce( warp_tile, part_result, [](const size_type &a, const size_type &b) { return a + b; }); } } template <typename ValueType, typename IndexType> __global__ __launch_bounds__(default_block_size) void fill_in_csr( size_type num_rows, size_type num_cols, size_type stride, const ValueType *__restrict__ source, IndexType *__restrict__ row_ptrs, IndexType *__restrict__ col_idxs, ValueType *__restrict__ values) { 
const auto tidx = threadIdx.x + blockDim.x * blockIdx.x; if (tidx < num_rows) { auto write_to = row_ptrs[tidx]; for (auto i = 0; i < num_cols; i++) { if (source[stride * tidx + i] != zero<ValueType>()) { values[write_to] = source[stride * tidx + i]; col_idxs[write_to] = i; write_to++; } } } } } // namespace kernel template <typename ValueType, typename IndexType> void convert_to_csr(std::shared_ptr<const CudaExecutor> exec, matrix::Csr<ValueType, IndexType> *result, const matrix::Dense<ValueType> *source) { auto num_rows = result->get_size()[0]; auto num_cols = result->get_size()[1]; auto row_ptrs = result->get_row_ptrs(); auto col_idxs = result->get_col_idxs(); auto values = result->get_values(); auto stride = source->get_stride(); const auto rows_per_block = ceildiv(default_block_size, cuda_config::warp_size); const auto grid_dim_nnz = ceildiv(source->get_size()[0], rows_per_block); hipLaunchKernelGGL(( kernel::count_nnz_per_row), dim3(grid_dim_nnz), dim3(default_block_size), 0, 0, num_rows, num_cols, stride, as_cuda_type(source->get_const_values()), as_cuda_type(row_ptrs)); size_type grid_dim = ceildiv(num_rows + 1, default_block_size); auto add_values = Array<IndexType>(exec, grid_dim); hipLaunchKernelGGL(( start_prefix_sum<default_block_size>) , dim3(grid_dim), dim3(default_block_size), 0, 0, num_rows + 1, as_cuda_type(row_ptrs), as_cuda_type(add_values.get_data())); hipLaunchKernelGGL(( finalize_prefix_sum<default_block_size>), dim3(grid_dim), dim3(default_block_size), 0, 0, num_rows + 1, as_cuda_type(row_ptrs), as_cuda_type(add_values.get_const_data())); hipLaunchKernelGGL(( kernel::fill_in_csr), dim3(grid_dim), dim3(default_block_size), 0, 0, num_rows, num_cols, stride, as_cuda_type(source->get_const_values()), as_cuda_type(row_ptrs), as_cuda_type(col_idxs), as_cuda_type(values)); add_values.clear(); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_DENSE_CONVERT_TO_CSR_KERNEL); namespace kernel { template <typename ValueType, typename IndexType> __global__ __launch_bounds__(default_block_size) void fill_in_ell( size_type num_rows, size_type num_cols, size_type source_stride, const ValueType *__restrict__ source, size_type max_nnz_per_row, size_type result_stride, IndexType *__restrict__ col_ptrs, ValueType *__restrict__ values) { const auto tidx = threadIdx.x + blockDim.x * blockIdx.x; if (tidx < num_rows) { IndexType col_idx = 0; for (size_type col = 0; col < num_cols; col++) { if (source[tidx * source_stride + col] != zero<ValueType>()) { col_ptrs[col_idx * result_stride + tidx] = col; values[col_idx * result_stride + tidx] = source[tidx * source_stride + col]; col_idx++; } } for (size_type j = col_idx; j < max_nnz_per_row; j++) { col_ptrs[j * result_stride + tidx] = 0; values[j * result_stride + tidx] = zero<ValueType>(); } } else if (tidx < result_stride) { for (size_type j = 0; j < max_nnz_per_row; j++) { col_ptrs[j * result_stride + tidx] = 0; values[j * result_stride + tidx] = zero<ValueType>(); } } } } // namespace kernel template <typename ValueType, typename IndexType> void convert_to_ell(std::shared_ptr<const CudaExecutor> exec, matrix::Ell<ValueType, IndexType> *result, const matrix::Dense<ValueType> *source) { auto num_rows = result->get_size()[0]; auto num_cols = result->get_size()[1]; auto max_nnz_per_row = result->get_num_stored_elements_per_row(); auto col_ptrs = result->get_col_idxs(); auto values = result->get_values(); auto source_stride = source->get_stride(); auto result_stride = result->get_stride(); auto grid_dim = ceildiv(result_stride, 
default_block_size); hipLaunchKernelGGL(( kernel::fill_in_ell), dim3(grid_dim), dim3(default_block_size), 0, 0, num_rows, num_cols, source_stride, as_cuda_type(source->get_const_values()), max_nnz_per_row, result_stride, as_cuda_type(col_ptrs), as_cuda_type(values)); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_DENSE_CONVERT_TO_ELL_KERNEL); template <typename ValueType, typename IndexType> void convert_to_hybrid(std::shared_ptr<const CudaExecutor> exec, matrix::Hybrid<ValueType, IndexType> *result, const matrix::Dense<ValueType> *source) GKO_NOT_IMPLEMENTED; GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_DENSE_CONVERT_TO_HYBRID_KERNEL); namespace kernel { __global__ __launch_bounds__(cuda_config::warp_size) void calculate_slice_lengths( size_type num_rows, size_type slice_size, int slice_num, size_type stride_factor, const size_type *__restrict__ nnz_per_row, size_type *__restrict__ slice_lengths, size_type *__restrict__ slice_sets) { constexpr auto warp_size = cuda_config::warp_size; const auto sliceid = blockIdx.x; const auto tid_in_warp = threadIdx.x; if (sliceid * slice_size + tid_in_warp < num_rows) { size_type thread_result = 0; for (auto i = tid_in_warp; i < slice_size; i += warp_size) { thread_result = (i + slice_size * sliceid < num_rows) ? max(thread_result, nnz_per_row[sliceid * slice_size + i]) : thread_result; } auto warp_tile = group::tiled_partition<warp_size>(group::this_thread_block()); auto warp_result = reduce( warp_tile, thread_result, [](const size_type &a, const size_type &b) { return max(a, b); }); if (tid_in_warp == 0) { auto slice_length = ceildiv(warp_result, stride_factor) * stride_factor; slice_lengths[sliceid] = slice_length; slice_sets[sliceid] = slice_length; } } } template <typename ValueType, typename IndexType> __global__ __launch_bounds__(default_block_size) void fill_in_sellp( size_type num_rows, size_type num_cols, size_type slice_size, size_type stride, const ValueType *__restrict__ source, size_type *__restrict__ slice_lengths, size_type *__restrict__ slice_sets, IndexType *__restrict__ col_idxs, ValueType *__restrict__ vals) { const auto global_row = threadIdx.x + blockIdx.x * blockDim.x; const auto row = global_row % slice_size; const auto sliceid = global_row / slice_size; if (global_row < num_rows) { size_type sellp_ind = slice_sets[sliceid] * slice_size + row; for (size_type col = 0; col < num_cols; col++) { auto val = source[global_row * stride + col]; if (val != zero<ValueType>()) { col_idxs[sellp_ind] = col; vals[sellp_ind] = val; sellp_ind += slice_size; } } for (size_type i = sellp_ind; i < (slice_sets[sliceid] + slice_lengths[sliceid]) * slice_size + row; i += slice_size) { col_idxs[i] = 0; vals[i] = zero<ValueType>(); } } } } // namespace kernel template <typename ValueType, typename IndexType> void convert_to_sellp(std::shared_ptr<const CudaExecutor> exec, matrix::Sellp<ValueType, IndexType> *result, const matrix::Dense<ValueType> *source) { const auto stride = source->get_stride(); const auto num_rows = result->get_size()[0]; const auto num_cols = result->get_size()[1]; auto vals = result->get_values(); auto col_idxs = result->get_col_idxs(); auto slice_lengths = result->get_slice_lengths(); auto slice_sets = result->get_slice_sets(); const auto slice_size = (result->get_slice_size() == 0) ? matrix::default_slice_size : result->get_slice_size(); const auto stride_factor = (result->get_stride_factor() == 0) ? 
matrix::default_stride_factor : result->get_stride_factor(); const int slice_num = ceildiv(num_rows, slice_size); auto nnz_per_row = Array<size_type>(exec, num_rows); calculate_nonzeros_per_row(exec, source, &nnz_per_row); auto grid_dim = slice_num; hipLaunchKernelGGL(( kernel::calculate_slice_lengths), dim3(grid_dim), dim3(cuda_config::warp_size), 0, 0, num_rows, slice_size, slice_num, stride_factor, as_cuda_type(nnz_per_row.get_const_data()), as_cuda_type(slice_lengths), as_cuda_type(slice_sets)); auto add_values = Array<size_type>(exec, ceildiv(slice_num + 1, default_block_size)); grid_dim = ceildiv(slice_num + 1, default_block_size); hipLaunchKernelGGL(( start_prefix_sum<default_block_size>), dim3(grid_dim), dim3(default_block_size), 0, 0, slice_num + 1, as_cuda_type(slice_sets), as_cuda_type(add_values.get_data())); hipLaunchKernelGGL(( finalize_prefix_sum<default_block_size>), dim3(grid_dim), dim3(default_block_size), 0, 0, slice_num + 1, as_cuda_type(slice_sets), as_cuda_type(add_values.get_const_data())); grid_dim = ceildiv(num_rows, default_block_size); hipLaunchKernelGGL(( kernel::fill_in_sellp), dim3(grid_dim), dim3(default_block_size), 0, 0, num_rows, num_cols, slice_size, stride, as_cuda_type(source->get_const_values()), as_cuda_type(slice_lengths), as_cuda_type(slice_sets), as_cuda_type(col_idxs), as_cuda_type(vals)); add_values.clear(); nnz_per_row.clear(); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_DENSE_CONVERT_TO_SELLP_KERNEL); template <typename ValueType, typename IndexType> void convert_to_sparsity_csr(std::shared_ptr<const CudaExecutor> exec, matrix::SparsityCsr<ValueType, IndexType> *result, const matrix::Dense<ValueType> *source) GKO_NOT_IMPLEMENTED; GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_DENSE_CONVERT_TO_SPARSITY_CSR_KERNEL); template <typename ValueType> void count_nonzeros(std::shared_ptr<const CudaExecutor> exec, const matrix::Dense<ValueType> *source, size_type *result) { const auto num_rows = source->get_size()[0]; auto nnz_per_row = Array<size_type>(exec, num_rows); calculate_nonzeros_per_row(exec, source, &nnz_per_row); *result = reduce_add_array(exec, num_rows, nnz_per_row.get_const_data()); nnz_per_row.clear(); } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_DENSE_COUNT_NONZEROS_KERNEL); namespace kernel { __global__ __launch_bounds__(default_block_size) void reduce_max_nnz( size_type size, const size_type *__restrict__ nnz_per_row, size_type *__restrict__ result) { extern __shared__ size_type block_max[]; reduce_array( size, nnz_per_row, block_max, [](const size_type &x, const size_type &y) { return max(x, y); }); if (threadIdx.x == 0) { result[blockIdx.x] = block_max[0]; } } } // namespace kernel template <typename ValueType> void calculate_max_nnz_per_row(std::shared_ptr<const CudaExecutor> exec, const matrix::Dense<ValueType> *source, size_type *result) { const auto num_rows = source->get_size()[0]; auto nnz_per_row = Array<size_type>(exec, num_rows); calculate_nonzeros_per_row(exec, source, &nnz_per_row); const auto n = ceildiv(num_rows, default_block_size); const size_type grid_dim = (n <= default_block_size) ? 
n : default_block_size; auto block_results = Array<size_type>(exec, grid_dim); hipLaunchKernelGGL(( kernel::reduce_max_nnz), dim3(grid_dim), dim3(default_block_size), default_block_size * sizeof(size_type), 0, num_rows, as_cuda_type(nnz_per_row.get_const_data()), as_cuda_type(block_results.get_data())); auto d_result = Array<size_type>(exec, 1); hipLaunchKernelGGL(( kernel::reduce_max_nnz), dim3(1), dim3(default_block_size), default_block_size * sizeof(size_type), 0, grid_dim, as_cuda_type(block_results.get_const_data()), as_cuda_type(d_result.get_data())); exec->get_master()->copy_from(exec.get(), 1, d_result.get_const_data(), result); d_result.clear(); block_results.clear(); nnz_per_row.clear(); } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE( GKO_DECLARE_DENSE_CALCULATE_MAX_NNZ_PER_ROW_KERNEL); template <typename ValueType> void calculate_nonzeros_per_row(std::shared_ptr<const CudaExecutor> exec, const matrix::Dense<ValueType> *source, Array<size_type> *result) { const dim3 block_size(default_block_size, 1, 1); auto rows_per_block = ceildiv(default_block_size, cuda_config::warp_size); const size_t grid_x = ceildiv(source->get_size()[0], rows_per_block); const dim3 grid_size(grid_x, 1, 1); hipLaunchKernelGGL(( kernel::count_nnz_per_row), dim3(grid_size), dim3(block_size), 0, 0, source->get_size()[0], source->get_size()[1], source->get_stride(), as_cuda_type(source->get_const_values()), as_cuda_type(result->get_data())); } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE( GKO_DECLARE_DENSE_CALCULATE_NONZEROS_PER_ROW_KERNEL); namespace kernel { __global__ __launch_bounds__(default_block_size) void reduce_max_nnz_per_slice( size_type num_rows, size_type slice_size, size_type stride_factor, const size_type *__restrict__ nnz_per_row, size_type *__restrict__ result) { const auto tidx = threadIdx.x + blockIdx.x * blockDim.x; constexpr auto warp_size = cuda_config::warp_size; const auto warpid = tidx / warp_size; const auto tid_in_warp = tidx % warp_size; const auto slice_num = ceildiv(num_rows, slice_size); size_type thread_result = 0; for (auto i = tid_in_warp; i < slice_size; i += warp_size) { if (warpid * slice_size + i < num_rows) { thread_result = max(thread_result, nnz_per_row[warpid * slice_size + i]); } } auto warp_tile = group::tiled_partition<warp_size>(group::this_thread_block()); auto warp_result = reduce( warp_tile, thread_result, [](const size_type &a, const size_type &b) { return max(a, b); }); if (tid_in_warp == 0 && warpid < slice_num) { result[warpid] = ceildiv(warp_result, stride_factor) * stride_factor; } } __global__ __launch_bounds__(default_block_size) void reduce_total_cols( size_type num_slices, const size_type *__restrict__ max_nnz_per_slice, size_type *__restrict__ result) { extern __shared__ size_type block_result[]; reduce_array(num_slices, max_nnz_per_slice, block_result, [](const size_type &x, const size_type &y) { return x + y; }); if (threadIdx.x == 0) { result[blockIdx.x] = block_result[0]; } } } // namespace kernel template <typename ValueType> void calculate_total_cols(std::shared_ptr<const CudaExecutor> exec, const matrix::Dense<ValueType> *source, size_type *result, size_type stride_factor, size_type slice_size) { const auto num_rows = source->get_size()[0]; const auto num_cols = source->get_size()[1]; const auto slice_num = ceildiv(num_rows, slice_size); auto nnz_per_row = Array<size_type>(exec, num_rows); calculate_nonzeros_per_row(exec, source, &nnz_per_row); auto max_nnz_per_slice = Array<size_type>(exec, slice_num); auto grid_dim = ceildiv(slice_num * cuda_config::warp_size, 
default_block_size); hipLaunchKernelGGL(( kernel::reduce_max_nnz_per_slice), dim3(grid_dim), dim3(default_block_size), 0, 0, num_rows, slice_size, stride_factor, as_cuda_type(nnz_per_row.get_const_data()), as_cuda_type(max_nnz_per_slice.get_data())); grid_dim = ceildiv(slice_num, default_block_size); auto block_results = Array<size_type>(exec, grid_dim); hipLaunchKernelGGL(( kernel::reduce_total_cols), dim3(grid_dim), dim3(default_block_size), default_block_size * sizeof(size_type), 0, slice_num, as_cuda_type(max_nnz_per_slice.get_const_data()), as_cuda_type(block_results.get_data())); auto d_result = Array<size_type>(exec, 1); hipLaunchKernelGGL(( kernel::reduce_total_cols), dim3(1), dim3(default_block_size), default_block_size * sizeof(size_type), 0, grid_dim, as_cuda_type(block_results.get_const_data()), as_cuda_type(d_result.get_data())); exec->get_master()->copy_from(exec.get(), 1, d_result.get_const_data(), result); block_results.clear(); nnz_per_row.clear(); max_nnz_per_slice.clear(); d_result.clear(); } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE( GKO_DECLARE_DENSE_CALCULATE_TOTAL_COLS_KERNEL); template <typename ValueType> void transpose(std::shared_ptr<const CudaExecutor> exec, matrix::Dense<ValueType> *trans, const matrix::Dense<ValueType> *orig) { if (cublas::is_supported<ValueType>::value) { auto handle = exec->get_cublas_handle(); { cublas::pointer_mode_guard pm_guard(handle); auto alpha = one<ValueType>(); auto beta = zero<ValueType>(); cublas::geam( handle, HIPBLAS_OP_T, HIPBLAS_OP_N, orig->get_size()[0], orig->get_size()[1], &alpha, orig->get_const_values(), orig->get_stride(), &beta, static_cast<ValueType *>(nullptr), trans->get_size()[1], trans->get_values(), trans->get_stride()); } } else { GKO_NOT_IMPLEMENTED; } }; GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_TRANSPOSE_KERNEL); template <typename ValueType> void conj_transpose(std::shared_ptr<const CudaExecutor> exec, matrix::Dense<ValueType> *trans, const matrix::Dense<ValueType> *orig) { if (cublas::is_supported<ValueType>::value) { auto handle = exec->get_cublas_handle(); { cublas::pointer_mode_guard pm_guard(handle); auto alpha = one<ValueType>(); auto beta = zero<ValueType>(); cublas::geam( handle, HIPBLAS_OP_C, HIPBLAS_OP_N, orig->get_size()[0], orig->get_size()[1], &alpha, orig->get_const_values(), orig->get_stride(), &beta, static_cast<ValueType *>(nullptr), trans->get_size()[1], trans->get_values(), trans->get_stride()); } } else { GKO_NOT_IMPLEMENTED; } }; GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_CONJ_TRANSPOSE_KERNEL); } // namespace dense } // namespace cuda } // namespace kernels } // namespace gko
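The dense-to-COO/CSR conversions in the file above follow one recurring structure: count nonzeros per row (kernel::count_nnz_per_row), prefix-sum the counts into offsets (start_prefix_sum / finalize_prefix_sum), then scatter values and column indices at the computed offsets (kernel::fill_in_coo / fill_in_csr). The host-only sketch below shows that same three-phase structure in plain C++; it is my own illustration (the Csr struct and dense_to_csr are made-up names, row-major and unstrided for simplicity), not Ginkgo's API:

#include <cstddef>
#include <vector>

// CSR output arrays produced by the three-phase conversion.
struct Csr {
    std::vector<std::size_t> row_ptrs;  // size rows + 1
    std::vector<std::size_t> col_idxs;
    std::vector<double> values;
};

Csr dense_to_csr(const std::vector<double> &a, std::size_t rows, std::size_t cols)
{
    Csr out;
    out.row_ptrs.assign(rows + 1, 0);

    // Phase 1: nonzeros per row (done warp-wise by kernel::count_nnz_per_row).
    for (std::size_t r = 0; r < rows; ++r)
        for (std::size_t c = 0; c < cols; ++c)
            if (a[r * cols + c] != 0.0) ++out.row_ptrs[r + 1];

    // Phase 2: prefix sum turns counts into row offsets
    // (start_prefix_sum / finalize_prefix_sum on the device).
    for (std::size_t r = 0; r < rows; ++r)
        out.row_ptrs[r + 1] += out.row_ptrs[r];

    // Phase 3: fill values and column indices at the precomputed offsets
    // (kernel::fill_in_csr).
    out.col_idxs.resize(out.row_ptrs[rows]);
    out.values.resize(out.row_ptrs[rows]);
    std::vector<std::size_t> next(out.row_ptrs.begin(), out.row_ptrs.end() - 1);
    for (std::size_t r = 0; r < rows; ++r)
        for (std::size_t c = 0; c < cols; ++c)
            if (a[r * cols + c] != 0.0) {
                out.col_idxs[next[r]] = c;
                out.values[next[r]] = a[r * cols + c];
                ++next[r];
            }
    return out;
}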
0eaa08bc819107ed0564b5f81ac042c2f4fa8694.cu
/*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2019, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************<GINKGO LICENSE>*******************************/ #include "core/matrix/dense_kernels.hpp" #include <ginkgo/core/base/math.hpp> #include <ginkgo/core/base/range_accessors.hpp> #include <ginkgo/core/matrix/coo.hpp> #include <ginkgo/core/matrix/csr.hpp> #include <ginkgo/core/matrix/ell.hpp> #include <ginkgo/core/matrix/sellp.hpp> #include <ginkgo/core/matrix/sparsity_csr.hpp> #include "cuda/base/cublas_bindings.hpp" #include "cuda/base/pointer_mode_guard.hpp" #include "cuda/components/cooperative_groups.cuh" #include "cuda/components/prefix_sum.cuh" #include "cuda/components/reduction.cuh" #include "cuda/components/uninitialized_array.hpp" namespace gko { namespace kernels { namespace cuda { /** * @brief The Dense matrix format namespace. 
* * @ingroup dense */ namespace dense { constexpr auto default_block_size = 512; template <typename ValueType> void simple_apply(std::shared_ptr<const CudaExecutor> exec, const matrix::Dense<ValueType> *a, const matrix::Dense<ValueType> *b, matrix::Dense<ValueType> *c) { if (cublas::is_supported<ValueType>::value) { auto handle = exec->get_cublas_handle(); { cublas::pointer_mode_guard pm_guard(handle); auto alpha = one<ValueType>(); auto beta = zero<ValueType>(); cublas::gemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, c->get_size()[1], c->get_size()[0], a->get_size()[1], &alpha, b->get_const_values(), b->get_stride(), a->get_const_values(), a->get_stride(), &beta, c->get_values(), c->get_stride()); } } else { GKO_NOT_IMPLEMENTED; } } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_DENSE_SIMPLE_APPLY_KERNEL); template <typename ValueType> void apply(std::shared_ptr<const CudaExecutor> exec, const matrix::Dense<ValueType> *alpha, const matrix::Dense<ValueType> *a, const matrix::Dense<ValueType> *b, const matrix::Dense<ValueType> *beta, matrix::Dense<ValueType> *c) { if (cublas::is_supported<ValueType>::value) { cublas::gemm(exec->get_cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_N, c->get_size()[1], c->get_size()[0], a->get_size()[1], alpha->get_const_values(), b->get_const_values(), b->get_stride(), a->get_const_values(), a->get_stride(), beta->get_const_values(), c->get_values(), c->get_stride()); } else { GKO_NOT_IMPLEMENTED; } } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_DENSE_APPLY_KERNEL); namespace kernel { template <size_type block_size, typename ValueType> __global__ __launch_bounds__(block_size) void scale( size_type num_rows, size_type num_cols, size_type num_alpha_cols, const ValueType *__restrict__ alpha, ValueType *__restrict__ x, size_type stride_x) { constexpr auto warps_per_block = block_size / cuda_config::warp_size; const auto global_id = thread::get_thread_id<cuda_config::warp_size, warps_per_block>(); const auto row_id = global_id / num_cols; const auto col_id = global_id % num_cols; const auto alpha_id = num_alpha_cols == 1 ? 0 : col_id; if (row_id < num_rows) { x[row_id * stride_x + col_id] = alpha[alpha_id] == zero<ValueType>() ? 
zero<ValueType>() : x[row_id * stride_x + col_id] * alpha[alpha_id]; } } } // namespace kernel template <typename ValueType> void scale(std::shared_ptr<const CudaExecutor> exec, const matrix::Dense<ValueType> *alpha, matrix::Dense<ValueType> *x) { if (cublas::is_supported<ValueType>::value && x->get_size()[1] == 1) { cublas::scal(exec->get_cublas_handle(), x->get_size()[0], alpha->get_const_values(), x->get_values(), x->get_stride()); } else { // TODO: tune this parameter constexpr auto block_size = default_block_size; const dim3 grid_dim = ceildiv(x->get_size()[0] * x->get_size()[1], block_size); const dim3 block_dim{cuda_config::warp_size, 1, block_size / cuda_config::warp_size}; kernel::scale<block_size><<<grid_dim, block_dim>>>( x->get_size()[0], x->get_size()[1], alpha->get_size()[1], as_cuda_type(alpha->get_const_values()), as_cuda_type(x->get_values()), x->get_stride()); } } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_DENSE_SCALE_KERNEL); namespace kernel { template <size_type block_size, typename ValueType> __global__ __launch_bounds__(block_size) void add_scaled( size_type num_rows, size_type num_cols, size_type num_alpha_cols, const ValueType *__restrict__ alpha, const ValueType *__restrict__ x, size_type stride_x, ValueType *__restrict__ y, size_type stride_y) { constexpr auto warps_per_block = block_size / cuda_config::warp_size; const auto global_id = thread::get_thread_id<cuda_config::warp_size, warps_per_block>(); const auto row_id = global_id / num_cols; const auto col_id = global_id % num_cols; const auto alpha_id = num_alpha_cols == 1 ? 0 : col_id; if (row_id < num_rows && alpha[alpha_id] != zero<ValueType>()) { y[row_id * stride_y + col_id] += x[row_id * stride_x + col_id] * alpha[alpha_id]; } } } // namespace kernel template <typename ValueType> void add_scaled(std::shared_ptr<const CudaExecutor> exec, const matrix::Dense<ValueType> *alpha, const matrix::Dense<ValueType> *x, matrix::Dense<ValueType> *y) { if (cublas::is_supported<ValueType>::value && x->get_size()[1] == 1) { cublas::axpy(exec->get_cublas_handle(), x->get_size()[0], alpha->get_const_values(), x->get_const_values(), x->get_stride(), y->get_values(), y->get_stride()); } else { // TODO: tune this parameter constexpr auto block_size = default_block_size; const dim3 grid_dim = ceildiv(x->get_size()[0] * x->get_size()[1], block_size); const dim3 block_dim{cuda_config::warp_size, 1, block_size / cuda_config::warp_size}; kernel::add_scaled<block_size><<<grid_dim, block_dim>>>( x->get_size()[0], x->get_size()[1], alpha->get_size()[1], as_cuda_type(alpha->get_const_values()), as_cuda_type(x->get_const_values()), x->get_stride(), as_cuda_type(y->get_values()), y->get_stride()); } } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_DENSE_ADD_SCALED_KERNEL); namespace kernel { template <size_type block_size, typename ValueType> __global__ __launch_bounds__(block_size) void compute_partial_dot( size_type num_rows, const ValueType *__restrict__ x, size_type stride_x, const ValueType *__restrict__ y, size_type stride_y, ValueType *__restrict__ work) { constexpr auto warps_per_block = block_size / cuda_config::warp_size; const auto num_blocks = gridDim.x; const auto local_id = thread::get_local_thread_id<cuda_config::warp_size>(); const auto global_id = thread::get_thread_id<cuda_config::warp_size, warps_per_block>(); auto tmp = zero<ValueType>(); for (auto i = global_id; i < num_rows; i += block_size * num_blocks) { tmp += x[i * stride_x] * y[i * stride_y]; } __shared__ UninitializedArray<ValueType, block_size> 
tmp_work; tmp_work[local_id] = tmp; reduce(group::this_thread_block(), static_cast<ValueType *>(tmp_work), [](const ValueType &x, const ValueType &y) { return x + y; }); if (local_id == 0) { work[thread::get_block_id()] = tmp_work[0]; } } template <size_type block_size, typename ValueType> __global__ __launch_bounds__(block_size) void finalize_dot_computation( size_type size, const ValueType *work, ValueType *result) { const auto local_id = thread::get_local_thread_id<cuda_config::warp_size>(); ValueType tmp = zero<ValueType>(); for (auto i = local_id; i < size; i += block_size) { tmp += work[i]; } __shared__ UninitializedArray<ValueType, block_size> tmp_work; tmp_work[local_id] = tmp; reduce(group::this_thread_block(), static_cast<ValueType *>(tmp_work), [](const ValueType &x, const ValueType &y) { return x + y; }); if (local_id == 0) { *result = tmp_work[0]; } } } // namespace kernel template <typename ValueType> void compute_dot(std::shared_ptr<const CudaExecutor> exec, const matrix::Dense<ValueType> *x, const matrix::Dense<ValueType> *y, matrix::Dense<ValueType> *result) { if (cublas::is_supported<ValueType>::value) { // TODO: write a custom kernel which does this more efficiently for (size_type col = 0; col < x->get_size()[1]; ++col) { cublas::dot(exec->get_cublas_handle(), x->get_size()[0], x->get_const_values() + col, x->get_stride(), y->get_const_values() + col, y->get_stride(), result->get_values() + col); } } else { // TODO: these are tuning parameters obtained experimentally, once // we decide how to handle this uniformly, they should be modified // appropriately constexpr auto work_per_thread = 32; constexpr auto block_size = 1024; constexpr auto work_per_block = work_per_thread * block_size; const dim3 grid_dim = ceildiv(x->get_size()[0], work_per_block); const dim3 block_dim{cuda_config::warp_size, 1, block_size / cuda_config::warp_size}; Array<ValueType> work(exec, grid_dim.x); // TODO: write a kernel which does this more efficiently for (size_type col = 0; col < x->get_size()[1]; ++col) { kernel::compute_partial_dot<block_size><<<grid_dim, block_dim>>>( x->get_size()[0], as_cuda_type(x->get_const_values() + col), x->get_stride(), as_cuda_type(y->get_const_values() + col), y->get_stride(), as_cuda_type(work.get_data())); kernel::finalize_dot_computation<block_size><<<1, block_dim>>>( grid_dim.x, as_cuda_type(work.get_const_data()), as_cuda_type(result->get_values() + col)); } } } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_DENSE_COMPUTE_DOT_KERNEL); namespace kernel { template <typename ValueType> __global__ __launch_bounds__(default_block_size) void compute_sqrt( size_type num_cols, ValueType *__restrict__ work) { const auto tidx = static_cast<size_type>(blockDim.x) * blockIdx.x + threadIdx.x; if (tidx < num_cols) { work[tidx] = sqrt(abs(work[tidx])); } } } // namespace kernel template <typename ValueType> void compute_norm2(std::shared_ptr<const CudaExecutor> exec, const matrix::Dense<ValueType> *x, matrix::Dense<ValueType> *result) { if (cublas::is_supported<ValueType>::value) { for (size_type col = 0; col < x->get_size()[1]; ++col) { cublas::norm2(exec->get_cublas_handle(), x->get_size()[0], x->get_const_values() + col, x->get_stride(), result->get_values() + col); } } else { compute_dot(exec, x, x, result); const dim3 block_size(default_block_size, 1, 1); const dim3 grid_size(ceildiv(result->get_size()[1], block_size.x), 1, 1); kernel::compute_sqrt<<<grid_size, block_size, 0, 0>>>( result->get_size()[1], as_cuda_type(result->get_values())); } } 
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_DENSE_COMPUTE_NORM2_KERNEL); namespace kernel { template <typename ValueType, typename IndexType> __global__ __launch_bounds__(default_block_size) void fill_in_coo( size_type num_rows, size_type num_cols, size_type stride, const size_type *__restrict__ row_ptrs, const ValueType *__restrict__ source, IndexType *__restrict__ row_idxs, IndexType *__restrict__ col_idxs, ValueType *__restrict__ values) { const auto tidx = threadIdx.x + blockDim.x * blockIdx.x; if (tidx < num_rows) { size_type write_to = row_ptrs[tidx]; for (size_type i = 0; i < num_cols; i++) { if (source[stride * tidx + i] != zero<ValueType>()) { values[write_to] = source[stride * tidx + i]; col_idxs[write_to] = i; row_idxs[write_to] = tidx; write_to++; } } } } } // namespace kernel template <typename ValueType, typename IndexType> void convert_to_coo(std::shared_ptr<const CudaExecutor> exec, matrix::Coo<ValueType, IndexType> *result, const matrix::Dense<ValueType> *source) { auto num_rows = result->get_size()[0]; auto num_cols = result->get_size()[1]; auto row_idxs = result->get_row_idxs(); auto col_idxs = result->get_col_idxs(); auto values = result->get_values(); auto stride = source->get_stride(); auto nnz_prefix_sum = Array<size_type>(exec, num_rows); calculate_nonzeros_per_row(exec, source, &nnz_prefix_sum); const size_type grid_dim = ceildiv(num_rows, default_block_size); auto add_values = Array<size_type>(exec, grid_dim); start_prefix_sum<default_block_size><<<grid_dim, default_block_size>>>( num_rows, as_cuda_type(nnz_prefix_sum.get_data()), as_cuda_type(add_values.get_data())); finalize_prefix_sum<default_block_size><<<grid_dim, default_block_size>>>( num_rows, as_cuda_type(nnz_prefix_sum.get_data()), as_cuda_type(add_values.get_data())); kernel::fill_in_coo<<<grid_dim, default_block_size>>>( num_rows, num_cols, stride, as_cuda_type(nnz_prefix_sum.get_const_data()), as_cuda_type(source->get_const_values()), as_cuda_type(row_idxs), as_cuda_type(col_idxs), as_cuda_type(values)); nnz_prefix_sum.clear(); add_values.clear(); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_DENSE_CONVERT_TO_COO_KERNEL); namespace kernel { template <typename ValueType, typename IndexType> __global__ __launch_bounds__(default_block_size) void count_nnz_per_row( size_type num_rows, size_type num_cols, size_type stride, const ValueType *__restrict__ work, IndexType *__restrict__ result) { constexpr auto warp_size = cuda_config::warp_size; const auto tidx = threadIdx.x + blockIdx.x * blockDim.x; const auto row_idx = tidx / warp_size; if (row_idx < num_rows) { IndexType part_result{}; for (auto i = threadIdx.x % warp_size; i < num_cols; i += warp_size) { if (work[stride * row_idx + i] != zero<ValueType>()) { part_result += 1; } } auto warp_tile = group::tiled_partition<warp_size>(group::this_thread_block()); result[row_idx] = reduce( warp_tile, part_result, [](const size_type &a, const size_type &b) { return a + b; }); } } template <typename ValueType, typename IndexType> __global__ __launch_bounds__(default_block_size) void fill_in_csr( size_type num_rows, size_type num_cols, size_type stride, const ValueType *__restrict__ source, IndexType *__restrict__ row_ptrs, IndexType *__restrict__ col_idxs, ValueType *__restrict__ values) { const auto tidx = threadIdx.x + blockDim.x * blockIdx.x; if (tidx < num_rows) { auto write_to = row_ptrs[tidx]; for (auto i = 0; i < num_cols; i++) { if (source[stride * tidx + i] != zero<ValueType>()) { values[write_to] = source[stride * tidx + i]; 
col_idxs[write_to] = i; write_to++; } } } } } // namespace kernel template <typename ValueType, typename IndexType> void convert_to_csr(std::shared_ptr<const CudaExecutor> exec, matrix::Csr<ValueType, IndexType> *result, const matrix::Dense<ValueType> *source) { auto num_rows = result->get_size()[0]; auto num_cols = result->get_size()[1]; auto row_ptrs = result->get_row_ptrs(); auto col_idxs = result->get_col_idxs(); auto values = result->get_values(); auto stride = source->get_stride(); const auto rows_per_block = ceildiv(default_block_size, cuda_config::warp_size); const auto grid_dim_nnz = ceildiv(source->get_size()[0], rows_per_block); kernel::count_nnz_per_row<<<grid_dim_nnz, default_block_size>>>( num_rows, num_cols, stride, as_cuda_type(source->get_const_values()), as_cuda_type(row_ptrs)); size_type grid_dim = ceildiv(num_rows + 1, default_block_size); auto add_values = Array<IndexType>(exec, grid_dim); start_prefix_sum<default_block_size> <<<grid_dim, default_block_size>>>(num_rows + 1, as_cuda_type(row_ptrs), as_cuda_type(add_values.get_data())); finalize_prefix_sum<default_block_size><<<grid_dim, default_block_size>>>( num_rows + 1, as_cuda_type(row_ptrs), as_cuda_type(add_values.get_const_data())); kernel::fill_in_csr<<<grid_dim, default_block_size>>>( num_rows, num_cols, stride, as_cuda_type(source->get_const_values()), as_cuda_type(row_ptrs), as_cuda_type(col_idxs), as_cuda_type(values)); add_values.clear(); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_DENSE_CONVERT_TO_CSR_KERNEL); namespace kernel { template <typename ValueType, typename IndexType> __global__ __launch_bounds__(default_block_size) void fill_in_ell( size_type num_rows, size_type num_cols, size_type source_stride, const ValueType *__restrict__ source, size_type max_nnz_per_row, size_type result_stride, IndexType *__restrict__ col_ptrs, ValueType *__restrict__ values) { const auto tidx = threadIdx.x + blockDim.x * blockIdx.x; if (tidx < num_rows) { IndexType col_idx = 0; for (size_type col = 0; col < num_cols; col++) { if (source[tidx * source_stride + col] != zero<ValueType>()) { col_ptrs[col_idx * result_stride + tidx] = col; values[col_idx * result_stride + tidx] = source[tidx * source_stride + col]; col_idx++; } } for (size_type j = col_idx; j < max_nnz_per_row; j++) { col_ptrs[j * result_stride + tidx] = 0; values[j * result_stride + tidx] = zero<ValueType>(); } } else if (tidx < result_stride) { for (size_type j = 0; j < max_nnz_per_row; j++) { col_ptrs[j * result_stride + tidx] = 0; values[j * result_stride + tidx] = zero<ValueType>(); } } } } // namespace kernel template <typename ValueType, typename IndexType> void convert_to_ell(std::shared_ptr<const CudaExecutor> exec, matrix::Ell<ValueType, IndexType> *result, const matrix::Dense<ValueType> *source) { auto num_rows = result->get_size()[0]; auto num_cols = result->get_size()[1]; auto max_nnz_per_row = result->get_num_stored_elements_per_row(); auto col_ptrs = result->get_col_idxs(); auto values = result->get_values(); auto source_stride = source->get_stride(); auto result_stride = result->get_stride(); auto grid_dim = ceildiv(result_stride, default_block_size); kernel::fill_in_ell<<<grid_dim, default_block_size>>>( num_rows, num_cols, source_stride, as_cuda_type(source->get_const_values()), max_nnz_per_row, result_stride, as_cuda_type(col_ptrs), as_cuda_type(values)); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_DENSE_CONVERT_TO_ELL_KERNEL); template <typename ValueType, typename IndexType> void 
convert_to_hybrid(std::shared_ptr<const CudaExecutor> exec, matrix::Hybrid<ValueType, IndexType> *result, const matrix::Dense<ValueType> *source) GKO_NOT_IMPLEMENTED; GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_DENSE_CONVERT_TO_HYBRID_KERNEL); namespace kernel { __global__ __launch_bounds__(cuda_config::warp_size) void calculate_slice_lengths( size_type num_rows, size_type slice_size, int slice_num, size_type stride_factor, const size_type *__restrict__ nnz_per_row, size_type *__restrict__ slice_lengths, size_type *__restrict__ slice_sets) { constexpr auto warp_size = cuda_config::warp_size; const auto sliceid = blockIdx.x; const auto tid_in_warp = threadIdx.x; if (sliceid * slice_size + tid_in_warp < num_rows) { size_type thread_result = 0; for (auto i = tid_in_warp; i < slice_size; i += warp_size) { thread_result = (i + slice_size * sliceid < num_rows) ? max(thread_result, nnz_per_row[sliceid * slice_size + i]) : thread_result; } auto warp_tile = group::tiled_partition<warp_size>(group::this_thread_block()); auto warp_result = reduce( warp_tile, thread_result, [](const size_type &a, const size_type &b) { return max(a, b); }); if (tid_in_warp == 0) { auto slice_length = ceildiv(warp_result, stride_factor) * stride_factor; slice_lengths[sliceid] = slice_length; slice_sets[sliceid] = slice_length; } } } template <typename ValueType, typename IndexType> __global__ __launch_bounds__(default_block_size) void fill_in_sellp( size_type num_rows, size_type num_cols, size_type slice_size, size_type stride, const ValueType *__restrict__ source, size_type *__restrict__ slice_lengths, size_type *__restrict__ slice_sets, IndexType *__restrict__ col_idxs, ValueType *__restrict__ vals) { const auto global_row = threadIdx.x + blockIdx.x * blockDim.x; const auto row = global_row % slice_size; const auto sliceid = global_row / slice_size; if (global_row < num_rows) { size_type sellp_ind = slice_sets[sliceid] * slice_size + row; for (size_type col = 0; col < num_cols; col++) { auto val = source[global_row * stride + col]; if (val != zero<ValueType>()) { col_idxs[sellp_ind] = col; vals[sellp_ind] = val; sellp_ind += slice_size; } } for (size_type i = sellp_ind; i < (slice_sets[sliceid] + slice_lengths[sliceid]) * slice_size + row; i += slice_size) { col_idxs[i] = 0; vals[i] = zero<ValueType>(); } } } } // namespace kernel template <typename ValueType, typename IndexType> void convert_to_sellp(std::shared_ptr<const CudaExecutor> exec, matrix::Sellp<ValueType, IndexType> *result, const matrix::Dense<ValueType> *source) { const auto stride = source->get_stride(); const auto num_rows = result->get_size()[0]; const auto num_cols = result->get_size()[1]; auto vals = result->get_values(); auto col_idxs = result->get_col_idxs(); auto slice_lengths = result->get_slice_lengths(); auto slice_sets = result->get_slice_sets(); const auto slice_size = (result->get_slice_size() == 0) ? matrix::default_slice_size : result->get_slice_size(); const auto stride_factor = (result->get_stride_factor() == 0) ? 
matrix::default_stride_factor : result->get_stride_factor(); const int slice_num = ceildiv(num_rows, slice_size); auto nnz_per_row = Array<size_type>(exec, num_rows); calculate_nonzeros_per_row(exec, source, &nnz_per_row); auto grid_dim = slice_num; kernel::calculate_slice_lengths<<<grid_dim, cuda_config::warp_size>>>( num_rows, slice_size, slice_num, stride_factor, as_cuda_type(nnz_per_row.get_const_data()), as_cuda_type(slice_lengths), as_cuda_type(slice_sets)); auto add_values = Array<size_type>(exec, ceildiv(slice_num + 1, default_block_size)); grid_dim = ceildiv(slice_num + 1, default_block_size); start_prefix_sum<default_block_size><<<grid_dim, default_block_size>>>( slice_num + 1, as_cuda_type(slice_sets), as_cuda_type(add_values.get_data())); finalize_prefix_sum<default_block_size><<<grid_dim, default_block_size>>>( slice_num + 1, as_cuda_type(slice_sets), as_cuda_type(add_values.get_const_data())); grid_dim = ceildiv(num_rows, default_block_size); kernel::fill_in_sellp<<<grid_dim, default_block_size>>>( num_rows, num_cols, slice_size, stride, as_cuda_type(source->get_const_values()), as_cuda_type(slice_lengths), as_cuda_type(slice_sets), as_cuda_type(col_idxs), as_cuda_type(vals)); add_values.clear(); nnz_per_row.clear(); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_DENSE_CONVERT_TO_SELLP_KERNEL); template <typename ValueType, typename IndexType> void convert_to_sparsity_csr(std::shared_ptr<const CudaExecutor> exec, matrix::SparsityCsr<ValueType, IndexType> *result, const matrix::Dense<ValueType> *source) GKO_NOT_IMPLEMENTED; GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_DENSE_CONVERT_TO_SPARSITY_CSR_KERNEL); template <typename ValueType> void count_nonzeros(std::shared_ptr<const CudaExecutor> exec, const matrix::Dense<ValueType> *source, size_type *result) { const auto num_rows = source->get_size()[0]; auto nnz_per_row = Array<size_type>(exec, num_rows); calculate_nonzeros_per_row(exec, source, &nnz_per_row); *result = reduce_add_array(exec, num_rows, nnz_per_row.get_const_data()); nnz_per_row.clear(); } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_DENSE_COUNT_NONZEROS_KERNEL); namespace kernel { __global__ __launch_bounds__(default_block_size) void reduce_max_nnz( size_type size, const size_type *__restrict__ nnz_per_row, size_type *__restrict__ result) { extern __shared__ size_type block_max[]; reduce_array( size, nnz_per_row, block_max, [](const size_type &x, const size_type &y) { return max(x, y); }); if (threadIdx.x == 0) { result[blockIdx.x] = block_max[0]; } } } // namespace kernel template <typename ValueType> void calculate_max_nnz_per_row(std::shared_ptr<const CudaExecutor> exec, const matrix::Dense<ValueType> *source, size_type *result) { const auto num_rows = source->get_size()[0]; auto nnz_per_row = Array<size_type>(exec, num_rows); calculate_nonzeros_per_row(exec, source, &nnz_per_row); const auto n = ceildiv(num_rows, default_block_size); const size_type grid_dim = (n <= default_block_size) ? 
n : default_block_size; auto block_results = Array<size_type>(exec, grid_dim); kernel::reduce_max_nnz<<<grid_dim, default_block_size, default_block_size * sizeof(size_type)>>>( num_rows, as_cuda_type(nnz_per_row.get_const_data()), as_cuda_type(block_results.get_data())); auto d_result = Array<size_type>(exec, 1); kernel::reduce_max_nnz<<<1, default_block_size, default_block_size * sizeof(size_type)>>>( grid_dim, as_cuda_type(block_results.get_const_data()), as_cuda_type(d_result.get_data())); exec->get_master()->copy_from(exec.get(), 1, d_result.get_const_data(), result); d_result.clear(); block_results.clear(); nnz_per_row.clear(); } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE( GKO_DECLARE_DENSE_CALCULATE_MAX_NNZ_PER_ROW_KERNEL); template <typename ValueType> void calculate_nonzeros_per_row(std::shared_ptr<const CudaExecutor> exec, const matrix::Dense<ValueType> *source, Array<size_type> *result) { const dim3 block_size(default_block_size, 1, 1); auto rows_per_block = ceildiv(default_block_size, cuda_config::warp_size); const size_t grid_x = ceildiv(source->get_size()[0], rows_per_block); const dim3 grid_size(grid_x, 1, 1); kernel::count_nnz_per_row<<<grid_size, block_size>>>( source->get_size()[0], source->get_size()[1], source->get_stride(), as_cuda_type(source->get_const_values()), as_cuda_type(result->get_data())); } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE( GKO_DECLARE_DENSE_CALCULATE_NONZEROS_PER_ROW_KERNEL); namespace kernel { __global__ __launch_bounds__(default_block_size) void reduce_max_nnz_per_slice( size_type num_rows, size_type slice_size, size_type stride_factor, const size_type *__restrict__ nnz_per_row, size_type *__restrict__ result) { const auto tidx = threadIdx.x + blockIdx.x * blockDim.x; constexpr auto warp_size = cuda_config::warp_size; const auto warpid = tidx / warp_size; const auto tid_in_warp = tidx % warp_size; const auto slice_num = ceildiv(num_rows, slice_size); size_type thread_result = 0; for (auto i = tid_in_warp; i < slice_size; i += warp_size) { if (warpid * slice_size + i < num_rows) { thread_result = max(thread_result, nnz_per_row[warpid * slice_size + i]); } } auto warp_tile = group::tiled_partition<warp_size>(group::this_thread_block()); auto warp_result = reduce( warp_tile, thread_result, [](const size_type &a, const size_type &b) { return max(a, b); }); if (tid_in_warp == 0 && warpid < slice_num) { result[warpid] = ceildiv(warp_result, stride_factor) * stride_factor; } } __global__ __launch_bounds__(default_block_size) void reduce_total_cols( size_type num_slices, const size_type *__restrict__ max_nnz_per_slice, size_type *__restrict__ result) { extern __shared__ size_type block_result[]; reduce_array(num_slices, max_nnz_per_slice, block_result, [](const size_type &x, const size_type &y) { return x + y; }); if (threadIdx.x == 0) { result[blockIdx.x] = block_result[0]; } } } // namespace kernel template <typename ValueType> void calculate_total_cols(std::shared_ptr<const CudaExecutor> exec, const matrix::Dense<ValueType> *source, size_type *result, size_type stride_factor, size_type slice_size) { const auto num_rows = source->get_size()[0]; const auto num_cols = source->get_size()[1]; const auto slice_num = ceildiv(num_rows, slice_size); auto nnz_per_row = Array<size_type>(exec, num_rows); calculate_nonzeros_per_row(exec, source, &nnz_per_row); auto max_nnz_per_slice = Array<size_type>(exec, slice_num); auto grid_dim = ceildiv(slice_num * cuda_config::warp_size, default_block_size); kernel::reduce_max_nnz_per_slice<<<grid_dim, default_block_size>>>( num_rows, 
slice_size, stride_factor, as_cuda_type(nnz_per_row.get_const_data()), as_cuda_type(max_nnz_per_slice.get_data())); grid_dim = ceildiv(slice_num, default_block_size); auto block_results = Array<size_type>(exec, grid_dim); kernel::reduce_total_cols<<<grid_dim, default_block_size, default_block_size * sizeof(size_type)>>>( slice_num, as_cuda_type(max_nnz_per_slice.get_const_data()), as_cuda_type(block_results.get_data())); auto d_result = Array<size_type>(exec, 1); kernel::reduce_total_cols<<<1, default_block_size, default_block_size * sizeof(size_type)>>>( grid_dim, as_cuda_type(block_results.get_const_data()), as_cuda_type(d_result.get_data())); exec->get_master()->copy_from(exec.get(), 1, d_result.get_const_data(), result); block_results.clear(); nnz_per_row.clear(); max_nnz_per_slice.clear(); d_result.clear(); } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE( GKO_DECLARE_DENSE_CALCULATE_TOTAL_COLS_KERNEL); template <typename ValueType> void transpose(std::shared_ptr<const CudaExecutor> exec, matrix::Dense<ValueType> *trans, const matrix::Dense<ValueType> *orig) { if (cublas::is_supported<ValueType>::value) { auto handle = exec->get_cublas_handle(); { cublas::pointer_mode_guard pm_guard(handle); auto alpha = one<ValueType>(); auto beta = zero<ValueType>(); cublas::geam( handle, CUBLAS_OP_T, CUBLAS_OP_N, orig->get_size()[0], orig->get_size()[1], &alpha, orig->get_const_values(), orig->get_stride(), &beta, static_cast<ValueType *>(nullptr), trans->get_size()[1], trans->get_values(), trans->get_stride()); } } else { GKO_NOT_IMPLEMENTED; } }; GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_TRANSPOSE_KERNEL); template <typename ValueType> void conj_transpose(std::shared_ptr<const CudaExecutor> exec, matrix::Dense<ValueType> *trans, const matrix::Dense<ValueType> *orig) { if (cublas::is_supported<ValueType>::value) { auto handle = exec->get_cublas_handle(); { cublas::pointer_mode_guard pm_guard(handle); auto alpha = one<ValueType>(); auto beta = zero<ValueType>(); cublas::geam( handle, CUBLAS_OP_C, CUBLAS_OP_N, orig->get_size()[0], orig->get_size()[1], &alpha, orig->get_const_values(), orig->get_stride(), &beta, static_cast<ValueType *>(nullptr), trans->get_size()[1], trans->get_values(), trans->get_stride()); } } else { GKO_NOT_IMPLEMENTED; } }; GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_CONJ_TRANSPOSE_KERNEL); } // namespace dense } // namespace cuda } // namespace kernels } // namespace gko
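// ---------------------------------------------------------------------------
// The Ginkgo dense->CSR kernels above (count_nnz_per_row, the start/finalize
// prefix-sum pair, fill_in_csr) follow the usual three-step conversion:
// (1) count the nonzeros of each row, (2) prefix-sum those counts into
// row_ptrs, (3) scatter values/column indices using row_ptrs as write offsets.
// The self-contained CUDA sketch below shows the same pattern in its simplest
// form; it is NOT Ginkgo's API. The kernel/function names, the thread-per-row
// mapping (Ginkgo uses a warp per row), the contiguous row-major layout, and
// the use of Thrust for the scan are all assumptions made for illustration.
// ---------------------------------------------------------------------------
#include <thrust/device_vector.h>
#include <thrust/scan.h>

__global__ void count_row_nnz(int n_rows, int n_cols, const double *dense,
                              int *row_nnz)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= n_rows) return;
    int nnz = 0;
    for (int col = 0; col < n_cols; ++col) {
        nnz += (dense[row * n_cols + col] != 0.0);
    }
    row_nnz[row] = nnz;
}

__global__ void fill_csr(int n_rows, int n_cols, const double *dense,
                         const int *row_ptrs, int *col_idxs, double *values)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= n_rows) return;
    int out = row_ptrs[row];                      // first slot owned by this row
    for (int col = 0; col < n_cols; ++col) {
        const double v = dense[row * n_cols + col];
        if (v != 0.0) {
            col_idxs[out] = col;
            values[out] = v;
            ++out;
        }
    }
}

// Host driver: row_ptrs holds n_rows+1 entries; the exclusive scan of the
// per-row counts yields both the write offsets and, in its last entry, the
// total number of nonzeros.
void dense_to_csr(int n_rows, int n_cols, const double *d_dense,
                  thrust::device_vector<int> &row_ptrs,
                  thrust::device_vector<int> &col_idxs,
                  thrust::device_vector<double> &values)
{
    const int block = 256;
    const int grid = (n_rows + block - 1) / block;
    row_ptrs.assign(n_rows + 1, 0);
    count_row_nnz<<<grid, block>>>(n_rows, n_cols, d_dense,
                                   row_ptrs.data().get());
    thrust::exclusive_scan(row_ptrs.begin(), row_ptrs.end(), row_ptrs.begin());
    const int total_nnz = row_ptrs.back();        // copies one int to the host
    col_idxs.resize(total_nnz);
    values.resize(total_nnz);
    fill_csr<<<grid, block>>>(n_rows, n_cols, d_dense, row_ptrs.data().get(),
                              col_idxs.data().get(), values.data().get());
}
// The same count -> scan -> fill idea also drives convert_to_sellp above, with
// a warp-level max reduction per slice replacing the simple per-row count.
// ---------------------------------------------------------------------------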
d09c2106ee9e2be406e613641b296d36956672a3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // by Jan Eric Kyprianidis <www.kyprianidis.com> // Copyright (C) 2010-2012 Computer Graphics Systems Group at the // Hasso-Plattner-Institut, Potsdam, Germany <www.hpi3d.de> // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // #include <oz/gpu_image.h> #include <oz/gpu_binder.h> #include <oz/launch_config.h> #include <oz/gpu_plm2.h> using namespace oz; /* __global__ void imp_mag_diff( gpu_plm2<float> dst, const gpu_plm2<float> src0, const gpu_plm2<float> src1 ) { const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y; if (ix >= dst.w || iy >= dst.h) return; dst(ix, iy) = fmax( 0.0f, src0(ix, iy) - src1(ix, iy) ); } gpu_image<float> mag_diff( const gpu_image<float>& src0, const gpu_image<float>& src1 ) { gpu_image<float> dst(src0.size()); imp_mag_diff<<<dst.blocks(), dst.threads()>>>(dst, src0, src1); GPU_CHECK_ERROR(); return dst; } */ static texture<float4, 2, hipReadModeElementType> texSRC4; /* static __device__ float kstep(float x, float K, float B1, float B2) { if (x < B1) return K; if (x > B2) return 0; return K - (x - B1) / (B2 - B1); } */ __global__ void imp_color_gdog( gpu_plm2<float3> dst, const gpu_plm2<float4> tfab, float sigma_e, float sigma_r, float precision, float tau ) { const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y; if (ix >= dst.w || iy >= dst.h) return; float4 t = tfab(ix, iy); float2 n = make_float2(t.y, -t.x); float2 nabs = fabs(n); float ds = 1.0f / ((nabs.x > nabs.y)? 
nabs.x : nabs.y); float twoSigmaE2 = 2 * sigma_e * sigma_e; float twoSigmaR2 = 2 * sigma_r * sigma_r; float halfWidth = precision * sigma_r; float3 c0 = make_float3(tex2D(texSRC4, ix, iy)); float3 sumE = c0; float3 sumR = sumE; float2 norm = make_float2(1, 1); for( float d = ds; d <= halfWidth; d += ds ) { float kE = __expf( -d * d / twoSigmaE2 ); float kR = __expf( -d * d / twoSigmaR2 ); float2 o = d*n; float3 c = make_float3(tex2D( texSRC4, 0.5f + ix - o.x, 0.5f + iy - o.y)) + make_float3(tex2D( texSRC4, 0.5f + ix + o.x, 0.5f + iy + o.y)); sumE += kE * c; sumR += kR * c; norm += 2 * make_float2(kE, kR); } sumE /= norm.x; sumR /= norm.y; float3 hp = sumE - sumR; dst.write(ix, iy, hp); } gpu_image color_gdog( const gpu_image& src, const gpu_image& tfab, float sigma_e, float sigma_r, float precision, float tau ) { gpu_image dst(src.size(), FMT_FLOAT3); gpu_binder<float3> src_(texSRC4, src); launch_config cfg(dst); hipLaunchKernelGGL(( imp_color_gdog), dim3(cfg.blocks()), dim3(cfg.threads()), 0, 0, dst, tfab, sigma_e, sigma_r, precision, tau); OZ_CUDA_ERROR_CHECK(); return dst; } __global__ void imp_chroma_sharp( gpu_plm2<float> dst, const gpu_plm2<float> L, const gpu_plm2<float3> HP, float K, float B1, float B2 ) { const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y; if (ix >= dst.w || iy >= dst.h) return; float l = L(ix, iy); float3 hp = HP(ix, iy); float chp = sqrtf( hp.x*hp.x + hp.y*hp.y + hp.z*hp.z ); dst.write(ix, iy, sign(hp.x) * chp); } gpu_image chroma_sharp( const gpu_image& L, const gpu_image& hp, float K, float B1, float B2) { gpu_image dst(L.size(), FMT_FLOAT); launch_config cfg(dst); hipLaunchKernelGGL(( imp_chroma_sharp), dim3(cfg.blocks()), dim3(cfg.threads()), 0, 0, dst, L, hp, K, B1, B2); OZ_CUDA_ERROR_CHECK(); return dst; }
d09c2106ee9e2be406e613641b296d36956672a3.cu
// // by Jan Eric Kyprianidis <www.kyprianidis.com> // Copyright (C) 2010-2012 Computer Graphics Systems Group at the // Hasso-Plattner-Institut, Potsdam, Germany <www.hpi3d.de> // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // #include <oz/gpu_image.h> #include <oz/gpu_binder.h> #include <oz/launch_config.h> #include <oz/gpu_plm2.h> using namespace oz; /* __global__ void imp_mag_diff( gpu_plm2<float> dst, const gpu_plm2<float> src0, const gpu_plm2<float> src1 ) { const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y; if (ix >= dst.w || iy >= dst.h) return; dst(ix, iy) = fmax( 0.0f, src0(ix, iy) - src1(ix, iy) ); } gpu_image<float> mag_diff( const gpu_image<float>& src0, const gpu_image<float>& src1 ) { gpu_image<float> dst(src0.size()); imp_mag_diff<<<dst.blocks(), dst.threads()>>>(dst, src0, src1); GPU_CHECK_ERROR(); return dst; } */ static texture<float4, 2, cudaReadModeElementType> texSRC4; /* static __device__ float kstep(float x, float K, float B1, float B2) { if (x < B1) return K; if (x > B2) return 0; return K - (x - B1) / (B2 - B1); } */ __global__ void imp_color_gdog( gpu_plm2<float3> dst, const gpu_plm2<float4> tfab, float sigma_e, float sigma_r, float precision, float tau ) { const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y; if (ix >= dst.w || iy >= dst.h) return; float4 t = tfab(ix, iy); float2 n = make_float2(t.y, -t.x); float2 nabs = fabs(n); float ds = 1.0f / ((nabs.x > nabs.y)? 
nabs.x : nabs.y); float twoSigmaE2 = 2 * sigma_e * sigma_e; float twoSigmaR2 = 2 * sigma_r * sigma_r; float halfWidth = precision * sigma_r; float3 c0 = make_float3(tex2D(texSRC4, ix, iy)); float3 sumE = c0; float3 sumR = sumE; float2 norm = make_float2(1, 1); for( float d = ds; d <= halfWidth; d += ds ) { float kE = __expf( -d * d / twoSigmaE2 ); float kR = __expf( -d * d / twoSigmaR2 ); float2 o = d*n; float3 c = make_float3(tex2D( texSRC4, 0.5f + ix - o.x, 0.5f + iy - o.y)) + make_float3(tex2D( texSRC4, 0.5f + ix + o.x, 0.5f + iy + o.y)); sumE += kE * c; sumR += kR * c; norm += 2 * make_float2(kE, kR); } sumE /= norm.x; sumR /= norm.y; float3 hp = sumE - sumR; dst.write(ix, iy, hp); } gpu_image color_gdog( const gpu_image& src, const gpu_image& tfab, float sigma_e, float sigma_r, float precision, float tau ) { gpu_image dst(src.size(), FMT_FLOAT3); gpu_binder<float3> src_(texSRC4, src); launch_config cfg(dst); imp_color_gdog<<<cfg.blocks(), cfg.threads()>>>(dst, tfab, sigma_e, sigma_r, precision, tau); OZ_CUDA_ERROR_CHECK(); return dst; } __global__ void imp_chroma_sharp( gpu_plm2<float> dst, const gpu_plm2<float> L, const gpu_plm2<float3> HP, float K, float B1, float B2 ) { const int ix = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; const int iy = __mul24(blockDim.y, blockIdx.y) + threadIdx.y; if (ix >= dst.w || iy >= dst.h) return; float l = L(ix, iy); float3 hp = HP(ix, iy); float chp = sqrtf( hp.x*hp.x + hp.y*hp.y + hp.z*hp.z ); dst.write(ix, iy, sign(hp.x) * chp); } gpu_image chroma_sharp( const gpu_image& L, const gpu_image& hp, float K, float B1, float B2) { gpu_image dst(L.size(), FMT_FLOAT); launch_config cfg(dst); imp_chroma_sharp<<<cfg.blocks(), cfg.threads()>>>(dst, L, hp, K, B1, B2); OZ_CUDA_ERROR_CHECK(); return dst; }
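// ---------------------------------------------------------------------------
// imp_color_gdog above is a 1-D difference-of-Gaussians taken along the
// direction n = (t.y, -t.x), i.e. perpendicular to the flow/tangent field:
// two Gaussian sums (sigma_e and sigma_r) are accumulated over the same line
// of samples and their difference is the high-pass output. The sketch below
// shows that accumulation for a single-channel image without the texture
// unit; bilinear texture sampling is replaced by nearest-neighbour clamping
// purely to keep the example short, so this is an approximation of the kernel
// above, not a drop-in replacement, and the names gray_gdog / sample_clamped
// are illustrative only.
// ---------------------------------------------------------------------------
__device__ float sample_clamped(const float *img, int w, int h, float x, float y)
{
    int ix = min(max(__float2int_rn(x), 0), w - 1);
    int iy = min(max(__float2int_rn(y), 0), h - 1);
    return img[iy * w + ix];
}

__global__ void gray_gdog(float *dst, const float *src, const float2 *tangent,
                          int w, int h, float sigma_e, float sigma_r,
                          float precision)
{
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    int iy = blockIdx.y * blockDim.y + threadIdx.y;
    if (ix >= w || iy >= h) return;

    float2 t = tangent[iy * w + ix];
    float2 n = make_float2(t.y, -t.x);               // step across the flow
    float ds = 1.0f / fmaxf(fabsf(n.x), fabsf(n.y)); // roughly one pixel per step

    float twoSigmaE2 = 2.0f * sigma_e * sigma_e;
    float twoSigmaR2 = 2.0f * sigma_r * sigma_r;
    float halfWidth = precision * sigma_r;           // wider Gaussian bounds the sweep

    float c0 = src[iy * w + ix];
    float sumE = c0, sumR = c0, normE = 1.0f, normR = 1.0f;
    for (float d = ds; d <= halfWidth; d += ds) {
        float kE = __expf(-d * d / twoSigmaE2);
        float kR = __expf(-d * d / twoSigmaR2);
        float c = sample_clamped(src, w, h, ix - d * n.x, iy - d * n.y)
                + sample_clamped(src, w, h, ix + d * n.x, iy + d * n.y);
        sumE += kE * c;  normE += 2.0f * kE;
        sumR += kR * c;  normR += 2.0f * kR;
    }
    dst[iy * w + ix] = sumE / normE - sumR / normR;  // high-pass response
}
// ---------------------------------------------------------------------------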
d637ac3c2d150e1d4022547eb027b52ad4afe2ea.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "nspca_cuda.h" #include "view.h" namespace NSPCA { __device__ __forceinline__ void solve_p_positive(double *ATZ, double *Pptr, const size_t N, const size_t P, const size_t p, const int i, const int j, const double n_lambda, const double n_scale_square) { cuView<double> AtZview(ATZ, p, P); cuView<double> PView(Pptr, p, P); double tmp = 2.0 * AtZview(i, j); double t = -n_lambda + tmp; if (t > 0) { PView(i, j) = t / (2.0 * n_scale_square); } else { PView(i, j) = 0; } } __device__ __forceinline__ void solve_p_negative(double *ATZ, double *Pptr, const size_t N, const size_t P, const size_t p, const int i, const int j, const double n_lambda, const double n_scale_square) { cuView<double> AtZview(ATZ, p, P); cuView<double> PView(Pptr, p, P); double tmp = 2.0 * AtZview(i, j); double t = n_lambda + tmp; if (t < 0) { PView(i, j) = t / (2 * n_scale_square); } } __device__ __forceinline__ void solve_p_general(double *ATZ, double *Pptr, const size_t N, const size_t P, const size_t p, const int i, const int j, const double n_lambda, const double n_scale_square) { cuView<double> AtZview(ATZ, p, P); cuView<double> PView(Pptr, p, P); // double t = (2 * ZtAview(i, j) - n_lambda); // if (t > 0) { // PView(i, j) = t / (2 * n_scale_square); // } else { // t = (2 * ZtAview(i, j) + n_lambda); // { // if (t < 0) { // PView(i, j) = t / (2 * n_scale_square); // } // } // } double tmp = 2.0 * AtZview(i, j); double t = -n_lambda + tmp; if (t > 0) { PView(i, j) = t / (2.0 * n_scale_square); } else { t = n_lambda + tmp; if (t < 0) { PView(i, j) = t / (2.0 * n_scale_square); } else { PView(i, j) = 0; } } // if self.n_alpha - 2.0 * temp_t < 0: // self.component_loading[row, col] = -(self.n_alpha - 2.0 * temp_t) / (2.0 * self.n_scale_square) // elif self.n_alpha + 2.0 * temp_t < 0: // self.component_loading[row, col] = (self.n_alpha + 2.0 * temp_t) / (2.0 * self.n_scale_square) // else: // self.component_loading[row, col] = 0.0 } template<unsigned int numThreads> __global__ void solve_p_in_nspca(double *devp, const size_t N, const size_t P, const size_t p, double *ATZ, int *restriction, const double n_lambda, const double n_scale_square) { const int tid = threadIdx.x; const int offset = numThreads * blockIdx.x; cuView<int> resView(restriction, p, P); cuView<double> AtZview = cuView<double>(ATZ, p, P); for (int index = tid + offset; index < p * P; index += numThreads * blockDim.x) { int j = index / p; int i = index - j * p; // printf("Row %d and col %d \n", i ,j); // AtZview(i, j) = 0.0; if (resView(i, j) == 2) { solve_p_general(ATZ, devp, N, P, p, i, j, n_lambda, n_scale_square); } else if (resView(i, j) == 1) { solve_p_positive(ATZ, devp, N, P, p, i, j, n_lambda, n_scale_square); } else if (resView(i, j) == -1) { solve_p_negative(ATZ, devp, N, P, p, i, j, n_lambda, n_scale_square); } } }; void solve_p_nspca(double *devp, const size_t N, const size_t P, const size_t p, double *ATZ, int *restriction, const double lambda, const double scale_square, const unsigned int numThreads, const unsigned int numBlocks, hipStream_t stream) { switch (numThreads) { case (1): solve_p_in_nspca<1> << < numBlocks, 1, 0, stream >> > (devp, N, P, p, ATZ, restriction, lambda, scale_square); break; case (2): solve_p_in_nspca<2> << < numBlocks, 2, 0, stream >> > (devp, N, P, p, ATZ, restriction, lambda, scale_square); break; case (4): solve_p_in_nspca<4> << < numBlocks, 4, 0, stream >> > (devp, N, P, p, ATZ, restriction, lambda, scale_square); break; 
case (8): solve_p_in_nspca<8> << < numBlocks, 8, 0, stream >> > (devp, N, P, p, ATZ, restriction, lambda, scale_square); break; case (16): solve_p_in_nspca<16> << < numBlocks, 16, 0, stream >> > (devp, N, P, p, ATZ, restriction, lambda, scale_square); break; case (32): solve_p_in_nspca<32> << < numBlocks, 32, 0, stream >> > (devp, N, P, p, ATZ, restriction, lambda, scale_square); break; case (64): solve_p_in_nspca<64> << < numBlocks, 64, 0, stream >> > (devp, N, P, p, ATZ, restriction, lambda, scale_square); break; case (128): solve_p_in_nspca<128> << < numBlocks, 128, 0, stream >> > (devp, N, P, p, ATZ, restriction, lambda, scale_square); break; case (256): solve_p_in_nspca<256> << < numBlocks, 256, 0, stream >> > (devp, N, P, p, ATZ, restriction, lambda, scale_square); break; case (512): solve_p_in_nspca<512> << < numBlocks, 512, 0, stream >> > (devp, N, P, p, ATZ, restriction, lambda, scale_square); break; case (1024): solve_p_in_nspca<1024> << < numBlocks, 1024, 0, stream >> > (devp, N, P, p, ATZ, restriction, lambda, scale_square); break; } } }
d637ac3c2d150e1d4022547eb027b52ad4afe2ea.cu
#include "nspca_cuda.h" #include "view.h" namespace NSPCA { __device__ __forceinline__ void solve_p_positive(double *ATZ, double *Pptr, const size_t N, const size_t P, const size_t p, const int i, const int j, const double n_lambda, const double n_scale_square) { cuView<double> AtZview(ATZ, p, P); cuView<double> PView(Pptr, p, P); double tmp = 2.0 * AtZview(i, j); double t = -n_lambda + tmp; if (t > 0) { PView(i, j) = t / (2.0 * n_scale_square); } else { PView(i, j) = 0; } } __device__ __forceinline__ void solve_p_negative(double *ATZ, double *Pptr, const size_t N, const size_t P, const size_t p, const int i, const int j, const double n_lambda, const double n_scale_square) { cuView<double> AtZview(ATZ, p, P); cuView<double> PView(Pptr, p, P); double tmp = 2.0 * AtZview(i, j); double t = n_lambda + tmp; if (t < 0) { PView(i, j) = t / (2 * n_scale_square); } } __device__ __forceinline__ void solve_p_general(double *ATZ, double *Pptr, const size_t N, const size_t P, const size_t p, const int i, const int j, const double n_lambda, const double n_scale_square) { cuView<double> AtZview(ATZ, p, P); cuView<double> PView(Pptr, p, P); // double t = (2 * ZtAview(i, j) - n_lambda); // if (t > 0) { // PView(i, j) = t / (2 * n_scale_square); // } else { // t = (2 * ZtAview(i, j) + n_lambda); // { // if (t < 0) { // PView(i, j) = t / (2 * n_scale_square); // } // } // } double tmp = 2.0 * AtZview(i, j); double t = -n_lambda + tmp; if (t > 0) { PView(i, j) = t / (2.0 * n_scale_square); } else { t = n_lambda + tmp; if (t < 0) { PView(i, j) = t / (2.0 * n_scale_square); } else { PView(i, j) = 0; } } // if self.n_alpha - 2.0 * temp_t < 0: // self.component_loading[row, col] = -(self.n_alpha - 2.0 * temp_t) / (2.0 * self.n_scale_square) // elif self.n_alpha + 2.0 * temp_t < 0: // self.component_loading[row, col] = (self.n_alpha + 2.0 * temp_t) / (2.0 * self.n_scale_square) // else: // self.component_loading[row, col] = 0.0 } template<unsigned int numThreads> __global__ void solve_p_in_nspca(double *devp, const size_t N, const size_t P, const size_t p, double *ATZ, int *restriction, const double n_lambda, const double n_scale_square) { const int tid = threadIdx.x; const int offset = numThreads * blockIdx.x; cuView<int> resView(restriction, p, P); cuView<double> AtZview = cuView<double>(ATZ, p, P); for (int index = tid + offset; index < p * P; index += numThreads * blockDim.x) { int j = index / p; int i = index - j * p; // printf("Row %d and col %d \n", i ,j); // AtZview(i, j) = 0.0; if (resView(i, j) == 2) { solve_p_general(ATZ, devp, N, P, p, i, j, n_lambda, n_scale_square); } else if (resView(i, j) == 1) { solve_p_positive(ATZ, devp, N, P, p, i, j, n_lambda, n_scale_square); } else if (resView(i, j) == -1) { solve_p_negative(ATZ, devp, N, P, p, i, j, n_lambda, n_scale_square); } } }; void solve_p_nspca(double *devp, const size_t N, const size_t P, const size_t p, double *ATZ, int *restriction, const double lambda, const double scale_square, const unsigned int numThreads, const unsigned int numBlocks, cudaStream_t stream) { switch (numThreads) { case (1): solve_p_in_nspca<1> << < numBlocks, 1, 0, stream >> > (devp, N, P, p, ATZ, restriction, lambda, scale_square); break; case (2): solve_p_in_nspca<2> << < numBlocks, 2, 0, stream >> > (devp, N, P, p, ATZ, restriction, lambda, scale_square); break; case (4): solve_p_in_nspca<4> << < numBlocks, 4, 0, stream >> > (devp, N, P, p, ATZ, restriction, lambda, scale_square); break; case (8): solve_p_in_nspca<8> << < numBlocks, 8, 0, stream >> > (devp, N, P, p, ATZ, 
restriction, lambda, scale_square); break; case (16): solve_p_in_nspca<16> << < numBlocks, 16, 0, stream >> > (devp, N, P, p, ATZ, restriction, lambda, scale_square); break; case (32): solve_p_in_nspca<32> << < numBlocks, 32, 0, stream >> > (devp, N, P, p, ATZ, restriction, lambda, scale_square); break; case (64): solve_p_in_nspca<64> << < numBlocks, 64, 0, stream >> > (devp, N, P, p, ATZ, restriction, lambda, scale_square); break; case (128): solve_p_in_nspca<128> << < numBlocks, 128, 0, stream >> > (devp, N, P, p, ATZ, restriction, lambda, scale_square); break; case (256): solve_p_in_nspca<256> << < numBlocks, 256, 0, stream >> > (devp, N, P, p, ATZ, restriction, lambda, scale_square); break; case (512): solve_p_in_nspca<512> << < numBlocks, 512, 0, stream >> > (devp, N, P, p, ATZ, restriction, lambda, scale_square); break; case (1024): solve_p_in_nspca<1024> << < numBlocks, 1024, 0, stream >> > (devp, N, P, p, ATZ, restriction, lambda, scale_square); break; } } }
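// ---------------------------------------------------------------------------
// solve_p_general above is the scalar soft-thresholding (L1 proximal) update
//   P(i,j) = sign(2*AtZ(i,j)) * max(|2*AtZ(i,j)| - n_lambda, 0) / (2*n_scale_square),
// with solve_p_positive / solve_p_negative clamping the result to the sign
// allowed by the restriction matrix (+1 / -1), and the host-side switch over
// numThreads instantiating solve_p_in_nspca<N> so the block size is a
// compile-time constant. A compact equivalent of the unconstrained branch is
// sketched below; the helper name and the use of copysign are illustrative,
// not part of the NSPCA sources.
// ---------------------------------------------------------------------------
__device__ __forceinline__ double soft_threshold(double two_atz, double n_lambda,
                                                 double two_scale_sq)
{
    double mag = fabs(two_atz) - n_lambda;        // shrink the magnitude by n_lambda
    return (mag > 0.0) ? copysign(mag, two_atz) / two_scale_sq : 0.0;
}

// Hypothetical usage inside the elementwise loop for the unconstrained case
// (resView(i, j) == 2):
//   PView(i, j) = soft_threshold(2.0 * AtZview(i, j), n_lambda,
//                                2.0 * n_scale_square);
// ---------------------------------------------------------------------------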
d1a9258c5f889e7a719afdcba111a25298ee59cf.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <iostream> #include <ostream> #include <fstream> #include <sys/time.h> #include <time.h> using namespace std; #define CASENAME "mrt_test" #define NUMGPU 1 #define BLOCKSIZEX 64 #define BLOCKSIZEY 1 #define BLOCKSIZEZ 1 #define BLOCKSIZELRX 64 #define BLOCKSIZELRY 1 #define BLOCKSIZELRZ 1 #define BLOCKSIZEINTERP 8 #define XDIM 64 #define YDIM 124 #define ZDIM 20 //62 #define TMAX 10000 #define STARTF 3000000 #define DYNY1 123 #define DYNY2 1 #define KP 0.0f //p-control constant #define OBSTR1 31.f #define OBSTX1 31.5f #define OBSTY1 92.5f #define OBSTZ1 32.5f #define OBSTR2 10.f #define OBSTX2 25.5f #define OBSTY2 25.5f #define OBSTZ2 32.5f #define LRFACTOR 0.5f #define LRLEVEL 2 #define LRX0 128.25f //minimum x coord of LR #define XLRDIM 128 //number of nodes in x #define LRY0 64.25f #define YLRDIM 80 #define LRZ0 -0.75f #define ZLRDIM 8 #define ORDER 2 //order of accuracy of interpolation #define RE 5400.f//2000.f//100.f; #define UMAX 0.06f #define SmagLES 1 //1,0 #define MODEL "MRT" //BGK,MRT,STREAM #define REFINEMENT 0 //1,0 #define CS 0.02f #define DPDX 0.f #define DPDY -5.16e-7 #define VELAV 1 #define START_VELAV 200000 #define START_VELFLUC 1600000 inline __device__ int ImageFcnLR(float x, float y, float z) { int value = 0; if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1) { value = 10; } return value; } inline __device__ int ImageFcn(int x, int y, int z, int t) { int value = 0; if(abs(x-OBSTX2) < OBSTR2 && abs(y-OBSTY2) < OBSTR2 && t < 5000) value = 10; if(abs(x-OBSTX2-3) < OBSTR2 && abs(y-OBSTY2-3) < OBSTR2 && t < 5000 && z == 10) value = 10; //if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1) // value = 10; if(x == 0) value = 1;//50;//400; else if(x == XDIM-1) value = 1;//51;//300; // else if(y == 0) // value = 52;//1;//22; //// else if(y == DYNY1) //// value = 54;//1;//22; // else if(y == YDIM-1) // value = 54; //100; return value; } inline __device__ float PoisProf (float x){ float radius = (YDIM-1-1)*0.5f; float result = -1.5f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f); return (result); } inline __device__ float PoisProf3D (float x, float y){ x = x-0.5f; y = y-0.5f; //float H = 41.f; return UMAX;//2.25f*16.f*UMAX*x*y*(H-x)*(H-y)/((H)*(H)*(H)*(H)); // float radius = (YDIM-1-1)*0.5f; // float result = -1.0f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f); // return (result); } int timeval_subtract (double *result, struct timeval *x, struct timeval *y) { struct timeval result0; /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (y->tv_usec - x->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. tv_usec is certainly positive. */ result0.tv_sec = x->tv_sec - y->tv_sec; result0.tv_usec = x->tv_usec - y->tv_usec; *result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } __device__ int dmin(int a, int b) { if (a<b) return a; else return b-1; } __device__ int dmax(int a) { if (a>-1) return a; else return 0; } __device__ int dmax(int a,int b) { if (a>b) return a; else return b; } __device__ int dmin_p(int a, int b) { if (a<b) return a; else return 0; } __device__ int dmax_p(int a, int b) { if (a>-1) return a; else return b-1; } inline __device__ float trilinear_interp (float v000, float v001, float v010, float v011, float v100, float v101, float v110, float v111, float x, float y, float z){ return v000*(1.f-x)*(1.f-y)*(1.f-z)+ v001*( x)*(1.f-y)*(1.f-z)+ v010*(1.f-x)*( y)*(1.f-z)+ v011*( x)*( y)*(1.f-z)+ v100*(1.f-x)*(1.f-y)*( z)+ v101*( x)*(1.f-y)*( z)+ v110*(1.f-x)*( y)*( z)+ v111*( x)*( y)*( z); } inline __device__ int f_mem(int f_num, int x, int y, int z, size_t pitch, int zInner) { if(y > YDIM-1) y = 0; if(y < 0) y = YDIM-1; int index = (x+y*pitch+z*YDIM*pitch)+f_num*pitch*YDIM*(zInner); index = dmax(index); index = dmin(index,19*pitch*YDIM*(zInner)); return index; } inline __device__ int f_memLR(int f_num, int x, int y, int z, size_t pitch, int zInner) { int index = (x+y*pitch+z*YLRDIM*pitch)+f_num*pitch*YLRDIM*(zInner); index = dmax(index); index = dmin(index,19*pitch*YLRDIM*(zInner)); return index; } inline __device__ int f_mem_interp(int m_num, int x, int y, int z, int pitch, int zInner) { int index = (x+y*pitch+z*(YLRDIM*LRFACTOR+1)*pitch)+m_num*pitch*(YLRDIM*LRFACTOR+1)*(zInner); index = dmax(index); index = dmin(index,9*pitch*(YLRDIM*LRFACTOR+1)*(zInner)); return index; } inline __device__ int buff_mem_interp(int m_num, int x, int y, int pitch, int zInner) { int index = (x+y*pitch+m_num*(YLRDIM*LRFACTOR+1)*pitch); index = dmax(index); index = dmin(index,9*pitch*(YLRDIM*LRFACTOR+1)); return index; } inline __device__ int buff_mem(int f_num, int x, int y, size_t pitch) { if(y > YDIM-1) y = 0; if(y < 0) y = YDIM-1; int index = (x+y*pitch)+f_num*pitch*YDIM; index = dmax(index); index = dmin(index,19*pitch*YDIM); return index; } inline __device__ int buff_memLR(int f_num, int x, int y, size_t pitch) { int index = (x+y*pitch)+f_num*pitch*YLRDIM; index = dmax(index); index = dmin(index,19*pitch*YLRDIM); return index; } inline __device__ void AddForce(float* f, float dpdy) { // f[1] -= 0.0555555556f*3.f*DPDX; // f[3] += 0.0555555556f*3.f*DPDX; // f[5] -= 0.0277777778f*3.f*DPDX; // f[6] += 0.0277777778f*3.f*DPDX; // f[7] += 0.0277777778f*3.f*DPDX; // f[8] -= 0.0277777778f*3.f*DPDX; // f[10]-= 0.0277777778f*3.f*DPDX; // f[12]+= 0.0277777778f*3.f*DPDX; // f[15]-= 0.0277777778f*3.f*DPDX; // f[17]+= 0.0277777778f*3.f*DPDX; f[2] -= 0.0555555556f*3.f*dpdy; f[4] += 0.0555555556f*3.f*dpdy; f[5] -= 0.0277777778f*3.f*dpdy; f[6] -= 0.0277777778f*3.f*dpdy; f[7] += 0.0277777778f*3.f*dpdy; f[8] += 0.0277777778f*3.f*dpdy; f[11]-= 0.0277777778f*3.f*dpdy; f[13]+= 0.0277777778f*3.f*dpdy; f[16]-= 0.0277777778f*3.f*dpdy; f[18]+= 0.0277777778f*3.f*dpdy; } inline __device__ void Moments(float* f, float* m) { m[0 ] = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18]; m[1 ] = -30.f*f[0]+-11.f*f[1]+-11.f*f[2]+-11.f*f[3]+-11.f*f[4]+ 8.f*f[5]+ 8.f*f[6]+ 8.f*f[7]+ 8.f*f[8]+-11.f*f[9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18]; m[2 ] = 12.f*f[0]+ -4.f*f[1]+ -4.f*f[2]+ -4.f*f[3]+ -4.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18]; m[3 ] = 
f[1]-f[3]+f[5]-f[6]-f[7]+f[8]+f[10]-f[12]+f[15]-f[17]; m[4 ] = -4.f*f[1] + 4.f*f[3] + f[5]+ - f[6]+ - f[7]+ f[8] + f[10] + - f[12] + f[15] + - f[17] ; m[5 ] = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; m[6 ] = -4.f*f[2] + 4.f*f[4]+ f[5]+ f[6]+ - f[7]+ - f[8] + f[11] + - f[13] + f[16] + - f[18]; m[7 ] = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; m[8 ] = + -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18]; m[9 ] = 2.f*f[1]+ - f[2]+ 2.f*f[3]+ - f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[10] = -4.f*f[1]+ 2.f*f[2]+ -4.f*f[3]+ 2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[11] = f[2] + f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ; m[12] = -2.f*f[2] -2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ; m[13] = f[5]+ - f[6]+ f[7]+ - f[8] ; m[14] = f[11] + - f[13] + - f[16] + f[18]; m[15] = f[10] + - f[12] + - f[15] + f[17] ; m[16] = f[5]+ - f[6]+ - f[7]+ f[8] - f[10] + f[12] + - f[15] + f[17] ; m[17] = - f[5]+ - f[6]+ f[7]+ f[8] + f[11] + - f[13] + f[16] + - f[18]; m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18]; } void Moments_host(float* f, float* m) { m[0 ] = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18]; m[1 ] = -30.f*f[0]+-11.f*f[1]+-11.f*f[2]+-11.f*f[3]+-11.f*f[4]+ 8.f*f[5]+ 8.f*f[6]+ 8.f*f[7]+ 8.f*f[8]+-11.f*f[9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18]; m[2 ] = 12.f*f[0]+ -4.f*f[1]+ -4.f*f[2]+ -4.f*f[3]+ -4.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18]; m[3 ] = f[1]-f[3]+f[5]-f[6]-f[7]+f[8]+f[10]-f[12]+f[15]-f[17]; m[4 ] = -4.f*f[1] + 4.f*f[3] + f[5]+ - f[6]+ - f[7]+ f[8] + f[10] + - f[12] + f[15] + - f[17] ; m[5 ] = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; m[6 ] = -4.f*f[2] + 4.f*f[4]+ f[5]+ f[6]+ - f[7]+ - f[8] + f[11] + - f[13] + f[16] + - f[18]; m[7 ] = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; m[8 ] = + -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18]; m[9 ] = 2.f*f[1]+ - f[2]+ 2.f*f[3]+ - f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[10] = -4.f*f[1]+ 2.f*f[2]+ -4.f*f[3]+ 2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[11] = f[2] + f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ; m[12] = -2.f*f[2] -2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ; m[13] = f[5]+ - f[6]+ f[7]+ - f[8] ; m[14] = f[11] + - f[13] + - f[16] + f[18]; m[15] = f[10] + - f[12] + - f[15] + f[17] ; m[16] = f[5]+ - f[6]+ - f[7]+ f[8] - f[10] + f[12] + - f[15] + f[17] ; m[17] = - f[5]+ - f[6]+ f[7]+ f[8] + f[11] + - f[13] + f[16] + - f[18]; m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18]; } void InvertMoments_host(float* f, float* m) { float u = m[3]; float v = m[5]; float w = m[7]; f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2])); f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 
0.055555556f*((m[9])-m[10])); f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10])); f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); } inline __device__ void mrt_meq(float* meq, float rho, float u, float v, float w) { meq[ 0] = rho; meq[ 1] = 
-11.f*rho+19.f*(u*u+v*v+w*w); meq[ 2] = 7.53968254f*(u*u+v*v+w*w);; meq[ 3] = u; meq[ 4] = -0.666666667f*u; meq[ 5] = v; meq[ 6] = -0.666666667f*v; meq[ 7] = w; meq[ 8] = -0.666666667f*w; meq[ 9] = 2.f*u*u-(v*v+w*w); meq[11] = v*v-w*w; meq[13] = u*v; meq[14] = v*w; meq[15] = u*w; } inline __device__ void bgk_meq(float* meq, float rho, float u, float v, float w) { meq[ 0] = rho; meq[ 1] = -11.f*rho+19.f*(u*u+v*v+w*w); meq[ 2] = 3.f*rho-5.5f*(u*u+v*v+w*w);; meq[ 3] = u; meq[ 4] = -0.666666667f*u; meq[ 5] = v; meq[ 6] = -0.666666667f*v; meq[ 7] = w; meq[ 8] = -0.666666667f*w; meq[ 9] = 2.f*u*u-(v*v+w*w); meq[10] = -0.5f*meq[9]*0.333333333333f; meq[11] = v*v-w*w; meq[12] = -0.5f*meq[11]; meq[13] = u*v; meq[14] = v*w; meq[15] = u*w; } //outputs strain rate tensor (Sxx,Syy,Szz,Sxy,Syz,Sxz) from inputs (m0,m3,m5,m7,m9,m11,m13,m14,m15) inline __device__ void StrainRate(float* S, float* m_strain, float dx) { float omega = 1.f; float m1 = 0.f;//(-11.f*m_strain[0]+19.f*(m_strain[1]*m_strain[1]+m_strain[2]*m_strain[2]+m_strain[3]*m_strain[3])); float u = m_strain[1]; float v = m_strain[2]; float w = m_strain[3]; float m9 = m_strain[4]-(2.f*u*u-(v*v+w*w)); float m11= m_strain[5]-(v*v-w*w); float m13= m_strain[6]-(u*v); float m14= m_strain[7]-(v*w); float m15= m_strain[8]-(u*w); S[0] = -0.026315789f*( m1+19.f*omega* m9); S[1] = -0.013157895f*(2.f*m1-19.f*omega*(m9-3.f*m11)); S[2] = -0.013157895f*(2.f*m1-19.f*omega*(m9+3.f*m11)); S[3] = -1.5f*omega*m13; S[4] = -1.5f*omega*m14; S[5] = -1.5f*omega*m15; //S[0] /= dx; //S[1] /= dx; //S[2] /= dx; //S[3] /= dx; //S[4] /= dx; //S[5] /= dx; } //outputs physical moments (rho,u,v,w,Pxx,Pww,Pxy,Pyz,Pxz) from f inline __device__ void PhysicalMoments(float* mom, float* f) { mom[0] = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18]; mom[1] = f[1]-f[3]+f[5]-f[6]-f[7]+f[8]+f[10]-f[12]+f[15]-f[17]; mom[2] = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; mom[3] = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; mom[4] = 2.f*f[1]+-f[2]+2.f*f[3]+-f[4]+f[5]+f[6]+f[7]+f[8]+-f[9]+f[10]+-2.f*f[11]+f[12]+-2.f*f[13]+-f[14]+f[15]+-2.f*f[16]+f[17]+-2.f*f[18]; mom[5] = f[2]+f[4]+f[5]+f[6]+f[7]+f[8]+-f[9]+-f[10]+-f[12]+-f[14]+-f[15]+-f[17]; mom[6] = f[5]+-f[6]+f[7]+-f[8]; mom[7] = f[11]+-f[13]+-f[16]+f[18]; mom[8] = f[10]+-f[12]+-f[15]+f[17]; } inline __device__ void InvertMoments(float* f, float* m) { float u = m[3]; float v = m[5]; float w = m[7]; f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2])); f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10])); f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10])); f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) 
+0.083333333f*(m[11])+(-0.25f*(m[13])))); f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); } inline __device__ void InvertPhysicalMoments(float* f, float* mom, float SF) { float m[19]={0}; m[ 0] = mom[0]; m[ 1] = (-11.f*mom[0]+19.f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3])); m[ 2] = 7.53968254f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]); m[ 3] = mom[1]; m[ 4] = -0.666666667f*mom[1]; m[ 5] = mom[2]; m[ 6] = -0.666666667f*mom[2]; m[ 7] = mom[3]; m[ 8] = -0.666666667f*mom[3]; m[ 9] = mom[4]*SF+(1.f-SF)*(2.f*mom[1]*mom[1]-(mom[2]*mom[2]+mom[3]*mom[3])); m[11] = mom[5]*SF+(1.f-SF)*(mom[2]*mom[2]-mom[3]*mom[3]); m[13] = mom[6]*SF+(1.f-SF)*mom[1]*mom[2]; m[14] = mom[7]*SF+(1.f-SF)*mom[2]*mom[3]; m[15] = mom[8]*SF+(1.f-SF)*mom[1]*mom[3]; // InvertMoments(f,m); float u = m[3]; float v = m[5]; float w = m[7]; f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2])); f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10])); f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) 
+-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10])); f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); } inline __device__ void InvertPhysicalMoments_LES_fc(float* f, float* mom, float SF, float omega_f) { float tau_f = 1.f/omega_f; float S[6]={0}; StrainRate(S,mom,1.f); float Smag_f = 
sqrt(2.f*(S[0]*S[0]+S[1]*S[1]+S[2]*S[2]+2.f*S[3]*S[3]+2.f*S[4]*S[4]+2.f*S[5]*S[5])); float tau_c = tau_f+0.5f+12.f*Smag_f*CS; tau_c *= 0.5f; float omega_c = 1.f/tau_c; tau_f = tau_f+Smag_f*CS; omega_f = 1.f/tau_f; SF = (1.f-omega_c)*omega_f/(LRFACTOR*omega_c*(1.f-omega_f)); float m[19]={0}; m[ 0] = mom[0]; m[ 1] = (-11.f*mom[0]+19.f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3])); m[ 2] = 7.53968254f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]); m[ 3] = mom[1]; m[ 4] = -0.666666667f*mom[1]; m[ 5] = mom[2]; m[ 6] = -0.666666667f*mom[2]; m[ 7] = mom[3]; m[ 8] = -0.666666667f*mom[3]; m[ 9] = mom[4]*SF+(1.f-SF)*(2.f*mom[1]*mom[1]-(mom[2]*mom[2]+mom[3]*mom[3])); m[11] = mom[5]*SF+(1.f-SF)*(mom[2]*mom[2]-mom[3]*mom[3]); m[13] = mom[6]*SF+(1.f-SF)*mom[1]*mom[2]; m[14] = mom[7]*SF+(1.f-SF)*mom[2]*mom[3]; m[15] = mom[8]*SF+(1.f-SF)*mom[1]*mom[3]; // InvertMoments(f,m); float u = m[3]; float v = m[5]; float w = m[7]; f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2])); f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10])); f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10])); f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*m[0] + 
-0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); } inline __device__ void InvertPhysicalMoments_LES_cf(float* f, float* mom, float SF, float omega_c) { float tau_c = 1.f/omega_c; float S[6]={0}; StrainRate(S,mom,1.f); float Smag_c = sqrt(2.f*(S[0]*S[0]+S[1]*S[1]+S[2]*S[2]+2.f*S[3]*S[3]+2.f*S[4]*S[4]+2.f*S[5]*S[5])); float tau_f = 2.f*tau_c-0.5f+1.5f*Smag_c*CS; float omega_f = 1.f/tau_f; omega_f = 1.f/tau_f; tau_c = tau_c+Smag_c*CS; omega_c = 1.f/tau_c; SF = (LRFACTOR*omega_c*(1.f-omega_f))/((1.f-omega_c)*omega_f); float m[19]={0}; m[ 0] = mom[0]; m[ 1] = (-11.f*mom[0]+19.f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3])); m[ 2] = 7.53968254f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]); m[ 3] = mom[1]; m[ 4] = -0.666666667f*mom[1]; m[ 5] = mom[2]; m[ 6] = -0.666666667f*mom[2]; m[ 7] = mom[3]; m[ 8] = -0.666666667f*mom[3]; m[ 9] = mom[4]*SF+(1.f-SF)*(2.f*mom[1]*mom[1]-(mom[2]*mom[2]+mom[3]*mom[3])); m[11] = mom[5]*SF+(1.f-SF)*(mom[2]*mom[2]-mom[3]*mom[3]); m[13] = mom[6]*SF+(1.f-SF)*mom[1]*mom[2]; m[14] = mom[7]*SF+(1.f-SF)*mom[2]*mom[3]; m[15] = mom[8]*SF+(1.f-SF)*mom[1]*mom[3]; // InvertMoments(f,m); float u = m[3]; float v = m[5]; float w = m[7]; f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2])); f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10])); f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10])); f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) 
+0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); } inline __device__ void mrt_collide(float* f, float omega, float dpdy) { float m[19]; //float u,v,w; m[3] = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; m[5] = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18]; m[7] = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; m[0] = f[ 0]+f[ 1]+f[ 2]+f[ 3]+f[ 4]+f[ 5]+f[ 6]+f[ 7]+f[ 8]+f[ 9]+ f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18]; if(MODEL == "MRT"){ m[ 1] = 19.f*(-f[ 0]+ f[ 5]+f[ 6]+f[ 7]+f[ 8]+f[10]+f[11]+f[12]+f[13]+f[15]+f[16]+f[17]+f[18] -(m[3]*m[3]+m[5]*m[5]+m[7]*m[7]));//+8.f*(f[ 5]+f[ 6]+f[ 7]+f[ 8]+f[10]+f[11]+f[12]+f[13]+f[15]+f[16]+f[17]+f[18]); m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18] -7.53968254f*(m[3]*m[3]+m[5]*m[5]+m[7]*m[7]); m[ 4] = 1.666666667f*(-3.f*f[1]+3.f*f[ 3]+m[3]); m[ 6] = 1.666666667f*(-3.f*f[2]+3.f*f[ 4]+m[5]); m[ 8] = 1.666666667f*(-3.f*f[9]+3.f*f[14]+m[7]); m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+- f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+-2.f*f[13]+- f[14]+ f[15]+ -2.f*f[16]+ f[17]+-2.f*f[18] -(2.f*m[3]*m[3]-(m[5]*m[5]+m[7]*m[7])); m[10] =-4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+-2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ 
f[17]+-2.f*f[18]; m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+- f[ 9]+-f[10] +-f[12] +- f[14]+-f[15] +-f[17] -(m[5]*m[5]-m[7]*m[7]); m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+-f[10] +-f[12] + 2.f*f[14]+-f[15] +-f[17] ; m[13] = f[ 5]+-f[ 6]+ f[ 7]+-f[ 8] -m[3]*m[5]; m[14] = f[11] +- f[13] + - f[16] + f[18] -m[5]*m[7]; m[15] = f[10] + - f[12] +-f[15] + f[17] -m[3]*m[7]; m[16] = f[ 5]+-f[ 6]+-f[ 7]+ f[ 8] -f[10] + f[12] +-f[15] + f[17] ; m[17] = -f[ 5]+-f[ 6]+ f[ 7]+ f[ 8] + f[11] +- f[13] + f[16] +- f[18]; m[18] = f[10]+- f[11]+ f[12]+- f[13] +-f[15]+ f[16]+-f[17]+ f[18]; } if(SmagLES == 1) { float usqr = m[3]*m[3]+m[5]*m[5]+m[7]*m[7]; float u = m[3]; float v = m[5]; float w = m[7]; float rho = m[0]; float feq0, feq1 ,feq2 ,feq3 ,feq4 ,feq5 ,feq6 ,feq7 ,feq8 ,feq9 ,feq10 ,feq11 ,feq12 ,feq13 ,feq14 ,feq15 ,feq16 ,feq17 ,feq18; if(MODEL == "MRT"){ feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ; feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ; feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ; feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ; feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ; feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ; feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ; feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ; feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ; feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w); feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ; feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w); feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w; feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ; feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w); feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ; feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w); feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ; feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ; feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; usqr = u*u+v*v+w*w; feq0 =(0.3333333333f*(rho-1.5f*usqr)); feq1 =(0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); feq2 =(0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr)); feq3 =(0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr)); feq4 
=(0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr)); feq5 =(0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)); feq6 =(0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr)); feq7 =(0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr)); feq8 =(0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)); feq9 =(0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr)); feq10=(0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)); feq11=(0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr)); feq12=(0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr)); feq13=(0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr)); feq14=(0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr)); feq15=(0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)); feq16=(0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr)); feq17=(0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr)); feq18=(0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr)); } else{ usqr = u*u+v*v+w*w; feq0 =(0.3333333333f*(rho-1.5f*usqr)); feq1 =(0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); feq2 =(0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr)); feq3 =(0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr)); feq4 =(0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr)); feq5 =(0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)); feq6 =(0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr)); feq7 =(0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr)); feq8 =(0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)); feq9 =(0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr)); feq10=(0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)); feq11=(0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr)); feq12=(0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr)); feq13=(0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr)); feq14=(0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr)); feq15=(0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)); feq16=(0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr)); feq17=(0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr)); feq18=(0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr)); } float PI11 = (f[1 ]-feq1 )+(f[3 ]-feq3 )+(f[5 ]-feq5 )+(f[6 ]-feq6 )+(f[7 ]-feq7 )+(f[8 ]-feq8 )+(f[10]-feq10)+(f[12]-feq12)+(f[15]-feq15)+(f[17]-feq17); float PI22 = (f[2 ]-feq2 )+(f[4 ]-feq4 )+(f[5 ]-feq5 )+(f[6 ]-feq6 )+(f[7 ]-feq7 )+(f[8 ]-feq8 )+(f[11]-feq11)+(f[13]-feq13)+(f[16]-feq16)+(f[18]-feq18); float PI33 = (f[9 ]-feq9 )+(f[14]-feq14)+(f[10]-feq10)+(f[12]-feq12)+(f[15]-feq15)+(f[17]-feq17)+(f[11]-feq11)+(f[13]-feq13)+(f[16]-feq16)+(f[18]-feq18); float PI12 = (f[5 ]-feq5 )+(f[7 ]-feq7 )-(f[6 ]-feq6 )-(f[8 ]-feq8 ); float PI13 = (f[10]-feq10)+(f[17]-feq17)-(f[12]-feq12)-(f[15]-feq15); float PI23 = (f[11]-feq11)+(f[18]-feq18)-(f[13]-feq13)-(f[16]-feq16); float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13); //float Q = sqrt(Q11*Q11+Q22*Q22+Q33*Q33+2.f*Q12*Q12+2.f*Q23*Q23+2.f*Q13*Q13); float tau0 = 1.f/omega; float tau = 0.5f*tau0+0.5f*sqrt(tau0*tau0+18.f*CS*sqrt(2.f)*Q); omega = 1.f/tau; } if(MODEL == "MRT"){ //f[ 0] -=- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]); //f[ 1] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])*omega-m[10]); //f[ 2] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])*omega-m[10])+ 0.083333333f*((m[11])*omega-m[12]); //f[ 3] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 
0.055555556f*((m[ 9])*omega-m[10]); //f[ 4] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])*omega-m[10])+ 0.083333333f*((m[11])*omega-m[12]); //f[ 5] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))); //f[ 6] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))); //f[ 7] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))); //f[ 8] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))); //f[ 9] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])*omega-m[10])+-0.083333333f*((m[11])*omega-m[12]); //f[10] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))); //f[11] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))); //f[12] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))); //f[13] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))); //f[14] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])*omega-m[10])+-0.083333333f*((m[11])*omega-m[12]); //f[15] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))); //f[16] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))); //f[17] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))); //f[18] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))); Moments(f,m); float meq[19]={0}; //float ftemp[19]={0}; //bgk_meq(meq,m[0],m[3],m[5],m[7]); // //for(int i = 0; i<19; i++) // meq[i] = m[i]-omega*(m[i]-meq[i]); bgk_meq(meq,m[0],m[3],m[5],m[7]); meq[9] = m[9] -omega*(m[9]-meq[9]); meq[11] = m[11]-omega*(m[11]-meq[11]); meq[13] = m[13]-omega*(m[13]-meq[13]); meq[14] = m[14]-omega*(m[14]-meq[14]); meq[15] = m[15]-omega*(m[15]-meq[15]); //meq[1 ] = m[1 ]-1.19f*(m[1 ]-meq[1 ]); //meq[2 ] = m[2 ]-1.4f *(m[2 ]-meq[2 ]); //meq[10] = m[10]-1.4f *(m[10]-meq[10]); //meq[12] = m[12]-1.4f *(m[12]-meq[12]); //meq[4 ] = m[4 ]-1.2f *(m[4 ]-meq[4 ]); //meq[6 ] = m[6 ]-1.2f *(m[6 ]-meq[6 ]); //meq[8 ] = m[8 ]-1.2f *(m[8 ]-meq[8 ]); //meq[16] = 
m[16]-1.98f*(m[16]-meq[16]); //meq[17] = m[17]-1.98f*(m[17]-meq[17]); //meq[18] = m[18]-1.98f*(m[18]-meq[18]); //for(int i = 0; i<19; i++) // meq[i] = m[i]-omega*(m[i]-meq[i]); InvertMoments(f,meq); //for(int i = 0; i<19; i++) // f[i] -= ftemp[i]; } else{ float rho,u,v,w; rho = m[0]; u = m[3]; v = m[5]; w = m[7]; float usqr = u*u+v*v+w*w; f[0 ]-=omega*(f[0 ]-0.3333333333f*(rho-1.5f*usqr)); f[1 ]-=omega*(f[1 ]-0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); f[2 ]-=omega*(f[2 ]-0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr)); f[3 ]-=omega*(f[3 ]-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr)); f[4 ]-=omega*(f[4 ]-0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr)); f[5 ]-=omega*(f[5 ]-0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)); f[6 ]-=omega*(f[6 ]-0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr)); f[7 ]-=omega*(f[7 ]-0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr)); f[8 ]-=omega*(f[8 ]-0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)); f[9 ]-=omega*(f[9 ]-0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr)); f[10]-=omega*(f[10]-0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)); f[11]-=omega*(f[11]-0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr)); f[12]-=omega*(f[12]-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr)); f[13]-=omega*(f[13]-0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr)); f[14]-=omega*(f[14]-0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr)); f[15]-=omega*(f[15]-0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)); f[16]-=omega*(f[16]-0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr)); f[17]-=omega*(f[17]-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr)); f[18]-=omega*(f[18]-0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr)); } AddForce(f,dpdy); } inline __device__ void North_Extrap(float* f, float rho) { float m[19]; //rho = 1.0f; float u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; float v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18]; float w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18]; m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18]; m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ; m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18]; m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ; m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ; m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ; m[14] = f[11] + - f[13] + - f[16] + f[18]; m[15] = f[10] + - f[12] + - f[15] + f[17] ; m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] 
+ f[12] + - f[15] + f[17] ; m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18]; f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2])); f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12])); f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) 
+0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); } inline __device__ void South_Extrap(float* f, float v) { float m[19]; float u = 0.f;//f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; float w = 0.f;//f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; float rho = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18]; m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18]; m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18]; m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ; m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18]; m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ; m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ; m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ; m[14] = f[11] + - f[13] + - f[16] + f[18]; m[15] = f[10] + - f[12] + - f[15] + f[17] ; m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ; m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18]; f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2])); f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) 
+0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12])); f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); } inline __device__ void East_Extrap(float* f, float rho) { float m[19]; //rho = 0.0f; float u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; float v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18]; float w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18]; m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18]; m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ; m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18]; m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - 
f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ; m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ; m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ; m[14] = f[11] + - f[13] + - f[16] + f[18]; m[15] = f[10] + - f[12] + - f[15] + f[17] ; m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ; m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18]; f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2])); f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 
9])-m[10])+-0.083333333f*((m[11])-m[12])); f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); } inline __device__ void West_Extrap(float* f, float u, int t) { float m[19]; float v = 0.f;//f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; float w = 0.f;//f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; //if(t == 1000 || t == 2000 || t == 3000) w = 0.01f; float rho = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18]; m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18]; m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18]; m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ; m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18]; m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ; m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ; m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ; m[14] = f[11] + - f[13] + - f[16] + f[18]; m[15] = f[10] + - f[12] + - f[15] + f[17] ; m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ; m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18]; f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2])); f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 
9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12])); f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); } __device__ void xsymmetry_bot(float* f, int y, int z) { if(y == 0 && z == 0){ f[ 2] = f[ 4]; f[13]=f[18]; f[11]=f[18]; f[16]=f[18]; f[ 6] =f[ 7]; f[ 9] =f[14]; f[12]=f[17]; } else if(y == 0 && z == ZDIM-1){ f[ 4] = f[ 2]; f[11]=f[13]; f[18]=f[13]; f[16]=f[13]; f[ 6] =f[ 7]; f[14]=f[ 9]; f[17]=f[12]; } else if(y == YDIM-1 && z == 0){ f[ 4] = f[ 2]; f[11]=f[16]; f[18]=f[16]; f[13]=f[16]; f[ 7] =f[ 6]; f[ 9] =f[14]; f[12]=f[17]; } else if(y == YDIM-1 && z == ZDIM-1){ f[ 4] = f[ 2]; 
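// Added note (interpretation, not from the original author): xsymmetry_bot appears to
// implement a mirror condition on the x-symmetry plane. The corner/edge cases above and
// below copy the tangential y/z populations from their mirrored counterparts, and the
// common assignments at the end of the function (f[1]=f[3], f[5]=f[6], f[8]=f[7],
// f[10]=f[12], f[15]=f[17]) rebuild the x-direction populations from their reflected
// partners so that no undefined distributions stream in from outside the domain.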
f[16]=f[11]; f[18]=f[11]; f[13]=f[11]; f[ 7] =f[ 6]; f[14]=f[ 9]; f[17]=f[12]; } else{ if(y == 0){ f[ 2] = f[ 4]; f[11]=f[13]; f[16]=f[18]; f[ 8] = f[ 5]; } else if(y == YDIM-1){ f[ 4]=f[ 2] ; f[13]=f[11]; f[18]=f[16]; f[ 5]=f[ 8] ; } } f[ 1] = f[ 3] ; f[ 5] = f[ 6] ; f[ 8] = f[ 7] ; f[10]= f[12]; f[15]= f[17]; } __device__ void xsymmetry_top(float* f, int y, int z) { if(y == 0 && z == 0){ f[ 2] = f[ 4]; f[13] = f[18]; f[11] = f[18]; f[16] = f[18]; f[ 5] = f[ 8]; f[ 9] = f[14]; f[10] = f[15]; } else if(y == 0 && z == ZDIM-1){ f[ 2] = f[ 4]; f[11] = f[13]; f[18] = f[13]; f[16] = f[13]; f[ 5] = f[ 8]; f[14] = f[ 9]; f[15] = f[10]; } else if(y == YDIM-1 && z == 0){ f[ 4] = f[ 2]; f[18] = f[16]; f[11] = f[16]; f[13] = f[16]; f[ 8] = f[ 5]; f[ 9] = f[14]; f[10] = f[15]; } else if(y == YDIM-1 && z == ZDIM-1){ f[ 4] = f[ 2]; f[13] = f[11]; f[16] = f[11]; f[18] = f[11]; f[ 8] = f[ 5]; f[14] = f[ 9]; f[15] = f[10]; } else{ if(y == 0){ f[ 2] = f[ 4]; f[11] = f[13]; f[16] = f[18]; f[ 5] = f[ 8]; } else if(y == YDIM-1){ f[ 4] = f[ 2]; f[13] = f[11]; f[18] = f[16]; f[ 8] = f[ 5]; } } f[ 3] = f[ 1] ; f[ 6] = f[ 5] ; f[ 7] = f[ 8] ; f[12]= f[10]; f[17]= f[15]; } inline __device__ void vel_av(float* f, float& uAv, float& vAv, float& wAv, int t) { float u,v,w; u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18]; w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; uAv = (uAv*(t-START_VELAV)+u)/((t-START_VELAV)+1); vAv = (vAv*(t-START_VELAV)+v)/((t-START_VELAV)+1); wAv = (wAv*(t-START_VELAV)+w)/((t-START_VELAV)+1); } inline __device__ void vel_avLR(float* f, float& uAv, float& vAv, float t) { float u,v;//,w; u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18]; uAv = (uAv*(t-START_VELAV)+u*LRFACTOR)/((t-START_VELAV)+LRFACTOR); vAv = (vAv*(t-START_VELAV)+v*LRFACTOR)/((t-START_VELAV)+LRFACTOR); } inline __device__ void vel_fluc(float* f, float& uAv, float& vAv, float& wAv, float& ufluc, float& vfluc, float& wfluc, int t) { float u,v,w; u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18]; w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; u = (u-uAv)*(u-uAv); v = (v-vAv)*(v-vAv); w = (w-wAv)*(w-wAv); ufluc = (ufluc*(t-START_VELFLUC)+u)/((t-START_VELFLUC)+1); vfluc = (vfluc*(t-START_VELFLUC)+v)/((t-START_VELFLUC)+1); wfluc = (wfluc*(t-START_VELFLUC)+w)/((t-START_VELFLUC)+1); } inline __device__ void vel_flucLR(float* f, float& uAv, float& vAv, float& ufluc, float& vfluc, float t) { float u,v;//,w; u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18]; u = (u-uAv)*(u-uAv); v = (v-vAv)*(v-vAv); ufluc = (ufluc*(t-START_VELFLUC)+u*LRFACTOR)/((t-START_VELFLUC)+LRFACTOR); vfluc = (vfluc*(t-START_VELFLUC)+v*LRFACTOR)/((t-START_VELFLUC)+LRFACTOR); } __global__ void initialize(float *fout, size_t pitch, int zInner, int GPU_N) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; float xcoord = x; float ycoord = y; float zcoord = z+1+GPU_N*ZDIM; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) float f[19] = {0}; float m[19] = {0}; int im = ImageFcn(xcoord,ycoord,zcoord,0); float u,v,w,rho; rho = 1.f; u = 0.0f; v = UMAX; w = 0.0f; if(im == 10 || im == 1){ u = 0.0f; v = 0.0f; w 
= 0.0f; } mrt_meq(m,rho,u,v,w); InvertMoments(f,m); for(int i = 0; i<19; i++) fout[j+i *pitch*YDIM*zInner]=f[ i]; } __global__ void initializeLR(float *fout, size_t pitch, int zInner, int GPU_N) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; float xcoord = x; float ycoord = y; float zcoord = z+1+GPU_N*(zInner+2); xcoord = LRX0+x*LRFACTOR; ycoord = LRY0+y*LRFACTOR; zcoord = LRZ0+LRFACTOR*(GPU_N*(zInner+2)+z); int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements) float f[19] = {0}; float m[19] = {0}; int im = ImageFcnLR(xcoord,ycoord,zcoord); float u,v,w,rho; rho = 1.f; u = UMAX; v = 0.0f; w = 0.0f; if(im == 10 || im == 1){ u = 0.0f; v = 0.0f; w = 0.0f; } mrt_meq(m,rho,u,v,w); InvertMoments(f,m); for(int i = 0; i<19; i++) fout[j+i *pitch*YLRDIM*zInner]=f[ i]; } __global__ void update_top(float* hB, float* hA, float* fA, float* temp, float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F, float* h_interp, size_t pitch_interp, float dpdy) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int j = x+y*pitch;//index on padded mem (pitch in elements) int im = ImageFcn(x,y,(GPU+1)*(zInner+2)-1,t); float f[19]; __shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX]; __shared__ int check[1]; check[0] = 0; syncthreads(); f[0 ]= hA [j]; f[1 ]= hA [buff_mem(1 ,x-1,y ,pitch)]; f[3 ]= hA [buff_mem(3 ,x+1,y ,pitch)]; f[2 ]= hA [buff_mem(2 ,x ,y-1,pitch)]; f[5 ]= hA [buff_mem(5 ,x-1,y-1,pitch)]; f[6 ]= hA [buff_mem(6 ,x+1,y-1,pitch)]; f[4 ]= hA [buff_mem(4 ,x ,y+1,pitch)]; f[7 ]= hA [buff_mem(7 ,x+1,y+1,pitch)]; f[8 ]= hA [buff_mem(8 ,x-1,y+1,pitch)]; f[9 ]= fA [f_mem (9 ,x ,y ,zInner-1,pitch, zInner)]; f[10]= fA [f_mem (10,x-1,y ,zInner-1,pitch, zInner)]; f[11]= fA [f_mem (11,x ,y-1,zInner-1,pitch, zInner)]; f[12]= fA [f_mem (12,x+1,y ,zInner-1,pitch, zInner)]; f[13]= fA [f_mem (13,x ,y+1,zInner-1,pitch, zInner)]; f[14]= temp[buff_mem(14,x ,y ,pitch)]; f[15]= temp[buff_mem(15,x-1,y ,pitch)]; f[16]= temp[buff_mem(16,x ,y-1,pitch)]; f[17]= temp[buff_mem(17,x+1,y ,pitch)]; f[18]= temp[buff_mem(18,x ,y+1,pitch)]; if(im == 1 || im ==10){//BB if(im == 10 && flag_F == 1){ check[0] = 1; sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6]; sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17]; sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6]; sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18]; sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13]; sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; } hB[buff_mem(0 ,x,y,pitch)] = f[0 ]; hB[buff_mem(1 ,x,y,pitch)] = f[3 ]; hB[buff_mem(2 ,x,y,pitch)] = f[4 ]; hB[buff_mem(3 ,x,y,pitch)] = f[1 ]; hB[buff_mem(4 ,x,y,pitch)] = f[2 ]; hB[buff_mem(5 ,x,y,pitch)] = f[7 ]; hB[buff_mem(6 ,x,y,pitch)] = f[8 ]; hB[buff_mem(7 ,x,y,pitch)] = f[5 ]; hB[buff_mem(8 ,x,y,pitch)] = f[6 ]; hB[buff_mem(9 ,x,y,pitch)] = f[14]; hB[buff_mem(10,x,y,pitch)] = f[17]; hB[buff_mem(11,x,y,pitch)] = f[18]; hB[buff_mem(12,x,y,pitch)] = f[15]; hB[buff_mem(13,x,y,pitch)] = f[16]; hB[buff_mem(14,x,y,pitch)] = f[9 ]; hB[buff_mem(15,x,y,pitch)] = f[12]; hB[buff_mem(16,x,y,pitch)] = f[13]; hB[buff_mem(17,x,y,pitch)] = f[10]; hB[buff_mem(18,x,y,pitch)] = f[11]; } else{ 
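// Added note (interpretation, not from the original author): this else-branch handles
// fluid (non bounce-back) nodes on the top halo slice. The code below zeroes this
// thread's force accumulators, overwrites f with extrapolated neighbour values for the
// inlet/outlet image flags (100/200/300/400), applies the x-symmetry treatment for
// flags 25/26, runs the MRT/SRT collision with the forcing term dpdy (zeroed beyond
// DYNY1), re-reads periodic images for flags 50-54, and finally writes the resulting
// populations into hB. If REFINEMENT is enabled, the block that follows also stores the
// physical moments of f into h_interp so the fine (LR) grid can interpolate its
// boundary values from the coarse solution.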
sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; if(im == 100)//north outlet { for(int i = 0; i<19; i++) f[i ]= hA[buff_mem(i ,x,y-1,pitch)]; North_Extrap(f,1.0f); } if(im == 200)//south inlet { for(int i = 0; i<19; i++) f[i ]= hA[buff_mem(i ,x,y+1,pitch)]; //South_Extrap(f,UMAX); float u_in = PoisProf3D(x,(GPU+1)*(zInner+2)-1); South_Extrap(f,u_in); } if(im == 300)//east outlet { for(int i = 0; i<19; i++) f[i ]= hA[buff_mem(i ,x-1,y,pitch)]; East_Extrap(f,1.0f); } if(im == 400)//west inlet { for(int i = 0; i<19; i++) f[i ]= hA[buff_mem(i ,x+1,y,pitch)]; float u_in = PoisProf3D(y,(GPU+1)*(zInner+2)-1); West_Extrap(f,u_in,t); } if(im == 25) xsymmetry_top(f,y,(GPU+1)*(zInner+2)-1); if(im == 26) xsymmetry_bot(f,y,(GPU+1)*(zInner+2)-1); if(y>DYNY1) dpdy = 0.f; mrt_collide(f,omega,dpdy); if(im == 50)//west periodic { for(int i = 0; i<19; i++) f[i ]= hA[buff_mem(i ,XDIM-2,y,pitch)]; } if(im == 51)//east periodic { for(int i = 0; i<19; i++) f[i ]= hA[buff_mem(i ,1,y,pitch)]; } if(im == 52)//south periodic { for(int i = 0; i<19; i++) f[i ]= hA[buff_mem(i ,x,DYNY1-1,pitch)]; } if(im == 53)//north periodic { for(int i = 0; i<19; i++) f[i ]= hA[buff_mem(i ,x,DYNY2,pitch)]; } if(im == 54)//DYNY periodic { for(int i = 0; i<19; i++) f[i ]= hA[buff_mem(i ,x,1,pitch)]; } for(int i = 0; i<19; i++) hB[buff_mem(i ,x,y,pitch)] = f[i ]; } if(REFINEMENT == 1){ if(x>=int(LRX0)&&x<=int(LRX0+XLRDIM*LRFACTOR)&&y>=int(LRY0)&&y<=int(LRY0+YLRDIM*LRFACTOR)) { // if(x>int(LRX0+2)&&x<int(LRX0+XLRDIM*LRFACTOR-1)&&y>int(LRY0+2)&&y<int(LRY0+YLRDIM*LRFACTOR-1)) // { // //do nothing // } // else{ // //float rho,u,v,w,m9,m11,m13,m14,m15; float mom[9]; PhysicalMoments(mom,f); for(int i = 0; i<9; i++) h_interp[buff_mem_interp(i,x-int(LRX0),y-int(LRY0),pitch_interp,zInner)]=mom[i]; // } } } syncthreads(); if(check[0] == 1){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint]; sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint]; sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&FX[t-STARTF],sumX[0]); atomicAdd(&FY[t-STARTF],sumY[0]); atomicAdd(&FZ[t-STARTF],sumZ[0]); } } } __global__ void update_bot(float* gB, float* gA, float* fA, float* temp, float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F, float* g_interp, size_t pitch_interp, float dpdy) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int j = x+y*pitch;//index on padded mem (pitch in elements) int im = ImageFcn(x,y,GPU*(zInner+2),t); float f[19]; __shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX]; __shared__ int check[1]; check[0] = 0; syncthreads(); f[0 ]= gA [j]; f[1 ]= gA [buff_mem(1 ,x-1,y ,pitch)]; f[3 ]= gA [buff_mem(3 ,x+1,y ,pitch)]; f[2 ]= gA [buff_mem(2 ,x ,y-1,pitch)]; f[5 ]= gA [buff_mem(5 ,x-1,y-1,pitch)]; f[6 ]= gA [buff_mem(6 ,x+1,y-1,pitch)]; f[4 ]= gA [buff_mem(4 ,x ,y+1,pitch)]; f[7 ]= gA [buff_mem(7 ,x+1,y+1,pitch)]; f[8 ]= gA [buff_mem(8 ,x-1,y+1,pitch)]; f[9 ]= temp[buff_mem(9 ,x ,y ,pitch)]; f[10]= temp[buff_mem(10,x-1,y ,pitch)]; f[11]= temp[buff_mem(11,x ,y-1,pitch)]; f[12]= temp[buff_mem(12,x+1,y ,pitch)]; f[13]= temp[buff_mem(13,x ,y+1,pitch)]; f[14]= fA [f_mem (14,x ,y ,0,pitch, zInner)]; f[15]= fA [f_mem (15,x-1,y ,0,pitch, zInner)]; f[16]= fA [f_mem (16,x ,y-1,0,pitch, zInner)]; 
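// Added note (interpretation, not from the original author): update_bot advances the
// bottom boundary slice at z = GPU*(zInner+2). In-plane populations (f[1]-f[8]) are
// gathered from gA on this slice, the +z-moving populations (f[9]-f[13]) come from the
// `temp` buffer, presumably the halo exchanged with the neighbouring GPU below, and the
// -z-moving populations (f[14]-f[18], loaded just above and below this point) are pulled
// from the first interior slice of fA. buff_mem/f_mem appear to encode the
// (direction, x, y[, z]) -> pitched linear index mapping used throughout these kernels.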
f[17]= fA [f_mem (17,x+1,y ,0,pitch, zInner)]; f[18]= fA [f_mem (18,x ,y+1,0,pitch, zInner)]; if(im == 1 || im ==10){//BB if(im == 10 && flag_F == 1){ check[0] = 1; sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6]; sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17]; sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6]; sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18]; sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13]; sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; } gB[buff_mem(0 ,x,y,pitch)] = f[0 ]; gB[buff_mem(1 ,x,y,pitch)] = f[3 ]; gB[buff_mem(2 ,x,y,pitch)] = f[4 ]; gB[buff_mem(3 ,x,y,pitch)] = f[1 ]; gB[buff_mem(4 ,x,y,pitch)] = f[2 ]; gB[buff_mem(5 ,x,y,pitch)] = f[7 ]; gB[buff_mem(6 ,x,y,pitch)] = f[8 ]; gB[buff_mem(7 ,x,y,pitch)] = f[5 ]; gB[buff_mem(8 ,x,y,pitch)] = f[6 ]; gB[buff_mem(9 ,x,y,pitch)] = f[14]; gB[buff_mem(10,x,y,pitch)] = f[17]; gB[buff_mem(11,x,y,pitch)] = f[18]; gB[buff_mem(12,x,y,pitch)] = f[15]; gB[buff_mem(13,x,y,pitch)] = f[16]; gB[buff_mem(14,x,y,pitch)] = f[9 ]; gB[buff_mem(15,x,y,pitch)] = f[12]; gB[buff_mem(16,x,y,pitch)] = f[13]; gB[buff_mem(17,x,y,pitch)] = f[10]; gB[buff_mem(18,x,y,pitch)] = f[11]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; if(im == 100)//north outlet { for(int i = 0; i<19; i++) f[i ]= gA[buff_mem(i ,x,y-1,pitch)]; North_Extrap(f,1.0f); } if(im == 200)//south inlet { for(int i = 0; i<19; i++) f[i ]= gA[buff_mem(i ,x,y+1,pitch)]; //South_Extrap(f,UMAX); float u_in = PoisProf3D(x,GPU*(zInner+2)); South_Extrap(f,u_in); } if(im == 300)//east outlet { for(int i = 0; i<19; i++) f[i ]= gA[buff_mem(i ,x-1,y,pitch)]; East_Extrap(f,1.0f); } if(im == 400)//west inlet { for(int i = 0; i<19; i++) f[i ]= gA[buff_mem(i ,x+1,y,pitch)]; float u_in = PoisProf3D(y,GPU*(zInner+2)); West_Extrap(f,u_in,t); } if(im == 25) xsymmetry_top(f,y,GPU*(zInner+2)); if(im == 26) xsymmetry_bot(f,y,GPU*(zInner+2)); if(y>DYNY1) dpdy = 0.f; mrt_collide(f,omega,dpdy); if(im == 50)//west periodic { for(int i = 0; i<19; i++) f[i ]= gA[buff_mem(i ,XDIM-2,y,pitch)]; } if(im == 51)//east periodic { for(int i = 0; i<19; i++) f[i ]= gA[buff_mem(i ,1,y,pitch)]; } if(im == 52)//south periodic { for(int i = 0; i<19; i++) f[i ]= gA[buff_mem(i ,x,DYNY1-1,pitch)]; } if(im == 53)//north periodic { for(int i = 0; i<19; i++) f[i ]= gA[buff_mem(i ,x,DYNY2,pitch)]; } if(im == 54)//DYNY periodic { for(int i = 0; i<19; i++) f[i ]= gA[buff_mem(i ,x,1,pitch)]; } for(int i = 0; i<19; i++) gB[buff_mem(i ,x,y,pitch)] = f[i ]; } if(REFINEMENT == 1){ if(x>=int(LRX0)&&x<=int(LRX0+XLRDIM*LRFACTOR)&&y>=int(LRY0)&&y<=int(LRY0+YLRDIM*LRFACTOR)) { // if(x>int(LRX0+2)&&x<int(LRX0+XLRDIM*LRFACTOR-1)&&y>int(LRY0+2)&&y<int(LRY0+YLRDIM*LRFACTOR-1)) // { // //do nothing // } // else{ //float rho,u,v,w,m9,m11,m13,m14,m15; float mom[9]; PhysicalMoments(mom,f); for(int i = 0; i<9; i++) g_interp[buff_mem_interp(i,x-int(LRX0),y-int(LRY0),pitch_interp,zInner)]=mom[i]; // } } } syncthreads(); if(check[0] == 1){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint]; sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint]; sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ 
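// Added note (interpretation, not from the original author): after the shared-memory
// tree reduction above, thread 0 of each block holds the block's partial
// momentum-exchange sums (the 2*f_i contributions accumulated at flagged solid nodes)
// and commits them to the global force histories FX/FY/FZ, indexed by t-STARTF, with
// one atomicAdd per component.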
atomicAdd(&FX[t-STARTF],sumX[0]); atomicAdd(&FY[t-STARTF],sumY[0]); atomicAdd(&FZ[t-STARTF],sumZ[0]); } } } __global__ void update_inn(float* fB, float* fA, float* g, float* h, float omega, size_t pitch, int GPU, int zInner, float* velAv_u, float* velAv_v, float* velAv_w, float* velFluc_u, float* velFluc_v, float* velFluc_w, float* FX, float* FY, float* FZ, int t, int flag_F, float* f_interp, size_t pitch_interp, float dpdy) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) int im = ImageFcn(x,y,GPU*(zInner+2)+1+z,t); float f[19]; __shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX]; __shared__ int check[1]; check[0] = 0; syncthreads(); f[ 0] = fA[j]; f[ 1] = fA[f_mem (1 ,x-1,y ,z ,pitch, zInner)]; f[ 3] = fA[f_mem (3 ,x+1,y ,z ,pitch, zInner)]; f[ 2] = fA[f_mem (2 ,x ,y-1,z ,pitch, zInner)]; f[ 5] = fA[f_mem (5 ,x-1,y-1,z ,pitch, zInner)]; f[ 6] = fA[f_mem (6 ,x+1,y-1,z ,pitch, zInner)]; f[ 4] = fA[f_mem (4 ,x ,y+1,z ,pitch, zInner)]; f[ 7] = fA[f_mem (7 ,x+1,y+1,z ,pitch, zInner)]; f[ 8] = fA[f_mem (8 ,x-1,y+1,z ,pitch, zInner)]; if(z==zInner-1){//top nodes need info from h f[ 9] = fA[f_mem (9 ,x ,y ,z-1,pitch, zInner)]; f[10]= fA[f_mem (10,x-1,y ,z-1,pitch, zInner)]; f[11]= fA[f_mem (11,x ,y-1,z-1,pitch, zInner)]; f[12]= fA[f_mem (12,x+1,y ,z-1,pitch, zInner)]; f[13]= fA[f_mem (13,x ,y+1,z-1,pitch, zInner)]; f[14]= h [buff_mem(14,x ,y ,pitch)]; f[15]= h [buff_mem(15,x-1,y ,pitch)]; f[16]= h [buff_mem(16,x ,y-1,pitch)]; f[17]= h [buff_mem(17,x+1,y ,pitch)]; f[18]= h [buff_mem(18,x ,y+1,pitch)]; } else if(z==0){//bottom nodes need info from g f[ 9] =g [buff_mem(9 ,x ,y ,pitch)]; f[10]= g [buff_mem(10,x-1,y ,pitch)]; f[11]= g [buff_mem(11,x ,y-1,pitch)]; f[12]= g [buff_mem(12,x+1,y ,pitch)]; f[13]= g [buff_mem(13,x ,y+1,pitch)]; f[14]= fA[f_mem (14,x ,y ,z+1,pitch, zInner)]; f[15]= fA[f_mem (15,x-1,y ,z+1,pitch, zInner)]; f[16]= fA[f_mem (16,x ,y-1,z+1,pitch, zInner)]; f[17]= fA[f_mem (17,x+1,y ,z+1,pitch, zInner)]; f[18]= fA[f_mem (18,x ,y+1,z+1,pitch, zInner)]; } else{//normal nodes f[ 9] = fA[f_mem(9 ,x ,y ,z-1,pitch,zInner)]; f[10]= fA[f_mem(10,x-1,y ,z-1,pitch,zInner)]; f[11]= fA[f_mem(11,x ,y-1,z-1,pitch,zInner)]; f[12]= fA[f_mem(12,x+1,y ,z-1,pitch,zInner)]; f[13]= fA[f_mem(13,x ,y+1,z-1,pitch,zInner)]; f[14]= fA[f_mem(14,x ,y ,z+1,pitch,zInner)]; f[15]= fA[f_mem(15,x-1,y ,z+1,pitch,zInner)]; f[16]= fA[f_mem(16,x ,y-1,z+1,pitch,zInner)]; f[17]= fA[f_mem(17,x+1,y ,z+1,pitch,zInner)]; f[18]= fA[f_mem(18,x ,y+1,z+1,pitch,zInner)]; }//end normal nodes if(im == 1 || im ==10){//BB if(im == 10 && flag_F == 1){ check[0] = 1; sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6]; sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17]; sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6]; sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18]; sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13]; sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; } fB[f_mem(1 ,x,y,z,pitch,zInner)] = f[ 3] ; fB[f_mem(2 ,x,y,z,pitch,zInner)] = f[ 4] ; fB[f_mem(3 ,x,y,z,pitch,zInner)] = f[ 1] ; fB[f_mem(4 ,x,y,z,pitch,zInner)] = f[ 2] ; fB[f_mem(5 ,x,y,z,pitch,zInner)] = f[ 7] ; fB[f_mem(6 ,x,y,z,pitch,zInner)] = f[ 8] ; fB[f_mem(7 ,x,y,z,pitch,zInner)] = 
f[ 5] ; fB[f_mem(8 ,x,y,z,pitch,zInner)] = f[ 6] ; fB[f_mem(9 ,x,y,z,pitch,zInner)] = f[14]; fB[f_mem(10,x,y,z,pitch,zInner)] = f[17]; fB[f_mem(11,x,y,z,pitch,zInner)] = f[18]; fB[f_mem(12,x,y,z,pitch,zInner)] = f[15]; fB[f_mem(13,x,y,z,pitch,zInner)] = f[16]; fB[f_mem(14,x,y,z,pitch,zInner)] = f[ 9] ; fB[f_mem(15,x,y,z,pitch,zInner)] = f[12]; fB[f_mem(16,x,y,z,pitch,zInner)] = f[13]; fB[f_mem(17,x,y,z,pitch,zInner)] = f[10]; fB[f_mem(18,x,y,z,pitch,zInner)] = f[11]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; if(im == 100)//north outlet { for(int i = 0; i<19; i++) f[i ]= fA[f_mem(i ,x,y-1,z,pitch,zInner)]; North_Extrap(f,1.0f); } if(im == 200)//south inlet { for(int i = 0; i<19; i++) f[i ]= fA[f_mem(i ,x,y+1,z,pitch,zInner)]; //South_Extrap(f,UMAX); float u_in = PoisProf3D(x,GPU*(zInner+2)+1+z); South_Extrap(f,u_in); } if(im == 300)//east outlet { for(int i = 0; i<19; i++) f[i ]= fA[f_mem(i ,x-1,y,z,pitch,zInner)]; East_Extrap(f,1.0f); } if(im == 400)//west inlet { for(int i = 0; i<19; i++) f[i ]= fA[f_mem(i ,x+1,y,z,pitch,zInner)]; float u_in = PoisProf3D(y,GPU*(zInner+2)+1+z); West_Extrap(f,u_in,t); } if(im == 25) xsymmetry_top(f,y,GPU*(zInner+2)+1+z); if(im == 26) xsymmetry_bot(f,y,GPU*(zInner+2)+1+z); if(y>DYNY1) dpdy = 0.f; mrt_collide(f,omega,dpdy); if(im == 50)//west periodic { for(int i = 0; i<19; i++) f[i ]= fA[f_mem(i ,XDIM-2,y,z,pitch,zInner)]; } if(im == 51)//east periodic { for(int i = 0; i<19; i++) f[i ]= fA[f_mem(i ,1,y,z,pitch,zInner)]; } if(im == 52)//south periodic { for(int i = 0; i<19; i++) f[i ]= fA[f_mem(i ,x,DYNY1-1,z,pitch,zInner)]; } if(im == 53)//north periodic { for(int i = 0; i<19; i++) f[i ]= fA[f_mem(i ,x,DYNY2,z,pitch,zInner)]; } if(im == 54)//DYNY periodic { for(int i = 0; i<19; i++) f[i ]= fA[f_mem(i ,x,1,z,pitch,zInner)]; } if(VELAV == 1){ if(t>=START_VELAV && t<START_VELFLUC){ float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YDIM]; float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YDIM]; float w_Av = velAv_w[x+y*pitch+(z+1)*pitch*YDIM]; vel_av(f,u_Av,v_Av,w_Av,t); velAv_u[x+y*pitch+(z+1)*pitch*YDIM] = u_Av; velAv_v[x+y*pitch+(z+1)*pitch*YDIM] = v_Av; velAv_w[x+y*pitch+(z+1)*pitch*YDIM] = w_Av; } else if(t>=START_VELFLUC){ float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YDIM]; float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YDIM]; float w_Av = velAv_w[x+y*pitch+(z+1)*pitch*YDIM]; float u_fluc = velFluc_u[x+y*pitch+(z+1)*pitch*YDIM]; float v_fluc = velFluc_v[x+y*pitch+(z+1)*pitch*YDIM]; float w_fluc = velFluc_w[x+y*pitch+(z+1)*pitch*YDIM]; vel_fluc(f,u_Av,v_Av,w_Av,u_fluc,v_fluc,w_fluc,t); velFluc_u[x+y*pitch+(z+1)*pitch*YDIM] = u_fluc; velFluc_v[x+y*pitch+(z+1)*pitch*YDIM] = v_fluc; velFluc_w[x+y*pitch+(z+1)*pitch*YDIM] = w_fluc; } } for(int i = 0; i<19; i++) fB[f_mem(i ,x,y,z,pitch,zInner)] = f[ i] ; } if(REFINEMENT == 1){ if(x>=int(LRX0)&&x<=int(LRX0+XLRDIM*LRFACTOR)&&y>=int(LRY0)&&y<=int(LRY0+YLRDIM*LRFACTOR)) { // if(x>int(LRX0+2)&&x<int(LRX0+XLRDIM*LRFACTOR-1)&&y>int(LRY0+2)&&y<int(LRY0+YLRDIM*LRFACTOR-1)) // { // //do nothing // } // else{ //float rho,u,v,w,m9,m11,m13,m14,m15; float mom[9]; PhysicalMoments(mom,f); for(int i = 0; i<9; i++) f_interp[f_mem_interp(i,x-int(LRX0),y-int(LRY0),z,pitch_interp,zInner)]=mom[i]; // } } } syncthreads(); if(check[0] == 1){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint]; sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint]; sumZ[threadIdx.x] += 
sumZ[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&FX[t-STARTF],sumX[0]); atomicAdd(&FY[t-STARTF],sumY[0]); atomicAdd(&FZ[t-STARTF],sumZ[0]); } } } __global__ void update_top_LR(float* hB, float* hA, float* fA, float* temp, float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = (GPU+1)*(zInner+2)-1;//physical coord in LR region int j = x+y*pitch;//index on padded mem (pitch in elements) float xcoord = LRX0+x*LRFACTOR; float ycoord = LRY0+y*LRFACTOR; float zcoord = LRZ0+LRFACTOR*z; int im = ImageFcnLR(xcoord,ycoord,zcoord); float f[19]; __shared__ float sumX[BLOCKSIZELRX], sumY[BLOCKSIZELRX], sumZ[BLOCKSIZELRX]; __shared__ int check[1]; check[0] = 0; syncthreads(); f[0 ]= hA [j]; f[1 ]= hA [buff_memLR(1 ,x-1,y ,pitch)]; f[3 ]= hA [buff_memLR(3 ,x+1,y ,pitch)]; f[2 ]= hA [buff_memLR(2 ,x ,y-1,pitch)]; f[5 ]= hA [buff_memLR(5 ,x-1,y-1,pitch)]; f[6 ]= hA [buff_memLR(6 ,x+1,y-1,pitch)]; f[4 ]= hA [buff_memLR(4 ,x ,y+1,pitch)]; f[7 ]= hA [buff_memLR(7 ,x+1,y+1,pitch)]; f[8 ]= hA [buff_memLR(8 ,x-1,y+1,pitch)]; f[9 ]= fA [ f_memLR(9 ,x ,y ,zInner-1,pitch, zInner)]; f[10]= fA [ f_memLR(10,x-1,y ,zInner-1,pitch, zInner)]; f[11]= fA [ f_memLR(11,x ,y-1,zInner-1,pitch, zInner)]; f[12]= fA [ f_memLR(12,x+1,y ,zInner-1,pitch, zInner)]; f[13]= fA [ f_memLR(13,x ,y+1,zInner-1,pitch, zInner)]; f[14]= temp[buff_memLR(14,x ,y ,pitch)]; f[15]= temp[buff_memLR(15,x-1,y ,pitch)]; f[16]= temp[buff_memLR(16,x ,y-1,pitch)]; f[17]= temp[buff_memLR(17,x+1,y ,pitch)]; f[18]= temp[buff_memLR(18,x ,y+1,pitch)]; if(im == 1 || im ==10){//BB if(im == 10 && flag_F == 1){ check[0] = 1; sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6]; sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17]; sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6]; sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18]; sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13]; sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; } hB[buff_memLR(0 ,x,y,pitch)] = f[0 ]; hB[buff_memLR(1 ,x,y,pitch)] = f[3 ]; hB[buff_memLR(2 ,x,y,pitch)] = f[4 ]; hB[buff_memLR(3 ,x,y,pitch)] = f[1 ]; hB[buff_memLR(4 ,x,y,pitch)] = f[2 ]; hB[buff_memLR(5 ,x,y,pitch)] = f[7 ]; hB[buff_memLR(6 ,x,y,pitch)] = f[8 ]; hB[buff_memLR(7 ,x,y,pitch)] = f[5 ]; hB[buff_memLR(8 ,x,y,pitch)] = f[6 ]; hB[buff_memLR(9 ,x,y,pitch)] = f[14]; hB[buff_memLR(10,x,y,pitch)] = f[17]; hB[buff_memLR(11,x,y,pitch)] = f[18]; hB[buff_memLR(12,x,y,pitch)] = f[15]; hB[buff_memLR(13,x,y,pitch)] = f[16]; hB[buff_memLR(14,x,y,pitch)] = f[9 ]; hB[buff_memLR(15,x,y,pitch)] = f[12]; hB[buff_memLR(16,x,y,pitch)] = f[13]; hB[buff_memLR(17,x,y,pitch)] = f[10]; hB[buff_memLR(18,x,y,pitch)] = f[11]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; mrt_collide(f,omega,LRFACTOR); for(int i = 0; i<19; i++) hB[buff_memLR(i ,x,y,pitch)] = f[i ]; } syncthreads(); if(check[0] == 1){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint]; sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint]; sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads 
= halfPoint; } if(threadIdx.x == 0){ atomicAdd(&FX[t-STARTF],sumX[0]); atomicAdd(&FY[t-STARTF],sumY[0]); atomicAdd(&FZ[t-STARTF],sumZ[0]); } } } __global__ void update_bot_LR(float* gB, float* gA, float* fA, float* temp, float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; //int z = (zInner+2)-1; int j = x+y*pitch;//index on padded mem (pitch in elements) float xcoord = LRX0+x*LRFACTOR; float ycoord = LRY0+y*LRFACTOR; //float zcoord = LRZ0+GPU*LRFACTOR*z; float zcoord = LRZ0+LRFACTOR*(GPU*(zInner+2)-1); int im = ImageFcnLR(xcoord,ycoord,zcoord); float f[19]; __shared__ float sumX[BLOCKSIZELRX], sumY[BLOCKSIZELRX], sumZ[BLOCKSIZELRX]; __shared__ int check[1]; check[0] = 0; syncthreads(); f[0 ]= gA [j]; f[1 ]= gA [buff_memLR(1 ,x-1,y ,pitch)]; f[3 ]= gA [buff_memLR(3 ,x+1,y ,pitch)]; f[2 ]= gA [buff_memLR(2 ,x ,y-1,pitch)]; f[5 ]= gA [buff_memLR(5 ,x-1,y-1,pitch)]; f[6 ]= gA [buff_memLR(6 ,x+1,y-1,pitch)]; f[4 ]= gA [buff_memLR(4 ,x ,y+1,pitch)]; f[7 ]= gA [buff_memLR(7 ,x+1,y+1,pitch)]; f[8 ]= gA [buff_memLR(8 ,x-1,y+1,pitch)]; f[9 ]= temp[buff_memLR(9 ,x ,y ,pitch)]; f[10]= temp[buff_memLR(10,x-1,y ,pitch)]; f[11]= temp[buff_memLR(11,x ,y-1,pitch)]; f[12]= temp[buff_memLR(12,x+1,y ,pitch)]; f[13]= temp[buff_memLR(13,x ,y+1,pitch)]; f[14]= fA [ f_memLR(14,x ,y ,0,pitch, zInner)]; f[15]= fA [ f_memLR(15,x-1,y ,0,pitch, zInner)]; f[16]= fA [ f_memLR(16,x ,y-1,0,pitch, zInner)]; f[17]= fA [ f_memLR(17,x+1,y ,0,pitch, zInner)]; f[18]= fA [ f_memLR(18,x ,y+1,0,pitch, zInner)]; if(im == 1 || im ==10){//BB if(im == 10 && flag_F == 1){ check[0] = 1; sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6]; sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17]; sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6]; sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18]; sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13]; sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; } gB[buff_memLR(0 ,x,y,pitch)] = f[0 ]; gB[buff_memLR(1 ,x,y,pitch)] = f[3 ]; gB[buff_memLR(2 ,x,y,pitch)] = f[4 ]; gB[buff_memLR(3 ,x,y,pitch)] = f[1 ]; gB[buff_memLR(4 ,x,y,pitch)] = f[2 ]; gB[buff_memLR(5 ,x,y,pitch)] = f[7 ]; gB[buff_memLR(6 ,x,y,pitch)] = f[8 ]; gB[buff_memLR(7 ,x,y,pitch)] = f[5 ]; gB[buff_memLR(8 ,x,y,pitch)] = f[6 ]; gB[buff_memLR(9 ,x,y,pitch)] = f[14]; gB[buff_memLR(10,x,y,pitch)] = f[17]; gB[buff_memLR(11,x,y,pitch)] = f[18]; gB[buff_memLR(12,x,y,pitch)] = f[15]; gB[buff_memLR(13,x,y,pitch)] = f[16]; gB[buff_memLR(14,x,y,pitch)] = f[9 ]; gB[buff_memLR(15,x,y,pitch)] = f[12]; gB[buff_memLR(16,x,y,pitch)] = f[13]; gB[buff_memLR(17,x,y,pitch)] = f[10]; gB[buff_memLR(18,x,y,pitch)] = f[11]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; mrt_collide(f,omega,LRFACTOR); for(int i = 0; i<19; i++) gB[buff_memLR(i ,x,y,pitch)] = f[i ]; } syncthreads(); if(check[0] == 1){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint]; sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint]; sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&FX[t-STARTF],sumX[0]); 
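// Thread 0 of each block commits the block-level reduction of sumX/sumY/sumZ (what
// appears to be the momentum-exchange force gathered at im==10 bounce-back nodes above)
// to the global force history. FX/FY/FZ are indexed by (t-STARTF), so forces are only
// recorded once t exceeds STARTF; the same shared-memory halving reduction followed by
// a single atomicAdd per block is used in all of the update_* kernels.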
atomicAdd(&FY[t-STARTF],sumY[0]); atomicAdd(&FZ[t-STARTF],sumZ[0]); } } } __global__ void update_inn_LR(float* fB, float* fA, float* g, float* h, float omega, size_t pitch, int GPU, int zInner, float* velAv_u, float* velAv_v, float* velFluc_u, float* velFluc_v, float* FX, float* FY, float* FZ, int t, int flag_F) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements) int im = ImageFcnLR(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+LRFACTOR*(GPU*(zInner+2)+1+z)); float f[19]; __shared__ float sumX[BLOCKSIZELRX], sumY[BLOCKSIZELRX], sumZ[BLOCKSIZELRX]; __shared__ int check[1]; check[0] = 0; syncthreads(); f[ 0] = fA[j]; f[ 1] = fA[f_memLR (1 ,x-1,y ,z ,pitch, zInner)]; f[ 3] = fA[f_memLR (3 ,x+1,y ,z ,pitch, zInner)]; f[ 2] = fA[f_memLR (2 ,x ,y-1,z ,pitch, zInner)]; f[ 5] = fA[f_memLR (5 ,x-1,y-1,z ,pitch, zInner)]; f[ 6] = fA[f_memLR (6 ,x+1,y-1,z ,pitch, zInner)]; f[ 4] = fA[f_memLR (4 ,x ,y+1,z ,pitch, zInner)]; f[ 7] = fA[f_memLR (7 ,x+1,y+1,z ,pitch, zInner)]; f[ 8] = fA[f_memLR (8 ,x-1,y+1,z ,pitch, zInner)]; if(z==zInner-1){//top nodes need info from h f[ 9] =fA[ f_memLR(9 ,x ,y ,z-1,pitch, zInner)]; f[10]= fA[ f_memLR(10,x-1,y ,z-1,pitch, zInner)]; f[11]= fA[ f_memLR(11,x ,y-1,z-1,pitch, zInner)]; f[12]= fA[ f_memLR(12,x+1,y ,z-1,pitch, zInner)]; f[13]= fA[ f_memLR(13,x ,y+1,z-1,pitch, zInner)]; f[14]= h [buff_memLR(14,x ,y ,pitch)]; f[15]= h [buff_memLR(15,x-1,y ,pitch)]; f[16]= h [buff_memLR(16,x ,y-1,pitch)]; f[17]= h [buff_memLR(17,x+1,y ,pitch)]; f[18]= h [buff_memLR(18,x ,y+1,pitch)]; } else if(z==0){//bottom nodes need info from g f[ 9] =g [buff_memLR(9 ,x ,y ,pitch)]; f[10]= g [buff_memLR(10,x-1,y ,pitch)]; f[11]= g [buff_memLR(11,x ,y-1,pitch)]; f[12]= g [buff_memLR(12,x+1,y ,pitch)]; f[13]= g [buff_memLR(13,x ,y+1,pitch)]; f[14]= fA[ f_memLR(14,x ,y ,z+1,pitch, zInner)]; f[15]= fA[ f_memLR(15,x-1,y ,z+1,pitch, zInner)]; f[16]= fA[ f_memLR(16,x ,y-1,z+1,pitch, zInner)]; f[17]= fA[ f_memLR(17,x+1,y ,z+1,pitch, zInner)]; f[18]= fA[ f_memLR(18,x ,y+1,z+1,pitch, zInner)]; } else{//normal nodes f[ 9] =fA[f_memLR(9 ,x ,y ,z-1,pitch,zInner)]; f[10]= fA[f_memLR(10,x-1,y ,z-1,pitch,zInner)]; f[11]= fA[f_memLR(11,x ,y-1,z-1,pitch,zInner)]; f[12]= fA[f_memLR(12,x+1,y ,z-1,pitch,zInner)]; f[13]= fA[f_memLR(13,x ,y+1,z-1,pitch,zInner)]; f[14]= fA[f_memLR(14,x ,y ,z+1,pitch,zInner)]; f[15]= fA[f_memLR(15,x-1,y ,z+1,pitch,zInner)]; f[16]= fA[f_memLR(16,x ,y-1,z+1,pitch,zInner)]; f[17]= fA[f_memLR(17,x+1,y ,z+1,pitch,zInner)]; f[18]= fA[f_memLR(18,x ,y+1,z+1,pitch,zInner)]; }//end normal nodes if(im == 1 || im ==10){//BB if(im == 10 && flag_F == 1){ check[0] = 1; sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6]; sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17]; sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6]; sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18]; sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13]; sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; } fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f[ 3] ; fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f[ 4] ; fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f[ 1] ; fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f[ 2] ; fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f[ 7] ; fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f[ 8] ; fB[f_memLR(7 
,x,y,z,pitch,zInner)] = f[ 5] ; fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f[ 6] ; fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f[14]; fB[f_memLR(10,x,y,z,pitch,zInner)] = f[17]; fB[f_memLR(11,x,y,z,pitch,zInner)] = f[18]; fB[f_memLR(12,x,y,z,pitch,zInner)] = f[15]; fB[f_memLR(13,x,y,z,pitch,zInner)] = f[16]; fB[f_memLR(14,x,y,z,pitch,zInner)] = f[ 9] ; fB[f_memLR(15,x,y,z,pitch,zInner)] = f[12]; fB[f_memLR(16,x,y,z,pitch,zInner)] = f[13]; fB[f_memLR(17,x,y,z,pitch,zInner)] = f[10]; fB[f_memLR(18,x,y,z,pitch,zInner)] = f[11]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; mrt_collide(f,omega,LRFACTOR); if(VELAV == 1){ if(t>=START_VELAV && t<START_VELFLUC){ float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YLRDIM]; float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YLRDIM]; vel_avLR(f,u_Av,v_Av,t); velAv_u[x+y*pitch+(z+1)*pitch*YLRDIM] = u_Av; velAv_v[x+y*pitch+(z+1)*pitch*YLRDIM] = v_Av; } else if(t>=START_VELFLUC){ float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YLRDIM]; float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YLRDIM]; float u_fluc = velFluc_u[x+y*pitch+(z+1)*pitch*YLRDIM]; float v_fluc = velFluc_v[x+y*pitch+(z+1)*pitch*YLRDIM]; vel_flucLR(f,u_Av,v_Av,u_fluc,v_fluc,t); velFluc_u[x+y*pitch+(z+1)*pitch*YLRDIM] = u_fluc; velFluc_v[x+y*pitch+(z+1)*pitch*YLRDIM] = v_fluc; } } for(int i = 0; i<19; i++) fB[f_memLR(i ,x,y,z,pitch,zInner)] = f[ i] ; } syncthreads(); if(check[0] == 1){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint]; sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint]; sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&FX[t-STARTF],sumX[0]); atomicAdd(&FY[t-STARTF],sumY[0]); atomicAdd(&FZ[t-STARTF],sumZ[0]); } } } /* InterpCF is used on the LR grid. It first uses part of its threads to read from the coarse mesh nodes that completely envelope the fine mesh nodes, and loads the f's into shared memory. 
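(From the code below: the loading threads cover the small slab of coarse nodes that
brackets this thread block's fine nodes; for each coarse node the nine physical moments
go into mom_c and the strain-rate tensor into S_c, so that the ORDER==2 branch can add a
gradient-based correction on top of the tri-linear estimate.)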
Next, all threads use the shared memory data to interpolate and scale the f's */ __global__ void InterpCF(float* f_f, float* g_f, float* h_f, size_t pitch_f, float* m_f_c, float* m_g_c, float* m_h_c, float* m_g_temp, size_t pitch_m, float SF, float omega_c, int GPU, int zInner, int zInner_f) { int x = threadIdx.x+blockIdx.x*blockDim.x; int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; __shared__ float mom_c[BLOCKSIZEINTERP][2][2][9]; __shared__ float S_c[BLOCKSIZEINTERP][2][2][6]; //int GPU = 0; int im = ImageFcnLR(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+LRFACTOR*(GPU*(zInner_f+2)+z)); if(blockIdx.z == 0 && threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2) { //use g and g_temp int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1 int ymax = YLRDIM*LRFACTOR+1; if(threadIdx.z == 0){ for(int i = 0; i<9; i++) mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_g_temp[x_c+y_c*pitch_m+i*ymax*pitch_m]; } else{ for(int i = 0; i<9; i++) mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_g_c[x_c+y_c*pitch_m+i*ymax*pitch_m]; } // float S[6];//float m_strain[9]; // for(int i = 0; i<9; i++) // m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z]; // for(int i = 0; i<6; i++) // S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i]; StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],1.f); } else if(blockIdx.z == 1 && threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2) { //use g and f int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1 int ymax = YLRDIM*LRFACTOR+1; if(threadIdx.z == 0){ for(int i = 0; i<9; i++) mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_g_c[x_c+y_c*pitch_m+i*ymax*pitch_m]; } else{ for(int i = 0; i<9; i++) mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_f_c[x_c+y_c*pitch_m+i*ymax*pitch_m*zInner]; } // float S[6];//float m_strain[9]; // for(int i = 0; i<9; i++) // m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z]; // for(int i = 0; i<6; i++) // S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i]; StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],1.f); } else if(blockIdx.z == zInner+1 && threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2) { //use h and f int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1 int ymax = YLRDIM*LRFACTOR+1; if(threadIdx.z == 0){ for(int i = 0; i<9; i++) mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_f_c[x_c+y_c*pitch_m+(zInner-1)*ymax*pitch_m+i*ymax*pitch_m*zInner]; } else{ for(int i = 0; i<9; i++) mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_h_c[x_c+y_c*pitch_m+i*ymax*pitch_m]; } // float S[6];//float m_strain[9]; // for(int i = 0; i<9; i++) // m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z]; // for(int i = 0; i<6; i++) // S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i]; StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],1.f); } else if(threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2){//use f only int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, 
blockdim.x is LRX*LRFACTOR int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1 int z_c = threadIdx.z+blockIdx.z-2;//in coarse grid, blockdim.z is 1; -2 to account for g and lower halo int ymax = YLRDIM*LRFACTOR+1; for(int i = 0; i<9; i++) mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_f_c[x_c+y_c*pitch_m+z_c*ymax*pitch_m+i*ymax*pitch_m*zInner]; // float S[6];//float m_strain[9]; // for(int i = 0; i<9; i++) // m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z]; // for(int i = 0; i<6; i++) // S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i]; StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],1.f); } syncthreads(); if(x<LRLEVEL || x>XLRDIM-LRLEVEL-1 || y<LRLEVEL || y>YLRDIM-LRLEVEL-1){ //if(x<LRLEVEL+3 || x>XLRDIM-LRLEVEL-5 || y<LRLEVEL+3 || y>YLRDIM-LRLEVEL-5){ //interpolate from shared mem int xm = int(threadIdx.x*LRFACTOR+LRFACTOR*0.5f); int ym = int(threadIdx.y*LRFACTOR+LRFACTOR*0.5f); int zm = int(threadIdx.z*LRFACTOR+LRFACTOR*0.5f); int xp = xm+1; //int yp = ym+1; int zp = zm+1; float xf = (threadIdx.x*LRFACTOR+LRFACTOR*0.5f)-xm; float yf = (threadIdx.y*LRFACTOR+LRFACTOR*0.5f)-ym; float zf = (threadIdx.z*LRFACTOR+LRFACTOR*0.5f)-zm; float mom[9]; for(int i = 0; i<9; i++){ float v000 = mom_c[xm][0][0][i]; float v001 = mom_c[xp][0][0][i]; float v010 = mom_c[xm][1][0][i]; float v011 = mom_c[xp][1][0][i]; float v100 = mom_c[xm][0][1][i]; float v101 = mom_c[xp][0][1][i]; float v110 = mom_c[xm][1][1][i]; float v111 = mom_c[xp][1][1][i]; mom[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf); } if(ORDER == 2) { float u_x1,u_x2,u_x3,u_x4,u_x5,u_x6,u_x7,u_x8; float v_y1,v_y2,v_y3,v_y4,v_y5,v_y6,v_y7,v_y8; float w_z1,w_z2,w_z3,w_z4,w_z5,w_z6,w_z7,w_z8; float Sxy1,Sxy2,Sxy3,Sxy4,Sxy5,Sxy6,Sxy7,Sxy8; float Syz1,Syz2,Syz3,Syz4,Syz5,Syz6,Syz7,Syz8; float Sxz1,Sxz2,Sxz3,Sxz4,Sxz5,Sxz6,Sxz7,Sxz8; u_x1=S_c[xm][0][0][0];v_y1=S_c[xm][0][0][1];w_z1=S_c[xm][0][0][2];Sxy1=S_c[xm][0][0][3];Syz1=S_c[xm][0][0][4];Sxz1=S_c[xm][0][0][5]; u_x2=S_c[xp][0][0][0];v_y2=S_c[xp][0][0][1];w_z2=S_c[xp][0][0][2];Sxy2=S_c[xp][0][0][3];Syz2=S_c[xp][0][0][4];Sxz2=S_c[xp][0][0][5]; u_x3=S_c[xm][1][0][0];v_y3=S_c[xm][1][0][1];w_z3=S_c[xm][1][0][2];Sxy3=S_c[xm][1][0][3];Syz3=S_c[xm][1][0][4];Sxz3=S_c[xm][1][0][5]; u_x4=S_c[xp][1][0][0];v_y4=S_c[xp][1][0][1];w_z4=S_c[xp][1][0][2];Sxy4=S_c[xp][1][0][3];Syz4=S_c[xp][1][0][4];Sxz4=S_c[xp][1][0][5]; u_x5=S_c[xm][0][1][0];v_y5=S_c[xm][0][1][1];w_z5=S_c[xm][0][1][2];Sxy5=S_c[xm][0][1][3];Syz5=S_c[xm][0][1][4];Sxz5=S_c[xm][0][1][5]; u_x6=S_c[xp][0][1][0];v_y6=S_c[xp][0][1][1];w_z6=S_c[xp][0][1][2];Sxy6=S_c[xp][0][1][3];Syz6=S_c[xp][0][1][4];Sxz6=S_c[xp][0][1][5]; u_x7=S_c[xm][1][1][0];v_y7=S_c[xm][1][1][1];w_z7=S_c[xm][1][1][2];Sxy7=S_c[xm][1][1][3];Syz7=S_c[xm][1][1][4];Sxz7=S_c[xm][1][1][5]; u_x8=S_c[xp][1][1][0];v_y8=S_c[xp][1][1][1];w_z8=S_c[xp][1][1][2];Sxy8=S_c[xp][1][1][3];Syz8=S_c[xp][1][1][4];Sxz8=S_c[xp][1][1][5]; float m03,m05,m07, m13,m15,m17, m23,m25,m27, m33,m35,m37, m43,m45,m47, m53,m55,m57, m63,m65,m67, m73,m75,m77; m03=mom_c[xm][0][0][1];m05=mom_c[xm][0][0][2];m07=mom_c[xm][0][0][3]; m13=mom_c[xp][0][0][1];m15=mom_c[xp][0][0][2];m17=mom_c[xp][0][0][3]; m23=mom_c[xm][1][0][1];m25=mom_c[xm][1][0][2];m27=mom_c[xm][1][0][3]; m33=mom_c[xp][1][0][1];m35=mom_c[xp][1][0][2];m37=mom_c[xp][1][0][3]; m43=mom_c[xm][0][1][1];m45=mom_c[xm][0][1][2];m47=mom_c[xm][0][1][3]; m53=mom_c[xp][0][1][1];m55=mom_c[xp][0][1][2];m57=mom_c[xp][0][1][3]; 
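// m?3, m?5, m?7 are the momentum moments (mom[1..3]) of the eight bracketing coarse
// nodes; together with the strain-rate entries gathered above they feed the correction
// coefficients cx..ez computed below, which add what appears to be a quadratic
// (bubble-function style) term to the tri-linearly interpolated momenta when ORDER == 2.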
m63=mom_c[xm][1][1][1];m65=mom_c[xm][1][1][2];m67=mom_c[xm][1][1][3]; m73=mom_c[xp][1][1][1];m75=mom_c[xp][1][1][2];m77=mom_c[xp][1][1][3]; float cx = -((u_x8-u_x7+u_x6-u_x5+u_x4-u_x3+u_x2-u_x1))*0.03125f; float cy = -((Sxy8+Sxy7-Sxy6-Sxy5+Sxy4+Sxy3-Sxy2-Sxy1)-m75+m65+m55-m45-m35+m25+m15-m05)*0.0625f; float cz = -((Sxz8+Sxz7+Sxz6+Sxz5-Sxz4-Sxz3-Sxz2-Sxz1)-m77+m67-m57+m47+m37-m27+m17-m07)*0.0625f; float dx = -((Sxy8-Sxy7+Sxy6-Sxy5+Sxy4-Sxy3+Sxy2-Sxy1)-m73+m63+m53-m43-m33+m23+m13-m03)*0.0625f; float dy = -((v_y8+v_y7-v_y6-v_y5+v_y4+v_y3-v_y2-v_y1))*0.03125f; float dz = -((Syz8+Syz7+Syz6+Syz5-Syz4-Syz3-Syz2-Syz1)-m77-m67+m57+m47+m37+m27-m17-m07)*0.0625f; float ex = -((Sxz8-Sxz7+Sxz6-Sxz5+Sxz4-Sxz3+Sxz2-Sxz1)-m73+m63-m53+m43+m33-m23+m13-m03)*0.0625f; float ey = -((Syz8+Syz7-Syz6-Syz5+Syz4+Syz3-Syz2-Syz1)-m75-m65+m55+m45+m35+m25-m15-m05)*0.0625f; float ez = -((w_z8+w_z7+w_z6+w_z5-w_z4-w_z3-w_z2-w_z1))*0.03125f; float xpr = 4.f*xf*xf-4.f*xf+1.f; float ypr = 4.f*yf*yf-4.f*yf+1.f; float zpr = 4.f*zf*zf-4.f*zf+1.f; mom[1] += cx*(1.f-xpr)+cy*(1.f-ypr)+cz*(1.f-zpr); mom[2] += dx*(1.f-xpr)+dy*(1.f-ypr)+dz*(1.f-zpr); mom[3] += ex*(1.f-xpr)+ey*(1.f-ypr)+ez*(1.f-zpr); } float f[19]; //InvertPhysicalMoments(f,mom,SF); InvertPhysicalMoments_LES_cf(f,mom,SF,omega_c); if(im != 1 && im != 10){ if(z==0){ for(int i = 0; i<19; i++){ g_f[buff_memLR(i,x,y,pitch_f)]=f[i]; } } else if(z==gridDim.z*blockDim.z-1){ for(int i = 0; i<19; i++){ h_f[buff_memLR(i,x,y,pitch_f)]=f[i]; } } else{ for(int i = 0; i<19; i++){ f_f[f_memLR(i,x,y,z-1,pitch_f,zInner_f)]=f[i]; } } } } } __global__ void InterpFC(float* f_c, float* g_c, float* h_c, float* f_f, float* h_f, float* temp_f, size_t pitch_c, size_t pitch_f, float SF, float omega_f, int GPU, int zInner, int zInner_f) { int x = threadIdx.x+blockIdx.x*blockDim.x; int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; //if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-1 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-1) && //(x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-2) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-2))) //(true)) //if( (x > LRX0+5 && x < LRX0+XLRDIM*LRFACTOR-6 && y > LRY0+5 && y < LRY0+YLRDIM*LRFACTOR-6) && if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-2 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-2) && //(x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-2) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-2))) (true)) { float f[19]; float mom[8][9];//physical moments of 8 neighboring nodes float S_f[8][6];//strain rate tensor of 8 neighboring nodes int xm = LRLEVEL*(x-LRX0); int ym = LRLEVEL*(y-LRY0); int zm = LRLEVEL*(z-(-(1.f-0.5f*LRFACTOR)))-1;//LRZ0=-(1.f-0.5f*LRFACTOR), and -1 to account for g_LR int xp = xm+1; int yp = ym+1; int zp = zm+1; //top nodes. interp between h and h_temp. 
output to h if(z == zInner+1) { for(int i = 0; i<19; i++) f[i] = temp_f[buff_memLR(i,xm,ym,pitch_f)]; PhysicalMoments(mom[0],f); StrainRate(S_f[0],mom[0],1.f); for(int i = 0; i<19; i++) f[i] = temp_f[buff_memLR(i,xp,ym,pitch_f)]; PhysicalMoments(mom[1],f); StrainRate(S_f[1],mom[1],1.f); for(int i = 0; i<19; i++) f[i] = temp_f[buff_memLR(i,xm,yp,pitch_f)]; PhysicalMoments(mom[2],f); StrainRate(S_f[2],mom[2],1.f); for(int i = 0; i<19; i++) f[i] = temp_f[buff_memLR(i,xp,yp,pitch_f)]; PhysicalMoments(mom[3],f); StrainRate(S_f[3],mom[3],1.f); for(int i = 0; i<19; i++) f[i] = h_f[buff_memLR(i,xm,ym,pitch_f)]; PhysicalMoments(mom[4],f); StrainRate(S_f[4],mom[4],1.f); for(int i = 0; i<19; i++) f[i] = h_f[buff_memLR(i,xp,ym,pitch_f)]; PhysicalMoments(mom[5],f); StrainRate(S_f[5],mom[5],1.f); for(int i = 0; i<19; i++) f[i] = h_f[buff_memLR(i,xm,yp,pitch_f)]; PhysicalMoments(mom[6],f); StrainRate(S_f[6],mom[6],1.f); for(int i = 0; i<19; i++) f[i] = h_f[buff_memLR(i,xp,yp,pitch_f)]; PhysicalMoments(mom[7],f); StrainRate(S_f[7],mom[7],1.f); } //inner nodes. output to g or f else{ for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xm,ym,zm,pitch_f,zInner_f)]; PhysicalMoments(mom[0],f); StrainRate(S_f[0],mom[0],1.f); for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xp,ym,zm,pitch_f,zInner_f)]; PhysicalMoments(mom[1],f); StrainRate(S_f[1],mom[1],1.f); for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xm,yp,zm,pitch_f,zInner_f)]; PhysicalMoments(mom[2],f); StrainRate(S_f[2],mom[2],1.f); for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xp,yp,zm,pitch_f,zInner_f)]; PhysicalMoments(mom[3],f); StrainRate(S_f[3],mom[3],1.f); for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xm,ym,zp,pitch_f,zInner_f)]; PhysicalMoments(mom[4],f); StrainRate(S_f[4],mom[4],1.f); for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xp,ym,zp,pitch_f,zInner_f)]; PhysicalMoments(mom[5],f); StrainRate(S_f[5],mom[5],1.f); for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xm,yp,zp,pitch_f,zInner_f)]; PhysicalMoments(mom[6],f); StrainRate(S_f[6],mom[6],1.f); for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xp,yp,zp,pitch_f,zInner_f)]; PhysicalMoments(mom[7],f); StrainRate(S_f[7],mom[7],1.f); } if(ORDER == 1){ for(int i = 0; i<9; i++) mom[0][i] = 0.125f*(mom[0][i]+mom[1][i]+mom[2][i]+mom[3][i]+mom[4][i]+mom[5][i]+mom[6][i]+mom[7][i]); } else if(ORDER == 2) { float u_x1,u_x2,u_x3,u_x4,u_x5,u_x6,u_x7,u_x8; float v_y1,v_y2,v_y3,v_y4,v_y5,v_y6,v_y7,v_y8; float w_z1,w_z2,w_z3,w_z4,w_z5,w_z6,w_z7,w_z8; float Sxy1,Sxy2,Sxy3,Sxy4,Sxy5,Sxy6,Sxy7,Sxy8; float Syz1,Syz2,Syz3,Syz4,Syz5,Syz6,Syz7,Syz8; float Sxz1,Sxz2,Sxz3,Sxz4,Sxz5,Sxz6,Sxz7,Sxz8; u_x1=S_f[0][0];v_y1=S_f[0][1];w_z1=S_f[0][2];Sxy1=S_f[0][3];Syz1=S_f[0][4];Sxz1=S_f[0][5]; u_x2=S_f[1][0];v_y2=S_f[1][1];w_z2=S_f[1][2];Sxy2=S_f[1][3];Syz2=S_f[1][4];Sxz2=S_f[1][5]; u_x3=S_f[2][0];v_y3=S_f[2][1];w_z3=S_f[2][2];Sxy3=S_f[2][3];Syz3=S_f[2][4];Sxz3=S_f[2][5]; u_x4=S_f[3][0];v_y4=S_f[3][1];w_z4=S_f[3][2];Sxy4=S_f[3][3];Syz4=S_f[3][4];Sxz4=S_f[3][5]; u_x5=S_f[4][0];v_y5=S_f[4][1];w_z5=S_f[4][2];Sxy5=S_f[4][3];Syz5=S_f[4][4];Sxz5=S_f[4][5]; u_x6=S_f[5][0];v_y6=S_f[5][1];w_z6=S_f[5][2];Sxy6=S_f[5][3];Syz6=S_f[5][4];Sxz6=S_f[5][5]; u_x7=S_f[6][0];v_y7=S_f[6][1];w_z7=S_f[6][2];Sxy7=S_f[6][3];Syz7=S_f[6][4];Sxz7=S_f[6][5]; u_x8=S_f[7][0];v_y8=S_f[7][1];w_z8=S_f[7][2];Sxy8=S_f[7][3];Syz8=S_f[7][4];Sxz8=S_f[7][5]; float m03,m05,m07, m13,m15,m17, m23,m25,m27, m33,m35,m37, m43,m45,m47, m53,m55,m57, m63,m65,m67, m73,m75,m77; m03=mom[0][1];m05=mom[0][2];m07=mom[0][3]; m13=mom[1][1];m15=mom[1][2];m17=mom[1][3]; 
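// Fine-to-coarse restriction mirrors InterpCF: the momentum moments of the eight fine
// nodes surrounding this coarse node are collected here, gradient-correction
// coefficients are formed from the fine-grid strain rates, the eight moment sets are
// averaged (the 0.125f sum below), and the result is turned back into coarse-grid
// distributions via InvertPhysicalMoments_LES_fc with scale factor SF.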
m23=mom[2][1];m25=mom[2][2];m27=mom[2][3]; m33=mom[3][1];m35=mom[3][2];m37=mom[3][3]; m43=mom[4][1];m45=mom[4][2];m47=mom[4][3]; m53=mom[5][1];m55=mom[5][2];m57=mom[5][3]; m63=mom[6][1];m65=mom[6][2];m67=mom[6][3]; m73=mom[7][1];m75=mom[7][2];m77=mom[7][3]; float cx = -((u_x8-u_x7+u_x6-u_x5+u_x4-u_x3+u_x2-u_x1))*0.03125f; float cy = -((Sxy8+Sxy7-Sxy6-Sxy5+Sxy4+Sxy3-Sxy2-Sxy1)-m75+m65+m55-m45-m35+m25+m15-m05)*0.0625f; float cz = -((Sxz8+Sxz7+Sxz6+Sxz5-Sxz4-Sxz3-Sxz2-Sxz1)-m77+m67-m57+m47+m37-m27+m17-m07)*0.0625f; float dx = -((Sxy8-Sxy7+Sxy6-Sxy5+Sxy4-Sxy3+Sxy2-Sxy1)-m73+m63+m53-m43-m33+m23+m13-m03)*0.0625f; float dy = -((v_y8+v_y7-v_y6-v_y5+v_y4+v_y3-v_y2-v_y1))*0.03125f; float dz = -((Syz8+Syz7+Syz6+Syz5-Syz4-Syz3-Syz2-Syz1)-m77-m67+m57+m47+m37+m27-m17-m07)*0.0625f; float ex = -((Sxz8-Sxz7+Sxz6-Sxz5+Sxz4-Sxz3+Sxz2-Sxz1)-m73+m63-m53+m43+m33-m23+m13-m03)*0.0625f; float ey = -((Syz8+Syz7-Syz6-Syz5+Syz4+Syz3-Syz2-Syz1)-m75-m65+m55+m45+m35+m25-m15-m05)*0.0625f; float ez = -((w_z8+w_z7+w_z6+w_z5-w_z4-w_z3-w_z2-w_z1))*0.03125f; for(int i = 0; i<9; i++) mom[0][i] = 0.125f*(mom[0][i]+mom[1][i]+mom[2][i]+mom[3][i]+mom[4][i]+mom[5][i]+mom[6][i]+mom[7][i]); float xpr = 0.f;//4.f*xf*xf-4.f*xf+1.f; float ypr = 0.f;//4.f*yf*yf-4.f*yf+1.f; float zpr = 0.f;//4.f*zf*zf-4.f*zf+1.f; mom[0][1] += cx*(1.f-xpr)+cy*(1.f-ypr)+cz*(1.f-zpr); mom[0][2] += dx*(1.f-xpr)+dy*(1.f-ypr)+dz*(1.f-zpr); mom[0][3] += ex*(1.f-xpr)+ey*(1.f-ypr)+ez*(1.f-zpr); } //InvertPhysicalMoments(f,mom[0],SF); InvertPhysicalMoments_LES_fc(f,mom[0],SF,omega_f); //for(int i = 0; i<19; i++) f[i] = 0.1f; //int GPU = 0; int im = ImageFcn(x,y,GPU*(zInner+2)+z,0); if(im != 1 && im != 10){ if(z == 0){ for(int i = 0; i<19; i++) g_c[buff_mem(i,x,y,pitch_c)]=f[i]; } else if(z == zInner+1){ for(int i = 0; i<19; i++) h_c[buff_mem(i,x,y,pitch_c)]=f[i]; } else{ for(int i = 0; i<19; i++) f_c[f_mem(i,x,y,z-1,pitch_c,zInner)]=f[i]; } } }//end extraction region } __global__ void AverageV(float* fA, float* gA, float* hA, size_t pitch, int GPU, int zInner, float* Av_V, int t) { int x = threadIdx.x+blockIdx.x*blockDim.x; int z = threadIdx.z+blockIdx.z*blockDim.z; float f[19]; float v_av = 0; __shared__ float sumV[BLOCKSIZEX]; syncthreads(); if(z == 0){ for(int i = 0; i<19; i++) f[i] = gA[buff_mem(i,x,DYNY1,pitch)]; } else if(z == zInner+1){ for(int i = 0; i<19; i++) f[i] = hA[buff_mem(i,x,DYNY1,pitch)]; } else{ for(int i = 0; i<19; i++) f[i] = fA[f_mem(i,x,DYNY1,z-1,pitch,zInner)]; } sumV[threadIdx.x] = f[2]-f[4]+f[5]+f[6]-f[7]-f[8]+f[11]-f[13]+f[16]-f[18]; syncthreads(); int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumV[threadIdx.x] += sumV[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&Av_V[t],sumV[0]); } } void WriteResults(ostream &output, ostream &outputslice, float *fin, float *gin, float *hin, float **velAv, float **velFluc, float omega, int GPU_N, int GPU) { float f[19]; output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"velAv[2]\",\"ufluc\",\"vfluc\",\"wfluc\",\"Smag\"\n"; output<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<ZDIM/GPU_N<<"\n"; if(GPU == 0){ outputslice<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"velAv[2]\",\"ufluc\",\"vfluc\",\"wfluc\",\"Smag\"\n"; outputslice<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<1<<"\n"; } for(int j = 0; j<YDIM; j++){ for(int i = 0; i<XDIM; i++){ float rho = 0; for(int l = 0; l<19; l++){ 
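// gin holds this GPU's bottom halo slice (written out at z = ZDIM/GPU_N*GPU below);
// its 19 distributions are read and summed into rho, and u, v, w are then the usual
// D3Q19 first-order moments, e.g. u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17.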
f[l] = gin[(i+j*XDIM)+l *XDIM*YDIM]; rho += f[l]; } float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17]; float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; output<<i<<", "<<j<<", "<<(ZDIM/GPU_N*GPU)<<", "<<u<<","<<v<<","<<w<<","<<rho<<"," <<velAv[0][i+j*XDIM]<<","<<velAv[1][i+j*XDIM]<<","<<velAv[2][i+j*XDIM]<<", "<<velFluc[0][i+j*XDIM]<<","<<velFluc[1][i+j*XDIM]<<","<<velFluc[2][i+j*XDIM]<<","<<0<<endl; }} for(int k = 1; k<ZDIM/GPU_N-1; k++){ for(int j = 0; j<YDIM; j++){ for(int i = 0; i<XDIM; i++){ float rho = 0; for(int l = 0; l<19; l++){ f[l] = fin[(i+j*XDIM)+(k-1)*XDIM*YDIM+l*XDIM*YDIM*(ZDIM/GPU_N-2)]; rho += f[l]; } float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17]; float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; float m1 =-30.f*f[0]+-11.f*f[1]+-11.f*f[2]+-11.f*f[3]+-11.f*f[4]+8.f*f[5]+8.f*f[6]+8.f*f[7]+8.f*f[8]+-11.f*f[9]+8.f*f[10]+8.f*f[11]+8.f*f[12]+8.f*f[13]+-11.f*f[14]+8.f*f[15]+8.f*f[16]+8.f*f[17]+8.f*f[18]; //float m6 = -4.f*f[2]+4.f*f[4]+f[5]+f[6]+-f[7]+-f[8]+f[11]+-f[13]+f[16]+-f[18]; float m10 =-4.f*f[1]+2.f*f[2]+-4.f*f[3]+2.f*f[4]+f[5]+f[6]+f[7]+f[8]+2.f*f[9]+f[10]+-2.f*f[11]+f[12]+-2.f*f[13]+2.f*f[14]+f[15]+-2.f*f[16]+f[17]+-2.f*f[18]; float m16 = f[5]+-f[6]+-f[7]+f[8]-f[10]+f[12]+-f[15]+f[17]; float m[19] = {0}; Moments_host(f,m); float omega = 1.0f/(3.0f*(UMAX*OBSTR1*2.f/RE)+0.5f); //float omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f)); m[9] -= 2.f*u*u-(v*v+w*w); m[11]-= v*v-w*w; m[13]-= u*v; m[14]-= v*w; m[15]-= u*w; float PI11 = -0.5f *(m[ 9]); float PI22 = -(-38.f*m[ 9]-3.0f*m[11])/76.f; float PI33 = -(-38.f*m[ 9]+3.0f*m[11])/76.f; float PI12 = -1.5f*m[13]; float PI23 = -1.5f*m[14]; float PI13 = -1.5f*m[15]; //we know Smag on coarse mesh float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13)); //InvertMoments_host(f,m); //u = m[3]; //v = m[5]; //w = m[7]; //m6 = m[6 ]; //m10= m[10]; //m16= m[16]; int z = (ZDIM/GPU_N*GPU+k); output<<i<<", "<<j<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<"," <<velAv[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velAv[1][i+j*XDIM+k*XDIM*YDIM]<<", " //<<velFluc[0][i+j*XDIM+k*XDIM*YDIM]<<","<<Smag<<endl; <<velFluc[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velFluc[1][i+j*XDIM+k*XDIM*YDIM]<<","<<Smag<<endl; if(k == 1 && GPU == 0){ outputslice<<i<<", "<<j<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<"," <<velAv[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velAv[1][i+j*XDIM+k*XDIM*YDIM]<<", "<<velAv[2][i+j*XDIM+k*XDIM*YDIM]<<"," <<velFluc[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velFluc[1][i+j*XDIM+k*XDIM*YDIM]<<","<<velFluc[2][i+j*XDIM+k*XDIM*YDIM]<<","<<Smag<<endl; } }}} for(int j = 0; j<YDIM; j++){ for(int i = 0; i<XDIM; i++){ float rho = 0; for(int l = 0; l<19; l++){ f[l] = hin[(i+j*XDIM)+l *XDIM*YDIM]; rho += f[l]; } float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17]; float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; output<<i<<", "<<j<<", "<<(ZDIM/GPU_N*(GPU+1)-1)<<", "<<u<<","<<v<<","<<w<<","<<rho<<"," <<velAv[0][i+j*XDIM+(ZDIM-1)*XDIM*YDIM]<<","<<velAv[1][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<","<<velAv[2][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<", " <<velFluc[0][i+j*XDIM+(ZDIM-1)*XDIM*YDIM]<<","<<velFluc[0][i+j*XDIM+(ZDIM-1)*XDIM*YDIM]<<","<<velFluc[2][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<","<<0<<endl; }} } void 
WriteResultsLR(ostream &output, ostream &outputslice, float *fin, float *gin, float *hin, float **velAv, float **velFluc, float omega, int GPU_N, int GPU) { float f[19]; output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"ufluc\",\"vfluc\",\"Smag\"\n"; output<<"ZONE F=POINT, I="<<XLRDIM<<", J="<<YLRDIM<<", K="<<ZLRDIM/GPU_N<<"\n"; if(GPU == 0){ outputslice<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"ufluc\",\"vfluc\",\"Smag\"\n"; outputslice<<"ZONE F=POINT, I="<<XLRDIM<<", J="<<YLRDIM<<", K="<<1<<"\n"; } for(int j = 0; j<YLRDIM; j++){ for(int i = 0; i<XLRDIM; i++){ float rho = 0; for(int l = 0; l<19; l++){ f[l] = gin[(i+j*XLRDIM)+l *XLRDIM*YLRDIM]; rho += f[l]; } float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17]; float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; float x = LRX0+LRFACTOR*i; float y = LRY0+LRFACTOR*j; float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*GPU); output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<"," <<velAv[0][i+j*XLRDIM]<<","<<velAv[1][i+j*XLRDIM]<<", "<<velFluc[0][i+j*XLRDIM]<<","<<velFluc[1][i+j*XLRDIM]<<","<<0<<endl; }} for(int k = 1; k<ZLRDIM/GPU_N-1; k++){ for(int j = 0; j<YLRDIM; j++){ for(int i = 0; i<XLRDIM; i++){ float rho = 0; for(int l = 0; l<19; l++){ f[l] = fin[(i+j*XLRDIM)+(k-1)*XLRDIM*YLRDIM+l*XLRDIM*YLRDIM*(ZLRDIM/GPU_N-2)]; rho += f[l]; } float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17]; float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; float x = LRX0+LRFACTOR*i; float y = LRY0+LRFACTOR*j; float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*GPU+k); float m[19] = {0}; Moments_host(f,m); float omega = 1.0f/(3.0f*(UMAX*OBSTR1*2.f/RE)+0.5f); //float omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f)); m[9] -= 2.f*u*u-(v*v+w*w); m[11]-= v*v-w*w; m[13]-= u*v; m[14]-= v*w; m[15]-= u*w; float PI11 = -0.5f *(m[ 9]); float PI22 = -(-38.f*m[ 9]-3.0f*m[11])/76.f; float PI33 = -(-38.f*m[ 9]+3.0f*m[11])/76.f; float PI12 = -1.5f*m[13]; float PI23 = -1.5f*m[14]; float PI13 = -1.5f*m[15]; //we know Smag on coarse mesh float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13))/LRFACTOR; output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<"," <<velAv [0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velAv [1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<", " //<<velFluc[0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<Smag<<endl; <<velFluc[0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velFluc[1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<Smag<<endl; if(k == 3 && GPU == 0){ outputslice<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<"," <<velAv [0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velAv [1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<", " <<velFluc[0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velFluc[1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<Smag<<endl; } }}} for(int j = 0; j<YLRDIM; j++){ for(int i = 0; i<XLRDIM; i++){ float rho = 0; for(int l = 0; l<19; l++){ f[l] = hin[(i+j*XLRDIM)+l *XLRDIM*YLRDIM]; rho += f[l]; } float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17]; float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; float x = LRX0+LRFACTOR*i; float y = LRY0+LRFACTOR*j; float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*(GPU+1)-1); output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<"," 
<<velAv[0][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<velAv[1][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<", " <<velFluc[0][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<velFluc[1][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<0<<endl; }} } void WriteForces(float **F, ofstream &output, int ForceTime, int level) { float ref = UMAX*UMAX*ZDIM*OBSTR1; if(level > 0) ref *= LRLEVEL*LRLEVEL; for(int i = 0; i<ForceTime; i++){ output<<i+STARTF<<", "<<F[0][i]/ref<<", "<<F[1][i]/ref<<", "<<F[2][i]/ref<<endl; } } void WriteAvV(float *v, ofstream &output) { for(int i = 0; i<TMAX; i++){ output<<i<<", "<<v[i]/(XDIM-2)/ZDIM<<endl; } } void WriteInputs(ostream &output, float omega, float omegaLR, int GPU_per_node) { output<<"Base domain size \t"<<XDIM<<"x"<<YDIM<<"x"<<ZDIM<<endl; output<<"Base blocksize: \t"<<BLOCKSIZEX<<"x"<<BLOCKSIZEY<<"x"<<BLOCKSIZEZ<<endl; output<<"Obst1 location: \t("<<OBSTX1<<","<<OBSTY1<<","<<OBSTZ1<<")"<<endl; output<<"Obst1 radius: \t"<<OBSTR1<<endl; output<<"Obst2 location: \t("<<OBSTX2<<","<<OBSTY2<<","<<OBSTZ2<<")"<<endl; output<<"Obst2 radius: \t"<<OBSTR2<<endl; output<<"RE: \t"<<RE<<endl; output<<"UMAX: \t"<<UMAX<<endl; output<<"omega \t: "<<omega<<endl; output<<"DPDY \t: "<<DPDY<<endl; output<<"TMAX: \t"<<TMAX<<endl; output<<"STARTF: \t"<<STARTF<<endl; output<<"START_VELAV: \t"<<START_VELAV<<endl; output<<"START_VELFLUC: \t"<<START_VELFLUC<<endl; output<<"REFINEMENT: \t"<<REFINEMENT<<endl; output<<"MODEL: \t"<<MODEL<<endl; output<<"Smagorinsky LES: \t"<<SmagLES<<endl; output<<"CS: \t"<<CS<<endl; output<<"LR domain size \t"<<XLRDIM<<"x"<<YLRDIM<<"x"<<ZLRDIM<<endl; output<<"LR factor \t"<<LRFACTOR<<endl; output<<"LR location \t"<<LRX0<<"x"<<LRY0<<"x"<<LRZ0<<endl; output<<"LR blocksize: \t"<<BLOCKSIZELRX<<"x"<<BLOCKSIZELRY<<"x"<<BLOCKSIZELRZ<<endl; output<<"omega in LR \t: "<<omegaLR<<endl; output<<"GPUs per node \t: "<<GPU_per_node<<endl; } int main(int argc, char *argv[]) { int GPU_N; hipGetDeviceCount(&GPU_N); GPU_N=NUMGPU; cout<<"number of GPUs: "<<GPU_N<<endl; ofstream output; ofstream outputForce; ofstream outputInputs; ofstream outputAvV; string FileName = CASENAME; output.open ((FileName+".dat").c_str()); outputForce.open ((FileName+".force").c_str()); outputInputs.open ((FileName+".inputs").c_str()); outputAvV.open ((FileName+".vel").c_str()); ofstream outputpart[REFINEMENT*GPU_N+GPU_N], outputslice; for(int i = 0; i< REFINEMENT*GPU_N+GPU_N; i++){ //string filenum = to_string(i); char str[10]; snprintf(str,10,"%i",i); outputpart[i].open ((FileName+"_part"+str+".dat").c_str()); } outputslice.open ((FileName+"_slice.dat").c_str()); //size_t memsize, memsize2; size_t pitch = 2; while(pitch<XDIM) pitch=pitch*2; pitch *= sizeof(float);//pitch*sizeof(float); size_t pitch_e = pitch/sizeof(float); cout<<"Pitch (in elements): "<<pitch/sizeof(float)<<endl; float CharLength = OBSTR1*2.f; float omega = 1.0f/(3.0f*(UMAX*CharLength/RE)+0.5f); float omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f)); if(LRFACTOR == 0.25f){ omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f)); } if(LRFACTOR == 0.125f){ omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f)); omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f)); } float SF_cf = omega*(1.0f-omegaLR)/((1.0f-omega)*omegaLR/LRFACTOR); float SF_fc = 1.f/SF_cf; cout<<SF_cf<<endl; WriteInputs(outputInputs,omega,omegaLR,GPU_N); WriteInputs(cout,omega,omegaLR,GPU_N); if(abs(LRFACTOR-1.f/LRLEVEL)>0.001f && REFINEMENT == 1){ cout<<"LRLEVEL and LRFACTOR don't match! 
Exiting..."<<endl; return 0; } int zInner = ZDIM/GPU_N-2; //excluding halo int ForceTime = max(0,TMAX-STARTF); dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ); //2 halo layers per GPU (for 2 GPUs) dim3 grid (((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),(zInner)/BLOCKSIZEZ); dim3 g_grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),1); dim3 AvV_grid (((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),1,(ZDIM/GPU_N)/BLOCKSIZEZ); hipStream_t stream_halo[GPU_N]; hipStream_t stream_inner[GPU_N]; //data pointers as 3D array (GPUxCoord) float *f_h[GPU_N], *g_h[GPU_N], *h_h[GPU_N]; float *f_d[GPU_N][2], *g_d[GPU_N][2], *h_d[GPU_N][2]; float *g_temp[GPU_N], *h_temp[GPU_N]; float *F_h[GPU_N][3]; float *F_d[GPU_N][3]; float *F_total[3]; float *velAv_h[GPU_N][3],*velFluc_h[GPU_N][3]; float *velAv_d[GPU_N][3],*velFluc_d[GPU_N][3]; float *Av_V_h[GPU_N]; float *Av_V_d[GPU_N]; float dpdy = DPDY; for(int i = 0; i<3; i++) F_total[i] = (float *)malloc(ForceTime*sizeof(float)); for(int i=0;i<3;i++) for(int j=0;j<(ForceTime);j++) F_total[i][j] = 0; //Malloc and Initialize for each GPU for(int n = 0; n<GPU_N; n++){ f_h [n] = (float *)malloc(XDIM*YDIM*zInner*19*sizeof(float)); g_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float)); h_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float)); for(int i = 0; i<3; i++){ F_h [n][i] = (float *)malloc(ForceTime*sizeof(float)); velAv_h [n][i] = (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float)); velFluc_h[n][i] = (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float)); } Av_V_h[n] = (float *)malloc(TMAX*sizeof(float)); hipSetDevice(n); hipStreamCreate(&stream_halo[n]); hipStreamCreate(&stream_inner[n]); for(int m = 0; m<GPU_N; m++) if(m != n) hipDeviceEnablePeerAccess(m,0); for(int i = 0; i<2; i++){ hipMalloc((void **) &f_d[n][i], pitch_e*YDIM*zInner*19*sizeof(float)); hipMalloc((void **) &g_d[n][i], pitch_e*YDIM* 19*sizeof(float)); hipMalloc((void **) &h_d[n][i], pitch_e*YDIM* 19*sizeof(float)); } hipMalloc((void **) & g_temp[n], pitch_e*YDIM* 19*sizeof(float)); hipMalloc((void **) & h_temp[n], pitch_e*YDIM* 19*sizeof(float)); for(int i = 0; i<3; i++){ hipMalloc((void **) & F_d [n][i], (ForceTime)*sizeof(float)); hipMalloc((void **) & velAv_d [n][i], pitch_e*YDIM*ZDIM/GPU_N*sizeof(float)); hipMalloc((void **) & velFluc_d[n][i], pitch_e*YDIM*ZDIM/GPU_N*sizeof(float)); } hipMalloc((void **) & Av_V_d[n],TMAX*sizeof(float)); //initialize host f_inner for (int i = 0; i < XDIM*YDIM*zInner*19; i++) f_h[n][i] = 0; //initialize host g,h for (int i = 0; i < XDIM*YDIM*19; i++){ g_h[n][i] = 0; h_h[n][i] = 0; } for(int i=0;i<3;i++){ for(int j=0;j<(ForceTime);j++) F_h[n][i][j] = 0; for (int j = 0; j < XDIM*YDIM*ZDIM/GPU_N; j++){ velAv_h [n][i][j] = 0; velFluc_h[n][i][j] = 0; } } for(int j=0;j<(ForceTime);j++) Av_V_h[n][j] = 0; for(int i = 0; i<2; i++){ hipMemcpy2D(f_d[n][i],pitch,f_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*zInner*19,hipMemcpyHostToDevice); hipMemcpy2D(g_d[n][i],pitch,g_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM *19,hipMemcpyHostToDevice); hipMemcpy2D(h_d[n][i],pitch,h_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM *19,hipMemcpyHostToDevice); } for(int i = 0; i<3; i++){ hipMemcpy2D(velAv_d [n][i],pitch,velAv_h [n][i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyHostToDevice); hipMemcpy2D(velFluc_d[n][i],pitch,velFluc_h[n][i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyHostToDevice); hipMemcpy(F_d[n][i],F_h[n][i],sizeof(float)*(ForceTime),hipMemcpyHostToDevice); } 
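// Av_V_d[n] has one entry per time step: the AverageV kernel atomically accumulates the
// plane-summed v-velocity at y = DYNY1 into Av_V_d[n][t], and the time loop reads it
// back every 100 steps (for t > 10000) to nudge dpdy toward UMAX via proportional
// control with gain KP (KP is 0 in this configuration, which disables the adjustment).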
hipMemcpy(Av_V_d[n],Av_V_h[n],sizeof(float)*(TMAX),hipMemcpyHostToDevice); //initialization kernels for(int i = 0; i<2; i++){ hipLaunchKernelGGL(( initialize), dim3(grid),dim3(threads), 0, 0, f_d[n][i],pitch_e,zInner,GPU_N); hipLaunchKernelGGL(( initialize), dim3(g_grid),dim3(threads), 0, 0, g_d[n][i],pitch_e, 1,GPU_N); hipLaunchKernelGGL(( initialize), dim3(g_grid),dim3(threads), 0, 0, h_d[n][i],pitch_e, 1,GPU_N); } hipLaunchKernelGGL(( initialize), dim3(g_grid),dim3(threads), 0, 0, g_temp[n],pitch_e, 1,GPU_N); hipLaunchKernelGGL(( initialize), dim3(g_grid),dim3(threads), 0, 0, h_temp[n],pitch_e, 1,GPU_N); }//end Malloc and Initialize //data pointers as 3D array (GPUxCoord) float *f_LR_h[GPU_N], *g_LR_h[GPU_N], *h_LR_h[GPU_N]; float *f_LR_d[GPU_N][2], *g_LR_d[GPU_N][2], *h_LR_d[GPU_N][2]; float *g_LR_temp[GPU_N], *h_LR_temp[GPU_N]; float *velAv_LR_h[GPU_N][3],*velFluc_LR_h[GPU_N][3]; float *velAv_LR_d[GPU_N][3],*velFluc_LR_d[GPU_N][3]; float *f_interp[GPU_N], *g_interp[GPU_N], *h_interp[GPU_N], *g_interp_temp[GPU_N], *h_interp_temp[GPU_N]; float *interp_h[GPU_N]; size_t pitchLR = 2; while(pitchLR<XLRDIM) pitchLR=pitchLR*2; pitchLR = pitchLR*sizeof(float); size_t pitchLR_e = pitchLR/sizeof(float); cout<<"LR Pitch (in elements): "<<pitchLR_e<<endl; size_t pitchInterp = 2; while(pitchInterp<XLRDIM*LRFACTOR+1) pitchInterp=pitchInterp*2; pitchInterp = pitchInterp*sizeof(float); size_t pitchInterp_e = pitchInterp/sizeof(float); cout<<"Interp Pitch (in elements): "<<pitchInterp_e<<endl; int zLRInner = ZLRDIM/GPU_N-2; dim3 LR_threads(BLOCKSIZELRX, BLOCKSIZELRY, BLOCKSIZELRZ); dim3 LR_grid(((XLRDIM+BLOCKSIZELRX-1)/BLOCKSIZELRX),((YLRDIM+BLOCKSIZELRY-1)/BLOCKSIZELRY),(zLRInner)/BLOCKSIZELRZ); dim3 g_LR_grid(((XLRDIM+BLOCKSIZELRX-1)/BLOCKSIZELRX),((YLRDIM+BLOCKSIZELRY-1)/BLOCKSIZELRY),1); dim3 Interp_threads(BLOCKSIZEINTERP, LRLEVEL, LRLEVEL); dim3 Interp_grid(((XLRDIM+BLOCKSIZEINTERP-1)/BLOCKSIZEINTERP),((YLRDIM+LRLEVEL-1)/LRLEVEL),ZLRDIM/LRLEVEL/GPU_N); cout<<((XLRDIM+BLOCKSIZEINTERP-1)/BLOCKSIZEINTERP)<<", "<<((YLRDIM+LRLEVEL-1)/LRLEVEL)<<", "<<ZLRDIM/LRLEVEL/GPU_N<<endl; dim3 Interp_grid_c(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),(ZDIM/GPU_N)/BLOCKSIZEZ); //setup LR if(REFINEMENT == 1){ for(int n = 0; n<GPU_N; n++){ f_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM*zLRInner*19*sizeof(float)); g_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM* 19*sizeof(float)); h_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM* 19*sizeof(float)); interp_h [n] = (float *)malloc((XLRDIM*LRFACTOR+1)*(YLRDIM*LRFACTOR+1)*zInner*9*sizeof(float)); for(int i = 0; i<3; i++){ velAv_LR_h [n][i] = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float)); velFluc_LR_h[n][i] = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float)); } hipSetDevice(n); for(int i = 0; i<2; i++){ hipMalloc((void **) &f_LR_d[n][i], pitchLR_e*YLRDIM*zLRInner*19*sizeof(float)); hipMalloc((void **) &g_LR_d[n][i], pitchLR_e*YLRDIM* 19*sizeof(float)); hipMalloc((void **) &h_LR_d[n][i], pitchLR_e*YLRDIM* 19*sizeof(float)); } hipMalloc((void **) & g_LR_temp[n], pitchLR_e*YLRDIM* 19*sizeof(float)); hipMalloc((void **) & h_LR_temp[n], pitchLR_e*YLRDIM* 19*sizeof(float)); hipMalloc((void **) & f_interp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*zInner*9*sizeof(float)); hipMalloc((void **) & g_interp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float)); hipMalloc((void **) & h_interp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float)); hipMalloc((void **) & g_interp_temp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float)); hipMalloc((void **) & 
h_interp_temp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float)); for(int i = 0; i<3; i++){ hipMalloc((void **) & velAv_LR_d [n][i], pitchLR_e*YLRDIM*ZLRDIM/GPU_N*sizeof(float)); hipMalloc((void **) & velFluc_LR_d[n][i], pitchLR_e*YLRDIM*ZLRDIM/GPU_N*sizeof(float)); } for (int i = 0; i < XLRDIM*YLRDIM*zLRInner*19; i++) f_LR_h[n][i] = 0; //initialize host g,h for (int i = 0; i < XLRDIM*YLRDIM*19; i++){ g_LR_h[n][i] = 0; h_LR_h[n][i] = 0; } for(int i=0;i<3;i++){ for (int j = 0; j < XLRDIM*YLRDIM*ZLRDIM/GPU_N; j++){ velAv_LR_h [n][i][j] = 0; velFluc_LR_h[n][i][j] = 0; } } for(int i = 0; i<2; i++){ hipMemcpy2D(f_LR_d[n][i],pitchLR,f_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*zLRInner*19,hipMemcpyHostToDevice); hipMemcpy2D(g_LR_d[n][i],pitchLR,g_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM *19,hipMemcpyHostToDevice); hipMemcpy2D(h_LR_d[n][i],pitchLR,h_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM *19,hipMemcpyHostToDevice); } for(int i = 0; i<3; i++){ hipMemcpy2D(velAv_LR_d [n][i],pitchLR,velAv_LR_h [n][i],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyHostToDevice); hipMemcpy2D(velFluc_LR_d[n][i],pitchLR,velFluc_LR_h[n][i],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyHostToDevice); } //initialization kernels for(int i = 0; i<2; i++){ hipLaunchKernelGGL(( initializeLR), dim3(LR_grid),dim3(LR_threads), 0, 0, f_LR_d[n][i],pitchLR_e,zLRInner,GPU_N); hipLaunchKernelGGL(( initializeLR), dim3(g_LR_grid),dim3(LR_threads), 0, 0, g_LR_d[n][i],pitchLR_e, 1,GPU_N); hipLaunchKernelGGL(( initializeLR), dim3(g_LR_grid),dim3(LR_threads), 0, 0, h_LR_d[n][i],pitchLR_e, 1,GPU_N); } hipLaunchKernelGGL(( initializeLR), dim3(g_LR_grid),dim3(LR_threads), 0, 0, g_LR_temp[n],pitchLR_e, 1,GPU_N); hipLaunchKernelGGL(( initializeLR), dim3(g_LR_grid),dim3(LR_threads), 0, 0, h_LR_temp[n],pitchLR_e, 1,GPU_N); }//end of GPU loop for malloc and initialize for LR }//end of LR malloc and initialize hipFuncSetCacheConfig(InterpCF,hipFuncCachePreferShared); int A = 0; int B = 1; int C = 0; int D = 1; for(int n = 0; n<GPU_N; n++){ hipSetDevice(n); size_t mem_avail, mem_total; hipMemGetInfo(&mem_avail,&mem_total); cout<<"Device memory used for dev"<<n<<" : "<<(mem_total-mem_avail)*pow(10,-9)<<" GB\n"; cout<<"Device memory available for dev"<<n<<" : "<<(mem_avail)*pow(10,-9)<<" GB\n"; } struct timeval tdr0,tdr1; double restime; hipDeviceSynchronize(); gettimeofday (&tdr0,NULL); //time loop for(int t = 0; t<TMAX; t++) { //copy temporary array for top and bottom on coarse mesh to neighbor GPU. 
Only transfering 5 distbs for(int n = 0; n<GPU_N; n++) hipMemcpyPeerAsync(&h_temp[n][pitch_e*YDIM*14],n,&g_d[ (n+1)%GPU_N][A][pitch_e*YDIM*14], (n+1)%GPU_N,pitch_e*YDIM*sizeof(float)*5,stream_halo[n]); for(int n = 0; n<GPU_N; n++) hipMemcpyPeerAsync(&g_temp[n][pitch_e*YDIM*9],n,&h_d[abs(n-1)%GPU_N][A][pitch_e*YDIM*9],abs(n-1)%GPU_N,pitch_e*YDIM*sizeof(float)*5,stream_halo[n]); //compute inner nodes on coarse mesh for(int n = 0; n<GPU_N; n++){ hipSetDevice(n); hipLaunchKernelGGL(( update_inn), dim3(grid),dim3(threads),0,stream_inner[n], f_d[n][B],f_d[n][A],g_d[n][A], h_d[n][A],omega,pitch_e,n,zInner,velAv_d[n][0],velAv_d[n][1],velAv_d[n][2],velFluc_d[n][0],velFluc_d[n][1],velFluc_d[n][2],F_d[n][0],F_d[n][1],F_d[n][2],t,(!REFINEMENT&&t>STARTF),f_interp[n],pitchInterp_e,dpdy); } //synchronize halo stream before computing top and bottom nodes for(int n = 0; n<GPU_N; n++) hipStreamSynchronize(stream_halo[n]); //compute top and bottom nodes for(int n = 0; n<GPU_N; n++) { hipSetDevice(n); hipLaunchKernelGGL(( update_top), dim3(g_grid), dim3(threads), 0, stream_halo [n], h_d[n][B],h_d[n][A],f_d[n][A],h_temp[n],omega,pitch_e,n,zInner,F_d[n][0],F_d[n][1],F_d[n][2],t,(!REFINEMENT&&t>STARTF),h_interp[n],pitchInterp_e,dpdy); hipLaunchKernelGGL(( update_bot), dim3(g_grid), dim3(threads), 0, stream_halo [n], g_d[n][B],g_d[n][A],f_d[n][A],g_temp[n],omega,pitch_e,n,zInner,F_d[n][0],F_d[n][1],F_d[n][2],t,(!REFINEMENT&&t>STARTF),g_interp[n],pitchInterp_e,dpdy); } if(t%100 == 0 && t>10000) { for(int n = 0; n<GPU_N; n++) hipDeviceSynchronize(); for(int n = 0; n<GPU_N; n++) { hipLaunchKernelGGL(( AverageV), dim3(AvV_grid), dim3(threads), 0, 0, f_d[n][B],g_d[n][B],h_d[n][B],pitch_e,n,zInner,Av_V_d[n],t); } for(int n = 0; n<GPU_N; n++) hipMemcpy(&Av_V_h[n][t],&Av_V_d[n][t],sizeof(float),hipMemcpyDeviceToHost); float Av_V = 0; for(int n = 0; n<GPU_N; n++) Av_V += Av_V_h[n][t]; Av_V /= (XDIM-2)*ZDIM; float diff; diff = (Av_V-UMAX)/UMAX; dpdy += diff*KP*abs(DPDY); //dpdy = max(DPDY*) // if(Av_V < UMAX*0.995f) // dpdy *= 1.01f; // else if(Av_V > UMAX*1.005f) // dpdy *= 0.99f; if(t%1000 == 0) outputAvV<<t<<", "<<Av_V<<", "<<dpdy<<endl; } //hipDeviceSynchronize(); swap(A,B); if(REFINEMENT == 1){ int flag_F = 0; for(int i = 0; i<LRLEVEL; i++){ if(t>STARTF && i == 0) flag_F = 1; else flag_F = 0; for(int n = 0; n<GPU_N; n++){ hipMemcpyPeerAsync(&h_LR_temp[n][pitchLR_e*YLRDIM*14],n,&g_LR_d[ (n+1)%GPU_N][C][pitchLR_e*YLRDIM*14], (n+1)%GPU_N,pitchLR_e*YLRDIM*sizeof(float)*5,stream_halo[n]); hipMemcpyPeerAsync(&g_LR_temp[n][pitchLR_e*YLRDIM*9 ],n,&h_LR_d[abs(n-1)%GPU_N][C][pitchLR_e*YLRDIM*9 ],abs(n-1)%GPU_N,pitchLR_e*YLRDIM*sizeof(float)*5,stream_halo[n]); } for(int n = 0; n<GPU_N; n++){ hipSetDevice(n); hipLaunchKernelGGL(( update_inn_LR), dim3(LR_grid),dim3(LR_threads),0,stream_inner[n], f_LR_d[n][D],f_LR_d[n][C],g_LR_d[n][C], h_LR_d[n][C],omegaLR,pitchLR_e,n,zLRInner,velAv_LR_d[n][0],velAv_LR_d[n][1],velFluc_LR_d[n][0],velFluc_LR_d[n][1],F_d[n][0],F_d[n][1],F_d[n][2],t,flag_F); } for(int n = 0; n<GPU_N; n++) hipStreamSynchronize(stream_halo[n]); for(int n = 0; n<GPU_N; n++){ hipSetDevice(n); hipLaunchKernelGGL(( update_top_LR), dim3(g_LR_grid),dim3(LR_threads),0,stream_halo[n], h_LR_d[n][D],h_LR_d[n][C],f_LR_d[n][C],h_LR_temp[n],omegaLR,pitchLR_e,n,zLRInner,F_d[n][0],F_d[n][1],F_d[n][2],t,flag_F); hipLaunchKernelGGL(( update_bot_LR), dim3(g_LR_grid),dim3(LR_threads),0,stream_halo[n], g_LR_d[n][D],g_LR_d[n][C],f_LR_d[n][C],g_LR_temp[n],omegaLR,pitchLR_e,n,zLRInner,F_d[n][0],F_d[n][1],F_d[n][2],t,flag_F); } if(i == 
LRLEVEL-1) { for(int n = 0; n<GPU_N; n++) //hipMemcpyPeerAsync(&h_interp_temp[n][0],n,&g_interp[ (n+1)%GPU_N][0], (n+1)%GPU_N,pitchInterp_e*(YLRDIM*LRFACTOR+1)*sizeof(float)*9,stream_halo[n]); for(int n = 0; n<GPU_N; n++) hipMemcpyPeerAsync(&g_interp_temp[n][0],n,&h_interp[abs(n-1)%GPU_N][0],abs(n-1)%GPU_N,pitchInterp_e*(YLRDIM*LRFACTOR+1)*sizeof(float)*9,stream_halo[n]); } for(int n = 0; n<GPU_N; n++){ hipSetDevice(n); hipDeviceSynchronize(); } flag_F = 0; swap(C,D); } //interp from coarse grid for(int n = 0; n<GPU_N; n++){ hipSetDevice(n); hipLaunchKernelGGL(( InterpCF), dim3(Interp_grid),dim3(Interp_threads),0,stream_inner[n], f_LR_d[n][C],g_LR_d[n][C],h_LR_d[n][C],pitchLR_e,f_interp[n],g_interp[n],h_interp[n],g_interp_temp[n],pitchInterp_e,SF_cf,omega,n,zInner,zLRInner); //hipDeviceSynchronize(); } //interp from fine grid for(int n = 0; n<GPU_N; n++){ hipSetDevice(n); hipMemcpyPeerAsync(&h_LR_temp[n][0],n,&g_LR_d[ (n+1)%GPU_N][C][0], (n+1)%GPU_N,pitchLR_e*YLRDIM*sizeof(float)*19,stream_halo[n]); } for(int n = 0; n<GPU_N; n++) hipStreamSynchronize(stream_halo[n]); for(int n = 0; n<GPU_N; n++){ hipSetDevice(n); hipLaunchKernelGGL(( InterpFC), dim3(Interp_grid_c),dim3(threads),0,stream_halo[n], f_d[n][A],g_d[n][A],h_d[n][A],f_LR_d[n][C],h_LR_d[n][C],h_LR_temp[n],pitch_e,pitchLR_e,SF_fc,omegaLR,n,zInner,zLRInner); } }//end refinement for(int n = 0; n<GPU_N; n++){ hipSetDevice(n); hipDeviceSynchronize(); } }//end time loop hipDeviceSynchronize(); gettimeofday (&tdr1,NULL); timeval_subtract (&restime, &tdr1, &tdr0); int Nodes; Nodes = XDIM*YDIM*ZDIM; if (REFINEMENT == 1) Nodes += XLRDIM*YLRDIM*ZLRDIM*LRLEVEL; cout<<"Time taken for main kernel: "<<restime<<" (" <<double(Nodes*double(TMAX/1000000.f))/restime<<"MLUPS)\n"; //D2H Memcpy and write results for(int n = 0; n<GPU_N; n++){ hipSetDevice(n); hipMemcpy2D(f_h[n],XDIM*sizeof(float),f_d[n][A],pitch,XDIM*sizeof(float),YDIM*zInner*19,hipMemcpyDeviceToHost); hipMemcpy2D(g_h[n],XDIM*sizeof(float),g_d[n][A],pitch,XDIM*sizeof(float),YDIM *19,hipMemcpyDeviceToHost); hipMemcpy2D(h_h[n],XDIM*sizeof(float),h_d[n][A],pitch,XDIM*sizeof(float),YDIM *19,hipMemcpyDeviceToHost); for(int i = 0; i<3; i++){ hipMemcpy2D( velAv_h[n][i],XDIM*sizeof(float),velAv_d[n][i],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyDeviceToHost); hipMemcpy2D(velFluc_h[n][i],XDIM*sizeof(float),velFluc_d[n][i],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyDeviceToHost); hipMemcpy(F_h[n][i],F_d[n][i],sizeof(float)*ForceTime,hipMemcpyDeviceToHost); } hipMemcpy(Av_V_h[n],Av_V_d[n],sizeof(float)*TMAX,hipMemcpyDeviceToHost); WriteResults(outputpart[n],outputslice,f_h[n],g_h[n],h_h[n],velAv_h[n],velFluc_h[n],omega,GPU_N,n); outputpart[n]<<endl; for(int i=0;i<3;i++) for(int j=0;j<ForceTime;j++) F_total[i][j] += F_h[n][i][j]; if(n > 0){ for(int j=0;j<TMAX;j++) Av_V_h[0][j] += Av_V_h[n][j]; } for(int i = 0; i<2; i++){ hipFree(f_d[n][i]); hipFree(g_d[n][i]); hipFree(h_d[n][i]); } hipFree(f_d[n]); hipFree(g_d[n]); hipFree(h_d[n]); hipFree(g_temp[n]); hipFree(h_temp[n]); for(int i=0;i<3;i++) hipFree(F_d[n][i]); hipFree(F_d[n]); }//end Memcpy and write results WriteForces(F_total,outputForce,ForceTime,REFINEMENT*LRLEVEL); //WriteAvV(Av_V_h[0],outputAvV); if(REFINEMENT == 1){ // output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"uAv\",\"vAv\",\"ufluc\",\"vfluc\"\n"; // output<<"ZONE F=POINT, I="<<XLRDIM<<", J="<<YLRDIM<<", K="<<ZLRDIM<<"\n"; for(int n = 0; n<GPU_N; n++){ hipSetDevice(n); 
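// D2H copies for the refined grid. hipMemcpy2D(dst, dpitch, src, spitch, widthInBytes,
// height, kind) also strips the row padding here: device rows are pitchLR bytes wide,
// while host rows are only XLRDIM*sizeof(float).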
hipMemcpy2D(f_LR_h[n],XLRDIM*sizeof(float),f_LR_d[n][C],pitchLR,XLRDIM*sizeof(float),YLRDIM*zLRInner*19,hipMemcpyDeviceToHost); hipMemcpy2D(g_LR_h[n],XLRDIM*sizeof(float),g_LR_d[n][C],pitchLR,XLRDIM*sizeof(float),YLRDIM *19,hipMemcpyDeviceToHost); hipMemcpy2D(h_LR_h[n],XLRDIM*sizeof(float),h_LR_d[n][C],pitchLR,XLRDIM*sizeof(float),YLRDIM *19,hipMemcpyDeviceToHost); //hipMemcpy2D(interp_h[n],(XLRDIM*LRFACTOR+1)*sizeof(float),f_interp[n],pitchInterp,(XLRDIM*LRFACTOR+1)*sizeof(float),(YLRDIM*LRFACTOR+1)*zInner*9,hipMemcpyDeviceToHost); for(int i = 0; i<3; i++){ hipMemcpy2D( velAv_LR_h[n][i],XLRDIM*sizeof(float),velAv_LR_d[n][i],pitchLR,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyDeviceToHost); hipMemcpy2D(velFluc_LR_h[n][i],XLRDIM*sizeof(float),velFluc_LR_d[n][i],pitchLR,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyDeviceToHost); } WriteResultsLR(outputpart[GPU_N+n],outputslice,f_LR_h[n],g_LR_h[n],h_LR_h[n],velAv_LR_h[n],velFluc_LR_h[n],omegaLR,GPU_N,n); outputpart[GPU_N+n]<<endl; for(int i = 0; i<2; i++){ hipFree(f_LR_d[n][i]); hipFree(g_LR_d[n][i]); hipFree(h_LR_d[n][i]); } hipFree(f_LR_d[n]); hipFree(g_LR_d[n]); hipFree(h_LR_d[n]); hipFree(g_LR_temp[n]); hipFree(h_LR_temp[n]); } } return 0; }
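/* Grid-refinement coupling, as set up in main(): omegaLR = 2/(1 + 2*(2/omega - 1)) per
   halving of the grid spacing, SF_cf = omega*(1-omegaLR)/((1-omega)*omegaLR/LRFACTOR),
   and SF_fc = 1/SF_cf. SF_cf is passed to InterpCF (coarse->fine) and SF_fc to InterpFC
   (fine->coarse), where InvertPhysicalMoments_LES_cf/_fc (defined earlier in this file)
   apply them when converting the interpolated moments back to distributions. */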
d1a9258c5f889e7a719afdcba111a25298ee59cf.cu
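/* Hedged sketch (added for clarity, not part of the original file): the D3Q19
   distribution functions in the code below are stored in pitched linear memory,
   with x varying fastest, then y (rows of length `pitch`), then the z-plane, and
   finally the velocity direction selecting one of 19 populations.  The index
   arithmetic inside f_mem()/f_memLR() further down reduces to the pattern shown
   here; the function and parameter names in this sketch are illustrative only. */
__device__ __forceinline__ int dist_index(int f_num, int x, int y, int z,
                                          int pitch, int ydim, int zInner)
{
    // x + row offset + z-slab offset + per-direction block of pitch*ydim*zInner floats
    return x + y * pitch + z * ydim * pitch + f_num * pitch * ydim * zInner;
}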
#include <cuda.h> #include <iostream> #include <ostream> #include <fstream> #include <sys/time.h> #include <time.h> using namespace std; #define CASENAME "mrt_test" #define NUMGPU 1 #define BLOCKSIZEX 64 #define BLOCKSIZEY 1 #define BLOCKSIZEZ 1 #define BLOCKSIZELRX 64 #define BLOCKSIZELRY 1 #define BLOCKSIZELRZ 1 #define BLOCKSIZEINTERP 8 #define XDIM 64 #define YDIM 124 #define ZDIM 20 //62 #define TMAX 10000 #define STARTF 3000000 #define DYNY1 123 #define DYNY2 1 #define KP 0.0f //p-control constant #define OBSTR1 31.f #define OBSTX1 31.5f #define OBSTY1 92.5f #define OBSTZ1 32.5f #define OBSTR2 10.f #define OBSTX2 25.5f #define OBSTY2 25.5f #define OBSTZ2 32.5f #define LRFACTOR 0.5f #define LRLEVEL 2 #define LRX0 128.25f //minimum x coord of LR #define XLRDIM 128 //number of nodes in x #define LRY0 64.25f #define YLRDIM 80 #define LRZ0 -0.75f #define ZLRDIM 8 #define ORDER 2 //order of accuracy of interpolation #define RE 5400.f//2000.f//100.f; #define UMAX 0.06f #define SmagLES 1 //1,0 #define MODEL "MRT" //BGK,MRT,STREAM #define REFINEMENT 0 //1,0 #define CS 0.02f #define DPDX 0.f #define DPDY -5.16e-7 #define VELAV 1 #define START_VELAV 200000 #define START_VELFLUC 1600000 inline __device__ int ImageFcnLR(float x, float y, float z) { int value = 0; if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1) { value = 10; } return value; } inline __device__ int ImageFcn(int x, int y, int z, int t) { int value = 0; if(abs(x-OBSTX2) < OBSTR2 && abs(y-OBSTY2) < OBSTR2 && t < 5000) value = 10; if(abs(x-OBSTX2-3) < OBSTR2 && abs(y-OBSTY2-3) < OBSTR2 && t < 5000 && z == 10) value = 10; //if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1) // value = 10; if(x == 0) value = 1;//50;//400; else if(x == XDIM-1) value = 1;//51;//300; // else if(y == 0) // value = 52;//1;//22; //// else if(y == DYNY1) //// value = 54;//1;//22; // else if(y == YDIM-1) // value = 54; //100; return value; } inline __device__ float PoisProf (float x){ float radius = (YDIM-1-1)*0.5f; float result = -1.5f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f); return (result); } inline __device__ float PoisProf3D (float x, float y){ x = x-0.5f; y = y-0.5f; //float H = 41.f; return UMAX;//2.25f*16.f*UMAX*x*y*(H-x)*(H-y)/((H)*(H)*(H)*(H)); // float radius = (YDIM-1-1)*0.5f; // float result = -1.0f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f); // return (result); } int timeval_subtract (double *result, struct timeval *x, struct timeval *y) { struct timeval result0; /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (y->tv_usec - x->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. tv_usec is certainly positive. */ result0.tv_sec = x->tv_sec - y->tv_sec; result0.tv_usec = x->tv_usec - y->tv_usec; *result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } __device__ int dmin(int a, int b) { if (a<b) return a; else return b-1; } __device__ int dmax(int a) { if (a>-1) return a; else return 0; } __device__ int dmax(int a,int b) { if (a>b) return a; else return b; } __device__ int dmin_p(int a, int b) { if (a<b) return a; else return 0; } __device__ int dmax_p(int a, int b) { if (a>-1) return a; else return b-1; } inline __device__ float trilinear_interp (float v000, float v001, float v010, float v011, float v100, float v101, float v110, float v111, float x, float y, float z){ return v000*(1.f-x)*(1.f-y)*(1.f-z)+ v001*( x)*(1.f-y)*(1.f-z)+ v010*(1.f-x)*( y)*(1.f-z)+ v011*( x)*( y)*(1.f-z)+ v100*(1.f-x)*(1.f-y)*( z)+ v101*( x)*(1.f-y)*( z)+ v110*(1.f-x)*( y)*( z)+ v111*( x)*( y)*( z); } inline __device__ int f_mem(int f_num, int x, int y, int z, size_t pitch, int zInner) { if(y > YDIM-1) y = 0; if(y < 0) y = YDIM-1; int index = (x+y*pitch+z*YDIM*pitch)+f_num*pitch*YDIM*(zInner); index = dmax(index); index = dmin(index,19*pitch*YDIM*(zInner)); return index; } inline __device__ int f_memLR(int f_num, int x, int y, int z, size_t pitch, int zInner) { int index = (x+y*pitch+z*YLRDIM*pitch)+f_num*pitch*YLRDIM*(zInner); index = dmax(index); index = dmin(index,19*pitch*YLRDIM*(zInner)); return index; } inline __device__ int f_mem_interp(int m_num, int x, int y, int z, int pitch, int zInner) { int index = (x+y*pitch+z*(YLRDIM*LRFACTOR+1)*pitch)+m_num*pitch*(YLRDIM*LRFACTOR+1)*(zInner); index = dmax(index); index = dmin(index,9*pitch*(YLRDIM*LRFACTOR+1)*(zInner)); return index; } inline __device__ int buff_mem_interp(int m_num, int x, int y, int pitch, int zInner) { int index = (x+y*pitch+m_num*(YLRDIM*LRFACTOR+1)*pitch); index = dmax(index); index = dmin(index,9*pitch*(YLRDIM*LRFACTOR+1)); return index; } inline __device__ int buff_mem(int f_num, int x, int y, size_t pitch) { if(y > YDIM-1) y = 0; if(y < 0) y = YDIM-1; int index = (x+y*pitch)+f_num*pitch*YDIM; index = dmax(index); index = dmin(index,19*pitch*YDIM); return index; } inline __device__ int buff_memLR(int f_num, int x, int y, size_t pitch) { int index = (x+y*pitch)+f_num*pitch*YLRDIM; index = dmax(index); index = dmin(index,19*pitch*YLRDIM); return index; } inline __device__ void AddForce(float* f, float dpdy) { // f[1] -= 0.0555555556f*3.f*DPDX; // f[3] += 0.0555555556f*3.f*DPDX; // f[5] -= 0.0277777778f*3.f*DPDX; // f[6] += 0.0277777778f*3.f*DPDX; // f[7] += 0.0277777778f*3.f*DPDX; // f[8] -= 0.0277777778f*3.f*DPDX; // f[10]-= 0.0277777778f*3.f*DPDX; // f[12]+= 0.0277777778f*3.f*DPDX; // f[15]-= 0.0277777778f*3.f*DPDX; // f[17]+= 0.0277777778f*3.f*DPDX; f[2] -= 0.0555555556f*3.f*dpdy; f[4] += 0.0555555556f*3.f*dpdy; f[5] -= 0.0277777778f*3.f*dpdy; f[6] -= 0.0277777778f*3.f*dpdy; f[7] += 0.0277777778f*3.f*dpdy; f[8] += 0.0277777778f*3.f*dpdy; f[11]-= 0.0277777778f*3.f*dpdy; f[13]+= 0.0277777778f*3.f*dpdy; f[16]-= 0.0277777778f*3.f*dpdy; f[18]+= 0.0277777778f*3.f*dpdy; } inline __device__ void Moments(float* f, float* m) { m[0 ] = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18]; m[1 ] = -30.f*f[0]+-11.f*f[1]+-11.f*f[2]+-11.f*f[3]+-11.f*f[4]+ 8.f*f[5]+ 8.f*f[6]+ 8.f*f[7]+ 8.f*f[8]+-11.f*f[9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18]; m[2 ] = 12.f*f[0]+ -4.f*f[1]+ -4.f*f[2]+ -4.f*f[3]+ -4.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18]; m[3 ] = 
f[1]-f[3]+f[5]-f[6]-f[7]+f[8]+f[10]-f[12]+f[15]-f[17]; m[4 ] = -4.f*f[1] + 4.f*f[3] + f[5]+ - f[6]+ - f[7]+ f[8] + f[10] + - f[12] + f[15] + - f[17] ; m[5 ] = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; m[6 ] = -4.f*f[2] + 4.f*f[4]+ f[5]+ f[6]+ - f[7]+ - f[8] + f[11] + - f[13] + f[16] + - f[18]; m[7 ] = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; m[8 ] = + -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18]; m[9 ] = 2.f*f[1]+ - f[2]+ 2.f*f[3]+ - f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[10] = -4.f*f[1]+ 2.f*f[2]+ -4.f*f[3]+ 2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[11] = f[2] + f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ; m[12] = -2.f*f[2] -2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ; m[13] = f[5]+ - f[6]+ f[7]+ - f[8] ; m[14] = f[11] + - f[13] + - f[16] + f[18]; m[15] = f[10] + - f[12] + - f[15] + f[17] ; m[16] = f[5]+ - f[6]+ - f[7]+ f[8] - f[10] + f[12] + - f[15] + f[17] ; m[17] = - f[5]+ - f[6]+ f[7]+ f[8] + f[11] + - f[13] + f[16] + - f[18]; m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18]; } void Moments_host(float* f, float* m) { m[0 ] = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18]; m[1 ] = -30.f*f[0]+-11.f*f[1]+-11.f*f[2]+-11.f*f[3]+-11.f*f[4]+ 8.f*f[5]+ 8.f*f[6]+ 8.f*f[7]+ 8.f*f[8]+-11.f*f[9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18]; m[2 ] = 12.f*f[0]+ -4.f*f[1]+ -4.f*f[2]+ -4.f*f[3]+ -4.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18]; m[3 ] = f[1]-f[3]+f[5]-f[6]-f[7]+f[8]+f[10]-f[12]+f[15]-f[17]; m[4 ] = -4.f*f[1] + 4.f*f[3] + f[5]+ - f[6]+ - f[7]+ f[8] + f[10] + - f[12] + f[15] + - f[17] ; m[5 ] = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; m[6 ] = -4.f*f[2] + 4.f*f[4]+ f[5]+ f[6]+ - f[7]+ - f[8] + f[11] + - f[13] + f[16] + - f[18]; m[7 ] = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; m[8 ] = + -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18]; m[9 ] = 2.f*f[1]+ - f[2]+ 2.f*f[3]+ - f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[10] = -4.f*f[1]+ 2.f*f[2]+ -4.f*f[3]+ 2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[11] = f[2] + f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ; m[12] = -2.f*f[2] -2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ; m[13] = f[5]+ - f[6]+ f[7]+ - f[8] ; m[14] = f[11] + - f[13] + - f[16] + f[18]; m[15] = f[10] + - f[12] + - f[15] + f[17] ; m[16] = f[5]+ - f[6]+ - f[7]+ f[8] - f[10] + f[12] + - f[15] + f[17] ; m[17] = - f[5]+ - f[6]+ f[7]+ f[8] + f[11] + - f[13] + f[16] + - f[18]; m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18]; } void InvertMoments_host(float* f, float* m) { float u = m[3]; float v = m[5]; float w = m[7]; f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2])); f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 
0.055555556f*((m[9])-m[10])); f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10])); f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); } inline __device__ void mrt_meq(float* meq, float rho, float u, float v, float w) { meq[ 0] = rho; meq[ 1] = 
-11.f*rho+19.f*(u*u+v*v+w*w); meq[ 2] = 7.53968254f*(u*u+v*v+w*w);; meq[ 3] = u; meq[ 4] = -0.666666667f*u; meq[ 5] = v; meq[ 6] = -0.666666667f*v; meq[ 7] = w; meq[ 8] = -0.666666667f*w; meq[ 9] = 2.f*u*u-(v*v+w*w); meq[11] = v*v-w*w; meq[13] = u*v; meq[14] = v*w; meq[15] = u*w; } inline __device__ void bgk_meq(float* meq, float rho, float u, float v, float w) { meq[ 0] = rho; meq[ 1] = -11.f*rho+19.f*(u*u+v*v+w*w); meq[ 2] = 3.f*rho-5.5f*(u*u+v*v+w*w);; meq[ 3] = u; meq[ 4] = -0.666666667f*u; meq[ 5] = v; meq[ 6] = -0.666666667f*v; meq[ 7] = w; meq[ 8] = -0.666666667f*w; meq[ 9] = 2.f*u*u-(v*v+w*w); meq[10] = -0.5f*meq[9]*0.333333333333f; meq[11] = v*v-w*w; meq[12] = -0.5f*meq[11]; meq[13] = u*v; meq[14] = v*w; meq[15] = u*w; } //outputs strain rate tensor (Sxx,Syy,Szz,Sxy,Syz,Sxz) from inputs (m0,m3,m5,m7,m9,m11,m13,m14,m15) inline __device__ void StrainRate(float* S, float* m_strain, float dx) { float omega = 1.f; float m1 = 0.f;//(-11.f*m_strain[0]+19.f*(m_strain[1]*m_strain[1]+m_strain[2]*m_strain[2]+m_strain[3]*m_strain[3])); float u = m_strain[1]; float v = m_strain[2]; float w = m_strain[3]; float m9 = m_strain[4]-(2.f*u*u-(v*v+w*w)); float m11= m_strain[5]-(v*v-w*w); float m13= m_strain[6]-(u*v); float m14= m_strain[7]-(v*w); float m15= m_strain[8]-(u*w); S[0] = -0.026315789f*( m1+19.f*omega* m9); S[1] = -0.013157895f*(2.f*m1-19.f*omega*(m9-3.f*m11)); S[2] = -0.013157895f*(2.f*m1-19.f*omega*(m9+3.f*m11)); S[3] = -1.5f*omega*m13; S[4] = -1.5f*omega*m14; S[5] = -1.5f*omega*m15; //S[0] /= dx; //S[1] /= dx; //S[2] /= dx; //S[3] /= dx; //S[4] /= dx; //S[5] /= dx; } //outputs physical moments (rho,u,v,w,Pxx,Pww,Pxy,Pyz,Pxz) from f inline __device__ void PhysicalMoments(float* mom, float* f) { mom[0] = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18]; mom[1] = f[1]-f[3]+f[5]-f[6]-f[7]+f[8]+f[10]-f[12]+f[15]-f[17]; mom[2] = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; mom[3] = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; mom[4] = 2.f*f[1]+-f[2]+2.f*f[3]+-f[4]+f[5]+f[6]+f[7]+f[8]+-f[9]+f[10]+-2.f*f[11]+f[12]+-2.f*f[13]+-f[14]+f[15]+-2.f*f[16]+f[17]+-2.f*f[18]; mom[5] = f[2]+f[4]+f[5]+f[6]+f[7]+f[8]+-f[9]+-f[10]+-f[12]+-f[14]+-f[15]+-f[17]; mom[6] = f[5]+-f[6]+f[7]+-f[8]; mom[7] = f[11]+-f[13]+-f[16]+f[18]; mom[8] = f[10]+-f[12]+-f[15]+f[17]; } inline __device__ void InvertMoments(float* f, float* m) { float u = m[3]; float v = m[5]; float w = m[7]; f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2])); f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10])); f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10])); f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) 
+0.083333333f*(m[11])+(-0.25f*(m[13])))); f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); } inline __device__ void InvertPhysicalMoments(float* f, float* mom, float SF) { float m[19]={0}; m[ 0] = mom[0]; m[ 1] = (-11.f*mom[0]+19.f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3])); m[ 2] = 7.53968254f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]); m[ 3] = mom[1]; m[ 4] = -0.666666667f*mom[1]; m[ 5] = mom[2]; m[ 6] = -0.666666667f*mom[2]; m[ 7] = mom[3]; m[ 8] = -0.666666667f*mom[3]; m[ 9] = mom[4]*SF+(1.f-SF)*(2.f*mom[1]*mom[1]-(mom[2]*mom[2]+mom[3]*mom[3])); m[11] = mom[5]*SF+(1.f-SF)*(mom[2]*mom[2]-mom[3]*mom[3]); m[13] = mom[6]*SF+(1.f-SF)*mom[1]*mom[2]; m[14] = mom[7]*SF+(1.f-SF)*mom[2]*mom[3]; m[15] = mom[8]*SF+(1.f-SF)*mom[1]*mom[3]; // InvertMoments(f,m); float u = m[3]; float v = m[5]; float w = m[7]; f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2])); f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10])); f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) 
+-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10])); f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); } inline __device__ void InvertPhysicalMoments_LES_fc(float* f, float* mom, float SF, float omega_f) { float tau_f = 1.f/omega_f; float S[6]={0}; StrainRate(S,mom,1.f); float Smag_f = 
sqrt(2.f*(S[0]*S[0]+S[1]*S[1]+S[2]*S[2]+2.f*S[3]*S[3]+2.f*S[4]*S[4]+2.f*S[5]*S[5])); float tau_c = tau_f+0.5f+12.f*Smag_f*CS; tau_c *= 0.5f; float omega_c = 1.f/tau_c; tau_f = tau_f+Smag_f*CS; omega_f = 1.f/tau_f; SF = (1.f-omega_c)*omega_f/(LRFACTOR*omega_c*(1.f-omega_f)); float m[19]={0}; m[ 0] = mom[0]; m[ 1] = (-11.f*mom[0]+19.f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3])); m[ 2] = 7.53968254f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]); m[ 3] = mom[1]; m[ 4] = -0.666666667f*mom[1]; m[ 5] = mom[2]; m[ 6] = -0.666666667f*mom[2]; m[ 7] = mom[3]; m[ 8] = -0.666666667f*mom[3]; m[ 9] = mom[4]*SF+(1.f-SF)*(2.f*mom[1]*mom[1]-(mom[2]*mom[2]+mom[3]*mom[3])); m[11] = mom[5]*SF+(1.f-SF)*(mom[2]*mom[2]-mom[3]*mom[3]); m[13] = mom[6]*SF+(1.f-SF)*mom[1]*mom[2]; m[14] = mom[7]*SF+(1.f-SF)*mom[2]*mom[3]; m[15] = mom[8]*SF+(1.f-SF)*mom[1]*mom[3]; // InvertMoments(f,m); float u = m[3]; float v = m[5]; float w = m[7]; f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2])); f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10])); f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10])); f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*m[0] + 
-0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); } inline __device__ void InvertPhysicalMoments_LES_cf(float* f, float* mom, float SF, float omega_c) { float tau_c = 1.f/omega_c; float S[6]={0}; StrainRate(S,mom,1.f); float Smag_c = sqrt(2.f*(S[0]*S[0]+S[1]*S[1]+S[2]*S[2]+2.f*S[3]*S[3]+2.f*S[4]*S[4]+2.f*S[5]*S[5])); float tau_f = 2.f*tau_c-0.5f+1.5f*Smag_c*CS; float omega_f = 1.f/tau_f; omega_f = 1.f/tau_f; tau_c = tau_c+Smag_c*CS; omega_c = 1.f/tau_c; SF = (LRFACTOR*omega_c*(1.f-omega_f))/((1.f-omega_c)*omega_f); float m[19]={0}; m[ 0] = mom[0]; m[ 1] = (-11.f*mom[0]+19.f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3])); m[ 2] = 7.53968254f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]); m[ 3] = mom[1]; m[ 4] = -0.666666667f*mom[1]; m[ 5] = mom[2]; m[ 6] = -0.666666667f*mom[2]; m[ 7] = mom[3]; m[ 8] = -0.666666667f*mom[3]; m[ 9] = mom[4]*SF+(1.f-SF)*(2.f*mom[1]*mom[1]-(mom[2]*mom[2]+mom[3]*mom[3])); m[11] = mom[5]*SF+(1.f-SF)*(mom[2]*mom[2]-mom[3]*mom[3]); m[13] = mom[6]*SF+(1.f-SF)*mom[1]*mom[2]; m[14] = mom[7]*SF+(1.f-SF)*mom[2]*mom[3]; m[15] = mom[8]*SF+(1.f-SF)*mom[1]*mom[3]; // InvertMoments(f,m); float u = m[3]; float v = m[5]; float w = m[7]; f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2])); f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10])); f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10])); f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) 
+0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12])); f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14])))); } inline __device__ void mrt_collide(float* f, float omega, float dpdy) { float m[19]; //float u,v,w; m[3] = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; m[5] = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18]; m[7] = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; m[0] = f[ 0]+f[ 1]+f[ 2]+f[ 3]+f[ 4]+f[ 5]+f[ 6]+f[ 7]+f[ 8]+f[ 9]+ f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18]; if(MODEL == "MRT"){ m[ 1] = 19.f*(-f[ 0]+ f[ 5]+f[ 6]+f[ 7]+f[ 8]+f[10]+f[11]+f[12]+f[13]+f[15]+f[16]+f[17]+f[18] -(m[3]*m[3]+m[5]*m[5]+m[7]*m[7]));//+8.f*(f[ 5]+f[ 6]+f[ 7]+f[ 8]+f[10]+f[11]+f[12]+f[13]+f[15]+f[16]+f[17]+f[18]); m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18] -7.53968254f*(m[3]*m[3]+m[5]*m[5]+m[7]*m[7]); m[ 4] = 1.666666667f*(-3.f*f[1]+3.f*f[ 3]+m[3]); m[ 6] = 1.666666667f*(-3.f*f[2]+3.f*f[ 4]+m[5]); m[ 8] = 1.666666667f*(-3.f*f[9]+3.f*f[14]+m[7]); m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+- f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+-2.f*f[13]+- f[14]+ f[15]+ -2.f*f[16]+ f[17]+-2.f*f[18] -(2.f*m[3]*m[3]-(m[5]*m[5]+m[7]*m[7])); m[10] =-4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+-2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ 
f[17]+-2.f*f[18]; m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+- f[ 9]+-f[10] +-f[12] +- f[14]+-f[15] +-f[17] -(m[5]*m[5]-m[7]*m[7]); m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+-f[10] +-f[12] + 2.f*f[14]+-f[15] +-f[17] ; m[13] = f[ 5]+-f[ 6]+ f[ 7]+-f[ 8] -m[3]*m[5]; m[14] = f[11] +- f[13] + - f[16] + f[18] -m[5]*m[7]; m[15] = f[10] + - f[12] +-f[15] + f[17] -m[3]*m[7]; m[16] = f[ 5]+-f[ 6]+-f[ 7]+ f[ 8] -f[10] + f[12] +-f[15] + f[17] ; m[17] = -f[ 5]+-f[ 6]+ f[ 7]+ f[ 8] + f[11] +- f[13] + f[16] +- f[18]; m[18] = f[10]+- f[11]+ f[12]+- f[13] +-f[15]+ f[16]+-f[17]+ f[18]; } if(SmagLES == 1) { float usqr = m[3]*m[3]+m[5]*m[5]+m[7]*m[7]; float u = m[3]; float v = m[5]; float w = m[7]; float rho = m[0]; float feq0, feq1 ,feq2 ,feq3 ,feq4 ,feq5 ,feq6 ,feq7 ,feq8 ,feq9 ,feq10 ,feq11 ,feq12 ,feq13 ,feq14 ,feq15 ,feq16 ,feq17 ,feq18; if(MODEL == "MRT"){ feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ; feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ; feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ; feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ; feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ; feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ; feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ; feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ; feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ; feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w); feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ; feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w); feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w; feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ; feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w); feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ; feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w); feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ; feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ; feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; usqr = u*u+v*v+w*w; feq0 =(0.3333333333f*(rho-1.5f*usqr)); feq1 =(0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); feq2 =(0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr)); feq3 =(0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr)); feq4 
=(0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr)); feq5 =(0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)); feq6 =(0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr)); feq7 =(0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr)); feq8 =(0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)); feq9 =(0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr)); feq10=(0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)); feq11=(0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr)); feq12=(0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr)); feq13=(0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr)); feq14=(0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr)); feq15=(0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)); feq16=(0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr)); feq17=(0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr)); feq18=(0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr)); } else{ usqr = u*u+v*v+w*w; feq0 =(0.3333333333f*(rho-1.5f*usqr)); feq1 =(0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); feq2 =(0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr)); feq3 =(0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr)); feq4 =(0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr)); feq5 =(0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)); feq6 =(0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr)); feq7 =(0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr)); feq8 =(0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)); feq9 =(0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr)); feq10=(0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)); feq11=(0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr)); feq12=(0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr)); feq13=(0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr)); feq14=(0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr)); feq15=(0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)); feq16=(0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr)); feq17=(0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr)); feq18=(0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr)); } float PI11 = (f[1 ]-feq1 )+(f[3 ]-feq3 )+(f[5 ]-feq5 )+(f[6 ]-feq6 )+(f[7 ]-feq7 )+(f[8 ]-feq8 )+(f[10]-feq10)+(f[12]-feq12)+(f[15]-feq15)+(f[17]-feq17); float PI22 = (f[2 ]-feq2 )+(f[4 ]-feq4 )+(f[5 ]-feq5 )+(f[6 ]-feq6 )+(f[7 ]-feq7 )+(f[8 ]-feq8 )+(f[11]-feq11)+(f[13]-feq13)+(f[16]-feq16)+(f[18]-feq18); float PI33 = (f[9 ]-feq9 )+(f[14]-feq14)+(f[10]-feq10)+(f[12]-feq12)+(f[15]-feq15)+(f[17]-feq17)+(f[11]-feq11)+(f[13]-feq13)+(f[16]-feq16)+(f[18]-feq18); float PI12 = (f[5 ]-feq5 )+(f[7 ]-feq7 )-(f[6 ]-feq6 )-(f[8 ]-feq8 ); float PI13 = (f[10]-feq10)+(f[17]-feq17)-(f[12]-feq12)-(f[15]-feq15); float PI23 = (f[11]-feq11)+(f[18]-feq18)-(f[13]-feq13)-(f[16]-feq16); float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13); //float Q = sqrt(Q11*Q11+Q22*Q22+Q33*Q33+2.f*Q12*Q12+2.f*Q23*Q23+2.f*Q13*Q13); float tau0 = 1.f/omega; float tau = 0.5f*tau0+0.5f*sqrt(tau0*tau0+18.f*CS*sqrt(2.f)*Q); omega = 1.f/tau; } if(MODEL == "MRT"){ //f[ 0] -=- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]); //f[ 1] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])*omega-m[10]); //f[ 2] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])*omega-m[10])+ 0.083333333f*((m[11])*omega-m[12]); //f[ 3] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 
0.055555556f*((m[ 9])*omega-m[10]); //f[ 4] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])*omega-m[10])+ 0.083333333f*((m[11])*omega-m[12]); //f[ 5] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))); //f[ 6] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))); //f[ 7] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))); //f[ 8] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))); //f[ 9] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])*omega-m[10])+-0.083333333f*((m[11])*omega-m[12]); //f[10] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))); //f[11] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))); //f[12] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))); //f[13] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))); //f[14] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])*omega-m[10])+-0.083333333f*((m[11])*omega-m[12]); //f[15] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))); //f[16] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))); //f[17] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))); //f[18] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))); Moments(f,m); float meq[19]={0}; //float ftemp[19]={0}; //bgk_meq(meq,m[0],m[3],m[5],m[7]); // //for(int i = 0; i<19; i++) // meq[i] = m[i]-omega*(m[i]-meq[i]); bgk_meq(meq,m[0],m[3],m[5],m[7]); meq[9] = m[9] -omega*(m[9]-meq[9]); meq[11] = m[11]-omega*(m[11]-meq[11]); meq[13] = m[13]-omega*(m[13]-meq[13]); meq[14] = m[14]-omega*(m[14]-meq[14]); meq[15] = m[15]-omega*(m[15]-meq[15]); //meq[1 ] = m[1 ]-1.19f*(m[1 ]-meq[1 ]); //meq[2 ] = m[2 ]-1.4f *(m[2 ]-meq[2 ]); //meq[10] = m[10]-1.4f *(m[10]-meq[10]); //meq[12] = m[12]-1.4f *(m[12]-meq[12]); //meq[4 ] = m[4 ]-1.2f *(m[4 ]-meq[4 ]); //meq[6 ] = m[6 ]-1.2f *(m[6 ]-meq[6 ]); //meq[8 ] = m[8 ]-1.2f *(m[8 ]-meq[8 ]); //meq[16] = 
m[16]-1.98f*(m[16]-meq[16]); //meq[17] = m[17]-1.98f*(m[17]-meq[17]); //meq[18] = m[18]-1.98f*(m[18]-meq[18]); //for(int i = 0; i<19; i++) // meq[i] = m[i]-omega*(m[i]-meq[i]); InvertMoments(f,meq); //for(int i = 0; i<19; i++) // f[i] -= ftemp[i]; } else{ float rho,u,v,w; rho = m[0]; u = m[3]; v = m[5]; w = m[7]; float usqr = u*u+v*v+w*w; f[0 ]-=omega*(f[0 ]-0.3333333333f*(rho-1.5f*usqr)); f[1 ]-=omega*(f[1 ]-0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); f[2 ]-=omega*(f[2 ]-0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr)); f[3 ]-=omega*(f[3 ]-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr)); f[4 ]-=omega*(f[4 ]-0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr)); f[5 ]-=omega*(f[5 ]-0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)); f[6 ]-=omega*(f[6 ]-0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr)); f[7 ]-=omega*(f[7 ]-0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr)); f[8 ]-=omega*(f[8 ]-0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)); f[9 ]-=omega*(f[9 ]-0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr)); f[10]-=omega*(f[10]-0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)); f[11]-=omega*(f[11]-0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr)); f[12]-=omega*(f[12]-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr)); f[13]-=omega*(f[13]-0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr)); f[14]-=omega*(f[14]-0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr)); f[15]-=omega*(f[15]-0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)); f[16]-=omega*(f[16]-0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr)); f[17]-=omega*(f[17]-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr)); f[18]-=omega*(f[18]-0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr)); } AddForce(f,dpdy); } inline __device__ void North_Extrap(float* f, float rho) { float m[19]; //rho = 1.0f; float u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; float v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18]; float w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18]; m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18]; m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ; m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18]; m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ; m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ; m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ; m[14] = f[11] + - f[13] + - f[16] + f[18]; m[15] = f[10] + - f[12] + - f[15] + f[17] ; m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] 
+ f[12] + - f[15] + f[17] ; m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18]; f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2])); f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12])); f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) 
+0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); } inline __device__ void South_Extrap(float* f, float v) { float m[19]; float u = 0.f;//f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; float w = 0.f;//f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; float rho = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18]; m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18]; m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18]; m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ; m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18]; m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ; m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ; m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ; m[14] = f[11] + - f[13] + - f[16] + f[18]; m[15] = f[10] + - f[12] + - f[15] + f[17] ; m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ; m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18]; f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2])); f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) 
+0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12])); f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); } inline __device__ void East_Extrap(float* f, float rho) { float m[19]; //rho = 0.0f; float u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; float v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18]; float w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18]; m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18]; m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ; m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18]; m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - 
f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ; m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ; m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ; m[14] = f[11] + - f[13] + - f[16] + f[18]; m[15] = f[10] + - f[12] + - f[15] + f[17] ; m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ; m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18]; f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2])); f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 
9])-m[10])+-0.083333333f*((m[11])-m[12])); f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); } inline __device__ void West_Extrap(float* f, float u, int t) { float m[19]; float v = 0.f;//f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; float w = 0.f;//f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; //if(t == 1000 || t == 2000 || t == 3000) w = 0.01f; float rho = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18]; m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18]; m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18]; m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ; m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18]; m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18]; m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ; m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ; m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ; m[14] = f[11] + - f[13] + - f[16] + f[18]; m[15] = f[10] + - f[12] + - f[15] + f[17] ; m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ; m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18]; m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18]; f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2])); f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10])); f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 
9])-m[10])+ 0.083333333f*((m[11])-m[12])); f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])))); f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])))); f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12])); f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12])); f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])))); f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])))); f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])))); f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])))); } __device__ void xsymmetry_bot(float* f, int y, int z) { if(y == 0 && z == 0){ f[ 2] = f[ 4]; f[13]=f[18]; f[11]=f[18]; f[16]=f[18]; f[ 6] =f[ 7]; f[ 9] =f[14]; f[12]=f[17]; } else if(y == 0 && z == ZDIM-1){ f[ 4] = f[ 2]; f[11]=f[13]; f[18]=f[13]; f[16]=f[13]; f[ 6] =f[ 7]; f[14]=f[ 9]; f[17]=f[12]; } else if(y == YDIM-1 && z == 0){ f[ 4] = f[ 2]; f[11]=f[16]; f[18]=f[16]; f[13]=f[16]; f[ 7] =f[ 6]; f[ 9] =f[14]; f[12]=f[17]; } else if(y == YDIM-1 && z == ZDIM-1){ f[ 4] = f[ 2]; 
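// corner treatment on the x-symmetry plane: the populations that cannot be streamed from inside the domain are, presumably, filled from resolved distributions with matching tangential components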
f[16]=f[11]; f[18]=f[11]; f[13]=f[11]; f[ 7] =f[ 6]; f[14]=f[ 9]; f[17]=f[12]; } else{ if(y == 0){ f[ 2] = f[ 4]; f[11]=f[13]; f[16]=f[18]; f[ 8] = f[ 5]; } else if(y == YDIM-1){ f[ 4]=f[ 2] ; f[13]=f[11]; f[18]=f[16]; f[ 5]=f[ 8] ; } } f[ 1] = f[ 3] ; f[ 5] = f[ 6] ; f[ 8] = f[ 7] ; f[10]= f[12]; f[15]= f[17]; } __device__ void xsymmetry_top(float* f, int y, int z) { if(y == 0 && z == 0){ f[ 2] = f[ 4]; f[13] = f[18]; f[11] = f[18]; f[16] = f[18]; f[ 5] = f[ 8]; f[ 9] = f[14]; f[10] = f[15]; } else if(y == 0 && z == ZDIM-1){ f[ 2] = f[ 4]; f[11] = f[13]; f[18] = f[13]; f[16] = f[13]; f[ 5] = f[ 8]; f[14] = f[ 9]; f[15] = f[10]; } else if(y == YDIM-1 && z == 0){ f[ 4] = f[ 2]; f[18] = f[16]; f[11] = f[16]; f[13] = f[16]; f[ 8] = f[ 5]; f[ 9] = f[14]; f[10] = f[15]; } else if(y == YDIM-1 && z == ZDIM-1){ f[ 4] = f[ 2]; f[13] = f[11]; f[16] = f[11]; f[18] = f[11]; f[ 8] = f[ 5]; f[14] = f[ 9]; f[15] = f[10]; } else{ if(y == 0){ f[ 2] = f[ 4]; f[11] = f[13]; f[16] = f[18]; f[ 5] = f[ 8]; } else if(y == YDIM-1){ f[ 4] = f[ 2]; f[13] = f[11]; f[18] = f[16]; f[ 8] = f[ 5]; } } f[ 3] = f[ 1] ; f[ 6] = f[ 5] ; f[ 7] = f[ 8] ; f[12]= f[10]; f[17]= f[15]; } inline __device__ void vel_av(float* f, float& uAv, float& vAv, float& wAv, int t) { float u,v,w; u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18]; w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; uAv = (uAv*(t-START_VELAV)+u)/((t-START_VELAV)+1); vAv = (vAv*(t-START_VELAV)+v)/((t-START_VELAV)+1); wAv = (wAv*(t-START_VELAV)+w)/((t-START_VELAV)+1); } inline __device__ void vel_avLR(float* f, float& uAv, float& vAv, float t) { float u,v;//,w; u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18]; uAv = (uAv*(t-START_VELAV)+u*LRFACTOR)/((t-START_VELAV)+LRFACTOR); vAv = (vAv*(t-START_VELAV)+v*LRFACTOR)/((t-START_VELAV)+LRFACTOR); } inline __device__ void vel_fluc(float* f, float& uAv, float& vAv, float& wAv, float& ufluc, float& vfluc, float& wfluc, int t) { float u,v,w; u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18]; w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; u = (u-uAv)*(u-uAv); v = (v-vAv)*(v-vAv); w = (w-wAv)*(w-wAv); ufluc = (ufluc*(t-START_VELFLUC)+u)/((t-START_VELFLUC)+1); vfluc = (vfluc*(t-START_VELFLUC)+v)/((t-START_VELFLUC)+1); wfluc = (wfluc*(t-START_VELFLUC)+w)/((t-START_VELFLUC)+1); } inline __device__ void vel_flucLR(float* f, float& uAv, float& vAv, float& ufluc, float& vfluc, float t) { float u,v;//,w; u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17]; v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18]; u = (u-uAv)*(u-uAv); v = (v-vAv)*(v-vAv); ufluc = (ufluc*(t-START_VELFLUC)+u*LRFACTOR)/((t-START_VELFLUC)+LRFACTOR); vfluc = (vfluc*(t-START_VELFLUC)+v*LRFACTOR)/((t-START_VELFLUC)+LRFACTOR); } __global__ void initialize(float *fout, size_t pitch, int zInner, int GPU_N) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; float xcoord = x; float ycoord = y; float zcoord = z+1+GPU_N*ZDIM; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) float f[19] = {0}; float m[19] = {0}; int im = ImageFcn(xcoord,ycoord,zcoord,0); float u,v,w,rho; rho = 1.f; u = 0.0f; v = UMAX; w = 0.0f; if(im == 10 || im == 1){ u = 0.0f; v = 0.0f; w 
= 0.0f; } mrt_meq(m,rho,u,v,w); InvertMoments(f,m); for(int i = 0; i<19; i++) fout[j+i *pitch*YDIM*zInner]=f[ i]; } __global__ void initializeLR(float *fout, size_t pitch, int zInner, int GPU_N) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; float xcoord = x; float ycoord = y; float zcoord = z+1+GPU_N*(zInner+2); xcoord = LRX0+x*LRFACTOR; ycoord = LRY0+y*LRFACTOR; zcoord = LRZ0+LRFACTOR*(GPU_N*(zInner+2)+z); int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements) float f[19] = {0}; float m[19] = {0}; int im = ImageFcnLR(xcoord,ycoord,zcoord); float u,v,w,rho; rho = 1.f; u = UMAX; v = 0.0f; w = 0.0f; if(im == 10 || im == 1){ u = 0.0f; v = 0.0f; w = 0.0f; } mrt_meq(m,rho,u,v,w); InvertMoments(f,m); for(int i = 0; i<19; i++) fout[j+i *pitch*YLRDIM*zInner]=f[ i]; } __global__ void update_top(float* hB, float* hA, float* fA, float* temp, float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F, float* h_interp, size_t pitch_interp, float dpdy) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int j = x+y*pitch;//index on padded mem (pitch in elements) int im = ImageFcn(x,y,(GPU+1)*(zInner+2)-1,t); float f[19]; __shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX]; __shared__ int check[1]; check[0] = 0; syncthreads(); f[0 ]= hA [j]; f[1 ]= hA [buff_mem(1 ,x-1,y ,pitch)]; f[3 ]= hA [buff_mem(3 ,x+1,y ,pitch)]; f[2 ]= hA [buff_mem(2 ,x ,y-1,pitch)]; f[5 ]= hA [buff_mem(5 ,x-1,y-1,pitch)]; f[6 ]= hA [buff_mem(6 ,x+1,y-1,pitch)]; f[4 ]= hA [buff_mem(4 ,x ,y+1,pitch)]; f[7 ]= hA [buff_mem(7 ,x+1,y+1,pitch)]; f[8 ]= hA [buff_mem(8 ,x-1,y+1,pitch)]; f[9 ]= fA [f_mem (9 ,x ,y ,zInner-1,pitch, zInner)]; f[10]= fA [f_mem (10,x-1,y ,zInner-1,pitch, zInner)]; f[11]= fA [f_mem (11,x ,y-1,zInner-1,pitch, zInner)]; f[12]= fA [f_mem (12,x+1,y ,zInner-1,pitch, zInner)]; f[13]= fA [f_mem (13,x ,y+1,zInner-1,pitch, zInner)]; f[14]= temp[buff_mem(14,x ,y ,pitch)]; f[15]= temp[buff_mem(15,x-1,y ,pitch)]; f[16]= temp[buff_mem(16,x ,y-1,pitch)]; f[17]= temp[buff_mem(17,x+1,y ,pitch)]; f[18]= temp[buff_mem(18,x ,y+1,pitch)]; if(im == 1 || im ==10){//BB if(im == 10 && flag_F == 1){ check[0] = 1; sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6]; sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17]; sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6]; sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18]; sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13]; sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; } hB[buff_mem(0 ,x,y,pitch)] = f[0 ]; hB[buff_mem(1 ,x,y,pitch)] = f[3 ]; hB[buff_mem(2 ,x,y,pitch)] = f[4 ]; hB[buff_mem(3 ,x,y,pitch)] = f[1 ]; hB[buff_mem(4 ,x,y,pitch)] = f[2 ]; hB[buff_mem(5 ,x,y,pitch)] = f[7 ]; hB[buff_mem(6 ,x,y,pitch)] = f[8 ]; hB[buff_mem(7 ,x,y,pitch)] = f[5 ]; hB[buff_mem(8 ,x,y,pitch)] = f[6 ]; hB[buff_mem(9 ,x,y,pitch)] = f[14]; hB[buff_mem(10,x,y,pitch)] = f[17]; hB[buff_mem(11,x,y,pitch)] = f[18]; hB[buff_mem(12,x,y,pitch)] = f[15]; hB[buff_mem(13,x,y,pitch)] = f[16]; hB[buff_mem(14,x,y,pitch)] = f[9 ]; hB[buff_mem(15,x,y,pitch)] = f[12]; hB[buff_mem(16,x,y,pitch)] = f[13]; hB[buff_mem(17,x,y,pitch)] = f[10]; hB[buff_mem(18,x,y,pitch)] = f[11]; } else{ 
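// fluid / open-boundary node: apply the flagged extrapolation or symmetry condition, collide with MRT (the dpdy body force is dropped for y > DYNY1), overwrite with the periodic copies where flagged, then write the post-collision set to the top buffer hB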
sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; if(im == 100)//north outlet { for(int i = 0; i<19; i++) f[i ]= hA[buff_mem(i ,x,y-1,pitch)]; North_Extrap(f,1.0f); } if(im == 200)//south inlet { for(int i = 0; i<19; i++) f[i ]= hA[buff_mem(i ,x,y+1,pitch)]; //South_Extrap(f,UMAX); float u_in = PoisProf3D(x,(GPU+1)*(zInner+2)-1); South_Extrap(f,u_in); } if(im == 300)//east outlet { for(int i = 0; i<19; i++) f[i ]= hA[buff_mem(i ,x-1,y,pitch)]; East_Extrap(f,1.0f); } if(im == 400)//west inlet { for(int i = 0; i<19; i++) f[i ]= hA[buff_mem(i ,x+1,y,pitch)]; float u_in = PoisProf3D(y,(GPU+1)*(zInner+2)-1); West_Extrap(f,u_in,t); } if(im == 25) xsymmetry_top(f,y,(GPU+1)*(zInner+2)-1); if(im == 26) xsymmetry_bot(f,y,(GPU+1)*(zInner+2)-1); if(y>DYNY1) dpdy = 0.f; mrt_collide(f,omega,dpdy); if(im == 50)//west periodic { for(int i = 0; i<19; i++) f[i ]= hA[buff_mem(i ,XDIM-2,y,pitch)]; } if(im == 51)//east periodic { for(int i = 0; i<19; i++) f[i ]= hA[buff_mem(i ,1,y,pitch)]; } if(im == 52)//south periodic { for(int i = 0; i<19; i++) f[i ]= hA[buff_mem(i ,x,DYNY1-1,pitch)]; } if(im == 53)//north periodic { for(int i = 0; i<19; i++) f[i ]= hA[buff_mem(i ,x,DYNY2,pitch)]; } if(im == 54)//DYNY periodic { for(int i = 0; i<19; i++) f[i ]= hA[buff_mem(i ,x,1,pitch)]; } for(int i = 0; i<19; i++) hB[buff_mem(i ,x,y,pitch)] = f[i ]; } if(REFINEMENT == 1){ if(x>=int(LRX0)&&x<=int(LRX0+XLRDIM*LRFACTOR)&&y>=int(LRY0)&&y<=int(LRY0+YLRDIM*LRFACTOR)) { // if(x>int(LRX0+2)&&x<int(LRX0+XLRDIM*LRFACTOR-1)&&y>int(LRY0+2)&&y<int(LRY0+YLRDIM*LRFACTOR-1)) // { // //do nothing // } // else{ // //float rho,u,v,w,m9,m11,m13,m14,m15; float mom[9]; PhysicalMoments(mom,f); for(int i = 0; i<9; i++) h_interp[buff_mem_interp(i,x-int(LRX0),y-int(LRY0),pitch_interp,zInner)]=mom[i]; // } } } syncthreads(); if(check[0] == 1){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint]; sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint]; sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&FX[t-STARTF],sumX[0]); atomicAdd(&FY[t-STARTF],sumY[0]); atomicAdd(&FZ[t-STARTF],sumZ[0]); } } } __global__ void update_bot(float* gB, float* gA, float* fA, float* temp, float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F, float* g_interp, size_t pitch_interp, float dpdy) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int j = x+y*pitch;//index on padded mem (pitch in elements) int im = ImageFcn(x,y,GPU*(zInner+2),t); float f[19]; __shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX]; __shared__ int check[1]; check[0] = 0; syncthreads(); f[0 ]= gA [j]; f[1 ]= gA [buff_mem(1 ,x-1,y ,pitch)]; f[3 ]= gA [buff_mem(3 ,x+1,y ,pitch)]; f[2 ]= gA [buff_mem(2 ,x ,y-1,pitch)]; f[5 ]= gA [buff_mem(5 ,x-1,y-1,pitch)]; f[6 ]= gA [buff_mem(6 ,x+1,y-1,pitch)]; f[4 ]= gA [buff_mem(4 ,x ,y+1,pitch)]; f[7 ]= gA [buff_mem(7 ,x+1,y+1,pitch)]; f[8 ]= gA [buff_mem(8 ,x-1,y+1,pitch)]; f[9 ]= temp[buff_mem(9 ,x ,y ,pitch)]; f[10]= temp[buff_mem(10,x-1,y ,pitch)]; f[11]= temp[buff_mem(11,x ,y-1,pitch)]; f[12]= temp[buff_mem(12,x+1,y ,pitch)]; f[13]= temp[buff_mem(13,x ,y+1,pitch)]; f[14]= fA [f_mem (14,x ,y ,0,pitch, zInner)]; f[15]= fA [f_mem (15,x-1,y ,0,pitch, zInner)]; f[16]= fA [f_mem (16,x ,y-1,0,pitch, zInner)]; 
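// bottom buffer layer: +z-moving populations (9-13) stream in from temp, presumably the halo received from the neighboring GPU below (or the domain boundary), while -z-moving populations (14-18) come from the first interior slice of fA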
f[17]= fA [f_mem (17,x+1,y ,0,pitch, zInner)]; f[18]= fA [f_mem (18,x ,y+1,0,pitch, zInner)]; if(im == 1 || im ==10){//BB if(im == 10 && flag_F == 1){ check[0] = 1; sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6]; sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17]; sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6]; sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18]; sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13]; sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; } gB[buff_mem(0 ,x,y,pitch)] = f[0 ]; gB[buff_mem(1 ,x,y,pitch)] = f[3 ]; gB[buff_mem(2 ,x,y,pitch)] = f[4 ]; gB[buff_mem(3 ,x,y,pitch)] = f[1 ]; gB[buff_mem(4 ,x,y,pitch)] = f[2 ]; gB[buff_mem(5 ,x,y,pitch)] = f[7 ]; gB[buff_mem(6 ,x,y,pitch)] = f[8 ]; gB[buff_mem(7 ,x,y,pitch)] = f[5 ]; gB[buff_mem(8 ,x,y,pitch)] = f[6 ]; gB[buff_mem(9 ,x,y,pitch)] = f[14]; gB[buff_mem(10,x,y,pitch)] = f[17]; gB[buff_mem(11,x,y,pitch)] = f[18]; gB[buff_mem(12,x,y,pitch)] = f[15]; gB[buff_mem(13,x,y,pitch)] = f[16]; gB[buff_mem(14,x,y,pitch)] = f[9 ]; gB[buff_mem(15,x,y,pitch)] = f[12]; gB[buff_mem(16,x,y,pitch)] = f[13]; gB[buff_mem(17,x,y,pitch)] = f[10]; gB[buff_mem(18,x,y,pitch)] = f[11]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; if(im == 100)//north outlet { for(int i = 0; i<19; i++) f[i ]= gA[buff_mem(i ,x,y-1,pitch)]; North_Extrap(f,1.0f); } if(im == 200)//south inlet { for(int i = 0; i<19; i++) f[i ]= gA[buff_mem(i ,x,y+1,pitch)]; //South_Extrap(f,UMAX); float u_in = PoisProf3D(x,GPU*(zInner+2)); South_Extrap(f,u_in); } if(im == 300)//east outlet { for(int i = 0; i<19; i++) f[i ]= gA[buff_mem(i ,x-1,y,pitch)]; East_Extrap(f,1.0f); } if(im == 400)//west inlet { for(int i = 0; i<19; i++) f[i ]= gA[buff_mem(i ,x+1,y,pitch)]; float u_in = PoisProf3D(y,GPU*(zInner+2)); West_Extrap(f,u_in,t); } if(im == 25) xsymmetry_top(f,y,GPU*(zInner+2)); if(im == 26) xsymmetry_bot(f,y,GPU*(zInner+2)); if(y>DYNY1) dpdy = 0.f; mrt_collide(f,omega,dpdy); if(im == 50)//west periodic { for(int i = 0; i<19; i++) f[i ]= gA[buff_mem(i ,XDIM-2,y,pitch)]; } if(im == 51)//east periodic { for(int i = 0; i<19; i++) f[i ]= gA[buff_mem(i ,1,y,pitch)]; } if(im == 52)//south periodic { for(int i = 0; i<19; i++) f[i ]= gA[buff_mem(i ,x,DYNY1-1,pitch)]; } if(im == 53)//north periodic { for(int i = 0; i<19; i++) f[i ]= gA[buff_mem(i ,x,DYNY2,pitch)]; } if(im == 54)//DYNY periodic { for(int i = 0; i<19; i++) f[i ]= gA[buff_mem(i ,x,1,pitch)]; } for(int i = 0; i<19; i++) gB[buff_mem(i ,x,y,pitch)] = f[i ]; } if(REFINEMENT == 1){ if(x>=int(LRX0)&&x<=int(LRX0+XLRDIM*LRFACTOR)&&y>=int(LRY0)&&y<=int(LRY0+YLRDIM*LRFACTOR)) { // if(x>int(LRX0+2)&&x<int(LRX0+XLRDIM*LRFACTOR-1)&&y>int(LRY0+2)&&y<int(LRY0+YLRDIM*LRFACTOR-1)) // { // //do nothing // } // else{ //float rho,u,v,w,m9,m11,m13,m14,m15; float mom[9]; PhysicalMoments(mom,f); for(int i = 0; i<9; i++) g_interp[buff_mem_interp(i,x-int(LRX0),y-int(LRY0),pitch_interp,zInner)]=mom[i]; // } } } syncthreads(); if(check[0] == 1){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint]; sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint]; sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ 
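// thread 0 commits this block's reduced force sums; atomicAdd is required because every block accumulates into the same per-timestep slots FX/FY/FZ[t-STARTF]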
atomicAdd(&FX[t-STARTF],sumX[0]); atomicAdd(&FY[t-STARTF],sumY[0]); atomicAdd(&FZ[t-STARTF],sumZ[0]); } } } __global__ void update_inn(float* fB, float* fA, float* g, float* h, float omega, size_t pitch, int GPU, int zInner, float* velAv_u, float* velAv_v, float* velAv_w, float* velFluc_u, float* velFluc_v, float* velFluc_w, float* FX, float* FY, float* FZ, int t, int flag_F, float* f_interp, size_t pitch_interp, float dpdy) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) int im = ImageFcn(x,y,GPU*(zInner+2)+1+z,t); float f[19]; __shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX]; __shared__ int check[1]; check[0] = 0; syncthreads(); f[ 0] = fA[j]; f[ 1] = fA[f_mem (1 ,x-1,y ,z ,pitch, zInner)]; f[ 3] = fA[f_mem (3 ,x+1,y ,z ,pitch, zInner)]; f[ 2] = fA[f_mem (2 ,x ,y-1,z ,pitch, zInner)]; f[ 5] = fA[f_mem (5 ,x-1,y-1,z ,pitch, zInner)]; f[ 6] = fA[f_mem (6 ,x+1,y-1,z ,pitch, zInner)]; f[ 4] = fA[f_mem (4 ,x ,y+1,z ,pitch, zInner)]; f[ 7] = fA[f_mem (7 ,x+1,y+1,z ,pitch, zInner)]; f[ 8] = fA[f_mem (8 ,x-1,y+1,z ,pitch, zInner)]; if(z==zInner-1){//top nodes need info from h f[ 9] = fA[f_mem (9 ,x ,y ,z-1,pitch, zInner)]; f[10]= fA[f_mem (10,x-1,y ,z-1,pitch, zInner)]; f[11]= fA[f_mem (11,x ,y-1,z-1,pitch, zInner)]; f[12]= fA[f_mem (12,x+1,y ,z-1,pitch, zInner)]; f[13]= fA[f_mem (13,x ,y+1,z-1,pitch, zInner)]; f[14]= h [buff_mem(14,x ,y ,pitch)]; f[15]= h [buff_mem(15,x-1,y ,pitch)]; f[16]= h [buff_mem(16,x ,y-1,pitch)]; f[17]= h [buff_mem(17,x+1,y ,pitch)]; f[18]= h [buff_mem(18,x ,y+1,pitch)]; } else if(z==0){//bottom nodes need info from g f[ 9] =g [buff_mem(9 ,x ,y ,pitch)]; f[10]= g [buff_mem(10,x-1,y ,pitch)]; f[11]= g [buff_mem(11,x ,y-1,pitch)]; f[12]= g [buff_mem(12,x+1,y ,pitch)]; f[13]= g [buff_mem(13,x ,y+1,pitch)]; f[14]= fA[f_mem (14,x ,y ,z+1,pitch, zInner)]; f[15]= fA[f_mem (15,x-1,y ,z+1,pitch, zInner)]; f[16]= fA[f_mem (16,x ,y-1,z+1,pitch, zInner)]; f[17]= fA[f_mem (17,x+1,y ,z+1,pitch, zInner)]; f[18]= fA[f_mem (18,x ,y+1,z+1,pitch, zInner)]; } else{//normal nodes f[ 9] = fA[f_mem(9 ,x ,y ,z-1,pitch,zInner)]; f[10]= fA[f_mem(10,x-1,y ,z-1,pitch,zInner)]; f[11]= fA[f_mem(11,x ,y-1,z-1,pitch,zInner)]; f[12]= fA[f_mem(12,x+1,y ,z-1,pitch,zInner)]; f[13]= fA[f_mem(13,x ,y+1,z-1,pitch,zInner)]; f[14]= fA[f_mem(14,x ,y ,z+1,pitch,zInner)]; f[15]= fA[f_mem(15,x-1,y ,z+1,pitch,zInner)]; f[16]= fA[f_mem(16,x ,y-1,z+1,pitch,zInner)]; f[17]= fA[f_mem(17,x+1,y ,z+1,pitch,zInner)]; f[18]= fA[f_mem(18,x ,y+1,z+1,pitch,zInner)]; }//end normal nodes if(im == 1 || im ==10){//BB if(im == 10 && flag_F == 1){ check[0] = 1; sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6]; sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17]; sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6]; sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18]; sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13]; sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; } fB[f_mem(1 ,x,y,z,pitch,zInner)] = f[ 3] ; fB[f_mem(2 ,x,y,z,pitch,zInner)] = f[ 4] ; fB[f_mem(3 ,x,y,z,pitch,zInner)] = f[ 1] ; fB[f_mem(4 ,x,y,z,pitch,zInner)] = f[ 2] ; fB[f_mem(5 ,x,y,z,pitch,zInner)] = f[ 7] ; fB[f_mem(6 ,x,y,z,pitch,zInner)] = f[ 8] ; fB[f_mem(7 ,x,y,z,pitch,zInner)] = 
f[ 5] ; fB[f_mem(8 ,x,y,z,pitch,zInner)] = f[ 6] ; fB[f_mem(9 ,x,y,z,pitch,zInner)] = f[14]; fB[f_mem(10,x,y,z,pitch,zInner)] = f[17]; fB[f_mem(11,x,y,z,pitch,zInner)] = f[18]; fB[f_mem(12,x,y,z,pitch,zInner)] = f[15]; fB[f_mem(13,x,y,z,pitch,zInner)] = f[16]; fB[f_mem(14,x,y,z,pitch,zInner)] = f[ 9] ; fB[f_mem(15,x,y,z,pitch,zInner)] = f[12]; fB[f_mem(16,x,y,z,pitch,zInner)] = f[13]; fB[f_mem(17,x,y,z,pitch,zInner)] = f[10]; fB[f_mem(18,x,y,z,pitch,zInner)] = f[11]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; if(im == 100)//north outlet { for(int i = 0; i<19; i++) f[i ]= fA[f_mem(i ,x,y-1,z,pitch,zInner)]; North_Extrap(f,1.0f); } if(im == 200)//south inlet { for(int i = 0; i<19; i++) f[i ]= fA[f_mem(i ,x,y+1,z,pitch,zInner)]; //South_Extrap(f,UMAX); float u_in = PoisProf3D(x,GPU*(zInner+2)+1+z); South_Extrap(f,u_in); } if(im == 300)//east outlet { for(int i = 0; i<19; i++) f[i ]= fA[f_mem(i ,x-1,y,z,pitch,zInner)]; East_Extrap(f,1.0f); } if(im == 400)//west inlet { for(int i = 0; i<19; i++) f[i ]= fA[f_mem(i ,x+1,y,z,pitch,zInner)]; float u_in = PoisProf3D(y,GPU*(zInner+2)+1+z); West_Extrap(f,u_in,t); } if(im == 25) xsymmetry_top(f,y,GPU*(zInner+2)+1+z); if(im == 26) xsymmetry_bot(f,y,GPU*(zInner+2)+1+z); if(y>DYNY1) dpdy = 0.f; mrt_collide(f,omega,dpdy); if(im == 50)//west periodic { for(int i = 0; i<19; i++) f[i ]= fA[f_mem(i ,XDIM-2,y,z,pitch,zInner)]; } if(im == 51)//east periodic { for(int i = 0; i<19; i++) f[i ]= fA[f_mem(i ,1,y,z,pitch,zInner)]; } if(im == 52)//south periodic { for(int i = 0; i<19; i++) f[i ]= fA[f_mem(i ,x,DYNY1-1,z,pitch,zInner)]; } if(im == 53)//north periodic { for(int i = 0; i<19; i++) f[i ]= fA[f_mem(i ,x,DYNY2,z,pitch,zInner)]; } if(im == 54)//DYNY periodic { for(int i = 0; i<19; i++) f[i ]= fA[f_mem(i ,x,1,z,pitch,zInner)]; } if(VELAV == 1){ if(t>=START_VELAV && t<START_VELFLUC){ float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YDIM]; float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YDIM]; float w_Av = velAv_w[x+y*pitch+(z+1)*pitch*YDIM]; vel_av(f,u_Av,v_Av,w_Av,t); velAv_u[x+y*pitch+(z+1)*pitch*YDIM] = u_Av; velAv_v[x+y*pitch+(z+1)*pitch*YDIM] = v_Av; velAv_w[x+y*pitch+(z+1)*pitch*YDIM] = w_Av; } else if(t>=START_VELFLUC){ float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YDIM]; float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YDIM]; float w_Av = velAv_w[x+y*pitch+(z+1)*pitch*YDIM]; float u_fluc = velFluc_u[x+y*pitch+(z+1)*pitch*YDIM]; float v_fluc = velFluc_v[x+y*pitch+(z+1)*pitch*YDIM]; float w_fluc = velFluc_w[x+y*pitch+(z+1)*pitch*YDIM]; vel_fluc(f,u_Av,v_Av,w_Av,u_fluc,v_fluc,w_fluc,t); velFluc_u[x+y*pitch+(z+1)*pitch*YDIM] = u_fluc; velFluc_v[x+y*pitch+(z+1)*pitch*YDIM] = v_fluc; velFluc_w[x+y*pitch+(z+1)*pitch*YDIM] = w_fluc; } } for(int i = 0; i<19; i++) fB[f_mem(i ,x,y,z,pitch,zInner)] = f[ i] ; } if(REFINEMENT == 1){ if(x>=int(LRX0)&&x<=int(LRX0+XLRDIM*LRFACTOR)&&y>=int(LRY0)&&y<=int(LRY0+YLRDIM*LRFACTOR)) { // if(x>int(LRX0+2)&&x<int(LRX0+XLRDIM*LRFACTOR-1)&&y>int(LRY0+2)&&y<int(LRY0+YLRDIM*LRFACTOR-1)) // { // //do nothing // } // else{ //float rho,u,v,w,m9,m11,m13,m14,m15; float mom[9]; PhysicalMoments(mom,f); for(int i = 0; i<9; i++) f_interp[f_mem_interp(i,x-int(LRX0),y-int(LRY0),z,pitch_interp,zInner)]=mom[i]; // } } } syncthreads(); if(check[0] == 1){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint]; sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint]; sumZ[threadIdx.x] += 
sumZ[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&FX[t-STARTF],sumX[0]); atomicAdd(&FY[t-STARTF],sumY[0]); atomicAdd(&FZ[t-STARTF],sumZ[0]); } } } __global__ void update_top_LR(float* hB, float* hA, float* fA, float* temp, float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = (GPU+1)*(zInner+2)-1;//physical coord in LR region int j = x+y*pitch;//index on padded mem (pitch in elements) float xcoord = LRX0+x*LRFACTOR; float ycoord = LRY0+y*LRFACTOR; float zcoord = LRZ0+LRFACTOR*z; int im = ImageFcnLR(xcoord,ycoord,zcoord); float f[19]; __shared__ float sumX[BLOCKSIZELRX], sumY[BLOCKSIZELRX], sumZ[BLOCKSIZELRX]; __shared__ int check[1]; check[0] = 0; syncthreads(); f[0 ]= hA [j]; f[1 ]= hA [buff_memLR(1 ,x-1,y ,pitch)]; f[3 ]= hA [buff_memLR(3 ,x+1,y ,pitch)]; f[2 ]= hA [buff_memLR(2 ,x ,y-1,pitch)]; f[5 ]= hA [buff_memLR(5 ,x-1,y-1,pitch)]; f[6 ]= hA [buff_memLR(6 ,x+1,y-1,pitch)]; f[4 ]= hA [buff_memLR(4 ,x ,y+1,pitch)]; f[7 ]= hA [buff_memLR(7 ,x+1,y+1,pitch)]; f[8 ]= hA [buff_memLR(8 ,x-1,y+1,pitch)]; f[9 ]= fA [ f_memLR(9 ,x ,y ,zInner-1,pitch, zInner)]; f[10]= fA [ f_memLR(10,x-1,y ,zInner-1,pitch, zInner)]; f[11]= fA [ f_memLR(11,x ,y-1,zInner-1,pitch, zInner)]; f[12]= fA [ f_memLR(12,x+1,y ,zInner-1,pitch, zInner)]; f[13]= fA [ f_memLR(13,x ,y+1,zInner-1,pitch, zInner)]; f[14]= temp[buff_memLR(14,x ,y ,pitch)]; f[15]= temp[buff_memLR(15,x-1,y ,pitch)]; f[16]= temp[buff_memLR(16,x ,y-1,pitch)]; f[17]= temp[buff_memLR(17,x+1,y ,pitch)]; f[18]= temp[buff_memLR(18,x ,y+1,pitch)]; if(im == 1 || im ==10){//BB if(im == 10 && flag_F == 1){ check[0] = 1; sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6]; sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17]; sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6]; sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18]; sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13]; sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; } hB[buff_memLR(0 ,x,y,pitch)] = f[0 ]; hB[buff_memLR(1 ,x,y,pitch)] = f[3 ]; hB[buff_memLR(2 ,x,y,pitch)] = f[4 ]; hB[buff_memLR(3 ,x,y,pitch)] = f[1 ]; hB[buff_memLR(4 ,x,y,pitch)] = f[2 ]; hB[buff_memLR(5 ,x,y,pitch)] = f[7 ]; hB[buff_memLR(6 ,x,y,pitch)] = f[8 ]; hB[buff_memLR(7 ,x,y,pitch)] = f[5 ]; hB[buff_memLR(8 ,x,y,pitch)] = f[6 ]; hB[buff_memLR(9 ,x,y,pitch)] = f[14]; hB[buff_memLR(10,x,y,pitch)] = f[17]; hB[buff_memLR(11,x,y,pitch)] = f[18]; hB[buff_memLR(12,x,y,pitch)] = f[15]; hB[buff_memLR(13,x,y,pitch)] = f[16]; hB[buff_memLR(14,x,y,pitch)] = f[9 ]; hB[buff_memLR(15,x,y,pitch)] = f[12]; hB[buff_memLR(16,x,y,pitch)] = f[13]; hB[buff_memLR(17,x,y,pitch)] = f[10]; hB[buff_memLR(18,x,y,pitch)] = f[11]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; mrt_collide(f,omega,LRFACTOR); for(int i = 0; i<19; i++) hB[buff_memLR(i ,x,y,pitch)] = f[i ]; } syncthreads(); if(check[0] == 1){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint]; sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint]; sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads 
= halfPoint; } if(threadIdx.x == 0){ atomicAdd(&FX[t-STARTF],sumX[0]); atomicAdd(&FY[t-STARTF],sumY[0]); atomicAdd(&FZ[t-STARTF],sumZ[0]); } } } __global__ void update_bot_LR(float* gB, float* gA, float* fA, float* temp, float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; //int z = (zInner+2)-1; int j = x+y*pitch;//index on padded mem (pitch in elements) float xcoord = LRX0+x*LRFACTOR; float ycoord = LRY0+y*LRFACTOR; //float zcoord = LRZ0+GPU*LRFACTOR*z; float zcoord = LRZ0+LRFACTOR*(GPU*(zInner+2)-1); int im = ImageFcnLR(xcoord,ycoord,zcoord); float f[19]; __shared__ float sumX[BLOCKSIZELRX], sumY[BLOCKSIZELRX], sumZ[BLOCKSIZELRX]; __shared__ int check[1]; check[0] = 0; syncthreads(); f[0 ]= gA [j]; f[1 ]= gA [buff_memLR(1 ,x-1,y ,pitch)]; f[3 ]= gA [buff_memLR(3 ,x+1,y ,pitch)]; f[2 ]= gA [buff_memLR(2 ,x ,y-1,pitch)]; f[5 ]= gA [buff_memLR(5 ,x-1,y-1,pitch)]; f[6 ]= gA [buff_memLR(6 ,x+1,y-1,pitch)]; f[4 ]= gA [buff_memLR(4 ,x ,y+1,pitch)]; f[7 ]= gA [buff_memLR(7 ,x+1,y+1,pitch)]; f[8 ]= gA [buff_memLR(8 ,x-1,y+1,pitch)]; f[9 ]= temp[buff_memLR(9 ,x ,y ,pitch)]; f[10]= temp[buff_memLR(10,x-1,y ,pitch)]; f[11]= temp[buff_memLR(11,x ,y-1,pitch)]; f[12]= temp[buff_memLR(12,x+1,y ,pitch)]; f[13]= temp[buff_memLR(13,x ,y+1,pitch)]; f[14]= fA [ f_memLR(14,x ,y ,0,pitch, zInner)]; f[15]= fA [ f_memLR(15,x-1,y ,0,pitch, zInner)]; f[16]= fA [ f_memLR(16,x ,y-1,0,pitch, zInner)]; f[17]= fA [ f_memLR(17,x+1,y ,0,pitch, zInner)]; f[18]= fA [ f_memLR(18,x ,y+1,0,pitch, zInner)]; if(im == 1 || im ==10){//BB if(im == 10 && flag_F == 1){ check[0] = 1; sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6]; sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17]; sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6]; sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18]; sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13]; sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; } gB[buff_memLR(0 ,x,y,pitch)] = f[0 ]; gB[buff_memLR(1 ,x,y,pitch)] = f[3 ]; gB[buff_memLR(2 ,x,y,pitch)] = f[4 ]; gB[buff_memLR(3 ,x,y,pitch)] = f[1 ]; gB[buff_memLR(4 ,x,y,pitch)] = f[2 ]; gB[buff_memLR(5 ,x,y,pitch)] = f[7 ]; gB[buff_memLR(6 ,x,y,pitch)] = f[8 ]; gB[buff_memLR(7 ,x,y,pitch)] = f[5 ]; gB[buff_memLR(8 ,x,y,pitch)] = f[6 ]; gB[buff_memLR(9 ,x,y,pitch)] = f[14]; gB[buff_memLR(10,x,y,pitch)] = f[17]; gB[buff_memLR(11,x,y,pitch)] = f[18]; gB[buff_memLR(12,x,y,pitch)] = f[15]; gB[buff_memLR(13,x,y,pitch)] = f[16]; gB[buff_memLR(14,x,y,pitch)] = f[9 ]; gB[buff_memLR(15,x,y,pitch)] = f[12]; gB[buff_memLR(16,x,y,pitch)] = f[13]; gB[buff_memLR(17,x,y,pitch)] = f[10]; gB[buff_memLR(18,x,y,pitch)] = f[11]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; mrt_collide(f,omega,LRFACTOR); for(int i = 0; i<19; i++) gB[buff_memLR(i ,x,y,pitch)] = f[i ]; } syncthreads(); if(check[0] == 1){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint]; sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint]; sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&FX[t-STARTF],sumX[0]); 
atomicAdd(&FY[t-STARTF],sumY[0]); atomicAdd(&FZ[t-STARTF],sumZ[0]); } } } __global__ void update_inn_LR(float* fB, float* fA, float* g, float* h, float omega, size_t pitch, int GPU, int zInner, float* velAv_u, float* velAv_v, float* velFluc_u, float* velFluc_v, float* FX, float* FY, float* FZ, int t, int flag_F) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements) int im = ImageFcnLR(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+LRFACTOR*(GPU*(zInner+2)+1+z)); float f[19]; __shared__ float sumX[BLOCKSIZELRX], sumY[BLOCKSIZELRX], sumZ[BLOCKSIZELRX]; __shared__ int check[1]; check[0] = 0; syncthreads(); f[ 0] = fA[j]; f[ 1] = fA[f_memLR (1 ,x-1,y ,z ,pitch, zInner)]; f[ 3] = fA[f_memLR (3 ,x+1,y ,z ,pitch, zInner)]; f[ 2] = fA[f_memLR (2 ,x ,y-1,z ,pitch, zInner)]; f[ 5] = fA[f_memLR (5 ,x-1,y-1,z ,pitch, zInner)]; f[ 6] = fA[f_memLR (6 ,x+1,y-1,z ,pitch, zInner)]; f[ 4] = fA[f_memLR (4 ,x ,y+1,z ,pitch, zInner)]; f[ 7] = fA[f_memLR (7 ,x+1,y+1,z ,pitch, zInner)]; f[ 8] = fA[f_memLR (8 ,x-1,y+1,z ,pitch, zInner)]; if(z==zInner-1){//top nodes need info from h f[ 9] =fA[ f_memLR(9 ,x ,y ,z-1,pitch, zInner)]; f[10]= fA[ f_memLR(10,x-1,y ,z-1,pitch, zInner)]; f[11]= fA[ f_memLR(11,x ,y-1,z-1,pitch, zInner)]; f[12]= fA[ f_memLR(12,x+1,y ,z-1,pitch, zInner)]; f[13]= fA[ f_memLR(13,x ,y+1,z-1,pitch, zInner)]; f[14]= h [buff_memLR(14,x ,y ,pitch)]; f[15]= h [buff_memLR(15,x-1,y ,pitch)]; f[16]= h [buff_memLR(16,x ,y-1,pitch)]; f[17]= h [buff_memLR(17,x+1,y ,pitch)]; f[18]= h [buff_memLR(18,x ,y+1,pitch)]; } else if(z==0){//bottom nodes need info from g f[ 9] =g [buff_memLR(9 ,x ,y ,pitch)]; f[10]= g [buff_memLR(10,x-1,y ,pitch)]; f[11]= g [buff_memLR(11,x ,y-1,pitch)]; f[12]= g [buff_memLR(12,x+1,y ,pitch)]; f[13]= g [buff_memLR(13,x ,y+1,pitch)]; f[14]= fA[ f_memLR(14,x ,y ,z+1,pitch, zInner)]; f[15]= fA[ f_memLR(15,x-1,y ,z+1,pitch, zInner)]; f[16]= fA[ f_memLR(16,x ,y-1,z+1,pitch, zInner)]; f[17]= fA[ f_memLR(17,x+1,y ,z+1,pitch, zInner)]; f[18]= fA[ f_memLR(18,x ,y+1,z+1,pitch, zInner)]; } else{//normal nodes f[ 9] =fA[f_memLR(9 ,x ,y ,z-1,pitch,zInner)]; f[10]= fA[f_memLR(10,x-1,y ,z-1,pitch,zInner)]; f[11]= fA[f_memLR(11,x ,y-1,z-1,pitch,zInner)]; f[12]= fA[f_memLR(12,x+1,y ,z-1,pitch,zInner)]; f[13]= fA[f_memLR(13,x ,y+1,z-1,pitch,zInner)]; f[14]= fA[f_memLR(14,x ,y ,z+1,pitch,zInner)]; f[15]= fA[f_memLR(15,x-1,y ,z+1,pitch,zInner)]; f[16]= fA[f_memLR(16,x ,y-1,z+1,pitch,zInner)]; f[17]= fA[f_memLR(17,x+1,y ,z+1,pitch,zInner)]; f[18]= fA[f_memLR(18,x ,y+1,z+1,pitch,zInner)]; }//end normal nodes if(im == 1 || im ==10){//BB if(im == 10 && flag_F == 1){ check[0] = 1; sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6]; sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17]; sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6]; sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18]; sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13]; sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; } fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f[ 3] ; fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f[ 4] ; fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f[ 1] ; fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f[ 2] ; fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f[ 7] ; fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f[ 8] ; fB[f_memLR(7 
,x,y,z,pitch,zInner)] = f[ 5] ; fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f[ 6] ; fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f[14]; fB[f_memLR(10,x,y,z,pitch,zInner)] = f[17]; fB[f_memLR(11,x,y,z,pitch,zInner)] = f[18]; fB[f_memLR(12,x,y,z,pitch,zInner)] = f[15]; fB[f_memLR(13,x,y,z,pitch,zInner)] = f[16]; fB[f_memLR(14,x,y,z,pitch,zInner)] = f[ 9] ; fB[f_memLR(15,x,y,z,pitch,zInner)] = f[12]; fB[f_memLR(16,x,y,z,pitch,zInner)] = f[13]; fB[f_memLR(17,x,y,z,pitch,zInner)] = f[10]; fB[f_memLR(18,x,y,z,pitch,zInner)] = f[11]; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; mrt_collide(f,omega,LRFACTOR); if(VELAV == 1){ if(t>=START_VELAV && t<START_VELFLUC){ float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YLRDIM]; float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YLRDIM]; vel_avLR(f,u_Av,v_Av,t); velAv_u[x+y*pitch+(z+1)*pitch*YLRDIM] = u_Av; velAv_v[x+y*pitch+(z+1)*pitch*YLRDIM] = v_Av; } else if(t>=START_VELFLUC){ float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YLRDIM]; float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YLRDIM]; float u_fluc = velFluc_u[x+y*pitch+(z+1)*pitch*YLRDIM]; float v_fluc = velFluc_v[x+y*pitch+(z+1)*pitch*YLRDIM]; vel_flucLR(f,u_Av,v_Av,u_fluc,v_fluc,t); velFluc_u[x+y*pitch+(z+1)*pitch*YLRDIM] = u_fluc; velFluc_v[x+y*pitch+(z+1)*pitch*YLRDIM] = v_fluc; } } for(int i = 0; i<19; i++) fB[f_memLR(i ,x,y,z,pitch,zInner)] = f[ i] ; } syncthreads(); if(check[0] == 1){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint]; sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint]; sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&FX[t-STARTF],sumX[0]); atomicAdd(&FY[t-STARTF],sumY[0]); atomicAdd(&FZ[t-STARTF],sumZ[0]); } } } /* InterpCF is used on the LR grid. It first uses part of its threads to read from the coarse mesh nodes that completely envelope the fine mesh nodes, and loads the f's into shared memory. 
Next, all threads use the shared memory data to interpolate and scale the f's */ __global__ void InterpCF(float* f_f, float* g_f, float* h_f, size_t pitch_f, float* m_f_c, float* m_g_c, float* m_h_c, float* m_g_temp, size_t pitch_m, float SF, float omega_c, int GPU, int zInner, int zInner_f) { int x = threadIdx.x+blockIdx.x*blockDim.x; int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; __shared__ float mom_c[BLOCKSIZEINTERP][2][2][9]; __shared__ float S_c[BLOCKSIZEINTERP][2][2][6]; //int GPU = 0; int im = ImageFcnLR(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+LRFACTOR*(GPU*(zInner_f+2)+z)); if(blockIdx.z == 0 && threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2) { //use g and g_temp int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1 int ymax = YLRDIM*LRFACTOR+1; if(threadIdx.z == 0){ for(int i = 0; i<9; i++) mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_g_temp[x_c+y_c*pitch_m+i*ymax*pitch_m]; } else{ for(int i = 0; i<9; i++) mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_g_c[x_c+y_c*pitch_m+i*ymax*pitch_m]; } // float S[6];//float m_strain[9]; // for(int i = 0; i<9; i++) // m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z]; // for(int i = 0; i<6; i++) // S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i]; StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],1.f); } else if(blockIdx.z == 1 && threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2) { //use g and f int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1 int ymax = YLRDIM*LRFACTOR+1; if(threadIdx.z == 0){ for(int i = 0; i<9; i++) mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_g_c[x_c+y_c*pitch_m+i*ymax*pitch_m]; } else{ for(int i = 0; i<9; i++) mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_f_c[x_c+y_c*pitch_m+i*ymax*pitch_m*zInner]; } // float S[6];//float m_strain[9]; // for(int i = 0; i<9; i++) // m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z]; // for(int i = 0; i<6; i++) // S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i]; StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],1.f); } else if(blockIdx.z == zInner+1 && threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2) { //use h and f int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1 int ymax = YLRDIM*LRFACTOR+1; if(threadIdx.z == 0){ for(int i = 0; i<9; i++) mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_f_c[x_c+y_c*pitch_m+(zInner-1)*ymax*pitch_m+i*ymax*pitch_m*zInner]; } else{ for(int i = 0; i<9; i++) mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_h_c[x_c+y_c*pitch_m+i*ymax*pitch_m]; } // float S[6];//float m_strain[9]; // for(int i = 0; i<9; i++) // m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z]; // for(int i = 0; i<6; i++) // S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i]; StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],1.f); } else if(threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2){//use f only int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, 
blockdim.x is LRX*LRFACTOR int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1 int z_c = threadIdx.z+blockIdx.z-2;//in coarse grid, blockdim.z is 1; -2 to account for g and lower halo int ymax = YLRDIM*LRFACTOR+1; for(int i = 0; i<9; i++) mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_f_c[x_c+y_c*pitch_m+z_c*ymax*pitch_m+i*ymax*pitch_m*zInner]; // float S[6];//float m_strain[9]; // for(int i = 0; i<9; i++) // m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z]; // for(int i = 0; i<6; i++) // S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i]; StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],1.f); } syncthreads(); if(x<LRLEVEL || x>XLRDIM-LRLEVEL-1 || y<LRLEVEL || y>YLRDIM-LRLEVEL-1){ //if(x<LRLEVEL+3 || x>XLRDIM-LRLEVEL-5 || y<LRLEVEL+3 || y>YLRDIM-LRLEVEL-5){ //interpolate from shared mem int xm = int(threadIdx.x*LRFACTOR+LRFACTOR*0.5f); int ym = int(threadIdx.y*LRFACTOR+LRFACTOR*0.5f); int zm = int(threadIdx.z*LRFACTOR+LRFACTOR*0.5f); int xp = xm+1; //int yp = ym+1; int zp = zm+1; float xf = (threadIdx.x*LRFACTOR+LRFACTOR*0.5f)-xm; float yf = (threadIdx.y*LRFACTOR+LRFACTOR*0.5f)-ym; float zf = (threadIdx.z*LRFACTOR+LRFACTOR*0.5f)-zm; float mom[9]; for(int i = 0; i<9; i++){ float v000 = mom_c[xm][0][0][i]; float v001 = mom_c[xp][0][0][i]; float v010 = mom_c[xm][1][0][i]; float v011 = mom_c[xp][1][0][i]; float v100 = mom_c[xm][0][1][i]; float v101 = mom_c[xp][0][1][i]; float v110 = mom_c[xm][1][1][i]; float v111 = mom_c[xp][1][1][i]; mom[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf); } if(ORDER == 2) { float u_x1,u_x2,u_x3,u_x4,u_x5,u_x6,u_x7,u_x8; float v_y1,v_y2,v_y3,v_y4,v_y5,v_y6,v_y7,v_y8; float w_z1,w_z2,w_z3,w_z4,w_z5,w_z6,w_z7,w_z8; float Sxy1,Sxy2,Sxy3,Sxy4,Sxy5,Sxy6,Sxy7,Sxy8; float Syz1,Syz2,Syz3,Syz4,Syz5,Syz6,Syz7,Syz8; float Sxz1,Sxz2,Sxz3,Sxz4,Sxz5,Sxz6,Sxz7,Sxz8; u_x1=S_c[xm][0][0][0];v_y1=S_c[xm][0][0][1];w_z1=S_c[xm][0][0][2];Sxy1=S_c[xm][0][0][3];Syz1=S_c[xm][0][0][4];Sxz1=S_c[xm][0][0][5]; u_x2=S_c[xp][0][0][0];v_y2=S_c[xp][0][0][1];w_z2=S_c[xp][0][0][2];Sxy2=S_c[xp][0][0][3];Syz2=S_c[xp][0][0][4];Sxz2=S_c[xp][0][0][5]; u_x3=S_c[xm][1][0][0];v_y3=S_c[xm][1][0][1];w_z3=S_c[xm][1][0][2];Sxy3=S_c[xm][1][0][3];Syz3=S_c[xm][1][0][4];Sxz3=S_c[xm][1][0][5]; u_x4=S_c[xp][1][0][0];v_y4=S_c[xp][1][0][1];w_z4=S_c[xp][1][0][2];Sxy4=S_c[xp][1][0][3];Syz4=S_c[xp][1][0][4];Sxz4=S_c[xp][1][0][5]; u_x5=S_c[xm][0][1][0];v_y5=S_c[xm][0][1][1];w_z5=S_c[xm][0][1][2];Sxy5=S_c[xm][0][1][3];Syz5=S_c[xm][0][1][4];Sxz5=S_c[xm][0][1][5]; u_x6=S_c[xp][0][1][0];v_y6=S_c[xp][0][1][1];w_z6=S_c[xp][0][1][2];Sxy6=S_c[xp][0][1][3];Syz6=S_c[xp][0][1][4];Sxz6=S_c[xp][0][1][5]; u_x7=S_c[xm][1][1][0];v_y7=S_c[xm][1][1][1];w_z7=S_c[xm][1][1][2];Sxy7=S_c[xm][1][1][3];Syz7=S_c[xm][1][1][4];Sxz7=S_c[xm][1][1][5]; u_x8=S_c[xp][1][1][0];v_y8=S_c[xp][1][1][1];w_z8=S_c[xp][1][1][2];Sxy8=S_c[xp][1][1][3];Syz8=S_c[xp][1][1][4];Sxz8=S_c[xp][1][1][5]; float m03,m05,m07, m13,m15,m17, m23,m25,m27, m33,m35,m37, m43,m45,m47, m53,m55,m57, m63,m65,m67, m73,m75,m77; m03=mom_c[xm][0][0][1];m05=mom_c[xm][0][0][2];m07=mom_c[xm][0][0][3]; m13=mom_c[xp][0][0][1];m15=mom_c[xp][0][0][2];m17=mom_c[xp][0][0][3]; m23=mom_c[xm][1][0][1];m25=mom_c[xm][1][0][2];m27=mom_c[xm][1][0][3]; m33=mom_c[xp][1][0][1];m35=mom_c[xp][1][0][2];m37=mom_c[xp][1][0][3]; m43=mom_c[xm][0][1][1];m45=mom_c[xm][0][1][2];m47=mom_c[xm][0][1][3]; m53=mom_c[xp][0][1][1];m55=mom_c[xp][0][1][2];m57=mom_c[xp][0][1][3]; 
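// m*3, m*5, m*7 appear to hold the x-, y- and z-momentum moments of the eight enclosing coarse nodes; together with the strain-rate entries above they form the quadratic (ORDER == 2) correction coefficients cx..ez computed below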
m63=mom_c[xm][1][1][1];m65=mom_c[xm][1][1][2];m67=mom_c[xm][1][1][3]; m73=mom_c[xp][1][1][1];m75=mom_c[xp][1][1][2];m77=mom_c[xp][1][1][3]; float cx = -((u_x8-u_x7+u_x6-u_x5+u_x4-u_x3+u_x2-u_x1))*0.03125f; float cy = -((Sxy8+Sxy7-Sxy6-Sxy5+Sxy4+Sxy3-Sxy2-Sxy1)-m75+m65+m55-m45-m35+m25+m15-m05)*0.0625f; float cz = -((Sxz8+Sxz7+Sxz6+Sxz5-Sxz4-Sxz3-Sxz2-Sxz1)-m77+m67-m57+m47+m37-m27+m17-m07)*0.0625f; float dx = -((Sxy8-Sxy7+Sxy6-Sxy5+Sxy4-Sxy3+Sxy2-Sxy1)-m73+m63+m53-m43-m33+m23+m13-m03)*0.0625f; float dy = -((v_y8+v_y7-v_y6-v_y5+v_y4+v_y3-v_y2-v_y1))*0.03125f; float dz = -((Syz8+Syz7+Syz6+Syz5-Syz4-Syz3-Syz2-Syz1)-m77-m67+m57+m47+m37+m27-m17-m07)*0.0625f; float ex = -((Sxz8-Sxz7+Sxz6-Sxz5+Sxz4-Sxz3+Sxz2-Sxz1)-m73+m63-m53+m43+m33-m23+m13-m03)*0.0625f; float ey = -((Syz8+Syz7-Syz6-Syz5+Syz4+Syz3-Syz2-Syz1)-m75-m65+m55+m45+m35+m25-m15-m05)*0.0625f; float ez = -((w_z8+w_z7+w_z6+w_z5-w_z4-w_z3-w_z2-w_z1))*0.03125f; float xpr = 4.f*xf*xf-4.f*xf+1.f; float ypr = 4.f*yf*yf-4.f*yf+1.f; float zpr = 4.f*zf*zf-4.f*zf+1.f; mom[1] += cx*(1.f-xpr)+cy*(1.f-ypr)+cz*(1.f-zpr); mom[2] += dx*(1.f-xpr)+dy*(1.f-ypr)+dz*(1.f-zpr); mom[3] += ex*(1.f-xpr)+ey*(1.f-ypr)+ez*(1.f-zpr); } float f[19]; //InvertPhysicalMoments(f,mom,SF); InvertPhysicalMoments_LES_cf(f,mom,SF,omega_c); if(im != 1 && im != 10){ if(z==0){ for(int i = 0; i<19; i++){ g_f[buff_memLR(i,x,y,pitch_f)]=f[i]; } } else if(z==gridDim.z*blockDim.z-1){ for(int i = 0; i<19; i++){ h_f[buff_memLR(i,x,y,pitch_f)]=f[i]; } } else{ for(int i = 0; i<19; i++){ f_f[f_memLR(i,x,y,z-1,pitch_f,zInner_f)]=f[i]; } } } } } __global__ void InterpFC(float* f_c, float* g_c, float* h_c, float* f_f, float* h_f, float* temp_f, size_t pitch_c, size_t pitch_f, float SF, float omega_f, int GPU, int zInner, int zInner_f) { int x = threadIdx.x+blockIdx.x*blockDim.x; int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; //if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-1 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-1) && //(x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-2) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-2))) //(true)) //if( (x > LRX0+5 && x < LRX0+XLRDIM*LRFACTOR-6 && y > LRY0+5 && y < LRY0+YLRDIM*LRFACTOR-6) && if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-2 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-2) && //(x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-2) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-2))) (true)) { float f[19]; float mom[8][9];//physical moments of 8 neighboring nodes float S_f[8][6];//strain rate tensor of 8 neighboring nodes int xm = LRLEVEL*(x-LRX0); int ym = LRLEVEL*(y-LRY0); int zm = LRLEVEL*(z-(-(1.f-0.5f*LRFACTOR)))-1;//LRZ0=-(1.f-0.5f*LRFACTOR), and -1 to account for g_LR int xp = xm+1; int yp = ym+1; int zp = zm+1; //top nodes. interp between h and h_temp. 
output to h if(z == zInner+1) { for(int i = 0; i<19; i++) f[i] = temp_f[buff_memLR(i,xm,ym,pitch_f)]; PhysicalMoments(mom[0],f); StrainRate(S_f[0],mom[0],1.f); for(int i = 0; i<19; i++) f[i] = temp_f[buff_memLR(i,xp,ym,pitch_f)]; PhysicalMoments(mom[1],f); StrainRate(S_f[1],mom[1],1.f); for(int i = 0; i<19; i++) f[i] = temp_f[buff_memLR(i,xm,yp,pitch_f)]; PhysicalMoments(mom[2],f); StrainRate(S_f[2],mom[2],1.f); for(int i = 0; i<19; i++) f[i] = temp_f[buff_memLR(i,xp,yp,pitch_f)]; PhysicalMoments(mom[3],f); StrainRate(S_f[3],mom[3],1.f); for(int i = 0; i<19; i++) f[i] = h_f[buff_memLR(i,xm,ym,pitch_f)]; PhysicalMoments(mom[4],f); StrainRate(S_f[4],mom[4],1.f); for(int i = 0; i<19; i++) f[i] = h_f[buff_memLR(i,xp,ym,pitch_f)]; PhysicalMoments(mom[5],f); StrainRate(S_f[5],mom[5],1.f); for(int i = 0; i<19; i++) f[i] = h_f[buff_memLR(i,xm,yp,pitch_f)]; PhysicalMoments(mom[6],f); StrainRate(S_f[6],mom[6],1.f); for(int i = 0; i<19; i++) f[i] = h_f[buff_memLR(i,xp,yp,pitch_f)]; PhysicalMoments(mom[7],f); StrainRate(S_f[7],mom[7],1.f); } //inner nodes. output to g or f else{ for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xm,ym,zm,pitch_f,zInner_f)]; PhysicalMoments(mom[0],f); StrainRate(S_f[0],mom[0],1.f); for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xp,ym,zm,pitch_f,zInner_f)]; PhysicalMoments(mom[1],f); StrainRate(S_f[1],mom[1],1.f); for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xm,yp,zm,pitch_f,zInner_f)]; PhysicalMoments(mom[2],f); StrainRate(S_f[2],mom[2],1.f); for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xp,yp,zm,pitch_f,zInner_f)]; PhysicalMoments(mom[3],f); StrainRate(S_f[3],mom[3],1.f); for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xm,ym,zp,pitch_f,zInner_f)]; PhysicalMoments(mom[4],f); StrainRate(S_f[4],mom[4],1.f); for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xp,ym,zp,pitch_f,zInner_f)]; PhysicalMoments(mom[5],f); StrainRate(S_f[5],mom[5],1.f); for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xm,yp,zp,pitch_f,zInner_f)]; PhysicalMoments(mom[6],f); StrainRate(S_f[6],mom[6],1.f); for(int i = 0; i<19; i++) f[i] = f_f[f_memLR(i,xp,yp,zp,pitch_f,zInner_f)]; PhysicalMoments(mom[7],f); StrainRate(S_f[7],mom[7],1.f); } if(ORDER == 1){ for(int i = 0; i<9; i++) mom[0][i] = 0.125f*(mom[0][i]+mom[1][i]+mom[2][i]+mom[3][i]+mom[4][i]+mom[5][i]+mom[6][i]+mom[7][i]); } else if(ORDER == 2) { float u_x1,u_x2,u_x3,u_x4,u_x5,u_x6,u_x7,u_x8; float v_y1,v_y2,v_y3,v_y4,v_y5,v_y6,v_y7,v_y8; float w_z1,w_z2,w_z3,w_z4,w_z5,w_z6,w_z7,w_z8; float Sxy1,Sxy2,Sxy3,Sxy4,Sxy5,Sxy6,Sxy7,Sxy8; float Syz1,Syz2,Syz3,Syz4,Syz5,Syz6,Syz7,Syz8; float Sxz1,Sxz2,Sxz3,Sxz4,Sxz5,Sxz6,Sxz7,Sxz8; u_x1=S_f[0][0];v_y1=S_f[0][1];w_z1=S_f[0][2];Sxy1=S_f[0][3];Syz1=S_f[0][4];Sxz1=S_f[0][5]; u_x2=S_f[1][0];v_y2=S_f[1][1];w_z2=S_f[1][2];Sxy2=S_f[1][3];Syz2=S_f[1][4];Sxz2=S_f[1][5]; u_x3=S_f[2][0];v_y3=S_f[2][1];w_z3=S_f[2][2];Sxy3=S_f[2][3];Syz3=S_f[2][4];Sxz3=S_f[2][5]; u_x4=S_f[3][0];v_y4=S_f[3][1];w_z4=S_f[3][2];Sxy4=S_f[3][3];Syz4=S_f[3][4];Sxz4=S_f[3][5]; u_x5=S_f[4][0];v_y5=S_f[4][1];w_z5=S_f[4][2];Sxy5=S_f[4][3];Syz5=S_f[4][4];Sxz5=S_f[4][5]; u_x6=S_f[5][0];v_y6=S_f[5][1];w_z6=S_f[5][2];Sxy6=S_f[5][3];Syz6=S_f[5][4];Sxz6=S_f[5][5]; u_x7=S_f[6][0];v_y7=S_f[6][1];w_z7=S_f[6][2];Sxy7=S_f[6][3];Syz7=S_f[6][4];Sxz7=S_f[6][5]; u_x8=S_f[7][0];v_y8=S_f[7][1];w_z8=S_f[7][2];Sxy8=S_f[7][3];Syz8=S_f[7][4];Sxz8=S_f[7][5]; float m03,m05,m07, m13,m15,m17, m23,m25,m27, m33,m35,m37, m43,m45,m47, m53,m55,m57, m63,m65,m67, m73,m75,m77; m03=mom[0][1];m05=mom[0][2];m07=mom[0][3]; m13=mom[1][1];m15=mom[1][2];m17=mom[1][3]; 
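// same quadratic-correction bookkeeping as in InterpCF, here gathered from the eight fine-grid nodes that surround the coarse node being reconstructed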
m23=mom[2][1];m25=mom[2][2];m27=mom[2][3]; m33=mom[3][1];m35=mom[3][2];m37=mom[3][3]; m43=mom[4][1];m45=mom[4][2];m47=mom[4][3]; m53=mom[5][1];m55=mom[5][2];m57=mom[5][3]; m63=mom[6][1];m65=mom[6][2];m67=mom[6][3]; m73=mom[7][1];m75=mom[7][2];m77=mom[7][3]; float cx = -((u_x8-u_x7+u_x6-u_x5+u_x4-u_x3+u_x2-u_x1))*0.03125f; float cy = -((Sxy8+Sxy7-Sxy6-Sxy5+Sxy4+Sxy3-Sxy2-Sxy1)-m75+m65+m55-m45-m35+m25+m15-m05)*0.0625f; float cz = -((Sxz8+Sxz7+Sxz6+Sxz5-Sxz4-Sxz3-Sxz2-Sxz1)-m77+m67-m57+m47+m37-m27+m17-m07)*0.0625f; float dx = -((Sxy8-Sxy7+Sxy6-Sxy5+Sxy4-Sxy3+Sxy2-Sxy1)-m73+m63+m53-m43-m33+m23+m13-m03)*0.0625f; float dy = -((v_y8+v_y7-v_y6-v_y5+v_y4+v_y3-v_y2-v_y1))*0.03125f; float dz = -((Syz8+Syz7+Syz6+Syz5-Syz4-Syz3-Syz2-Syz1)-m77-m67+m57+m47+m37+m27-m17-m07)*0.0625f; float ex = -((Sxz8-Sxz7+Sxz6-Sxz5+Sxz4-Sxz3+Sxz2-Sxz1)-m73+m63-m53+m43+m33-m23+m13-m03)*0.0625f; float ey = -((Syz8+Syz7-Syz6-Syz5+Syz4+Syz3-Syz2-Syz1)-m75-m65+m55+m45+m35+m25-m15-m05)*0.0625f; float ez = -((w_z8+w_z7+w_z6+w_z5-w_z4-w_z3-w_z2-w_z1))*0.03125f; for(int i = 0; i<9; i++) mom[0][i] = 0.125f*(mom[0][i]+mom[1][i]+mom[2][i]+mom[3][i]+mom[4][i]+mom[5][i]+mom[6][i]+mom[7][i]); float xpr = 0.f;//4.f*xf*xf-4.f*xf+1.f; float ypr = 0.f;//4.f*yf*yf-4.f*yf+1.f; float zpr = 0.f;//4.f*zf*zf-4.f*zf+1.f; mom[0][1] += cx*(1.f-xpr)+cy*(1.f-ypr)+cz*(1.f-zpr); mom[0][2] += dx*(1.f-xpr)+dy*(1.f-ypr)+dz*(1.f-zpr); mom[0][3] += ex*(1.f-xpr)+ey*(1.f-ypr)+ez*(1.f-zpr); } //InvertPhysicalMoments(f,mom[0],SF); InvertPhysicalMoments_LES_fc(f,mom[0],SF,omega_f); //for(int i = 0; i<19; i++) f[i] = 0.1f; //int GPU = 0; int im = ImageFcn(x,y,GPU*(zInner+2)+z,0); if(im != 1 && im != 10){ if(z == 0){ for(int i = 0; i<19; i++) g_c[buff_mem(i,x,y,pitch_c)]=f[i]; } else if(z == zInner+1){ for(int i = 0; i<19; i++) h_c[buff_mem(i,x,y,pitch_c)]=f[i]; } else{ for(int i = 0; i<19; i++) f_c[f_mem(i,x,y,z-1,pitch_c,zInner)]=f[i]; } } }//end extraction region } __global__ void AverageV(float* fA, float* gA, float* hA, size_t pitch, int GPU, int zInner, float* Av_V, int t) { int x = threadIdx.x+blockIdx.x*blockDim.x; int z = threadIdx.z+blockIdx.z*blockDim.z; float f[19]; float v_av = 0; __shared__ float sumV[BLOCKSIZEX]; syncthreads(); if(z == 0){ for(int i = 0; i<19; i++) f[i] = gA[buff_mem(i,x,DYNY1,pitch)]; } else if(z == zInner+1){ for(int i = 0; i<19; i++) f[i] = hA[buff_mem(i,x,DYNY1,pitch)]; } else{ for(int i = 0; i<19; i++) f[i] = fA[f_mem(i,x,DYNY1,z-1,pitch,zInner)]; } sumV[threadIdx.x] = f[2]-f[4]+f[5]+f[6]-f[7]-f[8]+f[11]-f[13]+f[16]-f[18]; syncthreads(); int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumV[threadIdx.x] += sumV[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&Av_V[t],sumV[0]); } } void WriteResults(ostream &output, ostream &outputslice, float *fin, float *gin, float *hin, float **velAv, float **velFluc, float omega, int GPU_N, int GPU) { float f[19]; output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"velAv[2]\",\"ufluc\",\"vfluc\",\"wfluc\",\"Smag\"\n"; output<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<ZDIM/GPU_N<<"\n"; if(GPU == 0){ outputslice<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"velAv[2]\",\"ufluc\",\"vfluc\",\"wfluc\",\"Smag\"\n"; outputslice<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<1<<"\n"; } for(int j = 0; j<YDIM; j++){ for(int i = 0; i<XDIM; i++){ float rho = 0; for(int l = 0; l<19; l++){ 
f[l] = gin[(i+j*XDIM)+l *XDIM*YDIM]; rho += f[l]; } float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17]; float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; output<<i<<", "<<j<<", "<<(ZDIM/GPU_N*GPU)<<", "<<u<<","<<v<<","<<w<<","<<rho<<"," <<velAv[0][i+j*XDIM]<<","<<velAv[1][i+j*XDIM]<<","<<velAv[2][i+j*XDIM]<<", "<<velFluc[0][i+j*XDIM]<<","<<velFluc[1][i+j*XDIM]<<","<<velFluc[2][i+j*XDIM]<<","<<0<<endl; }} for(int k = 1; k<ZDIM/GPU_N-1; k++){ for(int j = 0; j<YDIM; j++){ for(int i = 0; i<XDIM; i++){ float rho = 0; for(int l = 0; l<19; l++){ f[l] = fin[(i+j*XDIM)+(k-1)*XDIM*YDIM+l*XDIM*YDIM*(ZDIM/GPU_N-2)]; rho += f[l]; } float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17]; float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; float m1 =-30.f*f[0]+-11.f*f[1]+-11.f*f[2]+-11.f*f[3]+-11.f*f[4]+8.f*f[5]+8.f*f[6]+8.f*f[7]+8.f*f[8]+-11.f*f[9]+8.f*f[10]+8.f*f[11]+8.f*f[12]+8.f*f[13]+-11.f*f[14]+8.f*f[15]+8.f*f[16]+8.f*f[17]+8.f*f[18]; //float m6 = -4.f*f[2]+4.f*f[4]+f[5]+f[6]+-f[7]+-f[8]+f[11]+-f[13]+f[16]+-f[18]; float m10 =-4.f*f[1]+2.f*f[2]+-4.f*f[3]+2.f*f[4]+f[5]+f[6]+f[7]+f[8]+2.f*f[9]+f[10]+-2.f*f[11]+f[12]+-2.f*f[13]+2.f*f[14]+f[15]+-2.f*f[16]+f[17]+-2.f*f[18]; float m16 = f[5]+-f[6]+-f[7]+f[8]-f[10]+f[12]+-f[15]+f[17]; float m[19] = {0}; Moments_host(f,m); float omega = 1.0f/(3.0f*(UMAX*OBSTR1*2.f/RE)+0.5f); //float omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f)); m[9] -= 2.f*u*u-(v*v+w*w); m[11]-= v*v-w*w; m[13]-= u*v; m[14]-= v*w; m[15]-= u*w; float PI11 = -0.5f *(m[ 9]); float PI22 = -(-38.f*m[ 9]-3.0f*m[11])/76.f; float PI33 = -(-38.f*m[ 9]+3.0f*m[11])/76.f; float PI12 = -1.5f*m[13]; float PI23 = -1.5f*m[14]; float PI13 = -1.5f*m[15]; //we know Smag on coarse mesh float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13)); //InvertMoments_host(f,m); //u = m[3]; //v = m[5]; //w = m[7]; //m6 = m[6 ]; //m10= m[10]; //m16= m[16]; int z = (ZDIM/GPU_N*GPU+k); output<<i<<", "<<j<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<"," <<velAv[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velAv[1][i+j*XDIM+k*XDIM*YDIM]<<", " //<<velFluc[0][i+j*XDIM+k*XDIM*YDIM]<<","<<Smag<<endl; <<velFluc[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velFluc[1][i+j*XDIM+k*XDIM*YDIM]<<","<<Smag<<endl; if(k == 1 && GPU == 0){ outputslice<<i<<", "<<j<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<"," <<velAv[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velAv[1][i+j*XDIM+k*XDIM*YDIM]<<", "<<velAv[2][i+j*XDIM+k*XDIM*YDIM]<<"," <<velFluc[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velFluc[1][i+j*XDIM+k*XDIM*YDIM]<<","<<velFluc[2][i+j*XDIM+k*XDIM*YDIM]<<","<<Smag<<endl; } }}} for(int j = 0; j<YDIM; j++){ for(int i = 0; i<XDIM; i++){ float rho = 0; for(int l = 0; l<19; l++){ f[l] = hin[(i+j*XDIM)+l *XDIM*YDIM]; rho += f[l]; } float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17]; float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; output<<i<<", "<<j<<", "<<(ZDIM/GPU_N*(GPU+1)-1)<<", "<<u<<","<<v<<","<<w<<","<<rho<<"," <<velAv[0][i+j*XDIM+(ZDIM-1)*XDIM*YDIM]<<","<<velAv[1][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<","<<velAv[2][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<", " <<velFluc[0][i+j*XDIM+(ZDIM-1)*XDIM*YDIM]<<","<<velFluc[0][i+j*XDIM+(ZDIM-1)*XDIM*YDIM]<<","<<velFluc[2][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<","<<0<<endl; }} } void 
WriteResultsLR(ostream &output, ostream &outputslice, float *fin, float *gin, float *hin, float **velAv, float **velFluc, float omega, int GPU_N, int GPU) { float f[19]; output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"ufluc\",\"vfluc\",\"Smag\"\n"; output<<"ZONE F=POINT, I="<<XLRDIM<<", J="<<YLRDIM<<", K="<<ZLRDIM/GPU_N<<"\n"; if(GPU == 0){ outputslice<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"ufluc\",\"vfluc\",\"Smag\"\n"; outputslice<<"ZONE F=POINT, I="<<XLRDIM<<", J="<<YLRDIM<<", K="<<1<<"\n"; } for(int j = 0; j<YLRDIM; j++){ for(int i = 0; i<XLRDIM; i++){ float rho = 0; for(int l = 0; l<19; l++){ f[l] = gin[(i+j*XLRDIM)+l *XLRDIM*YLRDIM]; rho += f[l]; } float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17]; float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; float x = LRX0+LRFACTOR*i; float y = LRY0+LRFACTOR*j; float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*GPU); output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<"," <<velAv[0][i+j*XLRDIM]<<","<<velAv[1][i+j*XLRDIM]<<", "<<velFluc[0][i+j*XLRDIM]<<","<<velFluc[1][i+j*XLRDIM]<<","<<0<<endl; }} for(int k = 1; k<ZLRDIM/GPU_N-1; k++){ for(int j = 0; j<YLRDIM; j++){ for(int i = 0; i<XLRDIM; i++){ float rho = 0; for(int l = 0; l<19; l++){ f[l] = fin[(i+j*XLRDIM)+(k-1)*XLRDIM*YLRDIM+l*XLRDIM*YLRDIM*(ZLRDIM/GPU_N-2)]; rho += f[l]; } float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17]; float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; float x = LRX0+LRFACTOR*i; float y = LRY0+LRFACTOR*j; float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*GPU+k); float m[19] = {0}; Moments_host(f,m); float omega = 1.0f/(3.0f*(UMAX*OBSTR1*2.f/RE)+0.5f); //float omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f)); m[9] -= 2.f*u*u-(v*v+w*w); m[11]-= v*v-w*w; m[13]-= u*v; m[14]-= v*w; m[15]-= u*w; float PI11 = -0.5f *(m[ 9]); float PI22 = -(-38.f*m[ 9]-3.0f*m[11])/76.f; float PI33 = -(-38.f*m[ 9]+3.0f*m[11])/76.f; float PI12 = -1.5f*m[13]; float PI23 = -1.5f*m[14]; float PI13 = -1.5f*m[15]; //we know Smag on coarse mesh float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13))/LRFACTOR; output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<"," <<velAv [0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velAv [1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<", " //<<velFluc[0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<Smag<<endl; <<velFluc[0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velFluc[1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<Smag<<endl; if(k == 3 && GPU == 0){ outputslice<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<"," <<velAv [0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velAv [1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<", " <<velFluc[0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velFluc[1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<Smag<<endl; } }}} for(int j = 0; j<YLRDIM; j++){ for(int i = 0; i<XLRDIM; i++){ float rho = 0; for(int l = 0; l<19; l++){ f[l] = hin[(i+j*XLRDIM)+l *XLRDIM*YLRDIM]; rho += f[l]; } float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17]; float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18]; float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18]; float x = LRX0+LRFACTOR*i; float y = LRY0+LRFACTOR*j; float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*(GPU+1)-1); output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<"," 
<<velAv[0][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<velAv[1][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<", " <<velFluc[0][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<velFluc[1][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<0<<endl; }} } void WriteForces(float **F, ofstream &output, int ForceTime, int level) { float ref = UMAX*UMAX*ZDIM*OBSTR1; if(level > 0) ref *= LRLEVEL*LRLEVEL; for(int i = 0; i<ForceTime; i++){ output<<i+STARTF<<", "<<F[0][i]/ref<<", "<<F[1][i]/ref<<", "<<F[2][i]/ref<<endl; } } void WriteAvV(float *v, ofstream &output) { for(int i = 0; i<TMAX; i++){ output<<i<<", "<<v[i]/(XDIM-2)/ZDIM<<endl; } } void WriteInputs(ostream &output, float omega, float omegaLR, int GPU_per_node) { output<<"Base domain size \t"<<XDIM<<"x"<<YDIM<<"x"<<ZDIM<<endl; output<<"Base blocksize: \t"<<BLOCKSIZEX<<"x"<<BLOCKSIZEY<<"x"<<BLOCKSIZEZ<<endl; output<<"Obst1 location: \t("<<OBSTX1<<","<<OBSTY1<<","<<OBSTZ1<<")"<<endl; output<<"Obst1 radius: \t"<<OBSTR1<<endl; output<<"Obst2 location: \t("<<OBSTX2<<","<<OBSTY2<<","<<OBSTZ2<<")"<<endl; output<<"Obst2 radius: \t"<<OBSTR2<<endl; output<<"RE: \t"<<RE<<endl; output<<"UMAX: \t"<<UMAX<<endl; output<<"omega \t: "<<omega<<endl; output<<"DPDY \t: "<<DPDY<<endl; output<<"TMAX: \t"<<TMAX<<endl; output<<"STARTF: \t"<<STARTF<<endl; output<<"START_VELAV: \t"<<START_VELAV<<endl; output<<"START_VELFLUC: \t"<<START_VELFLUC<<endl; output<<"REFINEMENT: \t"<<REFINEMENT<<endl; output<<"MODEL: \t"<<MODEL<<endl; output<<"Smagorinsky LES: \t"<<SmagLES<<endl; output<<"CS: \t"<<CS<<endl; output<<"LR domain size \t"<<XLRDIM<<"x"<<YLRDIM<<"x"<<ZLRDIM<<endl; output<<"LR factor \t"<<LRFACTOR<<endl; output<<"LR location \t"<<LRX0<<"x"<<LRY0<<"x"<<LRZ0<<endl; output<<"LR blocksize: \t"<<BLOCKSIZELRX<<"x"<<BLOCKSIZELRY<<"x"<<BLOCKSIZELRZ<<endl; output<<"omega in LR \t: "<<omegaLR<<endl; output<<"GPUs per node \t: "<<GPU_per_node<<endl; } int main(int argc, char *argv[]) { int GPU_N; cudaGetDeviceCount(&GPU_N); GPU_N=NUMGPU; cout<<"number of GPUs: "<<GPU_N<<endl; ofstream output; ofstream outputForce; ofstream outputInputs; ofstream outputAvV; string FileName = CASENAME; output.open ((FileName+".dat").c_str()); outputForce.open ((FileName+".force").c_str()); outputInputs.open ((FileName+".inputs").c_str()); outputAvV.open ((FileName+".vel").c_str()); ofstream outputpart[REFINEMENT*GPU_N+GPU_N], outputslice; for(int i = 0; i< REFINEMENT*GPU_N+GPU_N; i++){ //string filenum = to_string(i); char str[10]; snprintf(str,10,"%i",i); outputpart[i].open ((FileName+"_part"+str+".dat").c_str()); } outputslice.open ((FileName+"_slice.dat").c_str()); //size_t memsize, memsize2; size_t pitch = 2; while(pitch<XDIM) pitch=pitch*2; pitch *= sizeof(float);//pitch*sizeof(float); size_t pitch_e = pitch/sizeof(float); cout<<"Pitch (in elements): "<<pitch/sizeof(float)<<endl; float CharLength = OBSTR1*2.f; float omega = 1.0f/(3.0f*(UMAX*CharLength/RE)+0.5f); float omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f)); if(LRFACTOR == 0.25f){ omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f)); } if(LRFACTOR == 0.125f){ omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f)); omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f)); } float SF_cf = omega*(1.0f-omegaLR)/((1.0f-omega)*omegaLR/LRFACTOR); float SF_fc = 1.f/SF_cf; cout<<SF_cf<<endl; WriteInputs(outputInputs,omega,omegaLR,GPU_N); WriteInputs(cout,omega,omegaLR,GPU_N); if(abs(LRFACTOR-1.f/LRLEVEL)>0.001f && REFINEMENT == 1){ cout<<"LRLEVEL and LRFACTOR don't match! 
Exiting..."<<endl; return 0; } int zInner = ZDIM/GPU_N-2; //excluding halo int ForceTime = max(0,TMAX-STARTF); dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ); //2 halo layers per GPU (for 2 GPUs) dim3 grid (((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),(zInner)/BLOCKSIZEZ); dim3 g_grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),1); dim3 AvV_grid (((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),1,(ZDIM/GPU_N)/BLOCKSIZEZ); cudaStream_t stream_halo[GPU_N]; cudaStream_t stream_inner[GPU_N]; //data pointers as 3D array (GPUxCoord) float *f_h[GPU_N], *g_h[GPU_N], *h_h[GPU_N]; float *f_d[GPU_N][2], *g_d[GPU_N][2], *h_d[GPU_N][2]; float *g_temp[GPU_N], *h_temp[GPU_N]; float *F_h[GPU_N][3]; float *F_d[GPU_N][3]; float *F_total[3]; float *velAv_h[GPU_N][3],*velFluc_h[GPU_N][3]; float *velAv_d[GPU_N][3],*velFluc_d[GPU_N][3]; float *Av_V_h[GPU_N]; float *Av_V_d[GPU_N]; float dpdy = DPDY; for(int i = 0; i<3; i++) F_total[i] = (float *)malloc(ForceTime*sizeof(float)); for(int i=0;i<3;i++) for(int j=0;j<(ForceTime);j++) F_total[i][j] = 0; //Malloc and Initialize for each GPU for(int n = 0; n<GPU_N; n++){ f_h [n] = (float *)malloc(XDIM*YDIM*zInner*19*sizeof(float)); g_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float)); h_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float)); for(int i = 0; i<3; i++){ F_h [n][i] = (float *)malloc(ForceTime*sizeof(float)); velAv_h [n][i] = (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float)); velFluc_h[n][i] = (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float)); } Av_V_h[n] = (float *)malloc(TMAX*sizeof(float)); cudaSetDevice(n); cudaStreamCreate(&stream_halo[n]); cudaStreamCreate(&stream_inner[n]); for(int m = 0; m<GPU_N; m++) if(m != n) cudaDeviceEnablePeerAccess(m,0); for(int i = 0; i<2; i++){ cudaMalloc((void **) &f_d[n][i], pitch_e*YDIM*zInner*19*sizeof(float)); cudaMalloc((void **) &g_d[n][i], pitch_e*YDIM* 19*sizeof(float)); cudaMalloc((void **) &h_d[n][i], pitch_e*YDIM* 19*sizeof(float)); } cudaMalloc((void **) & g_temp[n], pitch_e*YDIM* 19*sizeof(float)); cudaMalloc((void **) & h_temp[n], pitch_e*YDIM* 19*sizeof(float)); for(int i = 0; i<3; i++){ cudaMalloc((void **) & F_d [n][i], (ForceTime)*sizeof(float)); cudaMalloc((void **) & velAv_d [n][i], pitch_e*YDIM*ZDIM/GPU_N*sizeof(float)); cudaMalloc((void **) & velFluc_d[n][i], pitch_e*YDIM*ZDIM/GPU_N*sizeof(float)); } cudaMalloc((void **) & Av_V_d[n],TMAX*sizeof(float)); //initialize host f_inner for (int i = 0; i < XDIM*YDIM*zInner*19; i++) f_h[n][i] = 0; //initialize host g,h for (int i = 0; i < XDIM*YDIM*19; i++){ g_h[n][i] = 0; h_h[n][i] = 0; } for(int i=0;i<3;i++){ for(int j=0;j<(ForceTime);j++) F_h[n][i][j] = 0; for (int j = 0; j < XDIM*YDIM*ZDIM/GPU_N; j++){ velAv_h [n][i][j] = 0; velFluc_h[n][i][j] = 0; } } for(int j=0;j<(ForceTime);j++) Av_V_h[n][j] = 0; for(int i = 0; i<2; i++){ cudaMemcpy2D(f_d[n][i],pitch,f_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*zInner*19,cudaMemcpyHostToDevice); cudaMemcpy2D(g_d[n][i],pitch,g_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM *19,cudaMemcpyHostToDevice); cudaMemcpy2D(h_d[n][i],pitch,h_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM *19,cudaMemcpyHostToDevice); } for(int i = 0; i<3; i++){ cudaMemcpy2D(velAv_d [n][i],pitch,velAv_h [n][i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyHostToDevice); cudaMemcpy2D(velFluc_d[n][i],pitch,velFluc_h[n][i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyHostToDevice); cudaMemcpy(F_d[n][i],F_h[n][i],sizeof(float)*(ForceTime),cudaMemcpyHostToDevice); } 
cudaMemcpy(Av_V_d[n],Av_V_h[n],sizeof(float)*(TMAX),cudaMemcpyHostToDevice); //initialization kernels for(int i = 0; i<2; i++){ initialize<<< grid,threads>>>(f_d[n][i],pitch_e,zInner,GPU_N); initialize<<<g_grid,threads>>>(g_d[n][i],pitch_e, 1,GPU_N); initialize<<<g_grid,threads>>>(h_d[n][i],pitch_e, 1,GPU_N); } initialize<<<g_grid,threads>>>(g_temp[n],pitch_e, 1,GPU_N); initialize<<<g_grid,threads>>>(h_temp[n],pitch_e, 1,GPU_N); }//end Malloc and Initialize //data pointers as 3D array (GPUxCoord) float *f_LR_h[GPU_N], *g_LR_h[GPU_N], *h_LR_h[GPU_N]; float *f_LR_d[GPU_N][2], *g_LR_d[GPU_N][2], *h_LR_d[GPU_N][2]; float *g_LR_temp[GPU_N], *h_LR_temp[GPU_N]; float *velAv_LR_h[GPU_N][3],*velFluc_LR_h[GPU_N][3]; float *velAv_LR_d[GPU_N][3],*velFluc_LR_d[GPU_N][3]; float *f_interp[GPU_N], *g_interp[GPU_N], *h_interp[GPU_N], *g_interp_temp[GPU_N], *h_interp_temp[GPU_N]; float *interp_h[GPU_N]; size_t pitchLR = 2; while(pitchLR<XLRDIM) pitchLR=pitchLR*2; pitchLR = pitchLR*sizeof(float); size_t pitchLR_e = pitchLR/sizeof(float); cout<<"LR Pitch (in elements): "<<pitchLR_e<<endl; size_t pitchInterp = 2; while(pitchInterp<XLRDIM*LRFACTOR+1) pitchInterp=pitchInterp*2; pitchInterp = pitchInterp*sizeof(float); size_t pitchInterp_e = pitchInterp/sizeof(float); cout<<"Interp Pitch (in elements): "<<pitchInterp_e<<endl; int zLRInner = ZLRDIM/GPU_N-2; dim3 LR_threads(BLOCKSIZELRX, BLOCKSIZELRY, BLOCKSIZELRZ); dim3 LR_grid(((XLRDIM+BLOCKSIZELRX-1)/BLOCKSIZELRX),((YLRDIM+BLOCKSIZELRY-1)/BLOCKSIZELRY),(zLRInner)/BLOCKSIZELRZ); dim3 g_LR_grid(((XLRDIM+BLOCKSIZELRX-1)/BLOCKSIZELRX),((YLRDIM+BLOCKSIZELRY-1)/BLOCKSIZELRY),1); dim3 Interp_threads(BLOCKSIZEINTERP, LRLEVEL, LRLEVEL); dim3 Interp_grid(((XLRDIM+BLOCKSIZEINTERP-1)/BLOCKSIZEINTERP),((YLRDIM+LRLEVEL-1)/LRLEVEL),ZLRDIM/LRLEVEL/GPU_N); cout<<((XLRDIM+BLOCKSIZEINTERP-1)/BLOCKSIZEINTERP)<<", "<<((YLRDIM+LRLEVEL-1)/LRLEVEL)<<", "<<ZLRDIM/LRLEVEL/GPU_N<<endl; dim3 Interp_grid_c(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),(ZDIM/GPU_N)/BLOCKSIZEZ); //setup LR if(REFINEMENT == 1){ for(int n = 0; n<GPU_N; n++){ f_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM*zLRInner*19*sizeof(float)); g_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM* 19*sizeof(float)); h_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM* 19*sizeof(float)); interp_h [n] = (float *)malloc((XLRDIM*LRFACTOR+1)*(YLRDIM*LRFACTOR+1)*zInner*9*sizeof(float)); for(int i = 0; i<3; i++){ velAv_LR_h [n][i] = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float)); velFluc_LR_h[n][i] = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float)); } cudaSetDevice(n); for(int i = 0; i<2; i++){ cudaMalloc((void **) &f_LR_d[n][i], pitchLR_e*YLRDIM*zLRInner*19*sizeof(float)); cudaMalloc((void **) &g_LR_d[n][i], pitchLR_e*YLRDIM* 19*sizeof(float)); cudaMalloc((void **) &h_LR_d[n][i], pitchLR_e*YLRDIM* 19*sizeof(float)); } cudaMalloc((void **) & g_LR_temp[n], pitchLR_e*YLRDIM* 19*sizeof(float)); cudaMalloc((void **) & h_LR_temp[n], pitchLR_e*YLRDIM* 19*sizeof(float)); cudaMalloc((void **) & f_interp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*zInner*9*sizeof(float)); cudaMalloc((void **) & g_interp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float)); cudaMalloc((void **) & h_interp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float)); cudaMalloc((void **) & g_interp_temp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float)); cudaMalloc((void **) & h_interp_temp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float)); for(int i = 0; i<3; i++){ cudaMalloc((void **) & velAv_LR_d [n][i], 
pitchLR_e*YLRDIM*ZLRDIM/GPU_N*sizeof(float)); cudaMalloc((void **) & velFluc_LR_d[n][i], pitchLR_e*YLRDIM*ZLRDIM/GPU_N*sizeof(float)); } for (int i = 0; i < XLRDIM*YLRDIM*zLRInner*19; i++) f_LR_h[n][i] = 0; //initialize host g,h for (int i = 0; i < XLRDIM*YLRDIM*19; i++){ g_LR_h[n][i] = 0; h_LR_h[n][i] = 0; } for(int i=0;i<3;i++){ for (int j = 0; j < XLRDIM*YLRDIM*ZLRDIM/GPU_N; j++){ velAv_LR_h [n][i][j] = 0; velFluc_LR_h[n][i][j] = 0; } } for(int i = 0; i<2; i++){ cudaMemcpy2D(f_LR_d[n][i],pitchLR,f_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*zLRInner*19,cudaMemcpyHostToDevice); cudaMemcpy2D(g_LR_d[n][i],pitchLR,g_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM *19,cudaMemcpyHostToDevice); cudaMemcpy2D(h_LR_d[n][i],pitchLR,h_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM *19,cudaMemcpyHostToDevice); } for(int i = 0; i<3; i++){ cudaMemcpy2D(velAv_LR_d [n][i],pitchLR,velAv_LR_h [n][i],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyHostToDevice); cudaMemcpy2D(velFluc_LR_d[n][i],pitchLR,velFluc_LR_h[n][i],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyHostToDevice); } //initialization kernels for(int i = 0; i<2; i++){ initializeLR<<< LR_grid,LR_threads>>>(f_LR_d[n][i],pitchLR_e,zLRInner,GPU_N); initializeLR<<<g_LR_grid,LR_threads>>>(g_LR_d[n][i],pitchLR_e, 1,GPU_N); initializeLR<<<g_LR_grid,LR_threads>>>(h_LR_d[n][i],pitchLR_e, 1,GPU_N); } initializeLR<<<g_LR_grid,LR_threads>>>(g_LR_temp[n],pitchLR_e, 1,GPU_N); initializeLR<<<g_LR_grid,LR_threads>>>(h_LR_temp[n],pitchLR_e, 1,GPU_N); }//end of GPU loop for malloc and initialize for LR }//end of LR malloc and initialize cudaFuncSetCacheConfig(InterpCF,cudaFuncCachePreferShared); int A = 0; int B = 1; int C = 0; int D = 1; for(int n = 0; n<GPU_N; n++){ cudaSetDevice(n); size_t mem_avail, mem_total; cudaMemGetInfo(&mem_avail,&mem_total); cout<<"Device memory used for dev"<<n<<" : "<<(mem_total-mem_avail)*pow(10,-9)<<" GB\n"; cout<<"Device memory available for dev"<<n<<" : "<<(mem_avail)*pow(10,-9)<<" GB\n"; } struct timeval tdr0,tdr1; double restime; cudaDeviceSynchronize(); gettimeofday (&tdr0,NULL); //time loop for(int t = 0; t<TMAX; t++) { //copy temporary array for top and bottom on coarse mesh to neighbor GPU. 
Only transfering 5 distbs for(int n = 0; n<GPU_N; n++) cudaMemcpyPeerAsync(&h_temp[n][pitch_e*YDIM*14],n,&g_d[ (n+1)%GPU_N][A][pitch_e*YDIM*14], (n+1)%GPU_N,pitch_e*YDIM*sizeof(float)*5,stream_halo[n]); for(int n = 0; n<GPU_N; n++) cudaMemcpyPeerAsync(&g_temp[n][pitch_e*YDIM*9],n,&h_d[abs(n-1)%GPU_N][A][pitch_e*YDIM*9],abs(n-1)%GPU_N,pitch_e*YDIM*sizeof(float)*5,stream_halo[n]); //compute inner nodes on coarse mesh for(int n = 0; n<GPU_N; n++){ cudaSetDevice(n); update_inn<<<grid,threads,0,stream_inner[n]>>>(f_d[n][B],f_d[n][A],g_d[n][A], h_d[n][A],omega,pitch_e,n,zInner,velAv_d[n][0],velAv_d[n][1],velAv_d[n][2],velFluc_d[n][0],velFluc_d[n][1],velFluc_d[n][2],F_d[n][0],F_d[n][1],F_d[n][2],t,(!REFINEMENT&&t>STARTF),f_interp[n],pitchInterp_e,dpdy); } //synchronize halo stream before computing top and bottom nodes for(int n = 0; n<GPU_N; n++) cudaStreamSynchronize(stream_halo[n]); //compute top and bottom nodes for(int n = 0; n<GPU_N; n++) { cudaSetDevice(n); update_top<<<g_grid, threads, 0, stream_halo [n]>>>(h_d[n][B],h_d[n][A],f_d[n][A],h_temp[n],omega,pitch_e,n,zInner,F_d[n][0],F_d[n][1],F_d[n][2],t,(!REFINEMENT&&t>STARTF),h_interp[n],pitchInterp_e,dpdy); update_bot<<<g_grid, threads, 0, stream_halo [n]>>>(g_d[n][B],g_d[n][A],f_d[n][A],g_temp[n],omega,pitch_e,n,zInner,F_d[n][0],F_d[n][1],F_d[n][2],t,(!REFINEMENT&&t>STARTF),g_interp[n],pitchInterp_e,dpdy); } if(t%100 == 0 && t>10000) { for(int n = 0; n<GPU_N; n++) cudaDeviceSynchronize(); for(int n = 0; n<GPU_N; n++) { AverageV<<<AvV_grid, threads>>>(f_d[n][B],g_d[n][B],h_d[n][B],pitch_e,n,zInner,Av_V_d[n],t); } for(int n = 0; n<GPU_N; n++) cudaMemcpy(&Av_V_h[n][t],&Av_V_d[n][t],sizeof(float),cudaMemcpyDeviceToHost); float Av_V = 0; for(int n = 0; n<GPU_N; n++) Av_V += Av_V_h[n][t]; Av_V /= (XDIM-2)*ZDIM; float diff; diff = (Av_V-UMAX)/UMAX; dpdy += diff*KP*abs(DPDY); //dpdy = max(DPDY*) // if(Av_V < UMAX*0.995f) // dpdy *= 1.01f; // else if(Av_V > UMAX*1.005f) // dpdy *= 0.99f; if(t%1000 == 0) outputAvV<<t<<", "<<Av_V<<", "<<dpdy<<endl; } //cudaDeviceSynchronize(); swap(A,B); if(REFINEMENT == 1){ int flag_F = 0; for(int i = 0; i<LRLEVEL; i++){ if(t>STARTF && i == 0) flag_F = 1; else flag_F = 0; for(int n = 0; n<GPU_N; n++){ cudaMemcpyPeerAsync(&h_LR_temp[n][pitchLR_e*YLRDIM*14],n,&g_LR_d[ (n+1)%GPU_N][C][pitchLR_e*YLRDIM*14], (n+1)%GPU_N,pitchLR_e*YLRDIM*sizeof(float)*5,stream_halo[n]); cudaMemcpyPeerAsync(&g_LR_temp[n][pitchLR_e*YLRDIM*9 ],n,&h_LR_d[abs(n-1)%GPU_N][C][pitchLR_e*YLRDIM*9 ],abs(n-1)%GPU_N,pitchLR_e*YLRDIM*sizeof(float)*5,stream_halo[n]); } for(int n = 0; n<GPU_N; n++){ cudaSetDevice(n); update_inn_LR<<<LR_grid,LR_threads,0,stream_inner[n]>>>(f_LR_d[n][D],f_LR_d[n][C],g_LR_d[n][C], h_LR_d[n][C],omegaLR,pitchLR_e,n,zLRInner,velAv_LR_d[n][0],velAv_LR_d[n][1],velFluc_LR_d[n][0],velFluc_LR_d[n][1],F_d[n][0],F_d[n][1],F_d[n][2],t,flag_F); } for(int n = 0; n<GPU_N; n++) cudaStreamSynchronize(stream_halo[n]); for(int n = 0; n<GPU_N; n++){ cudaSetDevice(n); update_top_LR<<<g_LR_grid,LR_threads,0,stream_halo[n]>>>(h_LR_d[n][D],h_LR_d[n][C],f_LR_d[n][C],h_LR_temp[n],omegaLR,pitchLR_e,n,zLRInner,F_d[n][0],F_d[n][1],F_d[n][2],t,flag_F); update_bot_LR<<<g_LR_grid,LR_threads,0,stream_halo[n]>>>(g_LR_d[n][D],g_LR_d[n][C],f_LR_d[n][C],g_LR_temp[n],omegaLR,pitchLR_e,n,zLRInner,F_d[n][0],F_d[n][1],F_d[n][2],t,flag_F); } if(i == LRLEVEL-1) { for(int n = 0; n<GPU_N; n++) //cudaMemcpyPeerAsync(&h_interp_temp[n][0],n,&g_interp[ (n+1)%GPU_N][0], (n+1)%GPU_N,pitchInterp_e*(YLRDIM*LRFACTOR+1)*sizeof(float)*9,stream_halo[n]); for(int n = 0; n<GPU_N; 
n++) cudaMemcpyPeerAsync(&g_interp_temp[n][0],n,&h_interp[abs(n-1)%GPU_N][0],abs(n-1)%GPU_N,pitchInterp_e*(YLRDIM*LRFACTOR+1)*sizeof(float)*9,stream_halo[n]); } for(int n = 0; n<GPU_N; n++){ cudaSetDevice(n); cudaDeviceSynchronize(); } flag_F = 0; swap(C,D); } //interp from coarse grid for(int n = 0; n<GPU_N; n++){ cudaSetDevice(n); InterpCF<<<Interp_grid,Interp_threads,0,stream_inner[n]>>>(f_LR_d[n][C],g_LR_d[n][C],h_LR_d[n][C],pitchLR_e,f_interp[n],g_interp[n],h_interp[n],g_interp_temp[n],pitchInterp_e,SF_cf,omega,n,zInner,zLRInner); //cudaDeviceSynchronize(); } //interp from fine grid for(int n = 0; n<GPU_N; n++){ cudaSetDevice(n); cudaMemcpyPeerAsync(&h_LR_temp[n][0],n,&g_LR_d[ (n+1)%GPU_N][C][0], (n+1)%GPU_N,pitchLR_e*YLRDIM*sizeof(float)*19,stream_halo[n]); } for(int n = 0; n<GPU_N; n++) cudaStreamSynchronize(stream_halo[n]); for(int n = 0; n<GPU_N; n++){ cudaSetDevice(n); InterpFC<<<Interp_grid_c,threads,0,stream_halo[n]>>>(f_d[n][A],g_d[n][A],h_d[n][A],f_LR_d[n][C],h_LR_d[n][C],h_LR_temp[n],pitch_e,pitchLR_e,SF_fc,omegaLR,n,zInner,zLRInner); } }//end refinement for(int n = 0; n<GPU_N; n++){ cudaSetDevice(n); cudaDeviceSynchronize(); } }//end time loop cudaDeviceSynchronize(); gettimeofday (&tdr1,NULL); timeval_subtract (&restime, &tdr1, &tdr0); int Nodes; Nodes = XDIM*YDIM*ZDIM; if (REFINEMENT == 1) Nodes += XLRDIM*YLRDIM*ZLRDIM*LRLEVEL; cout<<"Time taken for main kernel: "<<restime<<" (" <<double(Nodes*double(TMAX/1000000.f))/restime<<"MLUPS)\n"; //D2H Memcpy and write results for(int n = 0; n<GPU_N; n++){ cudaSetDevice(n); cudaMemcpy2D(f_h[n],XDIM*sizeof(float),f_d[n][A],pitch,XDIM*sizeof(float),YDIM*zInner*19,cudaMemcpyDeviceToHost); cudaMemcpy2D(g_h[n],XDIM*sizeof(float),g_d[n][A],pitch,XDIM*sizeof(float),YDIM *19,cudaMemcpyDeviceToHost); cudaMemcpy2D(h_h[n],XDIM*sizeof(float),h_d[n][A],pitch,XDIM*sizeof(float),YDIM *19,cudaMemcpyDeviceToHost); for(int i = 0; i<3; i++){ cudaMemcpy2D( velAv_h[n][i],XDIM*sizeof(float),velAv_d[n][i],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyDeviceToHost); cudaMemcpy2D(velFluc_h[n][i],XDIM*sizeof(float),velFluc_d[n][i],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyDeviceToHost); cudaMemcpy(F_h[n][i],F_d[n][i],sizeof(float)*ForceTime,cudaMemcpyDeviceToHost); } cudaMemcpy(Av_V_h[n],Av_V_d[n],sizeof(float)*TMAX,cudaMemcpyDeviceToHost); WriteResults(outputpart[n],outputslice,f_h[n],g_h[n],h_h[n],velAv_h[n],velFluc_h[n],omega,GPU_N,n); outputpart[n]<<endl; for(int i=0;i<3;i++) for(int j=0;j<ForceTime;j++) F_total[i][j] += F_h[n][i][j]; if(n > 0){ for(int j=0;j<TMAX;j++) Av_V_h[0][j] += Av_V_h[n][j]; } for(int i = 0; i<2; i++){ cudaFree(f_d[n][i]); cudaFree(g_d[n][i]); cudaFree(h_d[n][i]); } cudaFree(f_d[n]); cudaFree(g_d[n]); cudaFree(h_d[n]); cudaFree(g_temp[n]); cudaFree(h_temp[n]); for(int i=0;i<3;i++) cudaFree(F_d[n][i]); cudaFree(F_d[n]); }//end Memcpy and write results WriteForces(F_total,outputForce,ForceTime,REFINEMENT*LRLEVEL); //WriteAvV(Av_V_h[0],outputAvV); if(REFINEMENT == 1){ // output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"uAv\",\"vAv\",\"ufluc\",\"vfluc\"\n"; // output<<"ZONE F=POINT, I="<<XLRDIM<<", J="<<YLRDIM<<", K="<<ZLRDIM<<"\n"; for(int n = 0; n<GPU_N; n++){ cudaSetDevice(n); cudaMemcpy2D(f_LR_h[n],XLRDIM*sizeof(float),f_LR_d[n][C],pitchLR,XLRDIM*sizeof(float),YLRDIM*zLRInner*19,cudaMemcpyDeviceToHost); cudaMemcpy2D(g_LR_h[n],XLRDIM*sizeof(float),g_LR_d[n][C],pitchLR,XLRDIM*sizeof(float),YLRDIM *19,cudaMemcpyDeviceToHost); 
cudaMemcpy2D(h_LR_h[n],XLRDIM*sizeof(float),h_LR_d[n][C],pitchLR,XLRDIM*sizeof(float),YLRDIM *19,cudaMemcpyDeviceToHost); //cudaMemcpy2D(interp_h[n],(XLRDIM*LRFACTOR+1)*sizeof(float),f_interp[n],pitchInterp,(XLRDIM*LRFACTOR+1)*sizeof(float),(YLRDIM*LRFACTOR+1)*zInner*9,cudaMemcpyDeviceToHost); for(int i = 0; i<3; i++){ cudaMemcpy2D( velAv_LR_h[n][i],XLRDIM*sizeof(float),velAv_LR_d[n][i],pitchLR,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyDeviceToHost); cudaMemcpy2D(velFluc_LR_h[n][i],XLRDIM*sizeof(float),velFluc_LR_d[n][i],pitchLR,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyDeviceToHost); } WriteResultsLR(outputpart[GPU_N+n],outputslice,f_LR_h[n],g_LR_h[n],h_LR_h[n],velAv_LR_h[n],velFluc_LR_h[n],omegaLR,GPU_N,n); outputpart[GPU_N+n]<<endl; for(int i = 0; i<2; i++){ cudaFree(f_LR_d[n][i]); cudaFree(g_LR_d[n][i]); cudaFree(h_LR_d[n][i]); } cudaFree(f_LR_d[n]); cudaFree(g_LR_d[n]); cudaFree(h_LR_d[n]); cudaFree(g_LR_temp[n]); cudaFree(h_LR_temp[n]); } } return 0; }
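The main() routine above pads each row of the coarse and fine grids to the next power of two before converting to bytes (size_t pitch = 2; while(pitch<XDIM) pitch=pitch*2; pitch *= sizeof(float);). A tiny self-contained check of that padding rule follows; round_up_pow2 is an illustrative name, not a function from the source, and the sample sizes are arbitrary.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Same rule as the pitch computation in main() above: smallest power of two
   that is >= n (the source loop also starts from 2, so n <= 2 yields 2). */
static size_t round_up_pow2(size_t n) {
    size_t p = 2;
    while (p < n) p *= 2;
    return p;
}

int main(void) {
    assert(round_up_pow2(96)  == 128);
    assert(round_up_pow2(128) == 128);
    assert(round_up_pow2(130) == 256);
    /* Example: a row of 96 floats is padded to this many bytes. */
    printf("padded row: %zu bytes\n", round_up_pow2(96) * sizeof(float));
    return 0;
}

Padding each row to a power of two keeps row starts aligned for coalesced access, which is presumably why the code computes the pitch itself instead of relying on cudaMallocPitch.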
992798e73a37c6e10df93e30e9468f2a5d6b09df.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include <omp.h>

__device__ uint32_t rotate_left(uint32_t x, uint32_t n) {
    return (x << n) | (x >> (32-n));
}
__device__ uint32_t encrypt(uint32_t m, uint32_t key) {
    return (rotate_left(m, key&31) + key)^key;
}
__host__ uint32_t h_rotate_left(uint32_t x, uint32_t n) {
    return (x << n) | (x >> (32-n));
}
__host__ uint32_t h_encrypt(uint32_t m, uint32_t key) {
    return (h_rotate_left(m, key&31) + key)^key;
}

#define MAXN 16777216
#define GPULOCAL 128
#define BLOCKSZ (1024)

__global__ void vecdot(uint32_t keyA, uint32_t keyB, uint32_t C[], int N) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int l = x * BLOCKSZ;
    int r = l + BLOCKSZ;
    uint32_t sum = 0;
    if (r > N) r = N;
    for (int i = l; i < r; i++)
        sum += encrypt(i, keyA) * encrypt(i, keyB);
    C[x] = sum;
}

uint32_t hostC[MAXN / GPULOCAL];

#define CheckErr(status) { gpuAssert((status), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, int abort=true) {
    if (code != hipSuccess) {
        fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

int main() {
    uint32_t N, keyA, keyB;
    uint32_t *cuArrC;
    hipMalloc((void **)&cuArrC, MAXN/GPULOCAL*sizeof(uint32_t));
    while (scanf("%u %u %u", &N, &keyA, &keyB) == 3) {
        int M = (N + BLOCKSZ-1) / BLOCKSZ;
        int LOCAL = GPULOCAL;
        M = (M + LOCAL) / LOCAL * LOCAL;
        dim3 cuBlock(LOCAL);
        dim3 cuGrid(M/LOCAL);
        hipLaunchKernelGGL(( vecdot), dim3(cuGrid), dim3(cuBlock), 0, 0, keyA, keyB, cuArrC, N);
        CheckErr(hipGetLastError());
        hipMemcpy(hostC, cuArrC, M*sizeof(uint32_t), hipMemcpyDeviceToHost);
        uint32_t sum = 0;
#ifdef _OPENMP
        omp_set_num_threads(4);
#endif
#pragma omp parallel for reduction(+: sum)
        for (int i = 0; i < M; i++)
            sum += hostC[i];
        printf("%u\n", sum);
    }
    hipFree(cuArrC);
    return 0;
}
992798e73a37c6e10df93e30e9468f2a5d6b09df.cu
#include <stdio.h>
#include <stdint.h>
#include <cuda.h>
#include <omp.h>

__device__ uint32_t rotate_left(uint32_t x, uint32_t n) {
    return (x << n) | (x >> (32-n));
}
__device__ uint32_t encrypt(uint32_t m, uint32_t key) {
    return (rotate_left(m, key&31) + key)^key;
}
__host__ uint32_t h_rotate_left(uint32_t x, uint32_t n) {
    return (x << n) | (x >> (32-n));
}
__host__ uint32_t h_encrypt(uint32_t m, uint32_t key) {
    return (h_rotate_left(m, key&31) + key)^key;
}

#define MAXN 16777216
#define GPULOCAL 128
#define BLOCKSZ (1024)

__global__ void vecdot(uint32_t keyA, uint32_t keyB, uint32_t C[], int N) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int l = x * BLOCKSZ;
    int r = l + BLOCKSZ;
    uint32_t sum = 0;
    if (r > N) r = N;
    for (int i = l; i < r; i++)
        sum += encrypt(i, keyA) * encrypt(i, keyB);
    C[x] = sum;
}

uint32_t hostC[MAXN / GPULOCAL];

#define CheckErr(status) { gpuAssert((status), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, int abort=true) {
    if (code != cudaSuccess) {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

int main() {
    uint32_t N, keyA, keyB;
    uint32_t *cuArrC;
    cudaMalloc((void **)&cuArrC, MAXN/GPULOCAL*sizeof(uint32_t));
    while (scanf("%u %u %u", &N, &keyA, &keyB) == 3) {
        int M = (N + BLOCKSZ-1) / BLOCKSZ;
        int LOCAL = GPULOCAL;
        M = (M + LOCAL) / LOCAL * LOCAL;
        dim3 cuBlock(LOCAL);
        dim3 cuGrid(M/LOCAL);
        vecdot<<<cuGrid, cuBlock>>>(keyA, keyB, cuArrC, N);
        CheckErr(cudaGetLastError());
        cudaMemcpy(hostC, cuArrC, M*sizeof(uint32_t), cudaMemcpyDeviceToHost);
        uint32_t sum = 0;
#ifdef _OPENMP
        omp_set_num_threads(4);
#endif
#pragma omp parallel for reduction(+: sum)
        for (int i = 0; i < M; i++)
            sum += hostC[i];
        printf("%u\n", sum);
    }
    cudaFree(cuArrC);
    return 0;
}
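The two vecdot listings above declare h_rotate_left and h_encrypt as host helpers but never call them; they look intended for a serial reference check. Below is a minimal host-only sketch in that spirit (host_vecdot and the sample input values are illustrative additions, not from the source). It produces the same checksum as the kernel plus the OpenMP reduction, because uint32_t arithmetic wraps modulo 2^32 and the order of accumulation therefore does not matter.

#include <stdio.h>
#include <stdint.h>

static uint32_t h_rotate_left(uint32_t x, uint32_t n) {
    /* Copied from the files above (note: n == 0 would make x >> 32 undefined,
       a caveat the originals share). */
    return (x << n) | (x >> (32 - n));
}
static uint32_t h_encrypt(uint32_t m, uint32_t key) {
    return (h_rotate_left(m, key & 31) + key) ^ key;
}

/* Serial reference: sums the same terms the vecdot kernel produces per
   BLOCKSZ-sized chunk; wraparound addition makes the result order-independent. */
static uint32_t host_vecdot(uint32_t N, uint32_t keyA, uint32_t keyB) {
    uint32_t sum = 0;
    for (uint32_t i = 0; i < N; i++)
        sum += h_encrypt(i, keyA) * h_encrypt(i, keyB);
    return sum;
}

int main(void) {
    /* Arbitrary example in the "N keyA keyB" format the programs read. */
    printf("%u\n", host_vecdot(1000000u, 123u, 456u));
    return 0;
}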
4db00aa062b2afbcd9ebbe538253ef9031b8e33a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <ATen/ATen.h> #include <chrono> #include <cuhash/hash_table.h> #include <limits> #include <spconv/indice.cu.h> #include <spconv/indice.h> #include <tensorview/cuda_utils.h> #include <tensorview/mp_helper.h> #include <tensorview/tensor.h> #include <tensorview/tensorview.h> #include <tensorview/torch_utils.h> #include <thrust/copy.h> #include <thrust/execution_policy.h> #include <type_traits> #include <utility/timer.h> namespace spconv { using max_kernel_vol_t = tv::mp_list_c<int, 9, 16, 27, 32, 128, 256, 4096>; int create_conv_indice_pair_p1_cuda( torch::Tensor indicesIn, torch::Tensor indicePairs, torch::Tensor indiceNum, torch::Tensor indicePairUnique, std::vector<int64_t> kernelSize, std::vector<int64_t> stride, std::vector<int64_t> padding, std::vector<int64_t> dilation, std::vector<int64_t> outSpatialShape, bool transpose) { auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto ndim = kernelSize.size(); auto numActIn = indicesIn.size(0); auto kernelVolume = indiceNum.size(0); if (numActIn == 0) return 0; tv::dispatch_torch<int32_t>(indicesIn.scalar_type(), [&](auto IndexValue) { using Index = TV_DECLTYPE(IndexValue); using IndexGrid = int32_t; tv::dispatch_int<2, 3, 4>(ndim, [&](auto I) { constexpr int NDim = TV_DECLTYPE(I)::value; tv::SimpleVector<Index, NDim> ks(kernelSize.begin(), kernelSize.end()); tv::SimpleVector<Index, NDim> st(stride.begin(), stride.end()); tv::SimpleVector<Index, NDim> pa(padding.begin(), padding.end()); tv::SimpleVector<Index, NDim> di(dilation.begin(), dilation.end()); tv::SimpleVector<Index, NDim> ou(outSpatialShape.begin(), outSpatialShape.end()); tv::DispatchInt<max_kernel_vol_t>()( kernelVolume, std::less_equal<int>(), [&](auto I2) { constexpr int MaxKernelVolume = TV_DECLTYPE(I2)::value; if (transpose) { hipLaunchKernelGGL(( prepareDeConvIndicePairsKernel<Index, NDim, MaxKernelVolume>) , dim3(tv::cuda::getBlocks(numActIn)), dim3(tv::cuda::CUDA_NUM_THREADS), 0, stream, tv::torch2tv<Index>(indicesIn), tv::torch2tv<Index>(indicePairs), tv::torch2tv<Index>(indiceNum), tv::torch2tv<Index>(indicePairUnique), ks, st, pa, di, ou); TV_CHECK_CUDA_ERR_V2("prepareDeConvIndicePairsKernel failed"); } else { hipLaunchKernelGGL(( prepareIndicePairsKernel<Index, NDim, MaxKernelVolume>) , dim3(tv::cuda::getBlocks(numActIn)), dim3(tv::cuda::CUDA_NUM_THREADS), 0, stream, tv::torch2tv<Index>(indicesIn), tv::torch2tv<Index>(indicePairs), tv::torch2tv<Index>(indiceNum), tv::torch2tv<Index>(indicePairUnique), ks, st, pa, di, ou); TV_CHECK_CUDA_ERR_V2("prepareIndicePairsKernel failed"); } #ifdef TV_LOG_KERNEL_INFO hipFuncAttributes attr; checkCudaErrors(hipFuncGetAttributes( &attr, prepareDeConvIndicePairsKernel<Index, NDim, MaxKernelVolume>)); tv::ssprint("prepareIndicePairsKernel<", tv::type_s<Index>, NDim, MaxKernelVolume, ">", attr.numRegs); #endif }); }); }); return 1; } int create_conv_indice_pair_p2_cuda( torch::Tensor 
indicesIn, torch::Tensor indicesOut, torch::Tensor gridsOut, torch::Tensor indicePairs, torch::Tensor indiceNum, torch::Tensor indicePairUnique, std::vector<int64_t> outSpatialShape, bool transpose, bool resetGrid, bool useHash) { auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto ndim = outSpatialShape.size(); auto numActIn = indicesIn.size(0); int batchSize = gridsOut.size(0); int numAct = indicePairUnique.size(0) - 1; auto kernelVolume = indiceNum.size(0); if (numActIn == 0) return 0; bool failed = false; tv::dispatch_torch<int32_t>(indicesIn.scalar_type(), [&](auto IndexValue) { using Index = TV_DECLTYPE(IndexValue); using IndexGrid = int32_t; tv::dispatch_int<2, 3, 4>(ndim, [&](auto I) { constexpr int NDim = TV_DECLTYPE(I)::value; using IndexGrid = int32_t; tv::SimpleVector<Index, NDim> ou(outSpatialShape.begin(), outSpatialShape.end()); if (useHash) { auto table = cuhash::HashTable(); // std::cout << "create " << numAct << " size table..." << std::endl; table.Initialize(numAct, 2.0, 4); unsigned *d_values = nullptr; hipMalloc((void **)&d_values, sizeof(unsigned) * numAct); TV_CHECK_CUDA_ERR_V2("hipMalloc failed"); hipLaunchKernelGGL(( arangeKernel<unsigned>) , dim3(tv::cuda::getBlocks(numAct)), dim3(tv::cuda::CUDA_NUM_THREADS), 0, stream, d_values, numAct); TV_CHECK_CUDA_ERR_V2("arangeKernel failed"); bool res = table.Build( numAct, reinterpret_cast<unsigned *>(indicePairUnique.data_ptr<Index>()), d_values); hipFree(d_values); TV_CHECK_CUDA_ERR_V2("hipFree failed"); if (!res) { failed = true; return; } hipLaunchKernelGGL(( assignIndiceOutKernel<Index, NDim>) , dim3(tv::cuda::getBlocks(numAct)), dim3(tv::cuda::CUDA_NUM_THREADS), 0, stream, tv::torch2tv<Index>(indicesOut), numAct, tv::torch2tv<Index>(indicePairUnique), ou, batchSize); TV_CHECK_CUDA_ERR_V2("assignIndiceOutKernel failed"); auto tableSize = table.get_table_size(); auto tableData = table.data(); auto constants = table.get_constants_4(); auto stash_constants = table.get_stash_constants(); auto stash_count = table.get_stash_count(); hipLaunchKernelGGL(( assignIndicePairsHashKernel<Index, NDim>) , dim3(tv::cuda::getBlocks(numActIn)), dim3(tv::cuda::CUDA_NUM_THREADS), 0, stream, tv::torch2tv<Index>(indicesOut), numActIn, tv::torch2tv<Index>(indicePairs), tv::torch2tv<Index>(indicePairUnique), tableSize, tableData, constants, stash_constants, stash_count); TV_CHECK_CUDA_ERR_V2("assignIndicePairsHashKernel failed"); } else { hipLaunchKernelGGL(( assignGridAndIndiceOutKernel<Index, IndexGrid, NDim>) , dim3(tv::cuda::getBlocks(numAct)), dim3(tv::cuda::CUDA_NUM_THREADS), 0, stream, tv::torch2tv<Index>(indicesOut), tv::torch2tv<IndexGrid>(gridsOut), numAct, tv::torch2tv<Index>(indicePairs), tv::torch2tv<Index>(indicePairUnique), ou, batchSize); TV_CHECK_CUDA_ERR_V2("assignGridAndIndiceOutKernel failed"); hipLaunchKernelGGL(( assignIndicePairsKernel<Index, IndexGrid, NDim>) , dim3(tv::cuda::getBlocks(numActIn)), dim3(tv::cuda::CUDA_NUM_THREADS), 0, stream, tv::torch2tv<Index>(indicesOut), tv::torch2tv<IndexGrid>(gridsOut), numActIn, tv::torch2tv<Index>(indicePairs), tv::torch2tv<Index>(indicePairUnique), ou); TV_CHECK_CUDA_ERR_V2("assignIndicePairsKernel failed"); #ifdef TV_LOG_KERNEL_INFO hipFuncAttributes attr; checkCudaErrors(hipFuncGetAttributes( &attr, assignGridAndIndiceOutKernel<Index, IndexGrid, NDim>)); tv::ssprint("assignGridAndIndiceOutKernel<", tv::type_s<Index>, NDim, ">", attr.numRegs); hipFuncAttributes attr2; checkCudaErrors(hipFuncGetAttributes( &attr2, assignIndicePairsKernel<Index, IndexGrid, 
NDim>)); tv::ssprint("assignIndicePairsKernel<", tv::type_s<Index>, NDim, ">", attr2.numRegs); #endif } if (resetGrid && (!useHash)) { hipLaunchKernelGGL(( resetGridKernel<Index, IndexGrid, NDim>) , dim3(tv::cuda::getBlocks(numAct)), dim3(tv::cuda::CUDA_NUM_THREADS), 0, stream, indicePairUnique.data_ptr<Index>(), tv::torch2tv<IndexGrid>(gridsOut), numAct); TV_CHECK_CUDA_ERR_V2("resetGridKernel failed"); } }); }); if (failed){ return -1; } return numAct; } template <typename T> struct is_valid { __device__ __forceinline__ bool operator()(const T x) { return x != -1; } }; int create_submconv_indice_pair_cuda( torch::Tensor indicesIn, torch::Tensor gridsOut, torch::Tensor indicePairs, torch::Tensor indiceNum, std::vector<int64_t> kernelSize, std::vector<int64_t> stride, std::vector<int64_t> padding, std::vector<int64_t> dilation, std::vector<int64_t> outSpatialShape, bool transpose, bool resetGrid, bool useHash) { auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto ndim = outSpatialShape.size(); auto numActIn = indicesIn.size(0); int batchSize = gridsOut.size(0); auto kernelVolume = indiceNum.size(0); if (numActIn == 0) return 0; bool failed = false; tv::dispatch_torch<int32_t>(indicesIn.scalar_type(), [&](auto IndexValue) { using Index = TV_DECLTYPE(IndexValue); using IndexGrid = int32_t; tv::dispatch_int<2, 3, 4>(ndim, [&](auto I) { constexpr int NDim = TV_DECLTYPE(I)::value; tv::SimpleVector<Index, NDim> ks(kernelSize.begin(), kernelSize.end()); tv::SimpleVector<Index, NDim> st(stride.begin(), stride.end()); tv::SimpleVector<Index, NDim> pa(padding.begin(), padding.end()); tv::SimpleVector<Index, NDim> di(dilation.begin(), dilation.end()); tv::SimpleVector<Index, NDim> ou(outSpatialShape.begin(), outSpatialShape.end()); Index spatialVolume = 1; for (int i = 0; i < NDim; ++i) { spatialVolume *= outSpatialShape[i]; } if (useHash) { auto table = cuhash::HashTable(); // std::cout << "create " << numAct << " size table..." 
<< std::endl; table.Initialize(numActIn, 2.0, 4); unsigned *d_keyvalues = nullptr; hipMalloc((void **)&d_keyvalues, sizeof(unsigned) * numActIn * 2); unsigned *d_values = d_keyvalues + numActIn; TV_CHECK_CUDA_ERR_V2("hipMalloc failed"); hipLaunchKernelGGL(( prepareSubMHashKernel<Index, NDim>) , dim3(tv::cuda::getBlocks(numActIn)), dim3(tv::cuda::CUDA_NUM_THREADS), 0, stream, tv::torch2tv<Index>(indicesIn), d_keyvalues, d_values, ou); TV_CHECK_CUDA_ERR_V2("prepareSubMHashKernel failed"); bool res = table.Build(numActIn, reinterpret_cast<unsigned *>(d_keyvalues), reinterpret_cast<unsigned *>(d_values)); hipFree(d_keyvalues); TV_CHECK_CUDA_ERR_V2("hipFree failed"); if (!res) { failed = true; return; } auto tableSize = table.get_table_size(); auto tableData = table.data(); auto constants = table.get_constants_4(); auto stash_constants = table.get_stash_constants(); auto stash_count = table.get_stash_count(); tv::DispatchInt<max_kernel_vol_t>()( kernelVolume, std::less_equal<int>(), [&](auto I2) { constexpr int MaxKernelVolume = TV_DECLTYPE(I2)::value; hipLaunchKernelGGL(( getSubMIndicePairsHashKernel<Index, NDim, MaxKernelVolume>) , dim3(tv::cuda::getBlocks(numActIn)), dim3(tv::cuda::CUDA_NUM_THREADS), 0, stream, tv::torch2tv<Index>(indicesIn), tv::torch2tv<Index>(indicePairs), tv::torch2tv<Index>(indiceNum), ks, st, pa, di, ou, tableSize, tableData, constants, stash_constants, stash_count); TV_CHECK_CUDA_ERR_V2("getSubMIndicePairsHashKernel failed"); }); } else { // auto timer = spconv::CudaContextTimer<>(); hipLaunchKernelGGL(( prepareSubMGridKernel<Index, IndexGrid, NDim>) , dim3(tv::cuda::getBlocks(numActIn)), dim3(tv::cuda::CUDA_NUM_THREADS), 0, stream, tv::torch2tv<Index>(indicesIn), tv::torch2tv<IndexGrid>(gridsOut), ou, spatialVolume); // tv::ssprint("prepareSubMGridKernel", timer.report() / 1000.0); TV_CHECK_CUDA_ERR_V2("prepareSubMGridKernel failed"); // when dilation all one, we use a simple kernel to calc result bool dilation_one = true; for (int i = 0; i < NDim; ++i) { dilation_one &= di[i] == 1; } auto found = false; if (dilation_one && (NDim == 2 || NDim == 3)) { auto indiceNumCpu = indiceNum.cpu(); if (NDim == 2) { tv::SimpleVector<Index, 2> ou_(outSpatialShape.begin(), outSpatialShape.end()); tv::dispatch_int_noexcept<1, 3, 5>(kernelSize[0], [&](auto K0C) { tv::dispatch_int_noexcept<1, 3, 5>(kernelSize[1], [&](auto K1C) { constexpr int K0 = TV_DECLTYPE(K0C)::value; constexpr int K1 = TV_DECLTYPE(K1C)::value; found = true; hipLaunchKernelGGL(( getSubMIndicePairsKernel2<Index, IndexGrid, K0, K1>) , dim3(tv::cuda::getBlocks(numActIn)), dim3(tv::cuda::CUDA_NUM_THREADS), 0, stream, tv::torch2tv<Index>(indicesIn), tv::torch2tv<IndexGrid>(gridsOut), tv::torch2tv<Index>(indicePairs), tv::torch2tv<Index>(indiceNum), ou_, spatialVolume); }); }); } else if (NDim == 3) { tv::SimpleVector<Index, 3> ou_(outSpatialShape.begin(), outSpatialShape.end()); tv::dispatch_int_noexcept<1, 3, 5>(kernelSize[0], [&](auto K0C) { tv::dispatch_int_noexcept<1, 3, 5>(kernelSize[1], [&](auto K1C) { tv::dispatch_int_noexcept<1, 3, 5>( kernelSize[2], [&](auto K2C) { constexpr int K0 = TV_DECLTYPE(K0C)::value; constexpr int K1 = TV_DECLTYPE(K1C)::value; constexpr int K2 = TV_DECLTYPE(K2C)::value; found = true; hipLaunchKernelGGL(( getSubMIndicePairsKernel3<Index, IndexGrid, K0, K1, K2>) , dim3(tv::cuda::getBlocks(numActIn)), dim3(tv::cuda::CUDA_NUM_THREADS), 0, stream, tv::torch2tv<Index>(indicesIn), tv::torch2tv<IndexGrid>(gridsOut), tv::torch2tv<Index>(indicePairs), tv::torch2tv<Index>(indiceNum), ou_, 
spatialVolume); }); }); }); } } if (!found) { tv::DispatchInt< max_kernel_vol_t>()(ndim, std::less_equal<int>(), [&](auto I2) { constexpr int MaxKernelVolume = TV_DECLTYPE(I2)::value; hipLaunchKernelGGL(( getSubMIndicePairsKernel<Index, IndexGrid, NDim, MaxKernelVolume>) , dim3(tv::cuda::getBlocks(numActIn)), dim3(tv::cuda::CUDA_NUM_THREADS), 0, stream, tv::torch2tv<Index>(indicesIn), tv::torch2tv<IndexGrid>(gridsOut), tv::torch2tv<Index>(indicePairs), tv::torch2tv<Index>(indiceNum), ks, st, pa, di, ou); TV_CHECK_CUDA_ERR_V2("getSubMIndicePairsKernel failed"); }); } // tv::ssprint("getSubMIndicePairsKernel", timer.report() / 1000.0); } if (resetGrid && (!useHash)) { hipLaunchKernelGGL(( resetGridSubMKernel<Index, IndexGrid, NDim>) , dim3(tv::cuda::getBlocks(numActIn)), dim3(tv::cuda::CUDA_NUM_THREADS), 0, stream, indicesIn.data_ptr<Index>(), tv::torch2tv<IndexGrid>(gridsOut), ou, numActIn); TV_CHECK_CUDA_ERR_V2("resetGridKernel failed"); } }); }); if (failed){ return -1; } return numActIn; } } // namespace spconv
4db00aa062b2afbcd9ebbe538253ef9031b8e33a.cu
// Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <ATen/ATen.h> #include <chrono> #include <cuhash/hash_table.h> #include <limits> #include <spconv/indice.cu.h> #include <spconv/indice.h> #include <tensorview/cuda_utils.h> #include <tensorview/mp_helper.h> #include <tensorview/tensor.h> #include <tensorview/tensorview.h> #include <tensorview/torch_utils.h> #include <thrust/copy.h> #include <thrust/execution_policy.h> #include <type_traits> #include <utility/timer.h> namespace spconv { using max_kernel_vol_t = tv::mp_list_c<int, 9, 16, 27, 32, 128, 256, 4096>; int create_conv_indice_pair_p1_cuda( torch::Tensor indicesIn, torch::Tensor indicePairs, torch::Tensor indiceNum, torch::Tensor indicePairUnique, std::vector<int64_t> kernelSize, std::vector<int64_t> stride, std::vector<int64_t> padding, std::vector<int64_t> dilation, std::vector<int64_t> outSpatialShape, bool transpose) { auto stream = at::cuda::getCurrentCUDAStream(); auto ndim = kernelSize.size(); auto numActIn = indicesIn.size(0); auto kernelVolume = indiceNum.size(0); if (numActIn == 0) return 0; tv::dispatch_torch<int32_t>(indicesIn.scalar_type(), [&](auto IndexValue) { using Index = TV_DECLTYPE(IndexValue); using IndexGrid = int32_t; tv::dispatch_int<2, 3, 4>(ndim, [&](auto I) { constexpr int NDim = TV_DECLTYPE(I)::value; tv::SimpleVector<Index, NDim> ks(kernelSize.begin(), kernelSize.end()); tv::SimpleVector<Index, NDim> st(stride.begin(), stride.end()); tv::SimpleVector<Index, NDim> pa(padding.begin(), padding.end()); tv::SimpleVector<Index, NDim> di(dilation.begin(), dilation.end()); tv::SimpleVector<Index, NDim> ou(outSpatialShape.begin(), outSpatialShape.end()); tv::DispatchInt<max_kernel_vol_t>()( kernelVolume, std::less_equal<int>(), [&](auto I2) { constexpr int MaxKernelVolume = TV_DECLTYPE(I2)::value; if (transpose) { prepareDeConvIndicePairsKernel<Index, NDim, MaxKernelVolume> <<<tv::cuda::getBlocks(numActIn), tv::cuda::CUDA_NUM_THREADS, 0, stream>>>(tv::torch2tv<Index>(indicesIn), tv::torch2tv<Index>(indicePairs), tv::torch2tv<Index>(indiceNum), tv::torch2tv<Index>(indicePairUnique), ks, st, pa, di, ou); TV_CHECK_CUDA_ERR_V2("prepareDeConvIndicePairsKernel failed"); } else { prepareIndicePairsKernel<Index, NDim, MaxKernelVolume> <<<tv::cuda::getBlocks(numActIn), tv::cuda::CUDA_NUM_THREADS, 0, stream>>>(tv::torch2tv<Index>(indicesIn), tv::torch2tv<Index>(indicePairs), tv::torch2tv<Index>(indiceNum), tv::torch2tv<Index>(indicePairUnique), ks, st, pa, di, ou); TV_CHECK_CUDA_ERR_V2("prepareIndicePairsKernel failed"); } #ifdef TV_LOG_KERNEL_INFO cudaFuncAttributes attr; checkCudaErrors(cudaFuncGetAttributes( &attr, prepareDeConvIndicePairsKernel<Index, NDim, MaxKernelVolume>)); tv::ssprint("prepareIndicePairsKernel<", tv::type_s<Index>, NDim, MaxKernelVolume, ">", attr.numRegs); #endif }); }); }); return 1; } int create_conv_indice_pair_p2_cuda( torch::Tensor indicesIn, torch::Tensor indicesOut, torch::Tensor gridsOut, torch::Tensor indicePairs, torch::Tensor indiceNum, torch::Tensor indicePairUnique, std::vector<int64_t> 
outSpatialShape, bool transpose, bool resetGrid, bool useHash) { auto stream = at::cuda::getCurrentCUDAStream(); auto ndim = outSpatialShape.size(); auto numActIn = indicesIn.size(0); int batchSize = gridsOut.size(0); int numAct = indicePairUnique.size(0) - 1; auto kernelVolume = indiceNum.size(0); if (numActIn == 0) return 0; bool failed = false; tv::dispatch_torch<int32_t>(indicesIn.scalar_type(), [&](auto IndexValue) { using Index = TV_DECLTYPE(IndexValue); using IndexGrid = int32_t; tv::dispatch_int<2, 3, 4>(ndim, [&](auto I) { constexpr int NDim = TV_DECLTYPE(I)::value; using IndexGrid = int32_t; tv::SimpleVector<Index, NDim> ou(outSpatialShape.begin(), outSpatialShape.end()); if (useHash) { auto table = cuhash::HashTable(); // std::cout << "create " << numAct << " size table..." << std::endl; table.Initialize(numAct, 2.0, 4); unsigned *d_values = nullptr; cudaMalloc((void **)&d_values, sizeof(unsigned) * numAct); TV_CHECK_CUDA_ERR_V2("cudaMalloc failed"); arangeKernel<unsigned> <<<tv::cuda::getBlocks(numAct), tv::cuda::CUDA_NUM_THREADS, 0, stream>>>(d_values, numAct); TV_CHECK_CUDA_ERR_V2("arangeKernel failed"); bool res = table.Build( numAct, reinterpret_cast<unsigned *>(indicePairUnique.data_ptr<Index>()), d_values); cudaFree(d_values); TV_CHECK_CUDA_ERR_V2("cudaFree failed"); if (!res) { failed = true; return; } assignIndiceOutKernel<Index, NDim> <<<tv::cuda::getBlocks(numAct), tv::cuda::CUDA_NUM_THREADS, 0, stream>>>(tv::torch2tv<Index>(indicesOut), numAct, tv::torch2tv<Index>(indicePairUnique), ou, batchSize); TV_CHECK_CUDA_ERR_V2("assignIndiceOutKernel failed"); auto tableSize = table.get_table_size(); auto tableData = table.data(); auto constants = table.get_constants_4(); auto stash_constants = table.get_stash_constants(); auto stash_count = table.get_stash_count(); assignIndicePairsHashKernel<Index, NDim> <<<tv::cuda::getBlocks(numActIn), tv::cuda::CUDA_NUM_THREADS, 0, stream>>>(tv::torch2tv<Index>(indicesOut), numActIn, tv::torch2tv<Index>(indicePairs), tv::torch2tv<Index>(indicePairUnique), tableSize, tableData, constants, stash_constants, stash_count); TV_CHECK_CUDA_ERR_V2("assignIndicePairsHashKernel failed"); } else { assignGridAndIndiceOutKernel<Index, IndexGrid, NDim> <<<tv::cuda::getBlocks(numAct), tv::cuda::CUDA_NUM_THREADS, 0, stream>>>(tv::torch2tv<Index>(indicesOut), tv::torch2tv<IndexGrid>(gridsOut), numAct, tv::torch2tv<Index>(indicePairs), tv::torch2tv<Index>(indicePairUnique), ou, batchSize); TV_CHECK_CUDA_ERR_V2("assignGridAndIndiceOutKernel failed"); assignIndicePairsKernel<Index, IndexGrid, NDim> <<<tv::cuda::getBlocks(numActIn), tv::cuda::CUDA_NUM_THREADS, 0, stream>>>(tv::torch2tv<Index>(indicesOut), tv::torch2tv<IndexGrid>(gridsOut), numActIn, tv::torch2tv<Index>(indicePairs), tv::torch2tv<Index>(indicePairUnique), ou); TV_CHECK_CUDA_ERR_V2("assignIndicePairsKernel failed"); #ifdef TV_LOG_KERNEL_INFO cudaFuncAttributes attr; checkCudaErrors(cudaFuncGetAttributes( &attr, assignGridAndIndiceOutKernel<Index, IndexGrid, NDim>)); tv::ssprint("assignGridAndIndiceOutKernel<", tv::type_s<Index>, NDim, ">", attr.numRegs); cudaFuncAttributes attr2; checkCudaErrors(cudaFuncGetAttributes( &attr2, assignIndicePairsKernel<Index, IndexGrid, NDim>)); tv::ssprint("assignIndicePairsKernel<", tv::type_s<Index>, NDim, ">", attr2.numRegs); #endif } if (resetGrid && (!useHash)) { resetGridKernel<Index, IndexGrid, NDim> <<<tv::cuda::getBlocks(numAct), tv::cuda::CUDA_NUM_THREADS, 0, stream>>>(indicePairUnique.data_ptr<Index>(), tv::torch2tv<IndexGrid>(gridsOut), numAct); 
TV_CHECK_CUDA_ERR_V2("resetGridKernel failed"); } }); }); if (failed){ return -1; } return numAct; } template <typename T> struct is_valid { __device__ __forceinline__ bool operator()(const T x) { return x != -1; } }; int create_submconv_indice_pair_cuda( torch::Tensor indicesIn, torch::Tensor gridsOut, torch::Tensor indicePairs, torch::Tensor indiceNum, std::vector<int64_t> kernelSize, std::vector<int64_t> stride, std::vector<int64_t> padding, std::vector<int64_t> dilation, std::vector<int64_t> outSpatialShape, bool transpose, bool resetGrid, bool useHash) { auto stream = at::cuda::getCurrentCUDAStream(); auto ndim = outSpatialShape.size(); auto numActIn = indicesIn.size(0); int batchSize = gridsOut.size(0); auto kernelVolume = indiceNum.size(0); if (numActIn == 0) return 0; bool failed = false; tv::dispatch_torch<int32_t>(indicesIn.scalar_type(), [&](auto IndexValue) { using Index = TV_DECLTYPE(IndexValue); using IndexGrid = int32_t; tv::dispatch_int<2, 3, 4>(ndim, [&](auto I) { constexpr int NDim = TV_DECLTYPE(I)::value; tv::SimpleVector<Index, NDim> ks(kernelSize.begin(), kernelSize.end()); tv::SimpleVector<Index, NDim> st(stride.begin(), stride.end()); tv::SimpleVector<Index, NDim> pa(padding.begin(), padding.end()); tv::SimpleVector<Index, NDim> di(dilation.begin(), dilation.end()); tv::SimpleVector<Index, NDim> ou(outSpatialShape.begin(), outSpatialShape.end()); Index spatialVolume = 1; for (int i = 0; i < NDim; ++i) { spatialVolume *= outSpatialShape[i]; } if (useHash) { auto table = cuhash::HashTable(); // std::cout << "create " << numAct << " size table..." << std::endl; table.Initialize(numActIn, 2.0, 4); unsigned *d_keyvalues = nullptr; cudaMalloc((void **)&d_keyvalues, sizeof(unsigned) * numActIn * 2); unsigned *d_values = d_keyvalues + numActIn; TV_CHECK_CUDA_ERR_V2("cudaMalloc failed"); prepareSubMHashKernel<Index, NDim> <<<tv::cuda::getBlocks(numActIn), tv::cuda::CUDA_NUM_THREADS, 0, stream>>>(tv::torch2tv<Index>(indicesIn), d_keyvalues, d_values, ou); TV_CHECK_CUDA_ERR_V2("prepareSubMHashKernel failed"); bool res = table.Build(numActIn, reinterpret_cast<unsigned *>(d_keyvalues), reinterpret_cast<unsigned *>(d_values)); cudaFree(d_keyvalues); TV_CHECK_CUDA_ERR_V2("cudaFree failed"); if (!res) { failed = true; return; } auto tableSize = table.get_table_size(); auto tableData = table.data(); auto constants = table.get_constants_4(); auto stash_constants = table.get_stash_constants(); auto stash_count = table.get_stash_count(); tv::DispatchInt<max_kernel_vol_t>()( kernelVolume, std::less_equal<int>(), [&](auto I2) { constexpr int MaxKernelVolume = TV_DECLTYPE(I2)::value; getSubMIndicePairsHashKernel<Index, NDim, MaxKernelVolume> <<<tv::cuda::getBlocks(numActIn), tv::cuda::CUDA_NUM_THREADS, 0, stream>>>(tv::torch2tv<Index>(indicesIn), tv::torch2tv<Index>(indicePairs), tv::torch2tv<Index>(indiceNum), ks, st, pa, di, ou, tableSize, tableData, constants, stash_constants, stash_count); TV_CHECK_CUDA_ERR_V2("getSubMIndicePairsHashKernel failed"); }); } else { // auto timer = spconv::CudaContextTimer<>(); prepareSubMGridKernel<Index, IndexGrid, NDim> <<<tv::cuda::getBlocks(numActIn), tv::cuda::CUDA_NUM_THREADS, 0, stream>>>(tv::torch2tv<Index>(indicesIn), tv::torch2tv<IndexGrid>(gridsOut), ou, spatialVolume); // tv::ssprint("prepareSubMGridKernel", timer.report() / 1000.0); TV_CHECK_CUDA_ERR_V2("prepareSubMGridKernel failed"); // when dilation all one, we use a simple kernel to calc result bool dilation_one = true; for (int i = 0; i < NDim; ++i) { dilation_one &= di[i] == 1; } auto 
found = false; if (dilation_one && (NDim == 2 || NDim == 3)) { auto indiceNumCpu = indiceNum.cpu(); if (NDim == 2) { tv::SimpleVector<Index, 2> ou_(outSpatialShape.begin(), outSpatialShape.end()); tv::dispatch_int_noexcept<1, 3, 5>(kernelSize[0], [&](auto K0C) { tv::dispatch_int_noexcept<1, 3, 5>(kernelSize[1], [&](auto K1C) { constexpr int K0 = TV_DECLTYPE(K0C)::value; constexpr int K1 = TV_DECLTYPE(K1C)::value; found = true; getSubMIndicePairsKernel2<Index, IndexGrid, K0, K1> <<<tv::cuda::getBlocks(numActIn), tv::cuda::CUDA_NUM_THREADS, 0, stream>>>( tv::torch2tv<Index>(indicesIn), tv::torch2tv<IndexGrid>(gridsOut), tv::torch2tv<Index>(indicePairs), tv::torch2tv<Index>(indiceNum), ou_, spatialVolume); }); }); } else if (NDim == 3) { tv::SimpleVector<Index, 3> ou_(outSpatialShape.begin(), outSpatialShape.end()); tv::dispatch_int_noexcept<1, 3, 5>(kernelSize[0], [&](auto K0C) { tv::dispatch_int_noexcept<1, 3, 5>(kernelSize[1], [&](auto K1C) { tv::dispatch_int_noexcept<1, 3, 5>( kernelSize[2], [&](auto K2C) { constexpr int K0 = TV_DECLTYPE(K0C)::value; constexpr int K1 = TV_DECLTYPE(K1C)::value; constexpr int K2 = TV_DECLTYPE(K2C)::value; found = true; getSubMIndicePairsKernel3<Index, IndexGrid, K0, K1, K2> <<<tv::cuda::getBlocks(numActIn), tv::cuda::CUDA_NUM_THREADS, 0, stream>>>( tv::torch2tv<Index>(indicesIn), tv::torch2tv<IndexGrid>(gridsOut), tv::torch2tv<Index>(indicePairs), tv::torch2tv<Index>(indiceNum), ou_, spatialVolume); }); }); }); } } if (!found) { tv::DispatchInt< max_kernel_vol_t>()(ndim, std::less_equal<int>(), [&](auto I2) { constexpr int MaxKernelVolume = TV_DECLTYPE(I2)::value; getSubMIndicePairsKernel<Index, IndexGrid, NDim, MaxKernelVolume> <<<tv::cuda::getBlocks(numActIn), tv::cuda::CUDA_NUM_THREADS, 0, stream>>>(tv::torch2tv<Index>(indicesIn), tv::torch2tv<IndexGrid>(gridsOut), tv::torch2tv<Index>(indicePairs), tv::torch2tv<Index>(indiceNum), ks, st, pa, di, ou); TV_CHECK_CUDA_ERR_V2("getSubMIndicePairsKernel failed"); }); } // tv::ssprint("getSubMIndicePairsKernel", timer.report() / 1000.0); } if (resetGrid && (!useHash)) { resetGridSubMKernel<Index, IndexGrid, NDim> <<<tv::cuda::getBlocks(numActIn), tv::cuda::CUDA_NUM_THREADS, 0, stream>>>(indicesIn.data_ptr<Index>(), tv::torch2tv<IndexGrid>(gridsOut), ou, numActIn); TV_CHECK_CUDA_ERR_V2("resetGridKernel failed"); } }); }); if (failed){ return -1; } return numActIn; } } // namespace spconv
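The index-pair builders above scatter each active input row into a dense spatial grid (or a cuckoo hash table when useHash is set) and then, for every kernel offset, look the neighbouring location up again to emit (input, output) pairs. The sketch below is a minimal, hypothetical 1-D re-creation of the dense-grid path only; names such as buildGrid and gatherPairs are mine, not spconv's, and the real library additionally handles N-D shapes, batching and grid resets.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void buildGrid(const int *coords, int numAct, int *grid) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < numAct) grid[coords[i]] = i;          // grid[x] = row index of the active point at x
}

// For submanifold convolution the output set equals the input set, so for every input
// row and kernel offset we only check whether the shifted coordinate is also active.
__global__ void gatherPairs(const int *coords, int numAct, const int *grid,
                            int spatialSize, int kernelSize,
                            int *pairs /* [kernelSize][2][numAct] */, int *pairCount) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= numAct) return;
    int half = kernelSize / 2;
    for (int k = 0; k < kernelSize; ++k) {
        int nb = coords[i] + (k - half);
        if (nb < 0 || nb >= spatialSize) continue;
        int j = grid[nb];
        if (j < 0) continue;                       // neighbour not active
        int slot = atomicAdd(&pairCount[k], 1);
        pairs[(k * 2 + 0) * numAct + slot] = j;    // input row
        pairs[(k * 2 + 1) * numAct + slot] = i;    // output row
    }
}

int main() {
    const int spatialSize = 16, kernelSize = 3;
    const int hCoords[] = {1, 2, 3, 7, 8, 12};
    const int numAct = sizeof(hCoords) / sizeof(int);

    int *dCoords, *dGrid, *dPairs, *dCount;
    cudaMalloc(&dCoords, numAct * sizeof(int));
    cudaMalloc(&dGrid, spatialSize * sizeof(int));
    cudaMalloc(&dPairs, kernelSize * 2 * numAct * sizeof(int));
    cudaMalloc(&dCount, kernelSize * sizeof(int));
    cudaMemcpy(dCoords, hCoords, sizeof(hCoords), cudaMemcpyHostToDevice);
    cudaMemset(dGrid, 0xff, spatialSize * sizeof(int));   // -1 marks an empty grid cell
    cudaMemset(dCount, 0, kernelSize * sizeof(int));

    buildGrid<<<1, 128>>>(dCoords, numAct, dGrid);
    gatherPairs<<<1, 128>>>(dCoords, numAct, dGrid, spatialSize, kernelSize, dPairs, dCount);

    int hCount[kernelSize];
    cudaMemcpy(hCount, dCount, sizeof(hCount), cudaMemcpyDeviceToHost);
    for (int k = 0; k < kernelSize; ++k)
        printf("kernel offset %d: %d pairs\n", k - kernelSize / 2, hCount[k]);

    cudaFree(dCoords); cudaFree(dGrid); cudaFree(dPairs); cudaFree(dCount);
    return 0;
}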
9be2cc4a0804e3448811a056d2a3ccbc11f62714.hip
// !!! This is a file automatically generated by hipify!!! #include "CudaSolver.cuh" #include "utils.h" #include <thrust/device_vector.h> #include <thrust/copy.h> #include <cstdlib> #include <cmath> #include <hip/hip_runtime.h> #ifndef __NVCC__ #define __constant__ #define __device__ #define __global__ #define __host__ typedef unsigned long size_t; enum hipMemcpyKind { hipMemcpyHostToHost, hipMemcpyHostToDevice, hipMemcpyDeviceToHost, hipMemcpyDeviceToDevice }; struct dim3 { int x, y, z; dim3(int x = 1, int y = 1, int z = 1); } blockIdx, blockDim, threadIdx; struct hipError_t { bool operator!=(hipError_t &other); } hipSuccess; template<typename T> hipError_t hipMemcpyToSymbol(T &symbol, const void *src, size_t count); hipError_t hipMemcpy(void *dst, const void *src, size_t count, enum hipMemcpyKind kind); hipError_t hipMemset(void *dst, int value, size_t count); char *hipGetErrorString(hipError_t err); int printf(const char *format, ...); hipError_t hipFree(void *p); hipError_t hipMalloc(void **dst, size_t size); hipError_t hipGetLastError(); hipError_t hipDeviceSynchronize(); typedef int hipStream_t; hipError_t *hipConfigureCall(dim3 gridDim, dim3 blockDim, size_t sharedMem = 0, hipStream_t stream = 0); #endif #define SAFE_CALL(Call) { \ hipError_t cuerr = Call; \ if(cuerr != hipSuccess) { \ printf("CUDA error: %s at call \"" #Call "\"\n", hipGetErrorString(cuerr)); \ throw "error in CUDA API function, aborting..."; \ } \ } #define SAFE_KERNEL_CALL(KernelCall) { \ KernelCall; \ hipError_t cuerr = hipGetLastError(); \ if(cuerr != hipSuccess) { \ printf("CUDA error in kernel launch: %s at kernel \"" #KernelCall "\"\n", hipGetErrorString(cuerr)); \ throw "error in CUDA kernel launch, aborting..."; \ } \ cuerr = hipDeviceSynchronize(); \ if(cuerr != hipSuccess) { \ printf("CUDA error in kernel execution: %s at kernel \"" #KernelCall "\"\n", hipGetErrorString(cuerr)); \ throw "error in CUDA kernel execution, aborting..."; \ } \ } __constant__ int d_shapeYZ; __constant__ int d_shapeZ; __constant__ double d_tau; __constant__ double d_h_x; __constant__ double d_h_y; __constant__ double d_h_z; __constant__ double d_L_x; __constant__ double d_L_y; __constant__ double d_L_z; __constant__ double d_a_t; __device__ double laplacian(double *g, int index) { double center = g[index]; return (g[index - d_shapeYZ] - 2.0 * center + g[index + d_shapeYZ]) / (d_h_x * d_h_x) + (g[index - d_shapeZ] - 2.0 * center + g[index + d_shapeZ]) / (d_h_y * d_h_y) + (g[index - 1] - 2.0 * center + g[index + 1]) / (d_h_z * d_h_z); } __device__ int flat_index(int i, int j, int k) { return i * d_shapeYZ + j * d_shapeZ + k; } __global__ void cuda_step(double *grid, double *previous_1, double *previous_2) { int i = blockIdx.z * blockDim.z + threadIdx.z + 1; int j = blockIdx.y * blockDim.y + threadIdx.y + 1; int k = blockIdx.x * blockDim.x + threadIdx.x + 1; int index = flat_index(i, j, k); grid[index] = 2.0 * previous_1[index] - previous_2[index] + d_tau * d_tau * laplacian(previous_1, index); } __global__ void cuda_c1(double *left, double *right, double *result) { int i = blockIdx.z * blockDim.z + threadIdx.z + 1; int j = blockIdx.y * blockDim.y + threadIdx.y + 1; int k = blockIdx.x * blockDim.x + threadIdx.x + 1; int index = flat_index(i, j, k); result[index] = abs(left[index] - right[index]); } __global__ void cuda_squared_error(double *left, double *right, double *result) { int i = blockIdx.z * blockDim.z + threadIdx.z + 1; int j = blockIdx.y * blockDim.y + threadIdx.y + 1; int k = blockIdx.x * blockDim.x + threadIdx.x + 1; 
int index = flat_index(i, j, k); double error = left[index] - right[index]; result[index] = error * error; } __device__ double cuda_u(double t, double x, double y, double z) { return sin(2 * M_PI * x / d_L_x) * sin(M_PI * y / d_L_y) * sin(2 * M_PI * z / d_L_z) * cos(d_a_t * t); } __device__ double cuda_phi(double x, double y, double z) { return sin(2 * M_PI * x / d_L_x) * sin(M_PI * y / d_L_y) * sin(2 * M_PI * z / d_L_z); } __global__ void cuda_fillByGt(double *grid, int n, int start_i, int start_j, int start_k) { int i = blockIdx.z * blockDim.z + threadIdx.z; int j = blockIdx.y * blockDim.y + threadIdx.y; int k = blockIdx.x * blockDim.x + threadIdx.x; int index = flat_index(i, j, k); grid[index] = cuda_u( d_tau * n, d_h_x * (start_i + i), d_h_y * (start_j + j), d_h_z * (start_k + k) ); } __global__ void cuda_init0(double *grid, int start_i, int start_j, int start_k) { int i = blockIdx.z * blockDim.z + threadIdx.z + 1; int j = blockIdx.y * blockDim.y + threadIdx.y + 1; int k = blockIdx.x * blockDim.x + threadIdx.x + 1; int index = flat_index(i, j, k); grid[index] = cuda_phi( d_h_x * (start_i + i), d_h_y * (start_j + j), d_h_z * (start_k + k) ); } __global__ void cuda_init1(double *grid, double *previous) { int i = blockIdx.z * blockDim.z + threadIdx.z + 1; int j = blockIdx.y * blockDim.y + threadIdx.y + 1; int k = blockIdx.x * blockDim.x + threadIdx.x + 1; int index = flat_index(i, j, k); grid[index] = previous[index] + 0.5 * d_tau * d_tau * laplacian(previous, index); } __global__ void cuda_get_slice(int c0, int c1, int c2, double *slice, double *grid) { int grid_idx = c0 + blockIdx.x * c1 + threadIdx.x * c2; int slice_idx = blockIdx.x * blockDim.x + threadIdx.x; slice[slice_idx] = grid[grid_idx]; } __global__ void cuda_set_slice(int c0, int c1, int c2, double *slice, double *grid) { int grid_idx = c0 + blockIdx.x * c1 + threadIdx.x * c2; int slice_idx = blockIdx.x * blockDim.x + threadIdx.x; grid[grid_idx] = slice[slice_idx]; } dim3 blockSizeFull; dim3 blockSizeInner; dim3 gridSizeFull; dim3 gridSizeInner; CudaSolver::CudaSolver(double T, double L_x, double L_y, double L_z, int N, int K, U u, Phi phi, int shapeX, int shapeY, int shapeZ) : MathSolver(T, L_x, L_y, L_z, N, K, u, phi), sizeInBytes(sizeof(double) * shapeX * shapeY * shapeZ), flatSize(shapeX * shapeY * shapeZ), grid3D(shapeX, shapeY, shapeZ) { gridSizeFull = dim3(1, shapeY, shapeX); blockSizeFull = dim3(shapeZ); gridSizeInner = dim3(1, shapeY - 2, shapeX - 2); blockSizeInner = dim3(shapeZ - 2); hipMemcpyToSymbol(d_h_x, &h_x, sizeof(double)); hipMemcpyToSymbol(d_h_y, &h_y, sizeof(double)); hipMemcpyToSymbol(d_h_z, &h_z, sizeof(double)); hipMemcpyToSymbol(d_tau, &tau, sizeof(double)); hipMemcpyToSymbol(d_L_x, &u.L_x, sizeof(double)); hipMemcpyToSymbol(d_L_y, &u.L_y, sizeof(double)); hipMemcpyToSymbol(d_L_z, &u.L_z, sizeof(double)); hipMemcpyToSymbol(d_a_t, &u.a_t, sizeof(double)); int shapeYZ = shapeY * shapeZ; hipMemcpyToSymbol(d_shapeYZ, &shapeYZ, sizeof(int)); hipMemcpyToSymbol(d_shapeZ, &shapeZ, sizeof(int)); d_grids.resize(N_GRIDS); for (int i = 0; i < N_GRIDS; ++i) { SAFE_CALL(hipMalloc((void **) &d_grids[i], sizeInBytes)); } SAFE_CALL(hipMalloc((void **) &d_groundTruth, sizeInBytes)); SAFE_CALL(hipMalloc((void **) &d_errorC1, sizeInBytes)); SAFE_CALL(hipMalloc((void **) &d_errorMSE, sizeInBytes)); SAFE_CALL(hipMemset(d_errorC1, 0, sizeInBytes)); SAFE_CALL(hipMemset(d_errorMSE, 0, sizeInBytes)); int maxSliceSize = max(grid3D.getSliceSize(0), max(grid3D.getSliceSize(1), grid3D.getSliceSize(2))); 
h_slice.resize(maxSliceSize); SAFE_CALL(hipMalloc((void **) &d_slice, sizeof(double) * maxSliceSize)); } CudaSolver::~CudaSolver() { for (int i = 0; i < N_GRIDS; ++i) { SAFE_CALL(hipFree(d_grids[i])); } SAFE_CALL(hipFree(d_groundTruth)); SAFE_CALL(hipFree(d_errorC1)); SAFE_CALL(hipFree(d_errorMSE)); SAFE_CALL(hipFree(d_slice)); } void CudaSolver::init_0(int start_i, int start_j, int start_k) { hipLaunchKernelGGL(( SAFE_KERNEL_CALL((cuda_init0), dim3(gridSizeInner), dim3(blockSizeInner), 0, 0, getCurrentState(0), start_i, start_j, start_k ))); } void CudaSolver::init_1() { double *d_grid = getCurrentState(1); double *d_previous = getCurrentState(0); hipLaunchKernelGGL(( SAFE_KERNEL_CALL((cuda_init1), dim3(gridSizeInner), dim3(blockSizeInner), 0, 0, d_grid, d_previous))); } void CudaSolver::makeStepForInnerNodes(int n) { double *d_grid = getCurrentState(n); double *d_previous_1 = getCurrentState(n - 1); double *d_previous_2 = getCurrentState(n - 2); hipLaunchKernelGGL(( SAFE_KERNEL_CALL((cuda_step), dim3(gridSizeInner), dim3(blockSizeInner), 0, 0, d_grid, d_previous_1, d_previous_2))); } void CudaSolver::updateGroundTruth(int n, int start_i, int start_j, int start_k) { hipLaunchKernelGGL(( SAFE_KERNEL_CALL((cuda_fillByGt), dim3(gridSizeFull), dim3(blockSizeFull), 0, 0, d_groundTruth, n, start_i, start_j, start_k ))); } double CudaSolver::maxAbsoluteErrorInner(int n) { hipLaunchKernelGGL(( SAFE_KERNEL_CALL((cuda_c1), dim3(gridSizeInner), dim3(blockSizeInner), 0, 0, getCurrentState(n), d_groundTruth, d_errorC1 ))); return thrust::reduce(thrust::device, d_errorC1, d_errorC1 + flatSize, 0.0, thrust::maximum<double>()); } double CudaSolver::sumSquaredErrorInner(int n) { hipLaunchKernelGGL(( SAFE_KERNEL_CALL((cuda_squared_error), dim3(gridSizeInner), dim3(blockSizeInner), 0, 0, getCurrentState(n), d_groundTruth, d_errorMSE ))); return thrust::reduce(thrust::device, d_errorMSE, d_errorMSE + flatSize, 0.0, thrust::plus<double>()); } int CudaSolver::getSliceSize(int axis) { return grid3D.getSliceSize(axis); } std::vector<double> CudaSolver::getSlice(int n, int index, int axis) { int c0, c1, c2, gridSize, blockSize; grid3D.getSliceParams(axis, c0, c1, c2, gridSize, blockSize); hipLaunchKernelGGL(( SAFE_KERNEL_CALL((cuda_get_slice), dim3(gridSize), dim3(blockSize), 0, 0, c0 * index, c1, c2, d_slice, getCurrentState(n) ))); SAFE_CALL(hipMemcpy(h_slice.data(), d_slice, getSliceSize(axis) * sizeof(double), hipMemcpyDeviceToHost)); return std::vector<double>(h_slice.begin(), h_slice.begin() + getSliceSize(axis)); } void CudaSolver::setSlice(int n, int index, int axis, std::vector<double> &slice) { int c0, c1, c2, gridSize, blockSize; grid3D.getSliceParams(axis, c0, c1, c2, gridSize, blockSize); SAFE_CALL(hipMemcpy(d_slice, slice.data(), getSliceSize(axis) * sizeof(double), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( SAFE_KERNEL_CALL((cuda_set_slice), dim3(gridSize), dim3(blockSize), 0, 0, c0 * index, c1, c2, d_slice, getCurrentState(n) ))); } void CudaSolver::setZeros(int n, int index, int axis) { int c0, c1, c2, gridSize, blockSize; grid3D.getSliceParams(axis, c0, c1, c2, gridSize, blockSize); SAFE_CALL(hipMemset(d_slice, 0, getSliceSize(axis) * sizeof(double))); hipLaunchKernelGGL(( SAFE_KERNEL_CALL((cuda_set_slice), dim3(gridSize), dim3(blockSize), 0, 0, c0 * index, c1, c2, d_slice, getCurrentState(n) ))); } double *CudaSolver::getCurrentState(int n) { return d_grids[n % N_GRIDS]; } double CudaSolver::maxGroundTruth() { return thrust::reduce(thrust::device, d_groundTruth, d_groundTruth + flatSize, 
0.0, thrust::maximum<double>()); }
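Two patterns the solver above leans on are worth isolating: scalar parameters (tau, h_x, L_x, ...) are copied once into __constant__ symbols with cudaMemcpyToSymbol/hipMemcpyToSymbol so the kernels need no long argument lists, and the error norms are reduced with thrust::reduce directly over raw device pointers. Below is a minimal sketch of both patterns together, with hypothetical names d_scale and scaleKernel, assuming CUDA and the default stream.

#include <cstdio>
#include <cuda_runtime.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>

__constant__ double d_scale;   // plays the role of d_tau / d_h_x etc. above

__global__ void scaleKernel(double *v, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) v[i] = d_scale * i;   // every thread reads the constant symbol
}

int main() {
    const int n = 1 << 10;
    double scale = 0.5;
    cudaMemcpyToSymbol(d_scale, &scale, sizeof(double));   // one host-to-constant copy

    double *d_v;
    cudaMalloc(&d_v, n * sizeof(double));
    scaleKernel<<<(n + 255) / 256, 256>>>(d_v, n);

    // Reduce directly over the raw device pointer, as the solver does for its error grids.
    double maxVal = thrust::reduce(thrust::device, d_v, d_v + n, 0.0, thrust::maximum<double>());
    printf("max = %f (expected %f)\n", maxVal, scale * (n - 1));

    cudaFree(d_v);
    return 0;
}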
9be2cc4a0804e3448811a056d2a3ccbc11f62714.cu
#include "CudaSolver.cuh" #include "utils.h" #include <thrust/device_vector.h> #include <thrust/copy.h> #include <cstdlib> #include <cmath> #include <cuda.h> #ifndef __NVCC__ #define __constant__ #define __device__ #define __global__ #define __host__ typedef unsigned long size_t; enum cudaMemcpyKind { cudaMemcpyHostToHost, cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost, cudaMemcpyDeviceToDevice }; struct dim3 { int x, y, z; dim3(int x = 1, int y = 1, int z = 1); } blockIdx, blockDim, threadIdx; struct cudaError_t { bool operator!=(cudaError_t &other); } cudaSuccess; template<typename T> cudaError_t cudaMemcpyToSymbol(T &symbol, const void *src, size_t count); cudaError_t cudaMemcpy(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind); cudaError_t cudaMemset(void *dst, int value, size_t count); char *cudaGetErrorString(cudaError_t err); int printf(const char *format, ...); cudaError_t cudaFree(void *p); cudaError_t cudaMalloc(void **dst, size_t size); cudaError_t cudaGetLastError(); cudaError_t cudaDeviceSynchronize(); typedef int cudaStream_t; cudaError_t *cudaConfigureCall(dim3 gridDim, dim3 blockDim, size_t sharedMem = 0, cudaStream_t stream = 0); #endif #define SAFE_CALL(Call) { \ cudaError_t cuerr = Call; \ if(cuerr != cudaSuccess) { \ printf("CUDA error: %s at call \"" #Call "\"\n", cudaGetErrorString(cuerr)); \ throw "error in CUDA API function, aborting..."; \ } \ } #define SAFE_KERNEL_CALL(KernelCall) { \ KernelCall; \ cudaError_t cuerr = cudaGetLastError(); \ if(cuerr != cudaSuccess) { \ printf("CUDA error in kernel launch: %s at kernel \"" #KernelCall "\"\n", cudaGetErrorString(cuerr)); \ throw "error in CUDA kernel launch, aborting..."; \ } \ cuerr = cudaDeviceSynchronize(); \ if(cuerr != cudaSuccess) { \ printf("CUDA error in kernel execution: %s at kernel \"" #KernelCall "\"\n", cudaGetErrorString(cuerr)); \ throw "error in CUDA kernel execution, aborting..."; \ } \ } __constant__ int d_shapeYZ; __constant__ int d_shapeZ; __constant__ double d_tau; __constant__ double d_h_x; __constant__ double d_h_y; __constant__ double d_h_z; __constant__ double d_L_x; __constant__ double d_L_y; __constant__ double d_L_z; __constant__ double d_a_t; __device__ double laplacian(double *g, int index) { double center = g[index]; return (g[index - d_shapeYZ] - 2.0 * center + g[index + d_shapeYZ]) / (d_h_x * d_h_x) + (g[index - d_shapeZ] - 2.0 * center + g[index + d_shapeZ]) / (d_h_y * d_h_y) + (g[index - 1] - 2.0 * center + g[index + 1]) / (d_h_z * d_h_z); } __device__ int flat_index(int i, int j, int k) { return i * d_shapeYZ + j * d_shapeZ + k; } __global__ void cuda_step(double *grid, double *previous_1, double *previous_2) { int i = blockIdx.z * blockDim.z + threadIdx.z + 1; int j = blockIdx.y * blockDim.y + threadIdx.y + 1; int k = blockIdx.x * blockDim.x + threadIdx.x + 1; int index = flat_index(i, j, k); grid[index] = 2.0 * previous_1[index] - previous_2[index] + d_tau * d_tau * laplacian(previous_1, index); } __global__ void cuda_c1(double *left, double *right, double *result) { int i = blockIdx.z * blockDim.z + threadIdx.z + 1; int j = blockIdx.y * blockDim.y + threadIdx.y + 1; int k = blockIdx.x * blockDim.x + threadIdx.x + 1; int index = flat_index(i, j, k); result[index] = abs(left[index] - right[index]); } __global__ void cuda_squared_error(double *left, double *right, double *result) { int i = blockIdx.z * blockDim.z + threadIdx.z + 1; int j = blockIdx.y * blockDim.y + threadIdx.y + 1; int k = blockIdx.x * blockDim.x + threadIdx.x + 1; int index = flat_index(i, j, k); 
double error = left[index] - right[index]; result[index] = error * error; } __device__ double cuda_u(double t, double x, double y, double z) { return sin(2 * M_PI * x / d_L_x) * sin(M_PI * y / d_L_y) * sin(2 * M_PI * z / d_L_z) * cos(d_a_t * t); } __device__ double cuda_phi(double x, double y, double z) { return sin(2 * M_PI * x / d_L_x) * sin(M_PI * y / d_L_y) * sin(2 * M_PI * z / d_L_z); } __global__ void cuda_fillByGt(double *grid, int n, int start_i, int start_j, int start_k) { int i = blockIdx.z * blockDim.z + threadIdx.z; int j = blockIdx.y * blockDim.y + threadIdx.y; int k = blockIdx.x * blockDim.x + threadIdx.x; int index = flat_index(i, j, k); grid[index] = cuda_u( d_tau * n, d_h_x * (start_i + i), d_h_y * (start_j + j), d_h_z * (start_k + k) ); } __global__ void cuda_init0(double *grid, int start_i, int start_j, int start_k) { int i = blockIdx.z * blockDim.z + threadIdx.z + 1; int j = blockIdx.y * blockDim.y + threadIdx.y + 1; int k = blockIdx.x * blockDim.x + threadIdx.x + 1; int index = flat_index(i, j, k); grid[index] = cuda_phi( d_h_x * (start_i + i), d_h_y * (start_j + j), d_h_z * (start_k + k) ); } __global__ void cuda_init1(double *grid, double *previous) { int i = blockIdx.z * blockDim.z + threadIdx.z + 1; int j = blockIdx.y * blockDim.y + threadIdx.y + 1; int k = blockIdx.x * blockDim.x + threadIdx.x + 1; int index = flat_index(i, j, k); grid[index] = previous[index] + 0.5 * d_tau * d_tau * laplacian(previous, index); } __global__ void cuda_get_slice(int c0, int c1, int c2, double *slice, double *grid) { int grid_idx = c0 + blockIdx.x * c1 + threadIdx.x * c2; int slice_idx = blockIdx.x * blockDim.x + threadIdx.x; slice[slice_idx] = grid[grid_idx]; } __global__ void cuda_set_slice(int c0, int c1, int c2, double *slice, double *grid) { int grid_idx = c0 + blockIdx.x * c1 + threadIdx.x * c2; int slice_idx = blockIdx.x * blockDim.x + threadIdx.x; grid[grid_idx] = slice[slice_idx]; } dim3 blockSizeFull; dim3 blockSizeInner; dim3 gridSizeFull; dim3 gridSizeInner; CudaSolver::CudaSolver(double T, double L_x, double L_y, double L_z, int N, int K, U u, Phi phi, int shapeX, int shapeY, int shapeZ) : MathSolver(T, L_x, L_y, L_z, N, K, u, phi), sizeInBytes(sizeof(double) * shapeX * shapeY * shapeZ), flatSize(shapeX * shapeY * shapeZ), grid3D(shapeX, shapeY, shapeZ) { gridSizeFull = dim3(1, shapeY, shapeX); blockSizeFull = dim3(shapeZ); gridSizeInner = dim3(1, shapeY - 2, shapeX - 2); blockSizeInner = dim3(shapeZ - 2); cudaMemcpyToSymbol(d_h_x, &h_x, sizeof(double)); cudaMemcpyToSymbol(d_h_y, &h_y, sizeof(double)); cudaMemcpyToSymbol(d_h_z, &h_z, sizeof(double)); cudaMemcpyToSymbol(d_tau, &tau, sizeof(double)); cudaMemcpyToSymbol(d_L_x, &u.L_x, sizeof(double)); cudaMemcpyToSymbol(d_L_y, &u.L_y, sizeof(double)); cudaMemcpyToSymbol(d_L_z, &u.L_z, sizeof(double)); cudaMemcpyToSymbol(d_a_t, &u.a_t, sizeof(double)); int shapeYZ = shapeY * shapeZ; cudaMemcpyToSymbol(d_shapeYZ, &shapeYZ, sizeof(int)); cudaMemcpyToSymbol(d_shapeZ, &shapeZ, sizeof(int)); d_grids.resize(N_GRIDS); for (int i = 0; i < N_GRIDS; ++i) { SAFE_CALL(cudaMalloc((void **) &d_grids[i], sizeInBytes)); } SAFE_CALL(cudaMalloc((void **) &d_groundTruth, sizeInBytes)); SAFE_CALL(cudaMalloc((void **) &d_errorC1, sizeInBytes)); SAFE_CALL(cudaMalloc((void **) &d_errorMSE, sizeInBytes)); SAFE_CALL(cudaMemset(d_errorC1, 0, sizeInBytes)); SAFE_CALL(cudaMemset(d_errorMSE, 0, sizeInBytes)); int maxSliceSize = max(grid3D.getSliceSize(0), max(grid3D.getSliceSize(1), grid3D.getSliceSize(2))); h_slice.resize(maxSliceSize); 
SAFE_CALL(cudaMalloc((void **) &d_slice, sizeof(double) * maxSliceSize)); } CudaSolver::~CudaSolver() { for (int i = 0; i < N_GRIDS; ++i) { SAFE_CALL(cudaFree(d_grids[i])); } SAFE_CALL(cudaFree(d_groundTruth)); SAFE_CALL(cudaFree(d_errorC1)); SAFE_CALL(cudaFree(d_errorMSE)); SAFE_CALL(cudaFree(d_slice)); } void CudaSolver::init_0(int start_i, int start_j, int start_k) { SAFE_KERNEL_CALL((cuda_init0<<<gridSizeInner, blockSizeInner>>>( getCurrentState(0), start_i, start_j, start_k ))); } void CudaSolver::init_1() { double *d_grid = getCurrentState(1); double *d_previous = getCurrentState(0); SAFE_KERNEL_CALL((cuda_init1<<<gridSizeInner, blockSizeInner>>>(d_grid, d_previous))); } void CudaSolver::makeStepForInnerNodes(int n) { double *d_grid = getCurrentState(n); double *d_previous_1 = getCurrentState(n - 1); double *d_previous_2 = getCurrentState(n - 2); SAFE_KERNEL_CALL((cuda_step<<<gridSizeInner, blockSizeInner>>>(d_grid, d_previous_1, d_previous_2))); } void CudaSolver::updateGroundTruth(int n, int start_i, int start_j, int start_k) { SAFE_KERNEL_CALL((cuda_fillByGt<<<gridSizeFull, blockSizeFull>>>( d_groundTruth, n, start_i, start_j, start_k ))); } double CudaSolver::maxAbsoluteErrorInner(int n) { SAFE_KERNEL_CALL((cuda_c1<<<gridSizeInner, blockSizeInner>>>( getCurrentState(n), d_groundTruth, d_errorC1 ))); return thrust::reduce(thrust::device, d_errorC1, d_errorC1 + flatSize, 0.0, thrust::maximum<double>()); } double CudaSolver::sumSquaredErrorInner(int n) { SAFE_KERNEL_CALL((cuda_squared_error<<<gridSizeInner, blockSizeInner>>>( getCurrentState(n), d_groundTruth, d_errorMSE ))); return thrust::reduce(thrust::device, d_errorMSE, d_errorMSE + flatSize, 0.0, thrust::plus<double>()); } int CudaSolver::getSliceSize(int axis) { return grid3D.getSliceSize(axis); } std::vector<double> CudaSolver::getSlice(int n, int index, int axis) { int c0, c1, c2, gridSize, blockSize; grid3D.getSliceParams(axis, c0, c1, c2, gridSize, blockSize); SAFE_KERNEL_CALL((cuda_get_slice<<<gridSize, blockSize>>>( c0 * index, c1, c2, d_slice, getCurrentState(n) ))); SAFE_CALL(cudaMemcpy(h_slice.data(), d_slice, getSliceSize(axis) * sizeof(double), cudaMemcpyDeviceToHost)); return std::vector<double>(h_slice.begin(), h_slice.begin() + getSliceSize(axis)); } void CudaSolver::setSlice(int n, int index, int axis, std::vector<double> &slice) { int c0, c1, c2, gridSize, blockSize; grid3D.getSliceParams(axis, c0, c1, c2, gridSize, blockSize); SAFE_CALL(cudaMemcpy(d_slice, slice.data(), getSliceSize(axis) * sizeof(double), cudaMemcpyHostToDevice)); SAFE_KERNEL_CALL((cuda_set_slice<<<gridSize, blockSize>>>( c0 * index, c1, c2, d_slice, getCurrentState(n) ))); } void CudaSolver::setZeros(int n, int index, int axis) { int c0, c1, c2, gridSize, blockSize; grid3D.getSliceParams(axis, c0, c1, c2, gridSize, blockSize); SAFE_CALL(cudaMemset(d_slice, 0, getSliceSize(axis) * sizeof(double))); SAFE_KERNEL_CALL((cuda_set_slice<<<gridSize, blockSize>>>( c0 * index, c1, c2, d_slice, getCurrentState(n) ))); } double *CudaSolver::getCurrentState(int n) { return d_grids[n % N_GRIDS]; } double CudaSolver::maxGroundTruth() { return thrust::reduce(thrust::device, d_groundTruth, d_groundTruth + flatSize, 0.0, thrust::maximum<double>()); }
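A note on the launch macros in this pair: in the .hip half the converter appears to have wrapped SAFE_KERNEL_CALL itself inside hipLaunchKernelGGL (for example around cuda_init0), so the macro rather than a kernel becomes the launch argument, and those lines would not compile as written. The original .cu convention, shown in self-contained form below, keeps the kernel launch as the sole argument of SAFE_KERNEL_CALL so that both the launch error and the execution error are checked. This is a sketch only; the return-based error path is my simplification of the original throw.

#include <cstdio>
#include <cuda_runtime.h>

// Same shape as the SAFE_CALL / SAFE_KERNEL_CALL macros above: check an API call,
// then check both the launch error and the execution error of a kernel.
#define SAFE_CALL(call)                                                              \
    do {                                                                             \
        cudaError_t err = (call);                                                    \
        if (err != cudaSuccess) {                                                    \
            printf("CUDA error: %s at \"" #call "\"\n", cudaGetErrorString(err));    \
            return 1;                                                                \
        }                                                                            \
    } while (0)

#define SAFE_KERNEL_CALL(launch)                                                     \
    do {                                                                             \
        launch;                                                                      \
        SAFE_CALL(cudaGetLastError());       /* launch-time error    */              \
        SAFE_CALL(cudaDeviceSynchronize());  /* execution-time error */              \
    } while (0)

__global__ void fill(double *v, int n, double value) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) v[i] = value;
}

int main() {
    const int n = 1024;
    double *d_v = nullptr;
    SAFE_CALL(cudaMalloc(&d_v, n * sizeof(double)));
    // The kernel launch is the macro argument, exactly as in the .cu file above.
    SAFE_KERNEL_CALL((fill<<<(n + 255) / 256, 256>>>(d_v, n, 1.0)));
    SAFE_CALL(cudaFree(d_v));
    printf("ok\n");
    return 0;
}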
07c0fd3b4a36bc9a6a7a104e04769bb0d494abba.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @generated from sparse/blas/zparilut_candidates.cu, normal z -> s, Wed Jan 2 14:18:53 2019 */ #include "magmasparse_internal.h" #define PRECISION_s __global__ void sparilut_candidates_count_1( const magma_int_t num_rows, const magma_index_t* L0_row, const magma_index_t* L0_col, const magma_index_t* U0_row, const magma_index_t* U0_col, const magma_index_t* L_row, const magma_index_t* L_col, const magma_index_t* U_row, const magma_index_t* U_col, magma_index_t* L_new_row, magma_index_t* U_new_row) { int row = blockDim.x * blockIdx.x + threadIdx.x; //for(int row=0; row<num_rows; row++){ if (row < num_rows) { int numaddrowL = 0; int ilu0 = L0_row[row]; int ilut = L_row[row]; int endilu0 = L0_row[ row+1 ]; int endilut = L_row[ row+1 ]; int ilu0col; int ilutcol; do{ ilu0col = L0_col[ ilu0 ]; ilutcol = L_col[ ilut ]; if(ilu0col == ilutcol ){ ilu0++; ilut++; } else if(ilutcol<ilu0col ){ ilut++; } else { // this element is missing in the current approximation // mark it as candidate numaddrowL++; ilu0++; } } while (ilut < endilut && ilu0 < endilu0); // do the rest if existing if(ilu0<endilu0 ){ do{ numaddrowL++; ilu0++; }while(ilu0<endilu0 ); } L_new_row[ row ] = L_new_row[ row ]+numaddrowL; magma_int_t numaddrowU = 0; ilu0 = U0_row[row]; ilut = U_row[row]; endilu0 = U0_row[ row+1 ]; endilut = U_row[ row+1 ]; do{ ilu0col = U0_col[ ilu0 ]; ilutcol = U_col[ ilut ]; if(ilu0col == ilutcol ){ ilu0++; ilut++; } else if(ilutcol<ilu0col ){ ilut++; } else { // this element is missing in the current approximation // mark it as candidate numaddrowU++; ilu0++; } }while(ilut<endilut && ilu0<endilu0 ); if(ilu0<endilu0 ){ do{ numaddrowU++; ilu0++; }while(ilu0<endilu0 ); } U_new_row[ row ] = U_new_row[ row ]+numaddrowU; } } __global__ void sparilut_candidates_count_2( const magma_int_t num_rows, const magma_index_t* L0_row, const magma_index_t* L0_col, const magma_index_t* U0_row, const magma_index_t* U0_col, const magma_index_t* L_row, const magma_index_t* L_col, const magma_index_t* U_row, const magma_index_t* U_col, magma_index_t* L_new_row, magma_index_t* U_new_row) { int row = blockDim.x * blockIdx.x + threadIdx.x; //for(int row=0; row<num_rows; row++){ if (row < num_rows) { // how to determine candidates: // for each node i, look at any "intermediate" neighbor nodes numbered // less, and then see if this neighbor has another neighbor j numbered // more than the intermediate; if so, fill in is (i,j) if it is not // already nonzero int numaddrowL = 0, numaddrowU = 0; // loop first element over row - only for elements smaller the diagonal for(int el1=L_row[row]; el1<L_row[row+1]-1; el1++ ){ int col1 = L_col[ el1 ]; // now check the upper triangular // second loop first element over row - only for elements larger the intermediate for(int el2 = U_row[ col1 ]+1; el2 < U_row[ col1+1 ]; el2++ ){ int col2 = U_col[ el2 ]; int cand_col = col2; // check whether this element already exists // first case: part of L if(cand_col < row ){ // check whether this element already exists in L // int exist = 0; // for(int k=L_row[cand_row]; k<L_row[cand_row+1]; k++ ){ // if(L_col[ k ] == cand_col ){ // exist = 1; // //break; // } // } // if it does not exist, increase counter for this location // use the entry one further down to allow for parallel insertion // if(exist == 0 ){ numaddrowL++; // } } else { // 
check whether this element already exists in U // int exist = 0; // for(int k=U_row[cand_row]; k<U_row[cand_row+1]; k++ ){ // if(U_col[ k ] == cand_col ){ // exist = 1; // //break; // } // } // if(exist == 0 ){ //printf("checked row: %d this element does not yet exist in L: (%d,%d)\n", cand_row, cand_col); numaddrowU++; // } } } } U_new_row[ row ] = U_new_row[ row ]+numaddrowU; L_new_row[ row ] = L_new_row[ row ]+numaddrowL; } } __global__ void sparilut_candidates_insert_1( const magma_int_t num_rows, const magma_index_t* L0_row, const magma_index_t* L0_col, const magma_index_t* U0_row, const magma_index_t* U0_col, const magma_index_t* L_row, const magma_index_t* L_col, const magma_index_t* U_row, const magma_index_t* U_col, magma_index_t* L_new_row, magma_index_t* L_new_rowidx, magma_index_t* L_new_col, float* L_new_val, magma_index_t* insertedL, magma_index_t* U_new_row, magma_index_t* U_new_rowidx, magma_index_t* U_new_col, float* U_new_val, magma_index_t* insertedU) { int row = blockDim.x * blockIdx.x + threadIdx.x; //for(int row=0; row<num_rows; row++){ if (row < num_rows) { int laddL = 0; int offsetL = L_new_row[row]; int ilu0 = L0_row[row]; int ilut = L_row[row]; int endilu0 = L0_row[ row+1 ]; int endilut = L_row[ row+1 ]; int ilu0col; int ilutcol; do{ ilu0col = L0_col[ ilu0 ]; ilutcol = L_col[ ilut ]; if(ilu0col == ilutcol ){ ilu0++; ilut++; } else if(ilutcol<ilu0col ){ ilut++; } else { // this element is missing in the current approximation // mark it as candidate L_new_col[ offsetL + laddL ] = ilu0col; L_new_rowidx[ offsetL + laddL ] = row; L_new_val[ offsetL + laddL ] = MAGMA_S_ONE + MAGMA_S_ONE + MAGMA_S_ONE; laddL++; ilu0++; } } while(ilut<endilut && ilu0<endilu0 ); if (ilu0<endilu0){ do{ ilu0col = L0_col[ ilu0 ]; L_new_col[ offsetL + laddL ] = ilu0col; L_new_rowidx[ offsetL + laddL ] = row; L_new_val[ offsetL + laddL ] = MAGMA_S_ONE + MAGMA_S_ONE + MAGMA_S_ONE; laddL++; ilu0++; }while(ilu0<endilu0 ); } insertedL[row] = laddL; int laddU = 0; int offsetU = U_new_row[row]; ilu0 = U0_row[row]; ilut = U_row[row]; endilu0 = U0_row[ row+1 ]; endilut = U_row[ row+1 ]; do{ ilu0col = U0_col[ ilu0 ]; ilutcol = U_col[ ilut ]; if(ilu0col == ilutcol ){ ilu0++; ilut++; } else if(ilutcol<ilu0col ){ ilut++; } else { // this element is missing in the current approximation // mark it as candidate U_new_col[ offsetU + laddU ] = ilu0col; U_new_rowidx[ offsetU + laddU ] = row; U_new_val[ offsetU + laddU ] = MAGMA_S_ONE + MAGMA_S_ONE + MAGMA_S_ONE; laddU++; ilu0++; } }while(ilut<endilut && ilu0<endilu0 ); if(ilu0<endilu0 ){ do{ ilu0col = U0_col[ ilu0 ]; U_new_col[ offsetU + laddU ] = ilu0col; U_new_rowidx[ offsetU + laddU ] = row; U_new_val[ offsetU + laddU ] = MAGMA_S_ONE + MAGMA_S_ONE + MAGMA_S_ONE; laddU++; ilu0++; }while(ilu0<endilu0 ); } insertedU[row] = laddU; } } __global__ void sparilut_candidates_insert_2( const magma_int_t num_rows, const magma_index_t* L0_row, const magma_index_t* L0_col, const magma_index_t* U0_row, const magma_index_t* U0_col, const magma_index_t* L_row, const magma_index_t* L_col, const magma_index_t* U_row, const magma_index_t* U_col, magma_index_t* L_new_row, magma_index_t* L_new_rowidx, magma_index_t* L_new_col, float* L_new_val, magma_index_t* insertedL, magma_index_t* U_new_row, magma_index_t* U_new_rowidx, magma_index_t* U_new_col, float* U_new_val, magma_index_t* insertedU) { int row = blockDim.x * blockIdx.x + threadIdx.x; //for(int row=0; row<num_rows; row++){ if (row < num_rows) { int cand_row = row; int laddL = 0; int laddU = 0; int offsetL = L_new_row[row] 
+ insertedL[row]; int offsetU = U_new_row[row] + insertedU[row]; // loop first element over row - only for elements smaller the diagonal for(int el1=L_row[row]; el1<L_row[row+1]-1; el1++ ){ int col1 = L_col[ el1 ]; // now check the upper triangular // second loop first element over row - only for elements larger the intermediate for(int el2 = U_row[ col1 ]+1; el2 < U_row[ col1+1 ]; el2++ ){ int col2 = U_col[ el2 ]; int cand_col = col2; // check whether this element already exists // first case: part of L if(cand_col < row ){ int exist = 0; for(int k=L_row[cand_row]; k<L_row[cand_row+1]; k++ ){ if(L_col[ k ] == cand_col ){ exist = -1; // printf("already exists:(%d,%d\n", row, cand_col); //break; } } for(int k=L_new_row[cand_row]; k<L_new_row[cand_row+1]; k++){ if(L_new_col[ k ] == cand_col ){ // element included in LU and nonzero // printf("already inserted:(%d,%d\n", row, cand_col); exist = -2; //break; } } L_new_rowidx[ offsetL + laddL ] = cand_row; L_new_col[ offsetL + laddL ] = (exist == 0) ? cand_col : exist; L_new_val[ offsetL + laddL ] = (exist == 0) ? MAGMA_S_ONE : MAGMA_S_ZERO; laddL++; } else { // check whether this element already exists in U int exist = 0; for(int k=U_row[cand_row]; k<U_row[cand_row+1]; k++ ){ if(U_col[ k ] == cand_col ){ // printf("already exists:(%d,%d\n", row, cand_col); exist = -1; //break; } } for(int k=U_new_row[cand_row]; k<U_new_row[cand_row+1]; k++){ if(U_new_col[ k ] == cand_col ){ // element included in LU and nonzero // printf("already inserted:(%d,%d==%d) k:%d -> %d -> %d\n", row, cand_col , U_new_col[ k ], U_new_row[cand_row], k, U_new_row[cand_row+1] ); exist = -2; //break; } } U_new_rowidx[ offsetU + laddU ] = cand_row; U_new_col[ offsetU + laddU ] = (exist == 0) ? cand_col : exist; U_new_val[ offsetU + laddU ] = (exist == 0) ? MAGMA_S_ONE : MAGMA_S_ZERO; laddU++; } } } } } /***************************************************************************//** Purpose ------- This function identifies the locations with a potential nonzero ILU residual R = A - L*U where L and U are the current incomplete factors. Nonzero ILU residuals are possible 1 where A is nonzero but L and U have no nonzero entry 2 where the product L*U has fill-in but the location is not included in L or U We assume that the incomplete factors are exact fro the elements included in the current pattern. This is the GPU implementation of the candidate search. 2 GPU kernels are used: the first is a dry run assessing the memory need, the second then computes the candidate locations, the third eliminates float entries. The fourth kernel ensures the elements in a row are sorted for increasing column index. Arguments --------- @param[in] L0 magma_s_matrix tril(ILU(0) ) pattern of original system matrix. @param[in] U0 magma_s_matrix triu(ILU(0) ) pattern of original system matrix. @param[in] L magma_s_matrix Current lower triangular factor. @param[in] U magma_s_matrix Current upper triangular factor. @param[in,out] L_new magma_s_matrix* List of candidates for L in COO format. @param[in,out] U_new magma_s_matrix* List of candidates for U in COO format. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_saux *******************************************************************************/ extern "C" magma_int_t magma_sparilut_candidates_gpu( magma_s_matrix L0, magma_s_matrix U0, magma_s_matrix L, magma_s_matrix U, magma_s_matrix *L_new, magma_s_matrix *U_new, magma_queue_t queue ) { magma_int_t info = 0; int num_rows = L.num_rows; float thrs = 1e-8; int blocksize1 = 128; int blocksize2 = 1; int dimgrid11 = magma_ceildiv(num_rows, blocksize1 ); int dimgrid12 = 1; int dimgrid13 = 1; dim3 grid1(dimgrid11, dimgrid12, dimgrid13 ); dim3 block1(blocksize1, blocksize2, 1 ); magmaIndex_ptr insertedL = NULL; magmaIndex_ptr insertedU = NULL; magma_smfree(L_new, queue); magma_smfree(U_new, queue); CHECK(magma_index_malloc(&insertedL, num_rows)); CHECK(magma_index_malloc(&insertedU, num_rows)); CHECK(magma_index_malloc(&L_new->drow, num_rows+1)); CHECK(magma_index_malloc(&U_new->drow, num_rows+1)); CHECK(magma_sindexinit_gpu(num_rows+1, L_new->drow, queue)); CHECK(magma_sindexinit_gpu(num_rows+1, U_new->drow, queue)); CHECK(magma_sindexinit_gpu(num_rows, insertedL, queue)); CHECK(magma_sindexinit_gpu(num_rows, insertedU, queue)); L_new->num_rows = L.num_rows; L_new->num_cols = L.num_cols; L_new->storage_type = Magma_CSR; L_new->memory_location = Magma_DEV; U_new->num_rows = L.num_rows; U_new->num_cols = L.num_cols; U_new->storage_type = Magma_CSR; U_new->memory_location = Magma_DEV; hipLaunchKernelGGL(( sparilut_candidates_count_1), dim3(grid1), dim3(block1), 0, queue->cuda_stream(), L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol, L.drow, L.dcol, U.drow, U.dcol, insertedL, insertedU); hipLaunchKernelGGL(( sparilut_candidates_count_2), dim3(grid1), dim3(block1), 0, queue->cuda_stream(), L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol, L.drow, L.dcol, U.drow, U.dcol, insertedL, insertedU); CHECK(magma_sget_row_ptr(num_rows, &L_new->nnz, insertedL, L_new->drow, queue)); CHECK(magma_sget_row_ptr(num_rows, &U_new->nnz, insertedU, U_new->drow, queue)); CHECK(magma_sindexinit_gpu(num_rows, insertedL, queue)); CHECK(magma_sindexinit_gpu(num_rows, insertedU, queue)); CHECK(magma_smalloc(&L_new->dval, L_new->nnz)); CHECK(magma_index_malloc(&L_new->drowidx, L_new->nnz)); CHECK(magma_index_malloc(&L_new->dcol, L_new->nnz)); CHECK(magma_smalloc(&U_new->dval, U_new->nnz)); CHECK(magma_index_malloc(&U_new->drowidx, U_new->nnz)); CHECK(magma_index_malloc(&U_new->dcol, U_new->nnz)); CHECK(magma_svalinit_gpu(L_new->nnz, L_new->dval, queue)); CHECK(magma_svalinit_gpu(U_new->nnz, U_new->dval, queue)); //CHECK(magma_sindexinit_gpu(L_new->nnz, L_new->dcol, queue)); //CHECK(magma_sindexinit_gpu(U_new->nnz, U_new->dcol, queue)); //CHECK(magma_sindexinit_gpu(L_new->nnz, L_new->drowidx, queue)); //CHECK(magma_sindexinit_gpu(U_new->nnz, U_new->drowidx, queue)); // we don't need to init rowidx and col // the uninitilazed values will be removed anyways hipLaunchKernelGGL(( sparilut_candidates_insert_1), dim3(grid1), dim3(block1), 0, queue->cuda_stream(), L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol, L.drow, L.dcol, U.drow, U.dcol, L_new->drow, L_new->drowidx, L_new->dcol, L_new->dval, insertedL, U_new->drow, U_new->drowidx, U_new->dcol, U_new->dval, insertedU); hipLaunchKernelGGL(( sparilut_candidates_insert_2), dim3(grid1), dim3(block1), 0, queue->cuda_stream(), L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol, L.drow, L.dcol, U.drow, U.dcol, L_new->drow, L_new->drowidx, L_new->dcol, L_new->dval, insertedL, U_new->drow, U_new->drowidx, U_new->dcol, U_new->dval, insertedU); CHECK(magma_sthrsholdrm_gpu(1, 
L_new, &thrs, queue)); CHECK(magma_sthrsholdrm_gpu(1, U_new, &thrs, queue)); cleanup: magma_free(insertedL); magma_free(insertedU); return info; }
07c0fd3b4a36bc9a6a7a104e04769bb0d494abba.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @generated from sparse/blas/zparilut_candidates.cu, normal z -> s, Wed Jan 2 14:18:53 2019 */ #include "magmasparse_internal.h" #define PRECISION_s __global__ void sparilut_candidates_count_1( const magma_int_t num_rows, const magma_index_t* L0_row, const magma_index_t* L0_col, const magma_index_t* U0_row, const magma_index_t* U0_col, const magma_index_t* L_row, const magma_index_t* L_col, const magma_index_t* U_row, const magma_index_t* U_col, magma_index_t* L_new_row, magma_index_t* U_new_row) { int row = blockDim.x * blockIdx.x + threadIdx.x; //for(int row=0; row<num_rows; row++){ if (row < num_rows) { int numaddrowL = 0; int ilu0 = L0_row[row]; int ilut = L_row[row]; int endilu0 = L0_row[ row+1 ]; int endilut = L_row[ row+1 ]; int ilu0col; int ilutcol; do{ ilu0col = L0_col[ ilu0 ]; ilutcol = L_col[ ilut ]; if(ilu0col == ilutcol ){ ilu0++; ilut++; } else if(ilutcol<ilu0col ){ ilut++; } else { // this element is missing in the current approximation // mark it as candidate numaddrowL++; ilu0++; } } while (ilut < endilut && ilu0 < endilu0); // do the rest if existing if(ilu0<endilu0 ){ do{ numaddrowL++; ilu0++; }while(ilu0<endilu0 ); } L_new_row[ row ] = L_new_row[ row ]+numaddrowL; magma_int_t numaddrowU = 0; ilu0 = U0_row[row]; ilut = U_row[row]; endilu0 = U0_row[ row+1 ]; endilut = U_row[ row+1 ]; do{ ilu0col = U0_col[ ilu0 ]; ilutcol = U_col[ ilut ]; if(ilu0col == ilutcol ){ ilu0++; ilut++; } else if(ilutcol<ilu0col ){ ilut++; } else { // this element is missing in the current approximation // mark it as candidate numaddrowU++; ilu0++; } }while(ilut<endilut && ilu0<endilu0 ); if(ilu0<endilu0 ){ do{ numaddrowU++; ilu0++; }while(ilu0<endilu0 ); } U_new_row[ row ] = U_new_row[ row ]+numaddrowU; } } __global__ void sparilut_candidates_count_2( const magma_int_t num_rows, const magma_index_t* L0_row, const magma_index_t* L0_col, const magma_index_t* U0_row, const magma_index_t* U0_col, const magma_index_t* L_row, const magma_index_t* L_col, const magma_index_t* U_row, const magma_index_t* U_col, magma_index_t* L_new_row, magma_index_t* U_new_row) { int row = blockDim.x * blockIdx.x + threadIdx.x; //for(int row=0; row<num_rows; row++){ if (row < num_rows) { // how to determine candidates: // for each node i, look at any "intermediate" neighbor nodes numbered // less, and then see if this neighbor has another neighbor j numbered // more than the intermediate; if so, fill in is (i,j) if it is not // already nonzero int numaddrowL = 0, numaddrowU = 0; // loop first element over row - only for elements smaller the diagonal for(int el1=L_row[row]; el1<L_row[row+1]-1; el1++ ){ int col1 = L_col[ el1 ]; // now check the upper triangular // second loop first element over row - only for elements larger the intermediate for(int el2 = U_row[ col1 ]+1; el2 < U_row[ col1+1 ]; el2++ ){ int col2 = U_col[ el2 ]; int cand_col = col2; // check whether this element already exists // first case: part of L if(cand_col < row ){ // check whether this element already exists in L // int exist = 0; // for(int k=L_row[cand_row]; k<L_row[cand_row+1]; k++ ){ // if(L_col[ k ] == cand_col ){ // exist = 1; // //break; // } // } // if it does not exist, increase counter for this location // use the entry one further down to allow for parallel insertion // if(exist == 0 ){ numaddrowL++; // } } else { // check whether this element already exists in U // int exist = 0; // for(int 
k=U_row[cand_row]; k<U_row[cand_row+1]; k++ ){ // if(U_col[ k ] == cand_col ){ // exist = 1; // //break; // } // } // if(exist == 0 ){ //printf("checked row: %d this element does not yet exist in L: (%d,%d)\n", cand_row, cand_col); numaddrowU++; // } } } } U_new_row[ row ] = U_new_row[ row ]+numaddrowU; L_new_row[ row ] = L_new_row[ row ]+numaddrowL; } } __global__ void sparilut_candidates_insert_1( const magma_int_t num_rows, const magma_index_t* L0_row, const magma_index_t* L0_col, const magma_index_t* U0_row, const magma_index_t* U0_col, const magma_index_t* L_row, const magma_index_t* L_col, const magma_index_t* U_row, const magma_index_t* U_col, magma_index_t* L_new_row, magma_index_t* L_new_rowidx, magma_index_t* L_new_col, float* L_new_val, magma_index_t* insertedL, magma_index_t* U_new_row, magma_index_t* U_new_rowidx, magma_index_t* U_new_col, float* U_new_val, magma_index_t* insertedU) { int row = blockDim.x * blockIdx.x + threadIdx.x; //for(int row=0; row<num_rows; row++){ if (row < num_rows) { int laddL = 0; int offsetL = L_new_row[row]; int ilu0 = L0_row[row]; int ilut = L_row[row]; int endilu0 = L0_row[ row+1 ]; int endilut = L_row[ row+1 ]; int ilu0col; int ilutcol; do{ ilu0col = L0_col[ ilu0 ]; ilutcol = L_col[ ilut ]; if(ilu0col == ilutcol ){ ilu0++; ilut++; } else if(ilutcol<ilu0col ){ ilut++; } else { // this element is missing in the current approximation // mark it as candidate L_new_col[ offsetL + laddL ] = ilu0col; L_new_rowidx[ offsetL + laddL ] = row; L_new_val[ offsetL + laddL ] = MAGMA_S_ONE + MAGMA_S_ONE + MAGMA_S_ONE; laddL++; ilu0++; } } while(ilut<endilut && ilu0<endilu0 ); if (ilu0<endilu0){ do{ ilu0col = L0_col[ ilu0 ]; L_new_col[ offsetL + laddL ] = ilu0col; L_new_rowidx[ offsetL + laddL ] = row; L_new_val[ offsetL + laddL ] = MAGMA_S_ONE + MAGMA_S_ONE + MAGMA_S_ONE; laddL++; ilu0++; }while(ilu0<endilu0 ); } insertedL[row] = laddL; int laddU = 0; int offsetU = U_new_row[row]; ilu0 = U0_row[row]; ilut = U_row[row]; endilu0 = U0_row[ row+1 ]; endilut = U_row[ row+1 ]; do{ ilu0col = U0_col[ ilu0 ]; ilutcol = U_col[ ilut ]; if(ilu0col == ilutcol ){ ilu0++; ilut++; } else if(ilutcol<ilu0col ){ ilut++; } else { // this element is missing in the current approximation // mark it as candidate U_new_col[ offsetU + laddU ] = ilu0col; U_new_rowidx[ offsetU + laddU ] = row; U_new_val[ offsetU + laddU ] = MAGMA_S_ONE + MAGMA_S_ONE + MAGMA_S_ONE; laddU++; ilu0++; } }while(ilut<endilut && ilu0<endilu0 ); if(ilu0<endilu0 ){ do{ ilu0col = U0_col[ ilu0 ]; U_new_col[ offsetU + laddU ] = ilu0col; U_new_rowidx[ offsetU + laddU ] = row; U_new_val[ offsetU + laddU ] = MAGMA_S_ONE + MAGMA_S_ONE + MAGMA_S_ONE; laddU++; ilu0++; }while(ilu0<endilu0 ); } insertedU[row] = laddU; } } __global__ void sparilut_candidates_insert_2( const magma_int_t num_rows, const magma_index_t* L0_row, const magma_index_t* L0_col, const magma_index_t* U0_row, const magma_index_t* U0_col, const magma_index_t* L_row, const magma_index_t* L_col, const magma_index_t* U_row, const magma_index_t* U_col, magma_index_t* L_new_row, magma_index_t* L_new_rowidx, magma_index_t* L_new_col, float* L_new_val, magma_index_t* insertedL, magma_index_t* U_new_row, magma_index_t* U_new_rowidx, magma_index_t* U_new_col, float* U_new_val, magma_index_t* insertedU) { int row = blockDim.x * blockIdx.x + threadIdx.x; //for(int row=0; row<num_rows; row++){ if (row < num_rows) { int cand_row = row; int laddL = 0; int laddU = 0; int offsetL = L_new_row[row] + insertedL[row]; int offsetU = U_new_row[row] + insertedU[row]; // loop 
first element over row - only for elements smaller the diagonal for(int el1=L_row[row]; el1<L_row[row+1]-1; el1++ ){ int col1 = L_col[ el1 ]; // now check the upper triangular // second loop first element over row - only for elements larger the intermediate for(int el2 = U_row[ col1 ]+1; el2 < U_row[ col1+1 ]; el2++ ){ int col2 = U_col[ el2 ]; int cand_col = col2; // check whether this element already exists // first case: part of L if(cand_col < row ){ int exist = 0; for(int k=L_row[cand_row]; k<L_row[cand_row+1]; k++ ){ if(L_col[ k ] == cand_col ){ exist = -1; // printf("already exists:(%d,%d\n", row, cand_col); //break; } } for(int k=L_new_row[cand_row]; k<L_new_row[cand_row+1]; k++){ if(L_new_col[ k ] == cand_col ){ // element included in LU and nonzero // printf("already inserted:(%d,%d\n", row, cand_col); exist = -2; //break; } } L_new_rowidx[ offsetL + laddL ] = cand_row; L_new_col[ offsetL + laddL ] = (exist == 0) ? cand_col : exist; L_new_val[ offsetL + laddL ] = (exist == 0) ? MAGMA_S_ONE : MAGMA_S_ZERO; laddL++; } else { // check whether this element already exists in U int exist = 0; for(int k=U_row[cand_row]; k<U_row[cand_row+1]; k++ ){ if(U_col[ k ] == cand_col ){ // printf("already exists:(%d,%d\n", row, cand_col); exist = -1; //break; } } for(int k=U_new_row[cand_row]; k<U_new_row[cand_row+1]; k++){ if(U_new_col[ k ] == cand_col ){ // element included in LU and nonzero // printf("already inserted:(%d,%d==%d) k:%d -> %d -> %d\n", row, cand_col , U_new_col[ k ], U_new_row[cand_row], k, U_new_row[cand_row+1] ); exist = -2; //break; } } U_new_rowidx[ offsetU + laddU ] = cand_row; U_new_col[ offsetU + laddU ] = (exist == 0) ? cand_col : exist; U_new_val[ offsetU + laddU ] = (exist == 0) ? MAGMA_S_ONE : MAGMA_S_ZERO; laddU++; } } } } } /***************************************************************************//** Purpose ------- This function identifies the locations with a potential nonzero ILU residual R = A - L*U where L and U are the current incomplete factors. Nonzero ILU residuals are possible 1 where A is nonzero but L and U have no nonzero entry 2 where the product L*U has fill-in but the location is not included in L or U We assume that the incomplete factors are exact fro the elements included in the current pattern. This is the GPU implementation of the candidate search. 2 GPU kernels are used: the first is a dry run assessing the memory need, the second then computes the candidate locations, the third eliminates float entries. The fourth kernel ensures the elements in a row are sorted for increasing column index. Arguments --------- @param[in] L0 magma_s_matrix tril(ILU(0) ) pattern of original system matrix. @param[in] U0 magma_s_matrix triu(ILU(0) ) pattern of original system matrix. @param[in] L magma_s_matrix Current lower triangular factor. @param[in] U magma_s_matrix Current upper triangular factor. @param[in,out] L_new magma_s_matrix* List of candidates for L in COO format. @param[in,out] U_new magma_s_matrix* List of candidates for U in COO format. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_saux *******************************************************************************/ extern "C" magma_int_t magma_sparilut_candidates_gpu( magma_s_matrix L0, magma_s_matrix U0, magma_s_matrix L, magma_s_matrix U, magma_s_matrix *L_new, magma_s_matrix *U_new, magma_queue_t queue ) { magma_int_t info = 0; int num_rows = L.num_rows; float thrs = 1e-8; int blocksize1 = 128; int blocksize2 = 1; int dimgrid11 = magma_ceildiv(num_rows, blocksize1 ); int dimgrid12 = 1; int dimgrid13 = 1; dim3 grid1(dimgrid11, dimgrid12, dimgrid13 ); dim3 block1(blocksize1, blocksize2, 1 ); magmaIndex_ptr insertedL = NULL; magmaIndex_ptr insertedU = NULL; magma_smfree(L_new, queue); magma_smfree(U_new, queue); CHECK(magma_index_malloc(&insertedL, num_rows)); CHECK(magma_index_malloc(&insertedU, num_rows)); CHECK(magma_index_malloc(&L_new->drow, num_rows+1)); CHECK(magma_index_malloc(&U_new->drow, num_rows+1)); CHECK(magma_sindexinit_gpu(num_rows+1, L_new->drow, queue)); CHECK(magma_sindexinit_gpu(num_rows+1, U_new->drow, queue)); CHECK(magma_sindexinit_gpu(num_rows, insertedL, queue)); CHECK(magma_sindexinit_gpu(num_rows, insertedU, queue)); L_new->num_rows = L.num_rows; L_new->num_cols = L.num_cols; L_new->storage_type = Magma_CSR; L_new->memory_location = Magma_DEV; U_new->num_rows = L.num_rows; U_new->num_cols = L.num_cols; U_new->storage_type = Magma_CSR; U_new->memory_location = Magma_DEV; sparilut_candidates_count_1<<<grid1, block1, 0, queue->cuda_stream()>>>( L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol, L.drow, L.dcol, U.drow, U.dcol, insertedL, insertedU); sparilut_candidates_count_2<<<grid1, block1, 0, queue->cuda_stream()>>>( L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol, L.drow, L.dcol, U.drow, U.dcol, insertedL, insertedU); CHECK(magma_sget_row_ptr(num_rows, &L_new->nnz, insertedL, L_new->drow, queue)); CHECK(magma_sget_row_ptr(num_rows, &U_new->nnz, insertedU, U_new->drow, queue)); CHECK(magma_sindexinit_gpu(num_rows, insertedL, queue)); CHECK(magma_sindexinit_gpu(num_rows, insertedU, queue)); CHECK(magma_smalloc(&L_new->dval, L_new->nnz)); CHECK(magma_index_malloc(&L_new->drowidx, L_new->nnz)); CHECK(magma_index_malloc(&L_new->dcol, L_new->nnz)); CHECK(magma_smalloc(&U_new->dval, U_new->nnz)); CHECK(magma_index_malloc(&U_new->drowidx, U_new->nnz)); CHECK(magma_index_malloc(&U_new->dcol, U_new->nnz)); CHECK(magma_svalinit_gpu(L_new->nnz, L_new->dval, queue)); CHECK(magma_svalinit_gpu(U_new->nnz, U_new->dval, queue)); //CHECK(magma_sindexinit_gpu(L_new->nnz, L_new->dcol, queue)); //CHECK(magma_sindexinit_gpu(U_new->nnz, U_new->dcol, queue)); //CHECK(magma_sindexinit_gpu(L_new->nnz, L_new->drowidx, queue)); //CHECK(magma_sindexinit_gpu(U_new->nnz, U_new->drowidx, queue)); // we don't need to init rowidx and col // the uninitilazed values will be removed anyways sparilut_candidates_insert_1<<<grid1, block1, 0, queue->cuda_stream()>>>( L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol, L.drow, L.dcol, U.drow, U.dcol, L_new->drow, L_new->drowidx, L_new->dcol, L_new->dval, insertedL, U_new->drow, U_new->drowidx, U_new->dcol, U_new->dval, insertedU); sparilut_candidates_insert_2<<<grid1, block1, 0, queue->cuda_stream()>>>( L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol, L.drow, L.dcol, U.drow, U.dcol, L_new->drow, L_new->drowidx, L_new->dcol, L_new->dval, insertedL, U_new->drow, U_new->drowidx, U_new->dcol, U_new->dval, insertedU); CHECK(magma_sthrsholdrm_gpu(1, L_new, &thrs, queue)); CHECK(magma_sthrsholdrm_gpu(1, U_new, &thrs, queue)); cleanup: magma_free(insertedL); 
magma_free(insertedU); return info; }
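The candidate search above follows a two-pass "dry run then insert" pattern: a count kernel records per-row output sizes, magma_sget_row_ptr turns those counts into CSR row pointers plus a total nnz, and the insert kernels then write rowidx/col/val at the precomputed offsets. The toy below reproduces only that skeleton on a dense threshold filter, using thrust::exclusive_scan in place of magma_sget_row_ptr; all names and the filtering criterion are hypothetical.

#include <cstdio>
#include <cuda_runtime.h>
#include <thrust/scan.h>
#include <thrust/execution_policy.h>

// Pass 1: one thread per row counts how many entries will be emitted.
__global__ void countPerRow(const float *A, int rows, int cols, float thr, int *rowCount) {
    int r = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= rows) return;
    int c = 0;
    for (int j = 0; j < cols; ++j)
        if (A[r * cols + j] > thr) ++c;
    rowCount[r] = c;
}

// Pass 2: with the row offsets known, the same traversal writes column indices and values.
__global__ void fillPerRow(const float *A, int rows, int cols, float thr,
                           const int *rowPtr, int *colIdx, float *val) {
    int r = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= rows) return;
    int out = rowPtr[r];
    for (int j = 0; j < cols; ++j)
        if (A[r * cols + j] > thr) { colIdx[out] = j; val[out] = A[r * cols + j]; ++out; }
}

int main() {
    const int rows = 4, cols = 5;
    float hA[rows * cols];
    for (int i = 0; i < rows * cols; ++i) hA[i] = (i % 3 == 0) ? 1.0f + i : 0.0f;

    float *dA, *dVal;  int *dCount, *dRowPtr, *dCol;
    cudaMalloc(&dA, sizeof(hA));
    cudaMalloc(&dCount, rows * sizeof(int));
    cudaMalloc(&dRowPtr, (rows + 1) * sizeof(int));
    cudaMemcpy(dA, hA, sizeof(hA), cudaMemcpyHostToDevice);

    countPerRow<<<1, 64>>>(dA, rows, cols, 0.5f, dCount);
    // Exclusive scan of the counts gives the CSR row pointer (the role of magma_sget_row_ptr).
    thrust::exclusive_scan(thrust::device, dCount, dCount + rows, dRowPtr);

    int lastOff, lastCnt;
    cudaMemcpy(&lastOff, dRowPtr + rows - 1, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(&lastCnt, dCount + rows - 1, sizeof(int), cudaMemcpyDeviceToHost);
    int nnz = lastOff + lastCnt;
    cudaMemcpy(dRowPtr + rows, &nnz, sizeof(int), cudaMemcpyHostToDevice);

    cudaMalloc(&dCol, nnz * sizeof(int));
    cudaMalloc(&dVal, nnz * sizeof(float));
    fillPerRow<<<1, 64>>>(dA, rows, cols, 0.5f, dRowPtr, dCol, dVal);
    printf("nnz = %d\n", nnz);

    cudaFree(dA); cudaFree(dCount); cudaFree(dRowPtr); cudaFree(dCol); cudaFree(dVal);
    return 0;
}

The point of the dry run is that every row can write its entries independently in the second pass without atomics, because its output offset is already fixed.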
be53652377792e51d2a428a75ef9692a651f285f.hip
// !!! This is a file automatically generated by hipify!!! #include <bits/stdc++.h> using namespace std; const int MAXX = 1e8; #define CSC(call) \ do { \ hipError_t res = call; \ if (res != hipSuccess) { \ fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \ __FILE__, __LINE__, hipGetErrorString(res)); \ exit(0); \ } \ } while(0) struct pnt { int x, y; }; int main() { int w, h; char inputFile[256], outputFile[256]; cin >> inputFile >> outputFile; FILE *fp = fopen(inputFile, "rb"); fread(&w, sizeof(int), 1, fp); fread(&h, sizeof(int), 1, fp); uchar4 *data = (uchar4 *) malloc(sizeof(uchar4) * w * h); fread(data, sizeof(uchar4), w * h, fp); fclose(fp); int nc, np; cin >> nc; vector<vector<pnt>> classes(nc); int4 avg[32]; double cov[32][3][3]; double cov_inv[32][3][3]; double dets[32]; for (int i = 0; i < nc; ++i) { cin >> np; classes[i].resize(np); // input + counting averages for (int j = 0; j < np; ++j) { cin >> classes[i][j].x >> classes[i][j].y; uchar4 ps = data[classes[i][j].y * w + classes[i][j].x]; avg[i].x += ps.x; avg[i].y += ps.y; avg[i].z += ps.z; } avg[i].x /= np; avg[i].y /= np; avg[i].z /= np; // counting cov for (int j = 0; j < np; ++j) { uchar4 ps = data[classes[i][j].y * w + classes[i][j].x]; int diff[3]; diff[0] = ps.x - avg[i].x; diff[1] = ps.y - avg[i].y; diff[2] = ps.z - avg[i].z; for (int k = 0; k < 3; ++k) { for (int m = 0; m < 3; ++m) { cov[i][k][m] += diff[k] * diff[m]; } } } for (int k = 0; k < 3; ++k) { for (int m = 0; m < 3; ++m) { cov[i][k][m] /= (np - 1); } } // counting cov_inverse + determinants double det = cov[i][0][0] * (cov[i][1][1] * cov[i][2][2] - cov[i][2][1] * cov[i][1][2]) - cov[i][0][1] * (cov[i][1][0] * cov[i][2][2] - cov[i][2][0] * cov[i][1][2]) + cov[i][0][2] * (cov[i][1][0] * cov[i][2][1] - cov[i][2][0] * cov[i][1][1]); cov_inv[i][0][0] = (cov[i][1][1] * cov[i][2][2] - cov[i][2][1] * cov[i][1][2]) / det; cov_inv[i][1][0] = -(cov[i][1][0] * cov[i][2][2] - cov[i][2][0] * cov[i][1][2]) / det; cov_inv[i][2][0] = (cov[i][1][0] * cov[i][2][1] - cov[i][2][0] * cov[i][1][1]) / det; cov_inv[i][0][1] = -(cov[i][0][1] * cov[i][2][2] - cov[i][2][1] * cov[i][0][2]) / det; cov_inv[i][1][1] = (cov[i][0][0] * cov[i][2][2] - cov[i][2][0] * cov[i][0][2]) / det; cov_inv[i][2][1] = -(cov[i][0][0] * cov[i][2][1] - cov[i][2][0] * cov[i][0][1]) / det; cov_inv[i][0][2] = (cov[i][0][1] * cov[i][1][2] - cov[i][1][1] * cov[i][0][2]) / det; cov_inv[i][1][2] = -(cov[i][0][0] * cov[i][1][2] - cov[i][1][0] * cov[i][0][2]) / det; cov_inv[i][2][2] = (cov[i][0][0] * cov[i][1][1] - cov[i][1][0] * cov[i][0][1]) / det; dets[i] = det; } hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); for (int y = 0; y < h; ++y){ for (int x = 0; x < w; ++x){ uchar4 ps = data[y * w + x]; double mx = -MAXX; int idx = -1; for (int i = 0; i < nc; ++i){ int diff[3]; diff[0] = ps.x - avg[i].x; diff[1] = ps.y - avg[i].y; diff[2] = ps.z - avg[i].z; double tmp[3]; for(int j = 0; j < 3; ++j){ tmp[j] = 0; for(int k = 0; k < 3; ++k){ tmp[j] += (diff[k] * cov_inv[i][k][j]); } } double ans = 0; for(int j = 0; j < 3; ++j){ ans += (tmp[j] * diff[j]); } ans = -ans - log(abs(dets[i])); if(ans > mx){ mx = ans; idx = i; } } data[y * w + x].w = idx; } } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); fprintf(stderr, "%.2f\n", time); hipEventDestroy(stop); hipEventDestroy(start); fp = fopen(outputFile, "wb"); fwrite(&w, sizeof(int), 1, fp); fwrite(&h, sizeof(int), 1, fp); fwrite(data, sizeof(uchar4), w * h, fp); 
fclose(fp); free(data); return 0; }
be53652377792e51d2a428a75ef9692a651f285f.cu
#include <bits/stdc++.h> using namespace std; const int MAXX = 1e8; #define CSC(call) \ do { \ cudaError_t res = call; \ if (res != cudaSuccess) { \ fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \ __FILE__, __LINE__, cudaGetErrorString(res)); \ exit(0); \ } \ } while(0) struct pnt { int x, y; }; int main() { int w, h; char inputFile[256], outputFile[256]; cin >> inputFile >> outputFile; FILE *fp = fopen(inputFile, "rb"); fread(&w, sizeof(int), 1, fp); fread(&h, sizeof(int), 1, fp); uchar4 *data = (uchar4 *) malloc(sizeof(uchar4) * w * h); fread(data, sizeof(uchar4), w * h, fp); fclose(fp); int nc, np; cin >> nc; vector<vector<pnt>> classes(nc); int4 avg[32]; double cov[32][3][3]; double cov_inv[32][3][3]; double dets[32]; for (int i = 0; i < nc; ++i) { cin >> np; classes[i].resize(np); // input + counting averages for (int j = 0; j < np; ++j) { cin >> classes[i][j].x >> classes[i][j].y; uchar4 ps = data[classes[i][j].y * w + classes[i][j].x]; avg[i].x += ps.x; avg[i].y += ps.y; avg[i].z += ps.z; } avg[i].x /= np; avg[i].y /= np; avg[i].z /= np; // counting cov for (int j = 0; j < np; ++j) { uchar4 ps = data[classes[i][j].y * w + classes[i][j].x]; int diff[3]; diff[0] = ps.x - avg[i].x; diff[1] = ps.y - avg[i].y; diff[2] = ps.z - avg[i].z; for (int k = 0; k < 3; ++k) { for (int m = 0; m < 3; ++m) { cov[i][k][m] += diff[k] * diff[m]; } } } for (int k = 0; k < 3; ++k) { for (int m = 0; m < 3; ++m) { cov[i][k][m] /= (np - 1); } } // counting cov_inverse + determinants double det = cov[i][0][0] * (cov[i][1][1] * cov[i][2][2] - cov[i][2][1] * cov[i][1][2]) - cov[i][0][1] * (cov[i][1][0] * cov[i][2][2] - cov[i][2][0] * cov[i][1][2]) + cov[i][0][2] * (cov[i][1][0] * cov[i][2][1] - cov[i][2][0] * cov[i][1][1]); cov_inv[i][0][0] = (cov[i][1][1] * cov[i][2][2] - cov[i][2][1] * cov[i][1][2]) / det; cov_inv[i][1][0] = -(cov[i][1][0] * cov[i][2][2] - cov[i][2][0] * cov[i][1][2]) / det; cov_inv[i][2][0] = (cov[i][1][0] * cov[i][2][1] - cov[i][2][0] * cov[i][1][1]) / det; cov_inv[i][0][1] = -(cov[i][0][1] * cov[i][2][2] - cov[i][2][1] * cov[i][0][2]) / det; cov_inv[i][1][1] = (cov[i][0][0] * cov[i][2][2] - cov[i][2][0] * cov[i][0][2]) / det; cov_inv[i][2][1] = -(cov[i][0][0] * cov[i][2][1] - cov[i][2][0] * cov[i][0][1]) / det; cov_inv[i][0][2] = (cov[i][0][1] * cov[i][1][2] - cov[i][1][1] * cov[i][0][2]) / det; cov_inv[i][1][2] = -(cov[i][0][0] * cov[i][1][2] - cov[i][1][0] * cov[i][0][2]) / det; cov_inv[i][2][2] = (cov[i][0][0] * cov[i][1][1] - cov[i][1][0] * cov[i][0][1]) / det; dets[i] = det; } cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); for (int y = 0; y < h; ++y){ for (int x = 0; x < w; ++x){ uchar4 ps = data[y * w + x]; double mx = -MAXX; int idx = -1; for (int i = 0; i < nc; ++i){ int diff[3]; diff[0] = ps.x - avg[i].x; diff[1] = ps.y - avg[i].y; diff[2] = ps.z - avg[i].z; double tmp[3]; for(int j = 0; j < 3; ++j){ tmp[j] = 0; for(int k = 0; k < 3; ++k){ tmp[j] += (diff[k] * cov_inv[i][k][j]); } } double ans = 0; for(int j = 0; j < 3; ++j){ ans += (tmp[j] * diff[j]); } ans = -ans - log(abs(dets[i])); if(ans > mx){ mx = ans; idx = i; } } data[y * w + x].w = idx; } } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); fprintf(stderr, "%.2f\n", time); cudaEventDestroy(stop); cudaEventDestroy(start); fp = fopen(outputFile, "wb"); fwrite(&w, sizeof(int), 1, fp); fwrite(&h, sizeof(int), 1, fp); fwrite(data, sizeof(uchar4), w * h, fp); fclose(fp); free(data); return 0; }
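As recorded, both halves of this pair time a host-side nested loop between the cudaEventRecord calls; the per-pixel work itself, an argmax over classes of -(d^T * covInv * d) - log|det|, never reaches the GPU. The sketch below shows one way that loop could be moved into a kernel, keeping the per-class statistics in __constant__ memory. The statistics in main are dummy identity-covariance values, and every name here is hypothetical rather than taken from the original program.

#include <cstdio>
#include <cuda_runtime.h>

#define MAX_CLASSES 32

// Per-class statistics, mirroring avg[], cov_inv[] and dets[] computed on the host above.
__constant__ double c_avg[MAX_CLASSES][3];
__constant__ double c_covInv[MAX_CLASSES][3][3];
__constant__ double c_logDet[MAX_CLASSES];
__constant__ int    c_nc;

__global__ void classify(uchar4 *data, int n) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    uchar4 p = data[idx];
    double px[3] = {(double)p.x, (double)p.y, (double)p.z};
    double best = -1e30;
    int bestClass = -1;
    for (int c = 0; c < c_nc; ++c) {
        double d[3] = {px[0] - c_avg[c][0], px[1] - c_avg[c][1], px[2] - c_avg[c][2]};
        double q = 0.0;                       // d^T * covInv * d
        for (int j = 0; j < 3; ++j) {
            double t = 0.0;
            for (int k = 0; k < 3; ++k) t += d[k] * c_covInv[c][k][j];
            q += t * d[j];
        }
        double score = -q - c_logDet[c];      // same discriminant as the host loop
        if (score > best) { best = score; bestClass = c; }
    }
    data[idx].w = (unsigned char)bestClass;   // class id stored in the alpha channel
}

int main() {
    // Tiny dummy image and two dummy classes, just to show the launch; the real program
    // would instead upload the statistics estimated from the sample pixels.
    const int n = 8;
    uchar4 hData[n];
    for (int i = 0; i < n; ++i) hData[i] = make_uchar4(10 * i, 10 * i, 10 * i, 0);

    double avg[MAX_CLASSES][3] = {{10, 10, 10}, {60, 60, 60}};
    double covInv[MAX_CLASSES][3][3] = {};
    double logDet[MAX_CLASSES] = {0.0, 0.0};
    int nc = 2;
    for (int c = 0; c < nc; ++c)
        for (int j = 0; j < 3; ++j) covInv[c][j][j] = 1.0;   // identity covariance

    cudaMemcpyToSymbol(c_avg, avg, sizeof(avg));
    cudaMemcpyToSymbol(c_covInv, covInv, sizeof(covInv));
    cudaMemcpyToSymbol(c_logDet, logDet, sizeof(logDet));
    cudaMemcpyToSymbol(c_nc, &nc, sizeof(int));

    uchar4 *dData;
    cudaMalloc(&dData, n * sizeof(uchar4));
    cudaMemcpy(dData, hData, n * sizeof(uchar4), cudaMemcpyHostToDevice);
    classify<<<1, 64>>>(dData, n);
    cudaMemcpy(hData, dData, n * sizeof(uchar4), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i) printf("pixel %d -> class %d\n", i, hData[i].w);
    cudaFree(dData);
    return 0;
}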
94bd75542a80fe90aa204ddd521f20fcae5c6dbb.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "projektcuda.h"
#include "project_comm.h"

/* Kernel computing the matrix-vector product C = A * B on the GPU */
/*
   Matrix A is mA x nB, vector B has length nB,
   vector C is the output vector of length mA.

   description: each row of A occupies one block. If gridDim is smaller than
   the number of rows of A, each block loops over several rows (gridIndex).
*/
__global__ void matrixMul( t_ve* C, t_ve* A, t_ve* B, int mA, int nB)
{
    // define a result vector for each block
    __shared__ float Cs[VECTOR_BLOCK_SIZE]; // VECTOR_BLOCK_SIZE should equal blockDim (512)

    // define gridIndex; if gridDim < mA, gridIndex > 0
    int gridIndex = 0;

    // get a thread identifier
    //int idx = gridIndex*gridDim.x + blockIdx.x*blockDim.x+threadIdx.x;

    int aBegin = 0;
    int bBegin = 0;

    int aStep = gridDim.x;
    int bStep = VECTOR_BLOCK_SIZE; // blockDim.x

    int aEnd = mA;
    int bEnd = nB;

    int tx;
    tx = threadIdx.x;

    // initialise Cs
    Cs[tx] = 0;
    __syncthreads();

    // initialize the output vector for each block
    if (tx == 0) {
        C[gridIndex*gridDim.x + blockIdx.x] = 0;
    }
    __syncthreads();

    // if nB > gridDim???????
    //idx < (gridIndex*gridDim.x+mA%VECTOR_BLOCK_SIZE)*()
    for (int a = aBegin; (a < aEnd) && ((gridIndex*gridDim.x + blockIdx.x) < aEnd); a += aStep, gridIndex++) {

        // initialize the output vector for each block
        if (threadIdx.x == 0) {
            C[gridIndex*gridDim.x + blockIdx.x] = 0;
        }
        __syncthreads();

        // the following are operations within one block:
        // initialize the dot product of one row of A with vector B
        t_ve blocksum = 0;

        // if nB > blockDim, split the row and repeat
        //for(int b = bBegin; (b < bEnd)&&((threadIdx.x+b) < bEnd); b += bStep ) {
        for (int b = bBegin; b < bEnd; b += bStep) {

            // initialise Cs
            Cs[tx] = 0;
            __syncthreads();

            // compute the scalar product
            if (((gridIndex*gridDim.x + blockIdx.x) < aEnd) && ((b + tx) < bEnd)) {
                //Cs[threadIdx.x] = A[a + blockIdx.x ][b + threadIdx.x] * B[b + threadIdx.x ];
                Cs[threadIdx.x] = A[(a + blockIdx.x)*nB + b + tx] * B[b + tx];
            }
            __syncthreads();

            if (tx == 0) {
                // 30.Nov.2009: fixed the Cs sum. Because Cs[0..k] is accumulated here,
                // parts of Cs[k] would be uninitialized when the block size and the
                // matrix size do not fit, so the upper bound is clamped.
                int kEnd = bEnd - b;
                if (kEnd > VECTOR_BLOCK_SIZE) kEnd = VECTOR_BLOCK_SIZE;
                for (int k = 1; k < kEnd; k++)
                    Cs[0] += Cs[k];
                blocksum += Cs[0];
            }
            __syncthreads();

            /* alternative tree reduction, currently disabled:
            int offset = VECTOR_BLOCK_SIZE/2;
            while (offset > 0) {
                if (tx < offset) {
                    Cs[tx] += Cs[tx + offset];
                }
                offset >>= 1;
                __syncthreads();
            }
            __syncthreads();
            if (threadIdx.x == 0)
                blocksum += Cs[0]; //??? blocksum = Cs[0];
            */
        } //for b
        __syncthreads();

        if (threadIdx.x == 0)
            C[gridIndex*gridDim.x + blockIdx.x] = blocksum;
        __syncthreads();

        // sums all blocks; needs a test for mA bigger than one grid
        //idx = gridIndex*gridDim.x + blockIdx.x*blockDim.x+threadIdx.x;
    }
}
94bd75542a80fe90aa204ddd521f20fcae5c6dbb.cu
#include "cuda.h" #include <stdio.h> #include "projektcuda.h" #include "project_comm.h" /* Kernel to square elements of the array on the GPU */ /* Matrix A is mA x nB , Vector B is nB Vector C output vector in size of mA C=A*B description: each row of A occuppy one block. if gridDim is smaller than the row number of A */ __global__ void matrixMul( t_ve* C, t_ve* A, t_ve* B, int mA, int nB) { //define a Result Vector for each block __shared__ float Cs[VECTOR_BLOCK_SIZE];//VECTOR_BLOCK_SIZE shuld equal blockDim 512 //define gridIndex, if gridDim < mA, gridIndex > 0; int gridIndex = 0; // get a thread indentifier //int idx = gridIndex*gridDim.x + blockIdx.x*blockDim.x+threadIdx.x; int aBegin = 0; int bBegin = 0; int aStep = gridDim.x; int bStep = VECTOR_BLOCK_SIZE; // blockDim.x int aEnd = mA; int bEnd = nB; int tx; tx = threadIdx.x; //initialise Cs Cs[tx] = 0; __syncthreads(); //initialize output vector for each block if(tx==0){ C[gridIndex*gridDim.x+blockIdx.x]=0; } __syncthreads(); // if nB > gridDim??????? //idx < (gridIndex*gridDim.x+mA%VECTOR_BLOCK_SIZE)*() for(int a = aBegin; (a < aEnd)&&((gridIndex*gridDim.x+blockIdx.x)<aEnd); a += aStep, gridIndex++){ //initialize output vector for each block if(threadIdx.x==0){ C[gridIndex*gridDim.x+blockIdx.x]=0; } __syncthreads(); //following is operations within one block // initialize the dot product for each row in A and vector B t_ve blocksum = 0; //if nB> blockDim, split repeat the //for(int b = bBegin; (b < bEnd)&&((threadIdx.x+b) < bEnd); b += bStep ) { for(int b = bBegin; b < bEnd; b += bStep ) { //initialise Cs Cs[tx] = 0; __syncthreads(); // compute scalar product if (( (gridIndex*gridDim.x+blockIdx.x)<aEnd)&&((b+tx) < bEnd)) { //Cs[threadIdx.x] = A[a + blockIdx.x ][b + threadIdx.x] * B[b + threadIdx.x ]; Cs[threadIdx.x] = A[(a + blockIdx.x)* nB+b + tx] * B[b + tx ]; } __syncthreads(); if(tx == 0){ //30.Nov.2009 fixeded for Cs summe int kEnd = bEnd-b; if(kEnd > VECTOR_BLOCK_SIZE)kEnd = VECTOR_BLOCK_SIZE; //Because I add Cs[0...k], if blockSize and Matrix does not fit, Parts of Cs[k] are not initialized as 0. for (int k = 1; k < kEnd; k++) Cs[0] += Cs[k]; blocksum += Cs[0]; } __syncthreads(); /* int offset; offset = VECTOR_BLOCK_SIZE/2; while (offset > 0) { if(tx < offset) { Cs[tx] += Cs[tx + offset]; } offset >>= 1; __syncthreads(); } __syncthreads(); if(threadIdx.x == 0) blocksum += Cs[0]; //??? blocksum = Cs[0]; }//for b __syncthreads(); */ if(threadIdx.x == 0) C[gridIndex*gridDim.x+blockIdx.x] = blocksum; __syncthreads(); // summe all block, need test for mA bigger than one Grid //idx = gridIndex*gridDim.x + blockIdx.x*blockDim.x+threadIdx.x; } }
002d34d18bf8be9a1cbb371819506e00980943d5.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>

#include <framework/gpu_volume.h>
#include <framework/helper_cuda.h>
#include <framework/profiler.h>
#include <framework/volume.h>

texture<float, 3, hipReadModeElementType> volume_in;
surface<void, 3> volume_out;

__global__ void copy_kernel(dim3 volume_size)
{
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int y = blockIdx.y*blockDim.y + threadIdx.y;
    int z = blockIdx.z*blockDim.z + threadIdx.z;

    if (x >= volume_size.x ||
        y >= volume_size.y ||
        z >= volume_size.z)
    {
        return;
    }

    float v = tex3D(volume_in, x, y, z);
    surf3Dwrite(v, volume_out, x*sizeof(float), y, z);
}

namespace sample
{
    void copy_volume(const Volume& in, Volume& out)
    {
        PROFILE_SCOPE("copy_volume");

        GpuVolume gpu_in, gpu_out;
        {
            PROFILE_SCOPE("alloc_and_upload");

            // Allocate memory and upload to GPU
            gpu_in = in.upload();
            gpu_out = gpu::allocate_volume(in.voxel_type(), in.size(), gpu::Flag_BindAsSurface);
        }

        // Bind arrays as textures and surfaces
        checkCudaErrors(hipBindTextureToArray(volume_in, gpu_in.ptr, gpu_in.format_desc));
        checkCudaErrors(hipBindSurfaceToArray(volume_out, gpu_out.ptr));

        // Parameters
        dim3 block_size(8, 8, 1);
        dim3 grid_size(in.size().width / 8, in.size().height / 8, in.size().depth);

        // Launch kernel
        dim3 volume_size = { uint32_t(in.size().width), uint32_t(in.size().height), uint32_t(in.size().depth) };
        hipLaunchKernelGGL(( copy_kernel), dim3(grid_size), dim3(block_size), 0, 0, volume_size);
        getLastCudaError("");
        checkCudaErrors(hipDeviceSynchronize());

        // Download results
        out.download(gpu_out);

        // Cleanup
        gpu::release_volume(gpu_in);
        gpu::release_volume(gpu_out);
    }
}
002d34d18bf8be9a1cbb371819506e00980943d5.cu
#include <cuda_runtime.h>

#include <framework/gpu_volume.h>
#include <framework/helper_cuda.h>
#include <framework/profiler.h>
#include <framework/volume.h>

texture<float, 3, cudaReadModeElementType> volume_in;
surface<void, 3> volume_out;

__global__ void copy_kernel(dim3 volume_size)
{
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int y = blockIdx.y*blockDim.y + threadIdx.y;
    int z = blockIdx.z*blockDim.z + threadIdx.z;

    if (x >= volume_size.x ||
        y >= volume_size.y ||
        z >= volume_size.z)
    {
        return;
    }

    float v = tex3D(volume_in, x, y, z);
    surf3Dwrite(v, volume_out, x*sizeof(float), y, z);
}

namespace sample
{
    void copy_volume(const Volume& in, Volume& out)
    {
        PROFILE_SCOPE("copy_volume");

        GpuVolume gpu_in, gpu_out;
        {
            PROFILE_SCOPE("alloc_and_upload");

            // Allocate memory and upload to GPU
            gpu_in = in.upload();
            gpu_out = gpu::allocate_volume(in.voxel_type(), in.size(), gpu::Flag_BindAsSurface);
        }

        // Bind arrays as textures and surfaces
        checkCudaErrors(cudaBindTextureToArray(volume_in, gpu_in.ptr, gpu_in.format_desc));
        checkCudaErrors(cudaBindSurfaceToArray(volume_out, gpu_out.ptr));

        // Parameters
        dim3 block_size(8, 8, 1);
        dim3 grid_size(in.size().width / 8, in.size().height / 8, in.size().depth);

        // Launch kernel
        dim3 volume_size = { uint32_t(in.size().width), uint32_t(in.size().height), uint32_t(in.size().depth) };
        copy_kernel<<<grid_size, block_size>>>(volume_size);
        getLastCudaError("");
        checkCudaErrors(cudaDeviceSynchronize());

        // Download results
        out.download(gpu_out);

        // Cleanup
        gpu::release_volume(gpu_in);
        gpu::release_volume(gpu_out);
    }
}
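One thing worth noting about the launch configuration above: in.size().width / 8 truncates, so a volume whose width or height is not a multiple of 8 leaves the last partial tile uncovered even though the kernel already bounds-checks. A common remedy is to round the grid up; the fragment below is a sketch of that change, not part of the original code.

// Hypothetical replacement for the grid computation in copy_volume:
// round up so partial 8x8 tiles still get a block; the kernel's early
// return handles the out-of-range threads.
dim3 block_size(8, 8, 1);
dim3 grid_size((in.size().width  + block_size.x - 1) / block_size.x,
               (in.size().height + block_size.y - 1) / block_size.y,
               in.size().depth);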
0098b1abe78abf6626c793b24a33f5ed27fdba10.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "feintrack_d.h" //////////////////////////////////////////////////////////////////////////////// const int MAX_THREADS_PER_BLOCK = 512; const int BACK_BLOCK_SIZE = 16; //////////////////////////////////////////////////////////////////////////////// template<class T> __device__ T sqr_(T val) { return val * val; } //////////////////////////////////////////////////////////////////////// #define CALC_I() gridDim.x * blockIdx.y * blockDim.x * blockDim.y + blockIdx.x * blockDim.x + threadIdx.y * gridDim.x * blockDim.x + threadIdx.x //////////////////////////////////////////////////////////////////////// #define get_b(v) (unsigned char)(0x000000ff & (v)); #define get_g(v) (unsigned char)(0x000000ff & ((v) >> 8)); #define get_r(v) (unsigned char)(0x000000ff & ((v) >> 16)); //////////////////////////////////////////////////////////////////////// __global__ void back_substraction_(int32_t *bgr32, float *params_b_mu, float *params_b_sigma, float *params_g_mu, float *params_g_sigma, float *params_r_mu, float *params_r_sigma, mask_type *mask, float eps_b, float eps_g, float eps_r) { int i = CALC_I(); int32_t p = bgr32[i]; float b = (float)get_b(p); float g = (float)get_g(p); float r = (float)get_r(p); mask_type res = 1; if (eps_b * params_b_sigma[i] > fabsf(params_b_mu[i] - b) && eps_g * params_g_sigma[i] > fabsf(params_g_mu[i] - g) && eps_r * params_r_sigma[i] > fabsf(params_r_mu[i] - r)) { res = 0; } mask[i] = res; } //////////////////////////////////////////////////////////////////////////////// void back_substraction(int32_t *bgr32, float *params_b_mu, float *params_b_sigma, float *params_g_mu, float *params_g_sigma, float *params_r_mu, float *params_r_sigma, mask_type *mask, int frame_width, int frame_height, float eps_b, float eps_g, float eps_r) { dim3 dim_block = dim3(BACK_BLOCK_SIZE, BACK_BLOCK_SIZE); dim3 dim_grid = dim3(frame_width / dim_block.x, frame_height / dim_block.y); hipLaunchKernelGGL(( back_substraction_), dim3(dim_grid), dim3(dim_block), 0, 0, bgr32, params_b_mu, params_b_sigma, params_g_mu, params_g_sigma, params_r_mu, params_r_sigma, mask, eps_b, eps_g, eps_r); } //////////////////////////////////////////////////////////////////////////////// __global__ void back_substraction_gray_(float *p_val, float *params_mu, float *params_sigma, mask_type *mask, float eps) { int i = CALC_I(); mask_type res = 1; if (eps * params_sigma[i] > fabsf(params_mu[i] - p_val[i])) res = 0; mask[i] = res; } //////////////////////////////////////////////////////////////////////////////// void back_substraction_gray(float *p_val, float *params_mu, float *params_sigma, mask_type *mask, int frame_width, int frame_height, float eps) { dim3 dim_block = dim3(BACK_BLOCK_SIZE, BACK_BLOCK_SIZE); dim3 dim_grid = dim3(frame_width / dim_block.x, frame_height / dim_block.y); hipLaunchKernelGGL(( back_substraction_gray_), dim3(dim_grid), dim3(dim_block), 0, 0, p_val, params_mu, params_sigma, mask, eps); } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// __global__ void back_substraction_upd_(int32_t *bgr32, float *params_b_mu, float *params_b_sigma, float *params_g_mu, 
float *params_g_sigma, float *params_r_mu, float *params_r_sigma, mask_type *mask, float eps_b, float eps_g, float eps_r, PixelUpdateParams pup) { int i = CALC_I(); int32_t p = bgr32[i]; float b = (float)get_b(p); float g = (float)get_g(p); float r = (float)get_r(p); mask_type res = 1; float b_mu = params_b_mu[i]; float b_sigma = params_b_sigma[i]; float g_mu = params_g_mu[i]; float g_sigma = params_g_sigma[i]; float r_mu = params_r_mu[i]; float r_sigma = params_r_sigma[i]; if (eps_b * b_sigma > fabsf(b_mu - b) && eps_g * g_sigma > fabsf(g_mu - g) && eps_r * r_sigma > fabsf(r_mu - r)) { res = 0; b_mu = (1.f - pup.alpha1) * b_mu + pup.alpha1 * b; g_mu = (1.f - pup.alpha1) * g_mu + pup.alpha1 * g; r_mu = (1.f - pup.alpha1) * r_mu + pup.alpha1 * r; b_sigma = sqrtf((1.f - pup.alpha2) * sqr_(b_sigma) + pup.alpha2 * sqr_(b - b_mu)); g_sigma = sqrtf((1.f - pup.alpha2) * sqr_(g_sigma) + pup.alpha2 * sqr_(g - g_mu)); r_sigma = sqrtf((1.f - pup.alpha2) * sqr_(r_sigma) + pup.alpha2 * sqr_(r - r_mu)); b_sigma = fmaxf(fminf(pup.min_sigma_val, b_sigma), pup.max_sigma_val); g_sigma = fmaxf(fminf(pup.min_sigma_val, g_sigma), pup.max_sigma_val); r_sigma = fmaxf(fminf(pup.min_sigma_val, r_sigma), pup.max_sigma_val); params_b_mu[i] = b_mu; params_b_sigma[i] = b_sigma; params_g_mu[i] = g_mu; params_g_sigma[i] = g_sigma; params_r_mu[i] = r_mu; params_r_sigma[i] = r_sigma; } mask[i] = res; } //////////////////////////////////////////////////////////////////////////////// void back_substraction_upd(int32_t *bgr32, float *params_b_mu, float *params_b_sigma, float *params_g_mu, float *params_g_sigma, float *params_r_mu, float *params_r_sigma, mask_type *mask, int frame_width, int frame_height, float eps_b, float eps_g, float eps_r, PixelUpdateParams pup) { dim3 dim_block = dim3(BACK_BLOCK_SIZE, BACK_BLOCK_SIZE); dim3 dim_grid = dim3(frame_width / dim_block.x, frame_height / dim_block.y); hipLaunchKernelGGL(( back_substraction_upd_), dim3(dim_grid), dim3(dim_block), 0, 0, bgr32, params_b_mu, params_b_sigma, params_g_mu, params_g_sigma, params_r_mu, params_r_sigma, mask, eps_b, eps_g, eps_r, pup); } //////////////////////////////////////////////////////////////////////////////// __global__ void back_substraction_gray_upd_(float *p_val, float *params_mu, float *params_sigma, mask_type *mask, float eps, PixelUpdateParams pup) { int i = CALC_I(); float p = p_val[i]; mask_type res = 1; float mu = params_mu[i]; float sigma = params_sigma[i]; if (eps * sigma > fabsf(mu - p)) { res = 0; mu = (1.f - pup.alpha1) * mu + pup.alpha1 * p; sigma = sqrtf((1.f - pup.alpha2) * sqr_(sigma) + pup.alpha2 * sqr_(p - mu)); sigma = fmaxf(fminf(pup.min_sigma_val, sigma), pup.max_sigma_val); params_mu[i] = mu; params_sigma[i] = sigma; } mask[i] = res; } //////////////////////////////////////////////////////////////////////////////// void back_substraction_gray_upd(float *p_val, float *params_mu, float *params_sigma, mask_type *mask, int frame_width, int frame_height, float eps, PixelUpdateParams pup) { dim3 dim_block = dim3(BACK_BLOCK_SIZE, BACK_BLOCK_SIZE); dim3 dim_grid = dim3(frame_width / dim_block.x, frame_height / dim_block.y); hipLaunchKernelGGL(( back_substraction_gray_upd_), dim3(dim_grid), dim3(dim_block), 0, 0, p_val, params_mu, params_sigma, mask, eps, pup); } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// 
//////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// __global__ void back_substraction_mixture_(int32_t* bgr32, BgrndProcess* process1, BgrndProcess* process2, BgrndProcess* process3, int32_t* curr_processes, int32_t* created_processes, mask_type* mask, float eps_b, float eps_g, float eps_r, PixelUpdateParams pup, MixturePixelUpdateParams mup) { int i = CALC_I(); int32_t p = bgr32[i]; float b = (float)get_b(p); float g = (float)get_g(p); float r = (float)get_r(p); mask_type res = 1; bool find_process = false; int32_t curr_proc = curr_processes[i]; int32_t cr_processes = created_processes[i]; BgrndProcess proc_list[3] = { process1[i], process2[i], process3[i] }; for (size_t proc_ind = 0; proc_ind < cr_processes; ++proc_ind) { // , if (eps_b * proc_list[proc_ind].sigma[0] > fabsf(proc_list[proc_ind].mu[0] - b) && eps_g * proc_list[proc_ind].sigma[1] > fabsf(proc_list[proc_ind].mu[1] - g) && eps_r * proc_list[proc_ind].sigma[2] > fabsf(proc_list[proc_ind].mu[2] - r)) { // - curr_proc = proc_ind; // . proc_list[proc_ind].mu[0] = (1.f - pup.alpha1) * proc_list[proc_ind].mu[0] + pup.alpha1 * b; proc_list[proc_ind].mu[1] = (1.f - pup.alpha1) * proc_list[proc_ind].mu[1] + pup.alpha1 * g; proc_list[proc_ind].mu[2] = (1.f - pup.alpha1) * proc_list[proc_ind].mu[2] + pup.alpha1 * r; proc_list[proc_ind].sigma[0] = sqrtf((1.f - pup.alpha2) * sqr_(proc_list[proc_ind].sigma[0]) + pup.alpha2 * sqr_(b - proc_list[proc_ind].mu[0])); proc_list[proc_ind].sigma[1] = sqrtf((1.f - pup.alpha2) * sqr_(proc_list[proc_ind].sigma[1]) + pup.alpha2 * sqr_(g - proc_list[proc_ind].mu[1])); proc_list[proc_ind].sigma[2] = sqrtf((1.f - pup.alpha2) * sqr_(proc_list[proc_ind].sigma[2]) + pup.alpha2 * sqr_(r - proc_list[proc_ind].mu[2])); proc_list[proc_ind].sigma[0] = fmaxf(fminf(pup.min_sigma_val, proc_list[proc_ind].sigma[0]), pup.max_sigma_val); proc_list[proc_ind].sigma[1] = fmaxf(fminf(pup.min_sigma_val, proc_list[proc_ind].sigma[1]), pup.max_sigma_val); proc_list[proc_ind].sigma[2] = fmaxf(fminf(pup.min_sigma_val, proc_list[proc_ind].sigma[2]), pup.max_sigma_val); find_process = true; break; } } if (!find_process) // { // , if (cr_processes < 3) { ++cr_processes; curr_proc = cr_processes - 1; proc_list[curr_proc].mu[0] = b; proc_list[curr_proc].mu[1] = g; proc_list[curr_proc].mu[2] = r; proc_list[curr_proc].sigma[0] = pup.min_sigma_val; proc_list[curr_proc].sigma[1] = pup.min_sigma_val; proc_list[curr_proc].sigma[2] = pup.min_sigma_val; find_process = true; } // 3, else { float min_weight = proc_list[0].weight; size_t min_proc = 0; for (size_t proc_ind = 1; proc_ind < cr_processes; ++proc_ind) { if (proc_list[proc_ind].weight < min_weight) { min_proc = proc_ind; min_weight = proc_list[proc_ind].weight; } } curr_proc = min_proc; proc_list[curr_proc].mu[0] = b; proc_list[curr_proc].mu[1] = g; proc_list[curr_proc].mu[2] = r; proc_list[curr_proc].sigma[0] = pup.min_sigma_val; proc_list[curr_proc].sigma[1] = pup.min_sigma_val; proc_list[curr_proc].sigma[2] = pup.min_sigma_val; } } // if (find_process) { for (size_t proc_ind = 0; proc_ind < cr_processes; ++proc_ind) { proc_list[proc_ind].weight = (1 - mup.alpha3) * proc_list[proc_ind].weight + mup.alpha3 * ((proc_ind == curr_proc) ? 
1 : 0); } } if (proc_list[curr_proc].weight > mup.weight_threshold) res = 0; mask[i] = res; curr_processes[i] = curr_proc; created_processes[i] = cr_processes; process1[i] = proc_list[0]; process2[i] = proc_list[1]; process3[i] = proc_list[2]; } //////////////////////////////////////////////////////////////////////////////// void back_substraction_mixture(int32_t* bgr32, BgrndProcess* process1, BgrndProcess* process2, BgrndProcess* process3, int32_t* curr_processes, int32_t* created_processes, mask_type* mask, int frame_width, int frame_height, float eps_b, float eps_g, float eps_r, PixelUpdateParams pup, MixturePixelUpdateParams mup) { dim3 dim_block = dim3(BACK_BLOCK_SIZE, BACK_BLOCK_SIZE); dim3 dim_grid = dim3(frame_width / dim_block.x, frame_height / dim_block.y); hipLaunchKernelGGL(( back_substraction_mixture_), dim3(dim_grid), dim3(dim_block), 0, 0, bgr32, process1, process2, process3, curr_processes, created_processes, mask, eps_b, eps_g, eps_r, pup, mup); } //////////////////////////////////////////////////////////////////////////////// __global__ void back_substraction_mixture_gray_(float* p_val, BgrndProcess* process1, BgrndProcess* process2, BgrndProcess* process3, int32_t* curr_processes, int32_t* created_processes, mask_type* mask, float eps, PixelUpdateParams pup, MixturePixelUpdateParams mup) { int i = CALC_I(); float p = p_val[i]; mask_type res = 1; bool find_process = false; int32_t curr_proc = curr_processes[i]; int32_t cr_processes = created_processes[i]; BgrndProcess proc_list[3] = { process1[i], process2[i], process3[i] }; for (size_t proc_ind = 0; proc_ind < cr_processes; ++proc_ind) { // , if (eps * proc_list[proc_ind].sigma[0] > fabsf(proc_list[proc_ind].mu[0] - p)) { // - curr_proc = proc_ind; // . proc_list[proc_ind].mu[0] = (1.f - pup.alpha1) * proc_list[proc_ind].mu[0] + pup.alpha1 * p; proc_list[proc_ind].sigma[0] = sqrtf((1.f - pup.alpha2) * sqr_(proc_list[proc_ind].sigma[0]) + pup.alpha2 * sqr_(p - proc_list[proc_ind].mu[0])); proc_list[proc_ind].sigma[0] = fmaxf(fminf(pup.min_sigma_val, proc_list[proc_ind].sigma[0]), pup.max_sigma_val); find_process = true; break; } } if (!find_process) // { // , if (cr_processes < 3) { ++cr_processes; curr_proc = cr_processes - 1; proc_list[curr_proc].mu[0] = p; proc_list[curr_proc].sigma[0] = pup.min_sigma_val; find_process = true; } // 3, else { float min_weight = proc_list[0].weight; size_t min_proc = 0; for (size_t proc_ind = 1; proc_ind < cr_processes; ++proc_ind) { if (proc_list[proc_ind].weight < min_weight) { min_proc = proc_ind; min_weight = proc_list[proc_ind].weight; } } curr_proc = min_proc; proc_list[curr_proc].mu[0] = p; proc_list[curr_proc].sigma[0] = pup.min_sigma_val; } } // if (find_process) { for (size_t proc_ind = 0; proc_ind < cr_processes; ++proc_ind) { proc_list[proc_ind].weight = (1 - mup.alpha3) * proc_list[proc_ind].weight + mup.alpha3 * ((proc_ind == curr_proc) ? 
1 : 0); } } if (proc_list[curr_proc].weight > mup.weight_threshold) res = 0; mask[i] = res; curr_processes[i] = curr_proc; created_processes[i] = cr_processes; process1[i] = proc_list[0]; process2[i] = proc_list[1]; process3[i] = proc_list[2]; } //////////////////////////////////////////////////////////////////////////////// void back_substraction_mixture_gray(float* p_val, BgrndProcess* process1, BgrndProcess* process2, BgrndProcess* process3, int32_t* curr_processes, int32_t* created_processes, mask_type* mask, int frame_width, int frame_height, float eps, PixelUpdateParams pup, MixturePixelUpdateParams mup) { dim3 dim_block = dim3(BACK_BLOCK_SIZE, BACK_BLOCK_SIZE); dim3 dim_grid = dim3(frame_width / dim_block.x, frame_height / dim_block.y); hipLaunchKernelGGL(( back_substraction_mixture_gray_), dim3(dim_grid), dim3(dim_block), 0, 0, p_val, process1, process2, process3, curr_processes, created_processes, mask, eps, pup, mup); } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// __global__ void reset_statistic_(int32_t *bgr32, float *params_b_mu, float *params_b_sigma, float *params_g_mu, float *params_g_sigma, float *params_r_mu, float *params_r_sigma, int frame_width, int left, int right, int top, int bottom, float max_sigma_val) { int i = blockIdx.x * blockDim.x + threadIdx.x; int reset_pixels = (right - left + 1) * (bottom - top + 1); if (i < reset_pixels) { i += left + frame_width * top; int32_t p = bgr32[i]; float b = (float)get_b(p); float g = (float)get_g(p); float r = (float)get_r(p); params_b_mu[i] = b; params_g_mu[i] = g; params_r_mu[i] = r; params_b_sigma[i] = max_sigma_val; params_g_sigma[i] = max_sigma_val; params_r_sigma[i] = max_sigma_val; } } //////////////////////////////////////////////////////////////////////////////// void reset_statistic(int32_t *bgr32, float *params_b_mu, float *params_b_sigma, float *params_g_mu, float *params_g_sigma, float *params_r_mu, float *params_r_sigma, int frame_width, int left, int right, int top, int bottom, float max_sigma_val) { int reset_pixels = (right - left + 1) * (bottom - top + 1); reset_pixels += MAX_THREADS_PER_BLOCK - reset_pixels % MAX_THREADS_PER_BLOCK; dim3 dim_block = dim3(MAX_THREADS_PER_BLOCK, 1); dim3 dim_grid = dim3(reset_pixels / dim_block.x, 1); hipLaunchKernelGGL(( reset_statistic_), dim3(dim_grid), dim3(dim_block), 0, 0, bgr32, params_b_mu, params_b_sigma, params_g_mu, params_g_sigma, params_r_mu, params_r_sigma, frame_width, left, right, top, bottom, max_sigma_val); } //////////////////////////////////////////////////////////////////////////////// __global__ void reset_statistic_gray_(float *p_val, float *params_mu, float *params_sigma, int frame_width, int left, int right, int top, int bottom, float max_sigma_val) { int i = blockIdx.x * blockDim.x + threadIdx.x; int reset_pixels = (right - left + 1) * (bottom - top + 1); if (i < reset_pixels) { i += left + frame_width * top; params_mu[i] = p_val[i]; params_sigma[i] = max_sigma_val; } } //////////////////////////////////////////////////////////////////////////////// void reset_statistic_gray(float *p_val, float *params_mu, float *params_sigma, int frame_width, int left, int right, int top, int bottom, float 
max_sigma_val) { int reset_pixels = (right - left + 1) * (bottom - top + 1); reset_pixels += MAX_THREADS_PER_BLOCK - reset_pixels % MAX_THREADS_PER_BLOCK; dim3 dim_block = dim3(MAX_THREADS_PER_BLOCK, 1); dim3 dim_grid = dim3(reset_pixels / dim_block.x, 1); hipLaunchKernelGGL(( reset_statistic_gray_), dim3(dim_grid), dim3(dim_block), 0, 0, p_val, params_mu, params_sigma, frame_width, left, right, top, bottom, max_sigma_val); } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// __global__ void update_statistic_(int32_t *bgr32, float *params_b_mu, float *params_b_sigma, float *params_g_mu, float *params_g_sigma, float *params_r_mu, float *params_r_sigma, int frame_width, int left, int right, int top, int bottom, PixelUpdateParams pup) { int i = blockIdx.x * blockDim.x + threadIdx.x; int reset_pixels = (right - left + 1) * (bottom - top + 1); if (i < reset_pixels) { i += left + frame_width * top; float b_mu = params_b_mu[i]; float b_sigma = params_b_sigma[i]; float g_mu = params_g_mu[i]; float g_sigma = params_g_sigma[i]; float r_mu = params_r_mu[i]; float r_sigma = params_r_sigma[i]; int32_t p = bgr32[i]; float b = (float)get_b(p); float g = (float)get_g(p); float r = (float)get_r(p); b_mu = (1.f - pup.alpha1) * b_mu + pup.alpha1 * b; g_mu = (1.f - pup.alpha1) * g_mu + pup.alpha1 * g; r_mu = (1.f - pup.alpha1) * r_mu + pup.alpha1 * r; b_sigma = sqrtf((1.f - pup.alpha2) * sqr_(b_sigma) + pup.alpha2 * sqr_(b - b_mu)); g_sigma = sqrtf((1.f - pup.alpha2) * sqr_(g_sigma) + pup.alpha2 * sqr_(g - g_mu)); r_sigma = sqrtf((1.f - pup.alpha2) * sqr_(r_sigma) + pup.alpha2 * sqr_(r - r_mu)); b_sigma = fmaxf(fminf(pup.min_sigma_val, b_sigma), pup.max_sigma_val); g_sigma = fmaxf(fminf(pup.min_sigma_val, g_sigma), pup.max_sigma_val); r_sigma = fmaxf(fminf(pup.min_sigma_val, r_sigma), pup.max_sigma_val); params_b_mu[i] = b_mu; params_b_sigma[i] = b_sigma; params_g_mu[i] = g_mu; params_g_sigma[i] = g_sigma; params_r_mu[i] = r_mu; params_r_sigma[i] = r_sigma; } } //////////////////////////////////////////////////////////////////////////////// void update_statistic(int32_t *bgr32, float *params_b_mu, float *params_b_sigma, float *params_g_mu, float *params_g_sigma, float *params_r_mu, float *params_r_sigma, int frame_width, int left, int right, int top, int bottom, PixelUpdateParams pup) { int reset_pixels = (right - left + 1) * (bottom - top + 1); reset_pixels += MAX_THREADS_PER_BLOCK - reset_pixels % MAX_THREADS_PER_BLOCK; dim3 dim_block = dim3(MAX_THREADS_PER_BLOCK, 1); dim3 dim_grid = dim3(reset_pixels / dim_block.x, 1); hipLaunchKernelGGL(( update_statistic_), dim3(dim_grid), dim3(dim_block), 0, 0, bgr32, params_b_mu, params_b_sigma, params_g_mu, params_g_sigma, params_r_mu, params_r_sigma, frame_width, left, right, top, bottom, pup); } //////////////////////////////////////////////////////////////////////////////// __global__ void update_statistic_gray_(float *p_val, float *params_mu, float *params_sigma, int frame_width, int left, int right, int top, int bottom, PixelUpdateParams pup) { int i = blockIdx.x * blockDim.x + threadIdx.x; int reset_pixels = (right - left + 1) * (bottom - top + 1); if (i < reset_pixels) { i += left + frame_width * top; float mu = 
params_mu[i]; float sigma = params_sigma[i]; float p = p_val[i]; mu = (1.f - pup.alpha1) * mu + pup.alpha1 * p; sigma = sqrtf((1.f - pup.alpha2) * sqr_(sigma) + pup.alpha2 * sqr_(p - mu)); sigma = fmaxf(fminf(pup.min_sigma_val, sigma), pup.max_sigma_val); params_mu[i] = mu; params_sigma[i] = sigma; } } //////////////////////////////////////////////////////////////////////////////// void update_statistic_gray(float *p_val, float *params_mu, float *params_sigma, int frame_width, int left, int right, int top, int bottom, PixelUpdateParams pup) { int reset_pixels = (right - left + 1) * (bottom - top + 1); reset_pixels += MAX_THREADS_PER_BLOCK - reset_pixels % MAX_THREADS_PER_BLOCK; dim3 dim_block = dim3(MAX_THREADS_PER_BLOCK, 1); dim3 dim_grid = dim3(reset_pixels / dim_block.x, 1); hipLaunchKernelGGL(( update_statistic_gray_), dim3(dim_grid), dim3(dim_block), 0, 0, p_val, params_mu, params_sigma, frame_width, left, right, top, bottom, pup); } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // - 33 __global__ void morphology_(mask_type *mask, mask_type *mask_temp, int frame_width, int frame_height, unsigned int pixels) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ((i < frame_width) || (i >= pixels - frame_width) || (i % frame_width == 0) || (i % (frame_width - 1) == 0)) { mask_temp[i] = 0; } else { // mask_temp[i] = mask[i - frame_width - 1] & mask[i - frame_width] & mask[i - frame_width + 1] & mask[i - 1] & mask[i] & mask[i + 1] & mask[i + frame_width - 1] & mask[i + frame_width] & mask[i + frame_width + 1]; __syncthreads(); // if (mask_temp[i]) { mask[i - frame_width - 1] = 1; mask[i - frame_width] = 1; mask[i - frame_width + 1] = 1; mask[i - 1] = 1; mask[i] = 1; mask[i + 1] = 1; mask[i + frame_width - 1] = 1; mask[i + frame_width] = 1; mask[i + frame_width + 1] = 1; } else { mask[i] = 0; } } } //////////////////////////////////////////////////////////////////////////////// void morphology(mask_type *mask, mask_type *mask_temp, int frame_width, int frame_height, unsigned int pixels) { dim3 dim_block = dim3(MAX_THREADS_PER_BLOCK, 1); dim3 dim_grid = dim3(pixels / dim_block.x, 1); hipLaunchKernelGGL(( morphology_), dim3(dim_grid), dim3(dim_block), 0, 0, mask, mask_temp, frame_width, frame_height, pixels); } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// #if 1 __global__ void segmentation_(mask_type *mask, reg_label *regions) { int tx = threadIdx.x; int ty = threadIdx.y; int mi = gridDim.x * blockIdx.y * blockDim.x * blockDim.y + blockIdx.x * blockDim.x + ty * gridDim.x * blockDim.x + tx; __shared__ mask_type data[SEGM_BLOCK_SIZE][SEGM_BLOCK_SIZE]; data[tx][ty] = mask[mi]; __syncthreads(); if ((tx == 0) && (ty == 0)) { mask_type s = 0; for (int i = 0; i < SEGM_BLOCK_SIZE; ++i) { for (int j = 0; j < SEGM_BLOCK_SIZE; ++j) { s += data[j][i]; } } int bi = blockIdx.x + blockIdx.y * gridDim.x; 
regions[bi] = (reg_label)s; } } //////////////////////////////////////////////////////////////////////////////// void segmentation(mask_type *mask, reg_label *regions, int frame_width, int frame_height) { dim3 dim_block = dim3(SEGM_BLOCK_SIZE, SEGM_BLOCK_SIZE); dim3 dim_grid = dim3(frame_width / dim_block.x, frame_height / dim_block.y); hipLaunchKernelGGL(( segmentation_), dim3(dim_grid), dim3(dim_block), 0, 0, mask, regions); } #else __global__ void segmentation_(mask_type *mask, reg_label *regions) { int tx = threadIdx.x; int ty = threadIdx.y; int mi = CALC_I(); __shared__ mask_type data1[BACK_BLOCK_SIZE][BACK_BLOCK_SIZE]; data1[tx][ty] = mask[mi]; __syncthreads(); __shared__ mask_type data2[SEGM_BLOCK_SIZE][SEGM_BLOCK_SIZE]; if (!tx && !ty) { for (int y = 0; y < SEGM_BLOCK_SIZE; ++y) { for (int x = 0; x < SEGM_BLOCK_SIZE; ++x) { data2[x][y] = 0; } } } __syncthreads(); if (!tx) { int y = ty / SEGM_BLOCK_SIZE; for (int x = 0; x < BACK_BLOCK_SIZE; ++x) { data2[x / SEGM_BLOCK_SIZE][y] += data1[x][ty]; } } __syncthreads(); if (((tx % SEGM_BLOCK_SIZE) == 0) || ((ty % SEGM_BLOCK_SIZE) == 0)) { const int rx = tx / SEGM_BLOCK_SIZE; const int ry = ty / SEGM_BLOCK_SIZE; const int xk = BACK_BLOCK_SIZE / SEGM_BLOCK_SIZE; const int yk = BACK_BLOCK_SIZE / SEGM_BLOCK_SIZE; int bi = xk * gridDim.x * blockIdx.y * yk + blockIdx.x * xk + ry * gridDim.x * xk + rx; regions[bi] = (reg_label)data2[rx][ry]; } } //////////////////////////////////////////////////////////////////////////////// void segmentation(mask_type *mask, reg_label *regions, int frame_width, int frame_height) { dim3 dim_block = dim3(BACK_BLOCK_SIZE, BACK_BLOCK_SIZE); dim3 dim_grid = dim3(frame_width / dim_block.x, frame_height / dim_block.y); hipLaunchKernelGGL(( segmentation_), dim3(dim_grid), dim3(dim_block), 0, 0, mask, regions); } #endif ////////////////////////////////////////////////////////////////////////////////
0098b1abe78abf6626c793b24a33f5ed27fdba10.cu
#include "feintrack_d.h" //////////////////////////////////////////////////////////////////////////////// const int MAX_THREADS_PER_BLOCK = 512; const int BACK_BLOCK_SIZE = 16; //////////////////////////////////////////////////////////////////////////////// template<class T> __device__ T sqr_(T val) { return val * val; } //////////////////////////////////////////////////////////////////////// #define CALC_I() gridDim.x * blockIdx.y * blockDim.x * blockDim.y + blockIdx.x * blockDim.x + threadIdx.y * gridDim.x * blockDim.x + threadIdx.x //////////////////////////////////////////////////////////////////////// #define get_b(v) (unsigned char)(0x000000ff & (v)); #define get_g(v) (unsigned char)(0x000000ff & ((v) >> 8)); #define get_r(v) (unsigned char)(0x000000ff & ((v) >> 16)); //////////////////////////////////////////////////////////////////////// __global__ void back_substraction_(int32_t *bgr32, float *params_b_mu, float *params_b_sigma, float *params_g_mu, float *params_g_sigma, float *params_r_mu, float *params_r_sigma, mask_type *mask, float eps_b, float eps_g, float eps_r) { int i = CALC_I(); int32_t p = bgr32[i]; float b = (float)get_b(p); float g = (float)get_g(p); float r = (float)get_r(p); mask_type res = 1; if (eps_b * params_b_sigma[i] > fabsf(params_b_mu[i] - b) && eps_g * params_g_sigma[i] > fabsf(params_g_mu[i] - g) && eps_r * params_r_sigma[i] > fabsf(params_r_mu[i] - r)) { res = 0; } mask[i] = res; } //////////////////////////////////////////////////////////////////////////////// void back_substraction(int32_t *bgr32, float *params_b_mu, float *params_b_sigma, float *params_g_mu, float *params_g_sigma, float *params_r_mu, float *params_r_sigma, mask_type *mask, int frame_width, int frame_height, float eps_b, float eps_g, float eps_r) { dim3 dim_block = dim3(BACK_BLOCK_SIZE, BACK_BLOCK_SIZE); dim3 dim_grid = dim3(frame_width / dim_block.x, frame_height / dim_block.y); back_substraction_<<<dim_grid, dim_block>>>(bgr32, params_b_mu, params_b_sigma, params_g_mu, params_g_sigma, params_r_mu, params_r_sigma, mask, eps_b, eps_g, eps_r); } //////////////////////////////////////////////////////////////////////////////// __global__ void back_substraction_gray_(float *p_val, float *params_mu, float *params_sigma, mask_type *mask, float eps) { int i = CALC_I(); mask_type res = 1; if (eps * params_sigma[i] > fabsf(params_mu[i] - p_val[i])) res = 0; mask[i] = res; } //////////////////////////////////////////////////////////////////////////////// void back_substraction_gray(float *p_val, float *params_mu, float *params_sigma, mask_type *mask, int frame_width, int frame_height, float eps) { dim3 dim_block = dim3(BACK_BLOCK_SIZE, BACK_BLOCK_SIZE); dim3 dim_grid = dim3(frame_width / dim_block.x, frame_height / dim_block.y); back_substraction_gray_<<<dim_grid, dim_block>>>(p_val, params_mu, params_sigma, mask, eps); } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// __global__ void back_substraction_upd_(int32_t *bgr32, float *params_b_mu, float *params_b_sigma, float *params_g_mu, float *params_g_sigma, float *params_r_mu, float *params_r_sigma, mask_type *mask, float eps_b, float eps_g, float eps_r, PixelUpdateParams pup) { int i = CALC_I(); 
int32_t p = bgr32[i]; float b = (float)get_b(p); float g = (float)get_g(p); float r = (float)get_r(p); mask_type res = 1; float b_mu = params_b_mu[i]; float b_sigma = params_b_sigma[i]; float g_mu = params_g_mu[i]; float g_sigma = params_g_sigma[i]; float r_mu = params_r_mu[i]; float r_sigma = params_r_sigma[i]; if (eps_b * b_sigma > fabsf(b_mu - b) && eps_g * g_sigma > fabsf(g_mu - g) && eps_r * r_sigma > fabsf(r_mu - r)) { res = 0; b_mu = (1.f - pup.alpha1) * b_mu + pup.alpha1 * b; g_mu = (1.f - pup.alpha1) * g_mu + pup.alpha1 * g; r_mu = (1.f - pup.alpha1) * r_mu + pup.alpha1 * r; b_sigma = sqrtf((1.f - pup.alpha2) * sqr_(b_sigma) + pup.alpha2 * sqr_(b - b_mu)); g_sigma = sqrtf((1.f - pup.alpha2) * sqr_(g_sigma) + pup.alpha2 * sqr_(g - g_mu)); r_sigma = sqrtf((1.f - pup.alpha2) * sqr_(r_sigma) + pup.alpha2 * sqr_(r - r_mu)); b_sigma = fmaxf(fminf(pup.min_sigma_val, b_sigma), pup.max_sigma_val); g_sigma = fmaxf(fminf(pup.min_sigma_val, g_sigma), pup.max_sigma_val); r_sigma = fmaxf(fminf(pup.min_sigma_val, r_sigma), pup.max_sigma_val); params_b_mu[i] = b_mu; params_b_sigma[i] = b_sigma; params_g_mu[i] = g_mu; params_g_sigma[i] = g_sigma; params_r_mu[i] = r_mu; params_r_sigma[i] = r_sigma; } mask[i] = res; } //////////////////////////////////////////////////////////////////////////////// void back_substraction_upd(int32_t *bgr32, float *params_b_mu, float *params_b_sigma, float *params_g_mu, float *params_g_sigma, float *params_r_mu, float *params_r_sigma, mask_type *mask, int frame_width, int frame_height, float eps_b, float eps_g, float eps_r, PixelUpdateParams pup) { dim3 dim_block = dim3(BACK_BLOCK_SIZE, BACK_BLOCK_SIZE); dim3 dim_grid = dim3(frame_width / dim_block.x, frame_height / dim_block.y); back_substraction_upd_<<<dim_grid, dim_block>>>(bgr32, params_b_mu, params_b_sigma, params_g_mu, params_g_sigma, params_r_mu, params_r_sigma, mask, eps_b, eps_g, eps_r, pup); } //////////////////////////////////////////////////////////////////////////////// __global__ void back_substraction_gray_upd_(float *p_val, float *params_mu, float *params_sigma, mask_type *mask, float eps, PixelUpdateParams pup) { int i = CALC_I(); float p = p_val[i]; mask_type res = 1; float mu = params_mu[i]; float sigma = params_sigma[i]; if (eps * sigma > fabsf(mu - p)) { res = 0; mu = (1.f - pup.alpha1) * mu + pup.alpha1 * p; sigma = sqrtf((1.f - pup.alpha2) * sqr_(sigma) + pup.alpha2 * sqr_(p - mu)); sigma = fmaxf(fminf(pup.min_sigma_val, sigma), pup.max_sigma_val); params_mu[i] = mu; params_sigma[i] = sigma; } mask[i] = res; } //////////////////////////////////////////////////////////////////////////////// void back_substraction_gray_upd(float *p_val, float *params_mu, float *params_sigma, mask_type *mask, int frame_width, int frame_height, float eps, PixelUpdateParams pup) { dim3 dim_block = dim3(BACK_BLOCK_SIZE, BACK_BLOCK_SIZE); dim3 dim_grid = dim3(frame_width / dim_block.x, frame_height / dim_block.y); back_substraction_gray_upd_<<<dim_grid, dim_block>>>(p_val, params_mu, params_sigma, mask, eps, pup); } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// __global__ void back_substraction_mixture_(int32_t* bgr32, BgrndProcess* process1, BgrndProcess* process2, 
BgrndProcess* process3, int32_t* curr_processes, int32_t* created_processes, mask_type* mask, float eps_b, float eps_g, float eps_r, PixelUpdateParams pup, MixturePixelUpdateParams mup) { int i = CALC_I(); int32_t p = bgr32[i]; float b = (float)get_b(p); float g = (float)get_g(p); float r = (float)get_r(p); mask_type res = 1; bool find_process = false; int32_t curr_proc = curr_processes[i]; int32_t cr_processes = created_processes[i]; BgrndProcess proc_list[3] = { process1[i], process2[i], process3[i] }; for (size_t proc_ind = 0; proc_ind < cr_processes; ++proc_ind) { // Ищем процесс, который лучше соответствует текущему значению пикселя if (eps_b * proc_list[proc_ind].sigma[0] > fabsf(proc_list[proc_ind].mu[0] - b) && eps_g * proc_list[proc_ind].sigma[1] > fabsf(proc_list[proc_ind].mu[1] - g) && eps_r * proc_list[proc_ind].sigma[2] > fabsf(proc_list[proc_ind].mu[2] - r)) { // Процесс найден - уточняем его параметры curr_proc = proc_ind; // Оценки мат. ожидания и дисперсии обновляются с помощью низкочастотного фильтра рекурсивного сглаживания proc_list[proc_ind].mu[0] = (1.f - pup.alpha1) * proc_list[proc_ind].mu[0] + pup.alpha1 * b; proc_list[proc_ind].mu[1] = (1.f - pup.alpha1) * proc_list[proc_ind].mu[1] + pup.alpha1 * g; proc_list[proc_ind].mu[2] = (1.f - pup.alpha1) * proc_list[proc_ind].mu[2] + pup.alpha1 * r; proc_list[proc_ind].sigma[0] = sqrtf((1.f - pup.alpha2) * sqr_(proc_list[proc_ind].sigma[0]) + pup.alpha2 * sqr_(b - proc_list[proc_ind].mu[0])); proc_list[proc_ind].sigma[1] = sqrtf((1.f - pup.alpha2) * sqr_(proc_list[proc_ind].sigma[1]) + pup.alpha2 * sqr_(g - proc_list[proc_ind].mu[1])); proc_list[proc_ind].sigma[2] = sqrtf((1.f - pup.alpha2) * sqr_(proc_list[proc_ind].sigma[2]) + pup.alpha2 * sqr_(r - proc_list[proc_ind].mu[2])); proc_list[proc_ind].sigma[0] = fmaxf(fminf(pup.min_sigma_val, proc_list[proc_ind].sigma[0]), pup.max_sigma_val); proc_list[proc_ind].sigma[1] = fmaxf(fminf(pup.min_sigma_val, proc_list[proc_ind].sigma[1]), pup.max_sigma_val); proc_list[proc_ind].sigma[2] = fmaxf(fminf(pup.min_sigma_val, proc_list[proc_ind].sigma[2]), pup.max_sigma_val); find_process = true; break; } } if (!find_process) // Процесс не найден { // Создаём новый процесс или, if (cr_processes < 3) { ++cr_processes; curr_proc = cr_processes - 1; proc_list[curr_proc].mu[0] = b; proc_list[curr_proc].mu[1] = g; proc_list[curr_proc].mu[2] = r; proc_list[curr_proc].sigma[0] = pup.min_sigma_val; proc_list[curr_proc].sigma[1] = pup.min_sigma_val; proc_list[curr_proc].sigma[2] = pup.min_sigma_val; find_process = true; } // если количество процессов равно 3, ищем процесс с наименьшим весом else { float min_weight = proc_list[0].weight; size_t min_proc = 0; for (size_t proc_ind = 1; proc_ind < cr_processes; ++proc_ind) { if (proc_list[proc_ind].weight < min_weight) { min_proc = proc_ind; min_weight = proc_list[proc_ind].weight; } } curr_proc = min_proc; proc_list[curr_proc].mu[0] = b; proc_list[curr_proc].mu[1] = g; proc_list[curr_proc].mu[2] = r; proc_list[curr_proc].sigma[0] = pup.min_sigma_val; proc_list[curr_proc].sigma[1] = pup.min_sigma_val; proc_list[curr_proc].sigma[2] = pup.min_sigma_val; } } // Обновление весов процессов if (find_process) { for (size_t proc_ind = 0; proc_ind < cr_processes; ++proc_ind) { proc_list[proc_ind].weight = (1 - mup.alpha3) * proc_list[proc_ind].weight + mup.alpha3 * ((proc_ind == curr_proc) ? 
1 : 0); } } if (proc_list[curr_proc].weight > mup.weight_threshold) res = 0; mask[i] = res; curr_processes[i] = curr_proc; created_processes[i] = cr_processes; process1[i] = proc_list[0]; process2[i] = proc_list[1]; process3[i] = proc_list[2]; } //////////////////////////////////////////////////////////////////////////////// void back_substraction_mixture(int32_t* bgr32, BgrndProcess* process1, BgrndProcess* process2, BgrndProcess* process3, int32_t* curr_processes, int32_t* created_processes, mask_type* mask, int frame_width, int frame_height, float eps_b, float eps_g, float eps_r, PixelUpdateParams pup, MixturePixelUpdateParams mup) { dim3 dim_block = dim3(BACK_BLOCK_SIZE, BACK_BLOCK_SIZE); dim3 dim_grid = dim3(frame_width / dim_block.x, frame_height / dim_block.y); back_substraction_mixture_<<<dim_grid, dim_block>>>(bgr32, process1, process2, process3, curr_processes, created_processes, mask, eps_b, eps_g, eps_r, pup, mup); } //////////////////////////////////////////////////////////////////////////////// __global__ void back_substraction_mixture_gray_(float* p_val, BgrndProcess* process1, BgrndProcess* process2, BgrndProcess* process3, int32_t* curr_processes, int32_t* created_processes, mask_type* mask, float eps, PixelUpdateParams pup, MixturePixelUpdateParams mup) { int i = CALC_I(); float p = p_val[i]; mask_type res = 1; bool find_process = false; int32_t curr_proc = curr_processes[i]; int32_t cr_processes = created_processes[i]; BgrndProcess proc_list[3] = { process1[i], process2[i], process3[i] }; for (size_t proc_ind = 0; proc_ind < cr_processes; ++proc_ind) { // Ищем процесс, который лучше соответствует текущему значению пикселя if (eps * proc_list[proc_ind].sigma[0] > fabsf(proc_list[proc_ind].mu[0] - p)) { // Процесс найден - уточняем его параметры curr_proc = proc_ind; // Оценки мат. ожидания и дисперсии обновляются с помощью низкочастотного фильтра рекурсивного сглаживания proc_list[proc_ind].mu[0] = (1.f - pup.alpha1) * proc_list[proc_ind].mu[0] + pup.alpha1 * p; proc_list[proc_ind].sigma[0] = sqrtf((1.f - pup.alpha2) * sqr_(proc_list[proc_ind].sigma[0]) + pup.alpha2 * sqr_(p - proc_list[proc_ind].mu[0])); proc_list[proc_ind].sigma[0] = fmaxf(fminf(pup.min_sigma_val, proc_list[proc_ind].sigma[0]), pup.max_sigma_val); find_process = true; break; } } if (!find_process) // Процесс не найден { // Создаём новый процесс или, if (cr_processes < 3) { ++cr_processes; curr_proc = cr_processes - 1; proc_list[curr_proc].mu[0] = p; proc_list[curr_proc].sigma[0] = pup.min_sigma_val; find_process = true; } // если количество процессов равно 3, ищем процесс с наименьшим весом else { float min_weight = proc_list[0].weight; size_t min_proc = 0; for (size_t proc_ind = 1; proc_ind < cr_processes; ++proc_ind) { if (proc_list[proc_ind].weight < min_weight) { min_proc = proc_ind; min_weight = proc_list[proc_ind].weight; } } curr_proc = min_proc; proc_list[curr_proc].mu[0] = p; proc_list[curr_proc].sigma[0] = pup.min_sigma_val; } } // Обновление весов процессов if (find_process) { for (size_t proc_ind = 0; proc_ind < cr_processes; ++proc_ind) { proc_list[proc_ind].weight = (1 - mup.alpha3) * proc_list[proc_ind].weight + mup.alpha3 * ((proc_ind == curr_proc) ? 
1 : 0); } } if (proc_list[curr_proc].weight > mup.weight_threshold) res = 0; mask[i] = res; curr_processes[i] = curr_proc; created_processes[i] = cr_processes; process1[i] = proc_list[0]; process2[i] = proc_list[1]; process3[i] = proc_list[2]; } //////////////////////////////////////////////////////////////////////////////// void back_substraction_mixture_gray(float* p_val, BgrndProcess* process1, BgrndProcess* process2, BgrndProcess* process3, int32_t* curr_processes, int32_t* created_processes, mask_type* mask, int frame_width, int frame_height, float eps, PixelUpdateParams pup, MixturePixelUpdateParams mup) { dim3 dim_block = dim3(BACK_BLOCK_SIZE, BACK_BLOCK_SIZE); dim3 dim_grid = dim3(frame_width / dim_block.x, frame_height / dim_block.y); back_substraction_mixture_gray_<<<dim_grid, dim_block>>>(p_val, process1, process2, process3, curr_processes, created_processes, mask, eps, pup, mup); } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// __global__ void reset_statistic_(int32_t *bgr32, float *params_b_mu, float *params_b_sigma, float *params_g_mu, float *params_g_sigma, float *params_r_mu, float *params_r_sigma, int frame_width, int left, int right, int top, int bottom, float max_sigma_val) { int i = blockIdx.x * blockDim.x + threadIdx.x; int reset_pixels = (right - left + 1) * (bottom - top + 1); if (i < reset_pixels) { i += left + frame_width * top; int32_t p = bgr32[i]; float b = (float)get_b(p); float g = (float)get_g(p); float r = (float)get_r(p); params_b_mu[i] = b; params_g_mu[i] = g; params_r_mu[i] = r; params_b_sigma[i] = max_sigma_val; params_g_sigma[i] = max_sigma_val; params_r_sigma[i] = max_sigma_val; } } //////////////////////////////////////////////////////////////////////////////// void reset_statistic(int32_t *bgr32, float *params_b_mu, float *params_b_sigma, float *params_g_mu, float *params_g_sigma, float *params_r_mu, float *params_r_sigma, int frame_width, int left, int right, int top, int bottom, float max_sigma_val) { int reset_pixels = (right - left + 1) * (bottom - top + 1); reset_pixels += MAX_THREADS_PER_BLOCK - reset_pixels % MAX_THREADS_PER_BLOCK; dim3 dim_block = dim3(MAX_THREADS_PER_BLOCK, 1); dim3 dim_grid = dim3(reset_pixels / dim_block.x, 1); reset_statistic_<<<dim_grid, dim_block>>>(bgr32, params_b_mu, params_b_sigma, params_g_mu, params_g_sigma, params_r_mu, params_r_sigma, frame_width, left, right, top, bottom, max_sigma_val); } //////////////////////////////////////////////////////////////////////////////// __global__ void reset_statistic_gray_(float *p_val, float *params_mu, float *params_sigma, int frame_width, int left, int right, int top, int bottom, float max_sigma_val) { int i = blockIdx.x * blockDim.x + threadIdx.x; int reset_pixels = (right - left + 1) * (bottom - top + 1); if (i < reset_pixels) { i += left + frame_width * top; params_mu[i] = p_val[i]; params_sigma[i] = max_sigma_val; } } //////////////////////////////////////////////////////////////////////////////// void reset_statistic_gray(float *p_val, float *params_mu, float *params_sigma, int frame_width, int left, int right, int top, int bottom, float max_sigma_val) { int reset_pixels = (right - left + 1) * (bottom - top + 1); 
reset_pixels += MAX_THREADS_PER_BLOCK - reset_pixels % MAX_THREADS_PER_BLOCK; dim3 dim_block = dim3(MAX_THREADS_PER_BLOCK, 1); dim3 dim_grid = dim3(reset_pixels / dim_block.x, 1); reset_statistic_gray_<<<dim_grid, dim_block>>>(p_val, params_mu, params_sigma, frame_width, left, right, top, bottom, max_sigma_val); } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// __global__ void update_statistic_(int32_t *bgr32, float *params_b_mu, float *params_b_sigma, float *params_g_mu, float *params_g_sigma, float *params_r_mu, float *params_r_sigma, int frame_width, int left, int right, int top, int bottom, PixelUpdateParams pup) { int i = blockIdx.x * blockDim.x + threadIdx.x; int reset_pixels = (right - left + 1) * (bottom - top + 1); if (i < reset_pixels) { i += left + frame_width * top; float b_mu = params_b_mu[i]; float b_sigma = params_b_sigma[i]; float g_mu = params_g_mu[i]; float g_sigma = params_g_sigma[i]; float r_mu = params_r_mu[i]; float r_sigma = params_r_sigma[i]; int32_t p = bgr32[i]; float b = (float)get_b(p); float g = (float)get_g(p); float r = (float)get_r(p); b_mu = (1.f - pup.alpha1) * b_mu + pup.alpha1 * b; g_mu = (1.f - pup.alpha1) * g_mu + pup.alpha1 * g; r_mu = (1.f - pup.alpha1) * r_mu + pup.alpha1 * r; b_sigma = sqrtf((1.f - pup.alpha2) * sqr_(b_sigma) + pup.alpha2 * sqr_(b - b_mu)); g_sigma = sqrtf((1.f - pup.alpha2) * sqr_(g_sigma) + pup.alpha2 * sqr_(g - g_mu)); r_sigma = sqrtf((1.f - pup.alpha2) * sqr_(r_sigma) + pup.alpha2 * sqr_(r - r_mu)); b_sigma = fmaxf(fminf(pup.min_sigma_val, b_sigma), pup.max_sigma_val); g_sigma = fmaxf(fminf(pup.min_sigma_val, g_sigma), pup.max_sigma_val); r_sigma = fmaxf(fminf(pup.min_sigma_val, r_sigma), pup.max_sigma_val); params_b_mu[i] = b_mu; params_b_sigma[i] = b_sigma; params_g_mu[i] = g_mu; params_g_sigma[i] = g_sigma; params_r_mu[i] = r_mu; params_r_sigma[i] = r_sigma; } } //////////////////////////////////////////////////////////////////////////////// void update_statistic(int32_t *bgr32, float *params_b_mu, float *params_b_sigma, float *params_g_mu, float *params_g_sigma, float *params_r_mu, float *params_r_sigma, int frame_width, int left, int right, int top, int bottom, PixelUpdateParams pup) { int reset_pixels = (right - left + 1) * (bottom - top + 1); reset_pixels += MAX_THREADS_PER_BLOCK - reset_pixels % MAX_THREADS_PER_BLOCK; dim3 dim_block = dim3(MAX_THREADS_PER_BLOCK, 1); dim3 dim_grid = dim3(reset_pixels / dim_block.x, 1); update_statistic_<<<dim_grid, dim_block>>>(bgr32, params_b_mu, params_b_sigma, params_g_mu, params_g_sigma, params_r_mu, params_r_sigma, frame_width, left, right, top, bottom, pup); } //////////////////////////////////////////////////////////////////////////////// __global__ void update_statistic_gray_(float *p_val, float *params_mu, float *params_sigma, int frame_width, int left, int right, int top, int bottom, PixelUpdateParams pup) { int i = blockIdx.x * blockDim.x + threadIdx.x; int reset_pixels = (right - left + 1) * (bottom - top + 1); if (i < reset_pixels) { i += left + frame_width * top; float mu = params_mu[i]; float sigma = params_sigma[i]; float p = p_val[i]; mu = (1.f - pup.alpha1) * mu + pup.alpha1 * p; sigma = sqrtf((1.f - pup.alpha2) * 
sqr_(sigma) + pup.alpha2 * sqr_(p - mu)); sigma = fmaxf(fminf(pup.min_sigma_val, sigma), pup.max_sigma_val); params_mu[i] = mu; params_sigma[i] = sigma; } } //////////////////////////////////////////////////////////////////////////////// void update_statistic_gray(float *p_val, float *params_mu, float *params_sigma, int frame_width, int left, int right, int top, int bottom, PixelUpdateParams pup) { int reset_pixels = (right - left + 1) * (bottom - top + 1); reset_pixels += MAX_THREADS_PER_BLOCK - reset_pixels % MAX_THREADS_PER_BLOCK; dim3 dim_block = dim3(MAX_THREADS_PER_BLOCK, 1); dim3 dim_grid = dim3(reset_pixels / dim_block.x, 1); update_statistic_gray_<<<dim_grid, dim_block>>>(p_val, params_mu, params_sigma, frame_width, left, right, top, bottom, pup); } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // Структурный элемент - прямоугольник 3х3 __global__ void morphology_(mask_type *mask, mask_type *mask_temp, int frame_width, int frame_height, unsigned int pixels) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ((i < frame_width) || (i >= pixels - frame_width) || (i % frame_width == 0) || (i % (frame_width - 1) == 0)) { mask_temp[i] = 0; } else { // Эрозия mask_temp[i] = mask[i - frame_width - 1] & mask[i - frame_width] & mask[i - frame_width + 1] & mask[i - 1] & mask[i] & mask[i + 1] & mask[i + frame_width - 1] & mask[i + frame_width] & mask[i + frame_width + 1]; __syncthreads(); // Наращивание if (mask_temp[i]) { mask[i - frame_width - 1] = 1; mask[i - frame_width] = 1; mask[i - frame_width + 1] = 1; mask[i - 1] = 1; mask[i] = 1; mask[i + 1] = 1; mask[i + frame_width - 1] = 1; mask[i + frame_width] = 1; mask[i + frame_width + 1] = 1; } else { mask[i] = 0; } } } //////////////////////////////////////////////////////////////////////////////// void morphology(mask_type *mask, mask_type *mask_temp, int frame_width, int frame_height, unsigned int pixels) { dim3 dim_block = dim3(MAX_THREADS_PER_BLOCK, 1); dim3 dim_grid = dim3(pixels / dim_block.x, 1); morphology_<<<dim_grid, dim_block>>>(mask, mask_temp, frame_width, frame_height, pixels); } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// #if 1 __global__ void segmentation_(mask_type *mask, reg_label *regions) { int tx = threadIdx.x; int ty = threadIdx.y; int mi = gridDim.x * blockIdx.y * blockDim.x * blockDim.y + blockIdx.x * blockDim.x + ty * gridDim.x * blockDim.x + tx; __shared__ mask_type data[SEGM_BLOCK_SIZE][SEGM_BLOCK_SIZE]; data[tx][ty] = mask[mi]; __syncthreads(); if ((tx == 0) && (ty == 0)) { mask_type s = 0; for (int i = 0; i < SEGM_BLOCK_SIZE; ++i) { for (int j = 0; j < SEGM_BLOCK_SIZE; ++j) { s += data[j][i]; } } int bi = blockIdx.x + blockIdx.y * gridDim.x; regions[bi] = (reg_label)s; } } //////////////////////////////////////////////////////////////////////////////// void segmentation(mask_type *mask, reg_label 
*regions, int frame_width, int frame_height) { dim3 dim_block = dim3(SEGM_BLOCK_SIZE, SEGM_BLOCK_SIZE); dim3 dim_grid = dim3(frame_width / dim_block.x, frame_height / dim_block.y); segmentation_<<<dim_grid, dim_block>>>(mask, regions); } #else __global__ void segmentation_(mask_type *mask, reg_label *regions) { int tx = threadIdx.x; int ty = threadIdx.y; int mi = CALC_I(); __shared__ mask_type data1[BACK_BLOCK_SIZE][BACK_BLOCK_SIZE]; data1[tx][ty] = mask[mi]; __syncthreads(); __shared__ mask_type data2[SEGM_BLOCK_SIZE][SEGM_BLOCK_SIZE]; if (!tx && !ty) { for (int y = 0; y < SEGM_BLOCK_SIZE; ++y) { for (int x = 0; x < SEGM_BLOCK_SIZE; ++x) { data2[x][y] = 0; } } } __syncthreads(); if (!tx) { int y = ty / SEGM_BLOCK_SIZE; for (int x = 0; x < BACK_BLOCK_SIZE; ++x) { data2[x / SEGM_BLOCK_SIZE][y] += data1[x][ty]; } } __syncthreads(); if (((tx % SEGM_BLOCK_SIZE) == 0) || ((ty % SEGM_BLOCK_SIZE) == 0)) { const int rx = tx / SEGM_BLOCK_SIZE; const int ry = ty / SEGM_BLOCK_SIZE; const int xk = BACK_BLOCK_SIZE / SEGM_BLOCK_SIZE; const int yk = BACK_BLOCK_SIZE / SEGM_BLOCK_SIZE; int bi = xk * gridDim.x * blockIdx.y * yk + blockIdx.x * xk + ry * gridDim.x * xk + rx; regions[bi] = (reg_label)data2[rx][ry]; } } //////////////////////////////////////////////////////////////////////////////// void segmentation(mask_type *mask, reg_label *regions, int frame_width, int frame_height) { dim3 dim_block = dim3(BACK_BLOCK_SIZE, BACK_BLOCK_SIZE); dim3 dim_grid = dim3(frame_width / dim_block.x, frame_height / dim_block.y); segmentation_<<<dim_grid, dim_block>>>(mask, regions); } #endif ////////////////////////////////////////////////////////////////////////////////
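Note on grid sizing: every launch wrapper in the file above pads the pixel count up to a multiple of MAX_THREADS_PER_BLOCK, which adds one extra, fully idle block whenever the count is already an exact multiple; the `if (i < reset_pixels)` guard inside each kernel keeps that harmless. For comparison, the conventional ceil-division idiom is sketched below; `blocks_for` is a hypothetical helper name, not something defined in this source.

// Hypothetical helper (illustration only, not part of the original file):
// the usual ceil-division grid sizing, equivalent to the padding above
// minus the occasional idle block.
static inline dim3 blocks_for(int n, int threads_per_block)
{
    return dim3((n + threads_per_block - 1) / threads_per_block, 1);
}

// Example use, mirroring update_statistic_gray() above:
//   int n = (right - left + 1) * (bottom - top + 1);
//   dim3 dim_block(MAX_THREADS_PER_BLOCK, 1);
//   dim3 dim_grid = blocks_for(n, MAX_THREADS_PER_BLOCK);
//   update_statistic_gray_<<<dim_grid, dim_block>>>(p_val, params_mu, params_sigma,
//       frame_width, left, right, top, bottom, pup);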
75ca5c6560278ae3090748be777eec9a28814c3b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /********************************************************************************* Implementing Breadth first search on CUDA using algorithm given in HiPC'07 paper "Accelerating Large Graph Algorithms on the GPU using CUDA" Copyright (c) 2008 International Institute of Information Technology - Hyderabad. All rights reserved. Permission to use, copy, modify and distribute this software and its documentation for educational purpose is hereby granted without fee, provided that the above copyright notice and this permission notice appear in all copies of this software and that you do not sell the software. THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR OTHERWISE. The CUDA Kernel for Applying BFS on a loaded Graph. Created By Pawan Harish **********************************************************************************/ #ifndef _KERNEL_H_ #define _KERNEL_H_ __global__ void Kernel( Node* g_graph_nodes, int* g_graph_edges, bool* g_graph_mask, bool* g_updating_graph_mask, bool *g_graph_visited, int8_t* g_cost, int no_of_nodes) { int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x; if( tid<no_of_nodes && g_graph_mask[tid]) { g_graph_mask[tid]=false; for(int i=g_graph_nodes[tid].starting; i<(g_graph_nodes[tid].no_of_edges + g_graph_nodes[tid].starting); i++) { int id = g_graph_edges[i]; if(!g_graph_visited[id]) { g_cost[id]=g_cost[tid]+1; g_updating_graph_mask[id]=true; } } } } #endif
75ca5c6560278ae3090748be777eec9a28814c3b.cu
/********************************************************************************* Implementing Breadth first search on CUDA using algorithm given in HiPC'07 paper "Accelerating Large Graph Algorithms on the GPU using CUDA" Copyright (c) 2008 International Institute of Information Technology - Hyderabad. All rights reserved. Permission to use, copy, modify and distribute this software and its documentation for educational purpose is hereby granted without fee, provided that the above copyright notice and this permission notice appear in all copies of this software and that you do not sell the software. THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR OTHERWISE. The CUDA Kernel for Applying BFS on a loaded Graph. Created By Pawan Harish **********************************************************************************/ #ifndef _KERNEL_H_ #define _KERNEL_H_ __global__ void Kernel( Node* g_graph_nodes, int* g_graph_edges, bool* g_graph_mask, bool* g_updating_graph_mask, bool *g_graph_visited, int8_t* g_cost, int no_of_nodes) { int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x; if( tid<no_of_nodes && g_graph_mask[tid]) { g_graph_mask[tid]=false; for(int i=g_graph_nodes[tid].starting; i<(g_graph_nodes[tid].no_of_edges + g_graph_nodes[tid].starting); i++) { int id = g_graph_edges[i]; if(!g_graph_visited[id]) { g_cost[id]=g_cost[tid]+1; g_updating_graph_mask[id]=true; } } } } #endif
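The Kernel above only expands the current frontier into g_updating_graph_mask. In the HiPC'07 scheme it is driven together with a second kernel and a host loop that promote the updated mask to the next frontier and detect termination. A minimal sketch of that companion step follows; the names Kernel2, g_over, and the d_* buffers are hypothetical (they do not appear in this file), and MAX_THREADS_PER_BLOCK is assumed to be defined where the kernels are compiled, as in the kernel above.

// Sketch of the companion frontier-swap kernel (hypothetical, for illustration):
__global__ void Kernel2(bool* g_graph_mask, bool* g_updating_graph_mask,
                        bool* g_graph_visited, bool* g_over, int no_of_nodes)
{
    int tid = blockIdx.x * MAX_THREADS_PER_BLOCK + threadIdx.x;
    if (tid < no_of_nodes && g_updating_graph_mask[tid])
    {
        g_graph_mask[tid] = true;       // node joins the next frontier
        g_graph_visited[tid] = true;    // and is marked visited
        g_updating_graph_mask[tid] = false;
        *g_over = true;                 // signals the host that another sweep is needed
    }
}

// Host-side driver (sketch, hypothetical buffer names):
//   bool over;
//   do {
//       over = false;
//       cudaMemcpy(d_over, &over, sizeof(bool), cudaMemcpyHostToDevice);
//       Kernel <<<grid, threads>>>(d_graph_nodes, d_graph_edges, d_graph_mask,
//                                  d_updating_graph_mask, d_graph_visited, d_cost, no_of_nodes);
//       Kernel2<<<grid, threads>>>(d_graph_mask, d_updating_graph_mask,
//                                  d_graph_visited, d_over, no_of_nodes);
//       cudaMemcpy(&over, d_over, sizeof(bool), cudaMemcpyDeviceToHost);
//   } while (over);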
cc3bc1b477f67c3d1b1f647064bf8170817e9385.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hertz_constants.h" #include "hertz_cudaneighlist.h" #include "pair_interaction.h" #include "framework.h" #include "thrust/scan.h" #ifdef TRACE #warning TRACE enabled: timing will not be accurate #include "cuPrintf.hip" #endif #ifndef MAX_GRID_DIM #error You need to #define MAX_GRID_DIM (see Makefile.config) #endif dim3 get_grid(int nelements, int block_size=BLOCK_SIZE) { int nx = (nelements + block_size - 1) / block_size; if (nx < MAX_GRID_DIM) { return dim3(nx, 1, 1); } int ny = (nx + MAX_GRID_DIM - 1) / MAX_GRID_DIM; if (ny < MAX_GRID_DIM) { return dim3(MAX_GRID_DIM, ny, 1); } assert(false); } __device__ int get_gid() { return threadIdx.x + (blockIdx.x * blockDim.x) + (blockIdx.y * blockDim.x * gridDim.x); } // -------------------------------------------------------------------------- // UNPACK PER-PARTICLE DATA // -------------------------------------------------------------------------- __global__ void unpack_ro_data( int K, int *valid, int *dati, int *datj, double *radius, double *radiusi, double *radiusj, double *mass, double *massi, double *massj, int *type, int *typei, int *typej ) { int gid = get_gid(); if (gid < K && valid[gid]) { int i = dati[gid]; int j = datj[gid]; radiusi[gid] = radius[i]; radiusj[gid] = radius[j]; massi[gid] = mass[i]; massj[gid] = mass[j]; typei[gid] = type[i]; typej[gid] = type[j]; } } __global__ void unpack_reload_data( int K, int *valid, int *dati, int *datj, double *x, double *xi, double *xj, double *v, double *vi, double *vj, double *omega, double *omegai, double *omegaj ) { int gid = get_gid(); if (gid < K && valid[gid]) { int i = dati[gid]; int j = datj[gid]; xi[(gid*3)+0] = x[(i*3)+0]; xj[(gid*3)+0] = x[(j*3)+0]; xi[(gid*3)+1] = x[(i*3)+1]; xj[(gid*3)+1] = x[(j*3)+1]; xi[(gid*3)+2] = x[(i*3)+2]; xj[(gid*3)+2] = x[(j*3)+2]; vi[(gid*3)+0] = v[(i*3)+0]; vj[(gid*3)+0] = v[(j*3)+0]; vi[(gid*3)+1] = v[(i*3)+1]; vj[(gid*3)+1] = v[(j*3)+1]; vi[(gid*3)+2] = v[(i*3)+2]; vj[(gid*3)+2] = v[(j*3)+2]; omegai[(gid*3)+0] = omega[(i*3)+0]; omegaj[(gid*3)+0] = omega[(j*3)+0]; omegai[(gid*3)+1] = omega[(i*3)+1]; omegaj[(gid*3)+1] = omega[(j*3)+1]; omegai[(gid*3)+2] = omega[(i*3)+2]; omegaj[(gid*3)+2] = omega[(j*3)+2]; } } __global__ void test( //inputs int K, int *valid, double *xi, double *xj, double *radiusi, double *radiusj, //output int *filter, int *filtermiss ) { int gid = get_gid(); if (gid < K && valid[gid]) { // del is the vector from j to i double delx = xi[(gid*3)+0] - xj[(gid*3)+0]; double dely = xi[(gid*3)+1] - xj[(gid*3)+1]; double delz = xi[(gid*3)+2] - xj[(gid*3)+2]; double rsq = delx*delx + dely*dely + delz*delz; double radsum = radiusi[gid] + radiusj[gid]; filter[gid] = (rsq < radsum*radsum) ? 1 : 0; filtermiss[gid] = (rsq < radsum*radsum) ? 
0 : 1; } } __global__ void mksubset( int K, int *filter, int *filtermiss, int *offset, int *offsetmiss, //output int *hit, int *miss ) { int gid = get_gid(); if (gid < K && filter[gid]) { hit[offset[gid]] = gid; } if (gid < K && filtermiss[gid]) { miss[offsetmiss[gid]] = gid; } } __global__ void anticompute( //inputs int NMISS, int *miss, //outputs double *shear ) { int gid = get_gid(); if (gid < NMISS) { int idx = miss[gid]; shear[(idx*3) ] = 0.0; shear[(idx*3)+1] = 0.0; shear[(idx*3)+2] = 0.0; } } __global__ void compute( //inputs int NHIT, int *hit, #ifdef TRACE int *dati, int *datj, #endif double *xi, double *xj, double *vi, double *vj, double *omegai, double *omegaj, double *radiusi, double *radiusj, double *massi, double *massj, int *typei, int *typej, //inouts double *fdelta, double *tdeltai, double *tdeltaj, double *shear ) { int gid = get_gid(); if (gid < NHIT) { int idx = hit[gid]; pair_interaction( #ifdef TRACE dati[idx], datj[idx], #endif &xi[idx*3], &xj[idx*3], &vi[idx*3], &vj[idx*3], &omegai[idx*3], &omegaj[idx*3], radiusi[idx], radiusj[idx], massi[idx], massj[idx], typei[idx], typej[idx], &shear[idx*3], &fdelta[idx*3], /*fdeltaj is*/NULL, &tdeltai[idx*3], &tdeltaj[idx*3] ); } } __global__ void collect( //inputs int N, double *fdelta, double *tdeltai, double *tdeltaj, int *off, int *len, #if HALFNL int *tad, int *ffo, int *nel, #endif //inouts double *force, double *torque ) { int gid = get_gid(); double fsum[3] = {0,0,0}; double tsum[3] = {0,0,0}; if (gid < N) { int offset = off[gid]; for (int k=0; k<len[gid]; k++) { int idx = offset+k; fsum[0] += fdelta[(idx*3)+0]; fsum[1] += fdelta[(idx*3)+1]; fsum[2] += fdelta[(idx*3)+2]; tsum[0] += tdeltai[(idx*3)+0]; tsum[1] += tdeltai[(idx*3)+1]; tsum[2] += tdeltai[(idx*3)+2]; } #if HALFNL offset = ffo[gid]; for (int k=0; k<nel[gid]; k++) { int idx = tad[offset+k]; fsum[0] -= fdelta[(idx*3)+0]; fsum[1] -= fdelta[(idx*3)+1]; fsum[2] -= fdelta[(idx*3)+2]; tsum[0] += tdeltaj[(idx*3)+0]; tsum[1] += tdeltaj[(idx*3)+1]; tsum[2] += tdeltaj[(idx*3)+2]; } #endif force[(gid*3)] += fsum[0]; force[(gid*3)+1] += fsum[1]; force[(gid*3)+2] += fsum[2]; torque[(gid*3)] += tsum[0]; torque[(gid*3)+1] += tsum[1]; torque[(gid*3)+2] += tsum[2]; } } using namespace std; // DEVICE STRUCTURES // INPUTS // packed // unpacked(i) // unpacked(j) double *d_x; double *d_xi; double *d_xj; // ] reload double *d_v; double *d_vi; double *d_vj; // ] double *d_omega; double *d_omegai; double *d_omegaj; // ] double *d_radius; double *d_radiusi; double *d_radiusj; // ] ro double *d_mass; double *d_massi; double *d_massj; // ] int *d_type; int *d_typei; int *d_typej; // ] // OUTPUTS // packed // unpacked(i) // unpacked(j) double *d_force; double *d_fdelta; double *d_torque; double *d_tdeltai; double *d_tdeltaj; // d_shear in d_nl // SUBSET int *d_filter; int *d_filtermiss; int *d_offset; int *d_offsetmiss; int *d_hit; int *d_miss; void no_cuda_error(const char *errmsg) { hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("[ERROR] %s\n", errmsg); printf("[ERROR] %d: %s\n", err, hipGetErrorString(err)); size_t free; size_t total; if (hipMemGetInfo(&free, &total) == hipSuccess) { printf("[ERROR] mem free=%zubytes total=%zubytes\n", free, total); } exit(1); } } #define NLEN(type,arity) (nparticles*arity*sizeof(type)) #define KLEN(type,arity) (nneighbors*arity*sizeof(type)) void init_dev_structures(int nparticles, int nneighbors) { //packed hipMalloc((void **)&d_x, NLEN(double,3)); hipMalloc((void **)&d_v, NLEN(double,3)); hipMalloc((void **)&d_omega, 
NLEN(double,3)); hipMalloc((void **)&d_radius, NLEN(double,1)); hipMalloc((void **)&d_mass, NLEN(double,1)); hipMalloc((void **)&d_type, NLEN(int, 1)); //unpacked(i) hipMalloc((void **)&d_xi, KLEN(double,3)); hipMalloc((void **)&d_vi, KLEN(double,3)); hipMalloc((void **)&d_omegai, KLEN(double,3)); hipMalloc((void **)&d_radiusi, KLEN(double,1)); hipMalloc((void **)&d_massi, KLEN(double,1)); hipMalloc((void **)&d_typei, KLEN(int ,1)); //unpacked(j) hipMalloc((void **)&d_xj, KLEN(double,3)); hipMalloc((void **)&d_vj, KLEN(double,3)); hipMalloc((void **)&d_omegaj, KLEN(double,3)); hipMalloc((void **)&d_radiusj, KLEN(double,1)); hipMalloc((void **)&d_massj, KLEN(double,1)); hipMalloc((void **)&d_typej, KLEN(int ,1)); //outputs hipMalloc((void **)&d_force, NLEN(double,3)); hipMalloc((void **)&d_torque, NLEN(double,3)); hipMalloc((void **)&d_fdelta, KLEN(double,3)); hipMalloc((void **)&d_tdeltai, KLEN(double,3)); hipMalloc((void **)&d_tdeltaj, KLEN(double,3)); //subset hipMalloc((void **)&d_filter, KLEN(int,1)); hipMalloc((void **)&d_offset, KLEN(int,1)); hipMalloc((void **)&d_hit, KLEN(int,1)); hipMalloc((void **)&d_filtermiss, KLEN(int,1)); hipMalloc((void **)&d_offsetmiss, KLEN(int,1)); hipMalloc((void **)&d_miss, KLEN(int,1)); } void free_dev_structures() { //packed hipFree(d_x); hipFree(d_v); hipFree(d_omega); hipFree(d_radius); hipFree(d_mass); hipFree(d_type); //unpacked(i) hipFree(d_xi); hipFree(d_vi); hipFree(d_omegai); hipFree(d_radiusi); hipFree(d_massi); hipFree(d_typei); //unpacked(j) hipFree(d_xj); hipFree(d_vj); hipFree(d_omegaj); hipFree(d_radiusj); hipFree(d_massj); hipFree(d_typej); //outputs hipFree(d_force); hipFree(d_torque); hipFree(d_fdelta); hipFree(d_tdeltai); hipFree(d_tdeltaj); //subset hipFree(d_filter); hipFree(d_offset); hipFree(d_hit); hipFree(d_miss); } void run(struct params *input, int num_iter) { NeighListLike *nl = new NeighListLike(input); int block_size = BLOCK_SIZE; int nparticles = input->nnode; dim3 tpa_grid_size = get_grid(nparticles); int nneighbors = nl->maxpage * nl->pgsize; dim3 tpn_grid_size = get_grid(nneighbors); #if DEBUG printf("block_size = %d\n", block_size); printf("nparticles = %d\n", nparticles); printf("nneighbors = %d -> %d (maxpage=%d, pgsize=%d)\n", input->nedge, nneighbors, nl->maxpage, nl->pgsize); printf("tpa_grid = { %d, %d, %d }\n", tpa_grid_size.x, tpa_grid_size.y, tpa_grid_size.z); printf("tpn_grid = { %d, %d, %d }\n", tpn_grid_size.x, tpn_grid_size.y, tpn_grid_size.z); #endif //ONE-TIME COSTS one_time.push_back(SimpleTimer("hertz_consts")); one_time.back().start(); setup_hertz_constants(input); one_time.back().stop_and_add_to_total(); no_cuda_error("hertz_constants"); one_time.push_back(SimpleTimer("init_nl")); one_time.back().start(); HertzCudaNeighList *d_nl = new HertzCudaNeighList( block_size, input->nnode, nl->maxpage, nl->pgsize); one_time.back().stop_and_add_to_total(); no_cuda_error("init_nl"); one_time.push_back(SimpleTimer("malloc")); one_time.back().start(); init_dev_structures(nparticles, nneighbors); one_time.back().stop_and_add_to_total(); no_cuda_error("init_dev_structures"); one_time.push_back(SimpleTimer("memcpy")); one_time.back().start(); hipMemcpy(d_force, input->force, NLEN(double,3), hipMemcpyHostToDevice); hipMemcpy(d_torque, input->torque, NLEN(double,3), hipMemcpyHostToDevice); one_time.back().stop_and_add_to_total(); no_cuda_error("memcpy"); //NL-REFRESH COSTS nl_refresh.push_back(SimpleTimer("nl_reload")); nl_refresh.back().start(); d_nl->reload( nl->numneigh, nl->firstneigh, nl->pages, nl->maxpage, 
nl->dpages, nl->tpages); nl_refresh.back().stop_and_add_to_total(); no_cuda_error("nl_reload"); nl_refresh.push_back(SimpleTimer("memcpy_unpack")); nl_refresh.back().start(); hipMemcpy(d_radius, input->radius, NLEN(double,1), hipMemcpyHostToDevice); hipMemcpy(d_mass, input->mass, NLEN(double,1), hipMemcpyHostToDevice); hipMemcpy(d_type, input->type, NLEN(int,1), hipMemcpyHostToDevice); nl_refresh.back().stop_and_add_to_total(); no_cuda_error("memcpy_unpack"); nl_refresh.push_back(SimpleTimer("unpack_ro")); nl_refresh.back().start(); hipLaunchKernelGGL(( unpack_ro_data), dim3(tpn_grid_size), dim3(block_size), 0, 0, nneighbors, d_nl->d_valid, d_nl->d_dati, d_nl->d_neighidx, d_radius, d_radiusi, d_radiusj, d_mass, d_massi, d_massj, d_type, d_typei, d_typej ); hipDeviceSynchronize(); nl_refresh.back().stop_and_add_to_total(); no_cuda_error("unpack_ro"); // PER-ITER COSTS per_iter.push_back(SimpleTimer("memcpy_reload")); per_iter.push_back(SimpleTimer("unpack_reload")); per_iter.push_back(SimpleTimer("memset_delta")); per_iter.push_back(SimpleTimer("compute")); per_iter.push_back(SimpleTimer("collect")); per_iter.push_back(SimpleTimer("memcpy_results")); per_iter.push_back(SimpleTimer("mksubset")); per_iter.push_back(SimpleTimer("anticompute")); for (int i=0; i<(int)per_iter.size(); i++) { per_iter_timings.push_back(vector<double>(num_iter)); } double *force = new double[nparticles*3]; double *torque = new double[nparticles*3]; for (int run=0; run<num_iter; run++) { //make copies nl->restore(); d_nl->load_shear(nl->dpages); no_cuda_error("make_copies"); end_to_end.start(); //load data onto device per_iter[0].start(); hipMemcpy(d_x, input->x, NLEN(double,3), hipMemcpyHostToDevice); hipMemcpy(d_v, input->v, NLEN(double,3), hipMemcpyHostToDevice); hipMemcpy(d_omega, input->omega, NLEN(double,3), hipMemcpyHostToDevice); hipMemcpy(d_force, input->force, NLEN(double,3), hipMemcpyHostToDevice); hipMemcpy(d_torque, input->torque, NLEN(double,3), hipMemcpyHostToDevice); double d0 = per_iter[0].stop_and_add_to_total(); per_iter_timings[0][run] = d0; no_cuda_error("memcpy_reload"); //TODO: check if realloc of unpacked ij data necessary per_iter[1].start(); hipLaunchKernelGGL(( unpack_reload_data), dim3(tpn_grid_size), dim3(block_size), 0, 0, nneighbors, d_nl->d_valid, d_nl->d_dati, d_nl->d_neighidx, d_x, d_xi, d_xj, d_v, d_vi, d_vj, d_omega, d_omegai, d_omegaj ); hipDeviceSynchronize(); double d1 = per_iter[1].stop_and_add_to_total(); per_iter_timings[1][run] = d1; no_cuda_error("unpack_reload"); per_iter[2].start(); hipMemset(d_fdelta, 0, KLEN(double,3)); hipMemset(d_tdeltai, 0, KLEN(double,3)); hipMemset(d_tdeltaj, 0, KLEN(double,3)); double d2 = per_iter[2].stop_and_add_to_total(); per_iter_timings[2][run] = d2; no_cuda_error("memset_delta"); //make subset per_iter[6].start(); hipMemset(d_filter, 0, KLEN(int,1)); hipMemset(d_filtermiss, 0, KLEN(int,1)); hipLaunchKernelGGL(( test), dim3(tpn_grid_size), dim3(block_size), 0, 0, //inputs nneighbors, d_nl->d_valid, d_xi, d_xj, d_radiusi, d_radiusj, //outputs d_filter, d_filtermiss); thrust::device_ptr<int> thrust_filter(d_filter); thrust::device_ptr<int> thrust_offset(d_offset); thrust::exclusive_scan(thrust_filter, thrust_filter + nneighbors, thrust_offset); thrust::device_ptr<int> thrust_filtermiss(d_filtermiss); thrust::device_ptr<int> thrust_offsetmiss(d_offsetmiss); thrust::exclusive_scan(thrust_filtermiss, thrust_filtermiss + nneighbors, thrust_offsetmiss); hipLaunchKernelGGL(( mksubset), dim3(tpn_grid_size), dim3(block_size), 0, 0, //inputs 
nneighbors, d_filter, d_filtermiss, d_offset, d_offsetmiss, //output d_hit, d_miss); int nhit; hipMemcpy(&nhit, &(d_offset[nneighbors-1]), sizeof(int), hipMemcpyDeviceToHost); int nmiss; hipMemcpy(&nmiss, &(d_offsetmiss[nneighbors-1]), sizeof(int), hipMemcpyDeviceToHost); double d6 = per_iter[6].stop_and_add_to_total(); per_iter_timings[6][run] = d6; dim3 nhit_grid_size = get_grid(nhit); dim3 nmiss_grid_size = get_grid(nmiss); no_cuda_error("mksubset"); #if DEBUG printf("nhit = %d\n", nhit); printf("nhit_grid = { %d, %d, %d }\n", nhit_grid_size.x, nhit_grid_size.y, nhit_grid_size.z); printf("nmiss = %d\n", nmiss); printf("nmiss_grid = { %d, %d, %d }\n", nmiss_grid_size.x, nmiss_grid_size.y, nmiss_grid_size.z); #endif #ifdef PARANOID { int *valid = new int[nneighbors]; double *xi = new double[nneighbors*3]; double *xj = new double[nneighbors*3]; double *radiusi = new double[nneighbors]; double *radiusj = new double[nneighbors]; hipMemcpy(valid, d_nl->d_valid, KLEN(int,1), hipMemcpyDeviceToHost); hipMemcpy(xi, d_xi, KLEN(double,3), hipMemcpyDeviceToHost); hipMemcpy(xj, d_xj, KLEN(double,3), hipMemcpyDeviceToHost); hipMemcpy(radiusi, d_radiusi, KLEN(double,1), hipMemcpyDeviceToHost); hipMemcpy(radiusj, d_radiusj, KLEN(double,1), hipMemcpyDeviceToHost); //emulate test kernel int *filter = new int[nneighbors]; int *filtermiss = new int[nneighbors]; for (int gid=0; gid<nneighbors; gid++) { if (valid[gid]) { double delx = xi[(gid*3)+0] - xj[(gid*3)+0]; double dely = xi[(gid*3)+1] - xj[(gid*3)+1]; double delz = xi[(gid*3)+2] - xj[(gid*3)+2]; double rsq = delx*delx + dely*dely + delz*delz; double radsum = radiusi[gid] + radiusj[gid]; filter[gid] = (rsq < radsum*radsum) ? 1 : 0; filtermiss[gid] = (rsq < radsum*radsum) ? 0 : 1; } } //emulate exclusive scan int *offset = new int[nneighbors]; int *offsetmiss = new int[nneighbors]; offset[0] = 0; offsetmiss[0] = 0; for (int i=1; i<nneighbors; i++) { offset[i] = offset[i-1] + filter[i-1]; offsetmiss[i] = offsetmiss[i-1] + filtermiss[i-1]; } //emuate mksubset kernel int *hit = new int[nneighbors]; int *miss = new int[nneighbors]; for (int gid=0; gid<nneighbors; gid++) { if (filter[gid]) { hit[offset[gid]] = gid; } else { miss[offsetmiss[gid]] = gid; } } //test int *gpu_filter = new int[nneighbors]; int *gpu_offset = new int[nneighbors]; int *gpu_hit = new int[nneighbors]; int *gpu_filtermiss = new int[nneighbors]; int *gpu_offsetmiss = new int[nneighbors]; int *gpu_miss = new int[nneighbors]; hipMemcpy(gpu_filter, d_filter, KLEN(int,1), hipMemcpyDeviceToHost); hipMemcpy(gpu_offset, d_offset, KLEN(int,1), hipMemcpyDeviceToHost); hipMemcpy(gpu_hit, d_hit, KLEN(int,1), hipMemcpyDeviceToHost); hipMemcpy(gpu_filtermiss, d_filtermiss, KLEN(int,1), hipMemcpyDeviceToHost); hipMemcpy(gpu_offsetmiss, d_offsetmiss, KLEN(int,1), hipMemcpyDeviceToHost); hipMemcpy(gpu_miss, d_miss, KLEN(int,1), hipMemcpyDeviceToHost); for (int i=0; i<nneighbors; i++) { assert(filter[i] == gpu_filter[i]); assert(offset[i] == gpu_offset[i]); assert(filtermiss[i] == gpu_filtermiss[i]); assert(offsetmiss[i] == gpu_offsetmiss[i]); } printf("nhit=%d expected=%d\n", nhit, offset[nneighbors-1]); assert(nhit == offset[nneighbors-1]); for (int i=0; i<nhit; i++) { if (hit[i] != gpu_hit[i]) { printf("ERROR hit[%d] = %d gpu_hit[] = %d\n", i, hit[i], gpu_hit[i]); } assert(hit[i] == gpu_hit[i]); assert(miss[i] == gpu_miss[i]); } delete[] valid; delete[] xi; delete[] xj; delete[] radiusi; delete[] radiusj; delete[] filter; delete[] offset; delete[] hit; delete[] filtermiss; delete[] offsetmiss; 
delete[] miss; } #endif per_iter[7].start(); hipLaunchKernelGGL(( anticompute), dim3(nmiss_grid_size), dim3(block_size), 0, 0, nmiss, d_miss, d_nl->d_shear ); hipDeviceSynchronize(); double d7 = per_iter[7].stop_and_add_to_total(); per_iter_timings[7][run] = d7; no_cuda_error("compute"); per_iter[3].start(); #ifdef TRACE cudaPrintfInit(); #endif hipLaunchKernelGGL(( compute), dim3(nhit_grid_size), dim3(block_size), 0, 0, nhit, d_hit, #ifdef TRACE d_nl->d_dati, d_nl->d_neighidx, #endif d_xi, d_xj, d_vi, d_vj, d_omegai, d_omegaj, d_radiusi, d_radiusj, d_massi, d_massj, d_typei, d_typej, //outputs d_fdelta, d_tdeltai, d_tdeltaj, d_nl->d_shear ); hipDeviceSynchronize(); double d3 = per_iter[3].stop_and_add_to_total(); per_iter_timings[3][run] = d3; no_cuda_error("compute"); #ifdef TRACE cudaPrintfDisplay(stdout, true); cudaPrintfEnd(); #endif per_iter[4].start(); hipLaunchKernelGGL(( collect), dim3(tpa_grid_size), dim3(block_size), 0, 0, nparticles, d_fdelta, d_tdeltai, d_tdeltaj, d_nl->d_offset, d_nl->d_numneigh, #if HALFNL d_nl->d_tad, d_nl->d_ffo, d_nl->d_nel, #endif d_force, d_torque); hipDeviceSynchronize(); double d4 = per_iter[4].stop_and_add_to_total(); per_iter_timings[4][run] = d4; no_cuda_error("collect"); //offload data from device //(see note on shear history below) per_iter[5].start(); hipMemcpy(force, d_force, NLEN(double,3), hipMemcpyDeviceToHost); hipMemcpy(torque, d_torque, NLEN(double,3), hipMemcpyDeviceToHost); double d5 = per_iter[5].stop_and_add_to_total(); per_iter_timings[5][run] = d5; no_cuda_error("memcpy_results"); double dend = end_to_end.stop_and_add_to_total(); end_to_end_timings.push_back(dend); //NB: we assume that shear history is *not* required from the device //so this cost is not included in "memcpy_results" d_nl->unload_shear(nl->dpages); check_result(input, nl, force, torque, nl->firstdouble, /*threshold=*/0.5, /*verbose=*/false, /*die_on_flag=*/true); } delete[] force; delete[] torque; free_dev_structures(); no_cuda_error("free_dev_structures"); }
cc3bc1b477f67c3d1b1f647064bf8170817e9385.cu
#include "hertz_constants.h" #include "hertz_cudaneighlist.h" #include "pair_interaction.h" #include "framework.h" #include "thrust/scan.h" #ifdef TRACE #warning TRACE enabled: timing will not be accurate #include "cuPrintf.cu" #endif #ifndef MAX_GRID_DIM #error You need to #define MAX_GRID_DIM (see Makefile.config) #endif dim3 get_grid(int nelements, int block_size=BLOCK_SIZE) { int nx = (nelements + block_size - 1) / block_size; if (nx < MAX_GRID_DIM) { return dim3(nx, 1, 1); } int ny = (nx + MAX_GRID_DIM - 1) / MAX_GRID_DIM; if (ny < MAX_GRID_DIM) { return dim3(MAX_GRID_DIM, ny, 1); } assert(false); } __device__ int get_gid() { return threadIdx.x + (blockIdx.x * blockDim.x) + (blockIdx.y * blockDim.x * gridDim.x); } // -------------------------------------------------------------------------- // UNPACK PER-PARTICLE DATA // -------------------------------------------------------------------------- __global__ void unpack_ro_data( int K, int *valid, int *dati, int *datj, double *radius, double *radiusi, double *radiusj, double *mass, double *massi, double *massj, int *type, int *typei, int *typej ) { int gid = get_gid(); if (gid < K && valid[gid]) { int i = dati[gid]; int j = datj[gid]; radiusi[gid] = radius[i]; radiusj[gid] = radius[j]; massi[gid] = mass[i]; massj[gid] = mass[j]; typei[gid] = type[i]; typej[gid] = type[j]; } } __global__ void unpack_reload_data( int K, int *valid, int *dati, int *datj, double *x, double *xi, double *xj, double *v, double *vi, double *vj, double *omega, double *omegai, double *omegaj ) { int gid = get_gid(); if (gid < K && valid[gid]) { int i = dati[gid]; int j = datj[gid]; xi[(gid*3)+0] = x[(i*3)+0]; xj[(gid*3)+0] = x[(j*3)+0]; xi[(gid*3)+1] = x[(i*3)+1]; xj[(gid*3)+1] = x[(j*3)+1]; xi[(gid*3)+2] = x[(i*3)+2]; xj[(gid*3)+2] = x[(j*3)+2]; vi[(gid*3)+0] = v[(i*3)+0]; vj[(gid*3)+0] = v[(j*3)+0]; vi[(gid*3)+1] = v[(i*3)+1]; vj[(gid*3)+1] = v[(j*3)+1]; vi[(gid*3)+2] = v[(i*3)+2]; vj[(gid*3)+2] = v[(j*3)+2]; omegai[(gid*3)+0] = omega[(i*3)+0]; omegaj[(gid*3)+0] = omega[(j*3)+0]; omegai[(gid*3)+1] = omega[(i*3)+1]; omegaj[(gid*3)+1] = omega[(j*3)+1]; omegai[(gid*3)+2] = omega[(i*3)+2]; omegaj[(gid*3)+2] = omega[(j*3)+2]; } } __global__ void test( //inputs int K, int *valid, double *xi, double *xj, double *radiusi, double *radiusj, //output int *filter, int *filtermiss ) { int gid = get_gid(); if (gid < K && valid[gid]) { // del is the vector from j to i double delx = xi[(gid*3)+0] - xj[(gid*3)+0]; double dely = xi[(gid*3)+1] - xj[(gid*3)+1]; double delz = xi[(gid*3)+2] - xj[(gid*3)+2]; double rsq = delx*delx + dely*dely + delz*delz; double radsum = radiusi[gid] + radiusj[gid]; filter[gid] = (rsq < radsum*radsum) ? 1 : 0; filtermiss[gid] = (rsq < radsum*radsum) ? 
0 : 1; } } __global__ void mksubset( int K, int *filter, int *filtermiss, int *offset, int *offsetmiss, //output int *hit, int *miss ) { int gid = get_gid(); if (gid < K && filter[gid]) { hit[offset[gid]] = gid; } if (gid < K && filtermiss[gid]) { miss[offsetmiss[gid]] = gid; } } __global__ void anticompute( //inputs int NMISS, int *miss, //outputs double *shear ) { int gid = get_gid(); if (gid < NMISS) { int idx = miss[gid]; shear[(idx*3) ] = 0.0; shear[(idx*3)+1] = 0.0; shear[(idx*3)+2] = 0.0; } } __global__ void compute( //inputs int NHIT, int *hit, #ifdef TRACE int *dati, int *datj, #endif double *xi, double *xj, double *vi, double *vj, double *omegai, double *omegaj, double *radiusi, double *radiusj, double *massi, double *massj, int *typei, int *typej, //inouts double *fdelta, double *tdeltai, double *tdeltaj, double *shear ) { int gid = get_gid(); if (gid < NHIT) { int idx = hit[gid]; pair_interaction( #ifdef TRACE dati[idx], datj[idx], #endif &xi[idx*3], &xj[idx*3], &vi[idx*3], &vj[idx*3], &omegai[idx*3], &omegaj[idx*3], radiusi[idx], radiusj[idx], massi[idx], massj[idx], typei[idx], typej[idx], &shear[idx*3], &fdelta[idx*3], /*fdeltaj is*/NULL, &tdeltai[idx*3], &tdeltaj[idx*3] ); } } __global__ void collect( //inputs int N, double *fdelta, double *tdeltai, double *tdeltaj, int *off, int *len, #if HALFNL int *tad, int *ffo, int *nel, #endif //inouts double *force, double *torque ) { int gid = get_gid(); double fsum[3] = {0,0,0}; double tsum[3] = {0,0,0}; if (gid < N) { int offset = off[gid]; for (int k=0; k<len[gid]; k++) { int idx = offset+k; fsum[0] += fdelta[(idx*3)+0]; fsum[1] += fdelta[(idx*3)+1]; fsum[2] += fdelta[(idx*3)+2]; tsum[0] += tdeltai[(idx*3)+0]; tsum[1] += tdeltai[(idx*3)+1]; tsum[2] += tdeltai[(idx*3)+2]; } #if HALFNL offset = ffo[gid]; for (int k=0; k<nel[gid]; k++) { int idx = tad[offset+k]; fsum[0] -= fdelta[(idx*3)+0]; fsum[1] -= fdelta[(idx*3)+1]; fsum[2] -= fdelta[(idx*3)+2]; tsum[0] += tdeltaj[(idx*3)+0]; tsum[1] += tdeltaj[(idx*3)+1]; tsum[2] += tdeltaj[(idx*3)+2]; } #endif force[(gid*3)] += fsum[0]; force[(gid*3)+1] += fsum[1]; force[(gid*3)+2] += fsum[2]; torque[(gid*3)] += tsum[0]; torque[(gid*3)+1] += tsum[1]; torque[(gid*3)+2] += tsum[2]; } } using namespace std; // DEVICE STRUCTURES // INPUTS // packed // unpacked(i) // unpacked(j) double *d_x; double *d_xi; double *d_xj; // ] reload double *d_v; double *d_vi; double *d_vj; // ] double *d_omega; double *d_omegai; double *d_omegaj; // ] double *d_radius; double *d_radiusi; double *d_radiusj; // ] ro double *d_mass; double *d_massi; double *d_massj; // ] int *d_type; int *d_typei; int *d_typej; // ] // OUTPUTS // packed // unpacked(i) // unpacked(j) double *d_force; double *d_fdelta; double *d_torque; double *d_tdeltai; double *d_tdeltaj; // d_shear in d_nl // SUBSET int *d_filter; int *d_filtermiss; int *d_offset; int *d_offsetmiss; int *d_hit; int *d_miss; void no_cuda_error(const char *errmsg) { cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("[ERROR] %s\n", errmsg); printf("[ERROR] %d: %s\n", err, cudaGetErrorString(err)); size_t free; size_t total; if (cudaMemGetInfo(&free, &total) == cudaSuccess) { printf("[ERROR] mem free=%zubytes total=%zubytes\n", free, total); } exit(1); } } #define NLEN(type,arity) (nparticles*arity*sizeof(type)) #define KLEN(type,arity) (nneighbors*arity*sizeof(type)) void init_dev_structures(int nparticles, int nneighbors) { //packed cudaMalloc((void **)&d_x, NLEN(double,3)); cudaMalloc((void **)&d_v, NLEN(double,3)); cudaMalloc((void **)&d_omega, 
NLEN(double,3)); cudaMalloc((void **)&d_radius, NLEN(double,1)); cudaMalloc((void **)&d_mass, NLEN(double,1)); cudaMalloc((void **)&d_type, NLEN(int, 1)); //unpacked(i) cudaMalloc((void **)&d_xi, KLEN(double,3)); cudaMalloc((void **)&d_vi, KLEN(double,3)); cudaMalloc((void **)&d_omegai, KLEN(double,3)); cudaMalloc((void **)&d_radiusi, KLEN(double,1)); cudaMalloc((void **)&d_massi, KLEN(double,1)); cudaMalloc((void **)&d_typei, KLEN(int ,1)); //unpacked(j) cudaMalloc((void **)&d_xj, KLEN(double,3)); cudaMalloc((void **)&d_vj, KLEN(double,3)); cudaMalloc((void **)&d_omegaj, KLEN(double,3)); cudaMalloc((void **)&d_radiusj, KLEN(double,1)); cudaMalloc((void **)&d_massj, KLEN(double,1)); cudaMalloc((void **)&d_typej, KLEN(int ,1)); //outputs cudaMalloc((void **)&d_force, NLEN(double,3)); cudaMalloc((void **)&d_torque, NLEN(double,3)); cudaMalloc((void **)&d_fdelta, KLEN(double,3)); cudaMalloc((void **)&d_tdeltai, KLEN(double,3)); cudaMalloc((void **)&d_tdeltaj, KLEN(double,3)); //subset cudaMalloc((void **)&d_filter, KLEN(int,1)); cudaMalloc((void **)&d_offset, KLEN(int,1)); cudaMalloc((void **)&d_hit, KLEN(int,1)); cudaMalloc((void **)&d_filtermiss, KLEN(int,1)); cudaMalloc((void **)&d_offsetmiss, KLEN(int,1)); cudaMalloc((void **)&d_miss, KLEN(int,1)); } void free_dev_structures() { //packed cudaFree(d_x); cudaFree(d_v); cudaFree(d_omega); cudaFree(d_radius); cudaFree(d_mass); cudaFree(d_type); //unpacked(i) cudaFree(d_xi); cudaFree(d_vi); cudaFree(d_omegai); cudaFree(d_radiusi); cudaFree(d_massi); cudaFree(d_typei); //unpacked(j) cudaFree(d_xj); cudaFree(d_vj); cudaFree(d_omegaj); cudaFree(d_radiusj); cudaFree(d_massj); cudaFree(d_typej); //outputs cudaFree(d_force); cudaFree(d_torque); cudaFree(d_fdelta); cudaFree(d_tdeltai); cudaFree(d_tdeltaj); //subset cudaFree(d_filter); cudaFree(d_offset); cudaFree(d_hit); cudaFree(d_miss); } void run(struct params *input, int num_iter) { NeighListLike *nl = new NeighListLike(input); int block_size = BLOCK_SIZE; int nparticles = input->nnode; dim3 tpa_grid_size = get_grid(nparticles); int nneighbors = nl->maxpage * nl->pgsize; dim3 tpn_grid_size = get_grid(nneighbors); #if DEBUG printf("block_size = %d\n", block_size); printf("nparticles = %d\n", nparticles); printf("nneighbors = %d -> %d (maxpage=%d, pgsize=%d)\n", input->nedge, nneighbors, nl->maxpage, nl->pgsize); printf("tpa_grid = { %d, %d, %d }\n", tpa_grid_size.x, tpa_grid_size.y, tpa_grid_size.z); printf("tpn_grid = { %d, %d, %d }\n", tpn_grid_size.x, tpn_grid_size.y, tpn_grid_size.z); #endif //ONE-TIME COSTS one_time.push_back(SimpleTimer("hertz_consts")); one_time.back().start(); setup_hertz_constants(input); one_time.back().stop_and_add_to_total(); no_cuda_error("hertz_constants"); one_time.push_back(SimpleTimer("init_nl")); one_time.back().start(); HertzCudaNeighList *d_nl = new HertzCudaNeighList( block_size, input->nnode, nl->maxpage, nl->pgsize); one_time.back().stop_and_add_to_total(); no_cuda_error("init_nl"); one_time.push_back(SimpleTimer("malloc")); one_time.back().start(); init_dev_structures(nparticles, nneighbors); one_time.back().stop_and_add_to_total(); no_cuda_error("init_dev_structures"); one_time.push_back(SimpleTimer("memcpy")); one_time.back().start(); cudaMemcpy(d_force, input->force, NLEN(double,3), cudaMemcpyHostToDevice); cudaMemcpy(d_torque, input->torque, NLEN(double,3), cudaMemcpyHostToDevice); one_time.back().stop_and_add_to_total(); no_cuda_error("memcpy"); //NL-REFRESH COSTS nl_refresh.push_back(SimpleTimer("nl_reload")); nl_refresh.back().start(); d_nl->reload( 
nl->numneigh, nl->firstneigh, nl->pages, nl->maxpage, nl->dpages, nl->tpages); nl_refresh.back().stop_and_add_to_total(); no_cuda_error("nl_reload"); nl_refresh.push_back(SimpleTimer("memcpy_unpack")); nl_refresh.back().start(); cudaMemcpy(d_radius, input->radius, NLEN(double,1), cudaMemcpyHostToDevice); cudaMemcpy(d_mass, input->mass, NLEN(double,1), cudaMemcpyHostToDevice); cudaMemcpy(d_type, input->type, NLEN(int,1), cudaMemcpyHostToDevice); nl_refresh.back().stop_and_add_to_total(); no_cuda_error("memcpy_unpack"); nl_refresh.push_back(SimpleTimer("unpack_ro")); nl_refresh.back().start(); unpack_ro_data<<<tpn_grid_size, block_size>>>( nneighbors, d_nl->d_valid, d_nl->d_dati, d_nl->d_neighidx, d_radius, d_radiusi, d_radiusj, d_mass, d_massi, d_massj, d_type, d_typei, d_typej ); cudaThreadSynchronize(); nl_refresh.back().stop_and_add_to_total(); no_cuda_error("unpack_ro"); // PER-ITER COSTS per_iter.push_back(SimpleTimer("memcpy_reload")); per_iter.push_back(SimpleTimer("unpack_reload")); per_iter.push_back(SimpleTimer("memset_delta")); per_iter.push_back(SimpleTimer("compute")); per_iter.push_back(SimpleTimer("collect")); per_iter.push_back(SimpleTimer("memcpy_results")); per_iter.push_back(SimpleTimer("mksubset")); per_iter.push_back(SimpleTimer("anticompute")); for (int i=0; i<(int)per_iter.size(); i++) { per_iter_timings.push_back(vector<double>(num_iter)); } double *force = new double[nparticles*3]; double *torque = new double[nparticles*3]; for (int run=0; run<num_iter; run++) { //make copies nl->restore(); d_nl->load_shear(nl->dpages); no_cuda_error("make_copies"); end_to_end.start(); //load data onto device per_iter[0].start(); cudaMemcpy(d_x, input->x, NLEN(double,3), cudaMemcpyHostToDevice); cudaMemcpy(d_v, input->v, NLEN(double,3), cudaMemcpyHostToDevice); cudaMemcpy(d_omega, input->omega, NLEN(double,3), cudaMemcpyHostToDevice); cudaMemcpy(d_force, input->force, NLEN(double,3), cudaMemcpyHostToDevice); cudaMemcpy(d_torque, input->torque, NLEN(double,3), cudaMemcpyHostToDevice); double d0 = per_iter[0].stop_and_add_to_total(); per_iter_timings[0][run] = d0; no_cuda_error("memcpy_reload"); //TODO: check if realloc of unpacked ij data necessary per_iter[1].start(); unpack_reload_data<<<tpn_grid_size, block_size>>>( nneighbors, d_nl->d_valid, d_nl->d_dati, d_nl->d_neighidx, d_x, d_xi, d_xj, d_v, d_vi, d_vj, d_omega, d_omegai, d_omegaj ); cudaThreadSynchronize(); double d1 = per_iter[1].stop_and_add_to_total(); per_iter_timings[1][run] = d1; no_cuda_error("unpack_reload"); per_iter[2].start(); cudaMemset(d_fdelta, 0, KLEN(double,3)); cudaMemset(d_tdeltai, 0, KLEN(double,3)); cudaMemset(d_tdeltaj, 0, KLEN(double,3)); double d2 = per_iter[2].stop_and_add_to_total(); per_iter_timings[2][run] = d2; no_cuda_error("memset_delta"); //make subset per_iter[6].start(); cudaMemset(d_filter, 0, KLEN(int,1)); cudaMemset(d_filtermiss, 0, KLEN(int,1)); test<<<tpn_grid_size, block_size>>>( //inputs nneighbors, d_nl->d_valid, d_xi, d_xj, d_radiusi, d_radiusj, //outputs d_filter, d_filtermiss); thrust::device_ptr<int> thrust_filter(d_filter); thrust::device_ptr<int> thrust_offset(d_offset); thrust::exclusive_scan(thrust_filter, thrust_filter + nneighbors, thrust_offset); thrust::device_ptr<int> thrust_filtermiss(d_filtermiss); thrust::device_ptr<int> thrust_offsetmiss(d_offsetmiss); thrust::exclusive_scan(thrust_filtermiss, thrust_filtermiss + nneighbors, thrust_offsetmiss); mksubset<<<tpn_grid_size, block_size>>>( //inputs nneighbors, d_filter, d_filtermiss, d_offset, d_offsetmiss, //output d_hit, 
d_miss); int nhit; cudaMemcpy(&nhit, &(d_offset[nneighbors-1]), sizeof(int), cudaMemcpyDeviceToHost); int nmiss; cudaMemcpy(&nmiss, &(d_offsetmiss[nneighbors-1]), sizeof(int), cudaMemcpyDeviceToHost); double d6 = per_iter[6].stop_and_add_to_total(); per_iter_timings[6][run] = d6; dim3 nhit_grid_size = get_grid(nhit); dim3 nmiss_grid_size = get_grid(nmiss); no_cuda_error("mksubset"); #if DEBUG printf("nhit = %d\n", nhit); printf("nhit_grid = { %d, %d, %d }\n", nhit_grid_size.x, nhit_grid_size.y, nhit_grid_size.z); printf("nmiss = %d\n", nmiss); printf("nmiss_grid = { %d, %d, %d }\n", nmiss_grid_size.x, nmiss_grid_size.y, nmiss_grid_size.z); #endif #ifdef PARANOID { int *valid = new int[nneighbors]; double *xi = new double[nneighbors*3]; double *xj = new double[nneighbors*3]; double *radiusi = new double[nneighbors]; double *radiusj = new double[nneighbors]; cudaMemcpy(valid, d_nl->d_valid, KLEN(int,1), cudaMemcpyDeviceToHost); cudaMemcpy(xi, d_xi, KLEN(double,3), cudaMemcpyDeviceToHost); cudaMemcpy(xj, d_xj, KLEN(double,3), cudaMemcpyDeviceToHost); cudaMemcpy(radiusi, d_radiusi, KLEN(double,1), cudaMemcpyDeviceToHost); cudaMemcpy(radiusj, d_radiusj, KLEN(double,1), cudaMemcpyDeviceToHost); //emulate test kernel int *filter = new int[nneighbors]; int *filtermiss = new int[nneighbors]; for (int gid=0; gid<nneighbors; gid++) { if (valid[gid]) { double delx = xi[(gid*3)+0] - xj[(gid*3)+0]; double dely = xi[(gid*3)+1] - xj[(gid*3)+1]; double delz = xi[(gid*3)+2] - xj[(gid*3)+2]; double rsq = delx*delx + dely*dely + delz*delz; double radsum = radiusi[gid] + radiusj[gid]; filter[gid] = (rsq < radsum*radsum) ? 1 : 0; filtermiss[gid] = (rsq < radsum*radsum) ? 0 : 1; } } //emulate exclusive scan int *offset = new int[nneighbors]; int *offsetmiss = new int[nneighbors]; offset[0] = 0; offsetmiss[0] = 0; for (int i=1; i<nneighbors; i++) { offset[i] = offset[i-1] + filter[i-1]; offsetmiss[i] = offsetmiss[i-1] + filtermiss[i-1]; } //emuate mksubset kernel int *hit = new int[nneighbors]; int *miss = new int[nneighbors]; for (int gid=0; gid<nneighbors; gid++) { if (filter[gid]) { hit[offset[gid]] = gid; } else { miss[offsetmiss[gid]] = gid; } } //test int *gpu_filter = new int[nneighbors]; int *gpu_offset = new int[nneighbors]; int *gpu_hit = new int[nneighbors]; int *gpu_filtermiss = new int[nneighbors]; int *gpu_offsetmiss = new int[nneighbors]; int *gpu_miss = new int[nneighbors]; cudaMemcpy(gpu_filter, d_filter, KLEN(int,1), cudaMemcpyDeviceToHost); cudaMemcpy(gpu_offset, d_offset, KLEN(int,1), cudaMemcpyDeviceToHost); cudaMemcpy(gpu_hit, d_hit, KLEN(int,1), cudaMemcpyDeviceToHost); cudaMemcpy(gpu_filtermiss, d_filtermiss, KLEN(int,1), cudaMemcpyDeviceToHost); cudaMemcpy(gpu_offsetmiss, d_offsetmiss, KLEN(int,1), cudaMemcpyDeviceToHost); cudaMemcpy(gpu_miss, d_miss, KLEN(int,1), cudaMemcpyDeviceToHost); for (int i=0; i<nneighbors; i++) { assert(filter[i] == gpu_filter[i]); assert(offset[i] == gpu_offset[i]); assert(filtermiss[i] == gpu_filtermiss[i]); assert(offsetmiss[i] == gpu_offsetmiss[i]); } printf("nhit=%d expected=%d\n", nhit, offset[nneighbors-1]); assert(nhit == offset[nneighbors-1]); for (int i=0; i<nhit; i++) { if (hit[i] != gpu_hit[i]) { printf("ERROR hit[%d] = %d gpu_hit[] = %d\n", i, hit[i], gpu_hit[i]); } assert(hit[i] == gpu_hit[i]); assert(miss[i] == gpu_miss[i]); } delete[] valid; delete[] xi; delete[] xj; delete[] radiusi; delete[] radiusj; delete[] filter; delete[] offset; delete[] hit; delete[] filtermiss; delete[] offsetmiss; delete[] miss; } #endif per_iter[7].start(); 
anticompute<<<nmiss_grid_size, block_size>>>( nmiss, d_miss, d_nl->d_shear ); cudaThreadSynchronize(); double d7 = per_iter[7].stop_and_add_to_total(); per_iter_timings[7][run] = d7; no_cuda_error("compute"); per_iter[3].start(); #ifdef TRACE cudaPrintfInit(); #endif compute<<<nhit_grid_size, block_size>>>( nhit, d_hit, #ifdef TRACE d_nl->d_dati, d_nl->d_neighidx, #endif d_xi, d_xj, d_vi, d_vj, d_omegai, d_omegaj, d_radiusi, d_radiusj, d_massi, d_massj, d_typei, d_typej, //outputs d_fdelta, d_tdeltai, d_tdeltaj, d_nl->d_shear ); cudaThreadSynchronize(); double d3 = per_iter[3].stop_and_add_to_total(); per_iter_timings[3][run] = d3; no_cuda_error("compute"); #ifdef TRACE cudaPrintfDisplay(stdout, true); cudaPrintfEnd(); #endif per_iter[4].start(); collect<<<tpa_grid_size, block_size>>>( nparticles, d_fdelta, d_tdeltai, d_tdeltaj, d_nl->d_offset, d_nl->d_numneigh, #if HALFNL d_nl->d_tad, d_nl->d_ffo, d_nl->d_nel, #endif d_force, d_torque); cudaThreadSynchronize(); double d4 = per_iter[4].stop_and_add_to_total(); per_iter_timings[4][run] = d4; no_cuda_error("collect"); //offload data from device //(see note on shear history below) per_iter[5].start(); cudaMemcpy(force, d_force, NLEN(double,3), cudaMemcpyDeviceToHost); cudaMemcpy(torque, d_torque, NLEN(double,3), cudaMemcpyDeviceToHost); double d5 = per_iter[5].stop_and_add_to_total(); per_iter_timings[5][run] = d5; no_cuda_error("memcpy_results"); double dend = end_to_end.stop_and_add_to_total(); end_to_end_timings.push_back(dend); //NB: we assume that shear history is *not* required from the device //so this cost is not included in "memcpy_results" d_nl->unload_shear(nl->dpages); check_result(input, nl, force, torque, nl->firstdouble, /*threshold=*/0.5, /*verbose=*/false, /*die_on_flag=*/true); } delete[] force; delete[] torque; free_dev_structures(); no_cuda_error("free_dev_structures"); }
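run() builds the hit/miss subsets with a standard stream-compaction pattern: test writes a 0/1 flag per neighbor slot, thrust::exclusive_scan turns the flags into scatter offsets, and mksubset scatters the slot indices. nhit is then read from d_offset[nneighbors-1], which counts the hits among the first nneighbors-1 slots only; whether the final slot can ever be a hit depends on how the neighbor-list pages are padded, so that may be intentional. The standalone sketch below shows the conventional count (last offset plus last flag); it is an illustration, not code from this file.

#include <thrust/device_vector.h>
#include <thrust/scan.h>

// Sketch: exclusive scan of 0/1 flags yields the scatter offsets, and the
// last offset plus the last flag is the total number of set entries.
static int compacted_count(const thrust::device_vector<int>& flag,
                           thrust::device_vector<int>& offset)
{
    offset.resize(flag.size());
    thrust::exclusive_scan(flag.begin(), flag.end(), offset.begin());
    int n = (int)flag.size();
    return n ? (int)offset[n - 1] + (int)flag[n - 1] : 0;  // implicit device-to-host reads
}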
21056a9c3034c80b74630dc708a01db325f4c6a6.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <iostream> #include "hip/hip_runtime.h" #include <device_launch_parameters.h> #include <stdlib.h> using namespace std; //variables globales #define num_filas_RGB 32 // num filas matriz RGB #define num_columnas_RGB 32 // num columnas matriz RGB #define width_RGB 32 // width matriz RGB #define num_filas_filtro 3 // num filas matriz Filtro #define num_columnas_filtro 3 // num columnas matriz Filtro #define width_filtro 3 // width matriz Filtro #define tile_width 2 // Declaracin de funciones void crearMatriz(int matriz[num_filas_RGB][num_columnas_RGB]); void imprimir_matriz(int a[num_filas_RGB][num_columnas_RGB]); void imprimir_matriz_filtro(int a[num_filas_filtro][num_columnas_filtro]); // Suma matrices __global__ void sumaMatrices(int R[num_filas_RGB][num_columnas_RGB], int G[num_filas_RGB][num_columnas_RGB], int B[num_filas_RGB][num_columnas_RGB], int RGB[num_filas_RGB][num_columnas_RGB]) { int x = threadIdx.x; int y = threadIdx.y; //calculamos la fila y la columna int fila = blockIdx.y * tile_width + y; int columna = blockIdx.x * tile_width + x; //sumamos RGB[fila][columna] = R[fila][columna] + G[fila][columna] + B[fila][columna]; } // Matriz volteada 180 __global__ void matrizVolteada(int a[num_filas_filtro][num_columnas_filtro], int volteada[num_filas_filtro][num_columnas_filtro]) { int x = threadIdx.x; int y = threadIdx.y; //calculamos la fila y la columna int fila = blockIdx.y * tile_width + y; int columna = blockIdx.x * tile_width + x; //matriz volteada // [0][0] <--> [2][2] || [1][1] <--> [1][1] if (fila == columna) { volteada[(num_filas_filtro - 1 - fila)][(num_columnas_filtro - 1 - columna)] = a[fila][columna]; } // [0][2] <--> [2][0] else if (((fila == 0) && (columna == (num_columnas_filtro - 1))) || ((fila == (num_filas_filtro - 1)) && (columna == 0))) { volteada[columna][fila] = a[fila][columna]; } // [0][1] <--> [2][1] else if (columna == 1) { volteada[abs(fila - (num_filas_filtro - 1))][columna] = a[fila][columna]; } // [1][0] <--> [1][2] else if (fila == 1) { volteada[fila][abs(columna - (num_columnas_filtro - 1))] = a[fila][columna]; } } // Convolucion __global__ void convolucion(int a[num_filas_RGB][num_columnas_RGB], int b[num_filas_filtro][num_columnas_filtro], int c[num_filas_RGB][num_columnas_RGB]) { int x = threadIdx.x; int y = threadIdx.y; //calculamos la fila y la columna int fila = blockIdx.y * tile_width + y; int columna = blockIdx.x * tile_width + x; //primera fila if (fila == 0) { //primer elemento de la fila if (columna == 0) { c[fila][columna] = a[fila][columna] * b[1][1] + // a[0][0] * b[1][1] --> el elemento inicial a[fila][columna + 1] * b[1][2] + // a[0][1] * b[1][2] --> el elemento de la derecha a[fila + 1][columna] * b[2][1] + // a[1][0] * b[2][1] --> el elemento de abajo a[fila + 1][columna + 1] * b[2][2]; // a[1][1] * b[2][2] --> el elemento de abajo a la derecha } //ultimo elemento de la fila else if (columna == (num_columnas_RGB - 1)) { c[fila][columna] = a[fila][columna] * b[1][1] + // a[0][15] * b[1][1] --> el elemento inicial a[fila][columna - 1] * b[1][2] + // a[0][14] * b[1][2] --> el elemento de la izquierda a[fila + 1][columna] * b[2][1] + // a[1][15] * b[2][1] --> el elemento de abajo a[fila + 1][columna - 1] * b[2][2]; // a[1][14] * b[2][2] --> el elemento de abajo a la izquierda } // cualquier otra columna else { c[fila][columna] = a[fila][columna] * b[1][1] + // a[0][a] * b[1][1] --> el elemento inicial a[fila][columna + 1] * b[1][2] + // a[0][a + 1] * 
b[1][2] --> el elemento de la derecha a[fila][columna - 1] * b[1][2] + // a[0][a - 1] * b[1][2] --> el elemento de la izquierda a[fila + 1][columna] * b[2][1] + // a[1][a] * b[2][1] --> el elemento de abajo a[fila + 1][columna + 1] * b[2][2] + // a[1][a + 1] * b[2][2] --> el elemento de abajo a la derecha a[fila + 1][columna - 1] * b[2][2]; // a[1][a - 1] * b[2][2] --> el elemento de abajo a la izquierda } } //ultima fila else if (fila == (num_filas_RGB - 1)) { //primer elemento de la fila if (columna == 0) { c[fila][columna] = a[fila][columna] * b[1][1] + // a[15][0] * b[1][1] --> el elemento inicial a[fila][columna + 1] * b[1][2] + // a[15][1] * b[1][2] --> el elemento de la derecha a[fila - 1][columna] * b[2][1] + // a[14][0] * b[2][1] --> el elemento de arriba a[fila - 1][columna + 1] * b[2][2]; // a[14][1] * b[2][2] --> el elemento de arriba a la derecha } //ultimo elemento de la fila else if (columna == (num_columnas_RGB - 1)) { c[fila][columna] = a[fila][columna] * b[1][1] + // a[15][15] * b[1][1] --> el elemento inicial a[fila][columna - 1] * b[1][2] + // a[15][14] * b[1][2] --> el elemento de la izquierda a[fila - 1][columna] * b[2][1] + // a[14][15] * b[2][1] --> el elemento de arriba a[fila - 1][columna - 1] * b[2][2]; // a[14][14] * b[2][2] --> el elemento de arriba a la izquierda } // cualquier otra columna else { c[fila][columna] = a[fila][columna] * b[1][1] + // a[15][a] * b[1][1] --> el elemento inicial a[fila][columna + 1] * b[1][2] + // a[15][a + 1] * b[1][2] --> el elemento de la derecha a[fila][columna - 1] * b[1][2] + // a[15][a - 1] * b[1][2] --> el elemento de la izquierda a[fila - 1][columna] * b[2][1] + // a[14][a] * b[2][1] --> el elemento de arriba a[fila - 1][columna + 1] * b[2][2] + // a[14][a + 1] * b[2][2] --> el elemento de arriba a la derecha a[fila - 1][columna - 1] * b[2][2]; // a[14][a - 1] * b[2][2] --> el elemento de arriba a la izquierda } } //cualquier otra fila else { //primer elemento de la fila if (columna == 0) { c[fila][columna] = a[fila][columna] * b[1][1] + // a[x][a] * b[1][1] --> el elemento inicial a[fila][columna + 1] * b[1][2] + // a[x][a + 1] * b[1][2] --> el elemento de la derecha a[fila - 1][columna] * b[2][1] + // a[x - 1][a] * b[2][1] --> el elemento de arriba a[fila - 1][columna + 1] * b[2][2] + // a[x - 1][a + 1] * b[2][2] --> el elemento de arriba a la derecha a[fila + 1][columna] * b[2][1] + // a[x + 1][a] * b[2][1] --> el elemento de abajo a[fila + 1][columna + 1] * b[2][2]; // a[x + 1][a + 1] * b[2][2] --> el elemento de abajo a la derecha } //ultimo elemento de la fila else if (columna == (num_columnas_RGB - 1)) { c[fila][columna] = a[fila][columna] * b[1][1] + // a[x][a] * b[1][1] --> el elemento inicial a[fila][columna - 1] * b[1][2] + // a[x][a - 1] * b[1][2] --> el elemento de la izquierda a[fila - 1][columna] * b[2][1] + // a[x - 1][a] * b[2][1] --> el elemento de arriba a[fila - 1][columna - 1] * b[2][2] + // a[x - 1][a - 1] * b[2][2] --> el elemento de arriba a la izquierda a[fila + 1][columna] * b[2][1] + // a[x + 1][a] * b[2][1] --> el elemento de abajo a[fila + 1][columna - 1] * b[2][2]; // a[x + 1][a - 1] * b[2][2] --> el elemento de abajo a la izquierda } // cualquier otra columna else { c[fila][columna] = a[fila][columna] * b[1][1] + // a[x][a] * b[1][1] --> el elemento inicial a[fila][columna + 1] * b[1][2] + // a[x][a + 1] * b[1][2] --> el elemento de la derecha a[fila][columna - 1] * b[1][2] + // a[x][a - 1] * b[1][2] --> el elemento de la izquierda a[fila - 1][columna] * b[2][1] + // a[x - 1][a] * b[2][1] --> 
el elemento de arriba a[fila - 1][columna + 1] * b[2][2] + // a[x - 1][a + 1] * b[2][2] --> el elemento de arriba a la derecha a[fila - 1][columna - 1] * b[2][2] + // a[x - 1][a - 1] * b[2][2] --> el elemento de arriba a la izquierda a[fila + 1][columna] * b[2][1] + // a[x + 1][a] * b[2][1] --> el elemento de abajo a[fila + 1][columna + 1] * b[2][2] + // a[x + 1][a + 1] * b[2][2] --> el elemento de abajo a la derecha a[fila + 1][columna - 1] * b[2][2]; // a[x + 1][a - 1] * b[2][2] --> el elemento de abajo a la izquierda } } } //Main int main() { // ------------------------------------------------------------------ // 1 - Creamos 3 matrices para despues sumarlas y crear la matriz RGB // ------------------------------------------------------------------ int R[num_filas_RGB][num_columnas_RGB] = {}; // representa a Red crearMatriz(R); int G[num_filas_RGB][num_columnas_RGB] = {}; // representa a Green crearMatriz(G); int B[num_filas_RGB][num_columnas_RGB] = {}; // representa a Blue crearMatriz(B); int RGB[num_filas_RGB][num_columnas_RGB] = {}; // matriz RGB int(*r_)[width_RGB]; int(*g_)[width_RGB]; int(*b_)[width_RGB]; int(*rgb_)[width_RGB]; const int size_RGB = num_filas_RGB * num_columnas_RGB * sizeof(int); // Reservamos memoria para las copias de las matrices que pasaremos por memoria del device hipMalloc((void**)&r_, size_RGB); hipMalloc((void**)&g_, size_RGB); hipMalloc((void**)&b_, size_RGB); hipMalloc((void**)&rgb_, size_RGB); // Asignamos R, G, B en la memoria del device hipMemcpy(r_, R, size_RGB, hipMemcpyHostToDevice); hipMemcpy(g_, G, size_RGB, hipMemcpyHostToDevice); hipMemcpy(b_, B, size_RGB, hipMemcpyHostToDevice); hipMemcpy(rgb_, RGB, size_RGB, hipMemcpyHostToDevice); // Definimos un bloque bidimensional (coleccion de hilos) dim3 dimBlock(width_RGB, width_RGB); // Invocamos al Kernell sumaMatrices << < 1, dimBlock >> > (r_, g_, b_, rgb_); // Leemos RGB del device hipMemcpy(RGB, rgb_, size_RGB, hipMemcpyDeviceToHost); // Imprimimos la matriz a convolucionar cout << "Matriz a convolucionar: " << endl << endl; imprimir_matriz(RGB); // ------------------------------------------------------------------------ // 2 - Creamos la matriz FILTRO y despus conseguimos su matriz volteada // ------------------------------------------------------------------------ int Filtro[num_filas_filtro][num_columnas_filtro] = { {1,2,3},{4,5,6},{7,8,9} }; // { {0,1,0},{1,1,1},{0,1,0} }; int Filtro_volt[num_filas_filtro][num_columnas_filtro]; int(*filtro_)[width_filtro]; int(*filtro_volt_)[width_filtro]; const int size_filtro = num_filas_filtro * num_columnas_filtro * sizeof(int); // Reservamos memoria para las copias de las matrices que pasaremos por memoria del device hipMalloc((void**)&filtro_, size_filtro); hipMalloc((void**)&filtro_volt_, size_filtro); // Asignamos Filtro en la memoria del device hipMemcpy(filtro_, Filtro, size_filtro, hipMemcpyHostToDevice); hipMemcpy(filtro_volt_, Filtro_volt, size_filtro, hipMemcpyHostToDevice); // Definimos un bloque bidimensional (coleccion de hilos) dim3 dimBlock_filtro(width_filtro, width_filtro); //cantidad de hilos por bloque --> 9 hilos // Imprimimos la matriz filtro cout << "Matriz filtro inicial: " << endl << endl; imprimir_matriz_filtro(Filtro); // Invocamos al Kernell matrizVolteada << <1, dimBlock_filtro >> > (filtro_, filtro_volt_); // Leemos Filtro_volt del device hipMemcpy(Filtro_volt, filtro_volt_, size_filtro, hipMemcpyDeviceToHost); // Imprimimos la matriz filtro volteada cout << "Matriz filtro volteada: " << endl << endl; 
imprimir_matriz_filtro(Filtro_volt); // -------------------------------------------------------------------------------- // 3 - Realizamos la convolucin de la matriz tras haber creado la matriz resultado // -------------------------------------------------------------------------------- int Resultado[num_filas_RGB][num_columnas_RGB] = {}; int(*resultado_)[width_RGB]; // Reservamos memoria para la copia de la matriz que pasaremos por memoria del device hipMalloc((void**)&resultado_, size_RGB); // Asignamos Resultado, Filtro_volt, RGB en la memoria del device hipMemcpy(resultado_, Resultado, size_RGB, hipMemcpyHostToDevice); hipMemcpy(filtro_volt_, Filtro_volt, size_filtro, hipMemcpyHostToDevice); hipMemcpy(rgb_, RGB, size_RGB, hipMemcpyHostToDevice); // Invocamos al Kernell convolucion << <1, dimBlock >> > (rgb_, filtro_volt_, resultado_); // Leemos Resultado del device hipMemcpy(Resultado, resultado_, size_RGB, hipMemcpyDeviceToHost); // Imprimimos la matriz convolucionada cout << "Matriz convolucionada: " << endl << endl; imprimir_matriz(Resultado); // ----------------------------------------------------------------- // 4 - Liberamos memoria // ----------------------------------------------------------------- hipFree(r_); hipFree(g_); hipFree(b_); hipFree(rgb_); hipFree(filtro_); hipFree(filtro_volt_); return 0; } // Creamos la matriz void crearMatriz(int matriz[num_filas_RGB][num_columnas_RGB]) { int nums_random = 255; //el rango es [0,255] for (int i = 0; i < num_filas_RGB; i++) { for (int j = 0; j < num_columnas_RGB; j++) { matriz[i][j] = (rand() % nums_random) + 1; } } } // Imprimir matrices void imprimir_matriz(int a[num_filas_RGB][num_columnas_RGB]) { for (int i = 0; i < num_filas_RGB; i++) { for (int j = 0; j < num_columnas_RGB; j++) { if (j == 0) { cout << "{"; } cout << a[i][j]; if (j == (num_columnas_RGB - 1)) { cout << "}\n"; } else { cout << ", "; } } } cout << endl << endl; } // Imprimir matriz 3x3 void imprimir_matriz_filtro(int a[num_filas_filtro][num_columnas_filtro]) { for (int i = 0; i < num_filas_filtro; i++) { for (int j = 0; j < num_columnas_filtro; j++) { if (j == 0) { cout << "{"; } cout << a[i][j]; if (j == (num_columnas_filtro - 1)) { cout << "}\n"; } else { cout << ", "; } } } cout << endl << endl; }
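The convolucion kernel above reads only four filter taps (b[1][1], b[1][2], b[2][1], b[2][2]): one coefficient is shared by the left and right neighbours, one by the upper and lower neighbours, and one by all four diagonals, so it matches a full 3x3 convolution only for filters with that symmetry (for example the commented-out { {0,1,0},{1,1,1},{0,1,0} } cross). For comparison, a generic 3x3 pass with zero-padded borders is sketched below; it is an illustration only, it flattens the matrices to plain pointers, and, like the host code above, it assumes b already holds the 180-degree-flipped filter.

// Hypothetical generic kernel (not the one used above):
__global__ void convolucion_3x3_generic(const int* a, const int* b, int* c,
                                        int width, int height)
{
    int columna = blockIdx.x * blockDim.x + threadIdx.x;
    int fila    = blockIdx.y * blockDim.y + threadIdx.y;
    if (fila >= height || columna >= width) return;

    int acc = 0;
    for (int dy = -1; dy <= 1; ++dy) {
        for (int dx = -1; dx <= 1; ++dx) {
            int f = fila + dy;
            int col = columna + dx;
            if (f < 0 || f >= height || col < 0 || col >= width) continue;  // zero padding at the border
            acc += a[f * width + col] * b[(dy + 1) * 3 + (dx + 1)];
        }
    }
    c[fila * width + columna] = acc;
}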
21056a9c3034c80b74630dc708a01db325f4c6a6.cu
#include <stdio.h>
#include <iostream>
#include "cuda_runtime.h"
#include <device_launch_parameters.h>
#include <stdlib.h>

using namespace std;

// Global constants
#define num_filas_RGB 32        // number of rows of the RGB matrix
#define num_columnas_RGB 32     // number of columns of the RGB matrix
#define width_RGB 32            // width of the RGB matrix
#define num_filas_filtro 3      // number of rows of the filter matrix
#define num_columnas_filtro 3   // number of columns of the filter matrix
#define width_filtro 3          // width of the filter matrix

// Function declarations
void crearMatriz(int matriz[num_filas_RGB][num_columnas_RGB]);
void imprimir_matriz(int a[num_filas_RGB][num_columnas_RGB]);
void imprimir_matriz_filtro(int a[num_filas_filtro][num_columnas_filtro]);

// Element-wise sum of the three colour planes into the RGB matrix
__global__ void sumaMatrices(int R[num_filas_RGB][num_columnas_RGB],
                             int G[num_filas_RGB][num_columnas_RGB],
                             int B[num_filas_RGB][num_columnas_RGB],
                             int RGB[num_filas_RGB][num_columnas_RGB])
{
    // Row and column handled by this thread
    int fila    = blockIdx.y * blockDim.y + threadIdx.y;
    int columna = blockIdx.x * blockDim.x + threadIdx.x;

    if (fila < num_filas_RGB && columna < num_columnas_RGB) {
        RGB[fila][columna] = R[fila][columna] + G[fila][columna] + B[fila][columna];
    }
}

// Filter rotated 180 degrees: element (i, j) moves to (N-1-i, N-1-j)
__global__ void matrizVolteada(int a[num_filas_filtro][num_columnas_filtro],
                               int volteada[num_filas_filtro][num_columnas_filtro])
{
    int fila    = blockIdx.y * blockDim.y + threadIdx.y;
    int columna = blockIdx.x * blockDim.x + threadIdx.x;

    if (fila < num_filas_filtro && columna < num_columnas_filtro) {
        volteada[num_filas_filtro - 1 - fila][num_columnas_filtro - 1 - columna] = a[fila][columna];
    }
}

// 3x3 convolution of a with the (already flipped) filter b.
// Each output element is the sum of its in-range neighbours weighted by the
// corresponding filter coefficient; neighbours that fall outside the matrix
// (first/last row and column) are simply skipped.
__global__ void convolucion(int a[num_filas_RGB][num_columnas_RGB],
                            int b[num_filas_filtro][num_columnas_filtro],
                            int c[num_filas_RGB][num_columnas_RGB])
{
    int fila    = blockIdx.y * blockDim.y + threadIdx.y;
    int columna = blockIdx.x * blockDim.x + threadIdx.x;

    if (fila >= num_filas_RGB || columna >= num_columnas_RGB) {
        return;
    }

    int suma = 0;
    for (int i = -1; i <= 1; i++) {
        for (int j = -1; j <= 1; j++) {
            int f   = fila + i;
            int col = columna + j;
            if (f >= 0 && f < num_filas_RGB && col >= 0 && col < num_columnas_RGB) {
                suma += a[f][col] * b[i + 1][j + 1];
            }
        }
    }
    c[fila][columna] = suma;
}

// Main
int main()
{
    // ------------------------------------------------------------------
    // 1 - Create three matrices and add them to build the RGB matrix
    // ------------------------------------------------------------------
    int R[num_filas_RGB][num_columnas_RGB] = {};    // red plane
    crearMatriz(R);
    int G[num_filas_RGB][num_columnas_RGB] = {};    // green plane
    crearMatriz(G);
    int B[num_filas_RGB][num_columnas_RGB] = {};    // blue plane
    crearMatriz(B);
    int RGB[num_filas_RGB][num_columnas_RGB] = {};  // RGB matrix

    int (*r_)[width_RGB];
    int (*g_)[width_RGB];
    int (*b_)[width_RGB];
    int (*rgb_)[width_RGB];
    const int size_RGB = num_filas_RGB * num_columnas_RGB * sizeof(int);

    // Allocate device memory for the copies of the matrices
    cudaMalloc((void**)&r_, size_RGB);
    cudaMalloc((void**)&g_, size_RGB);
    cudaMalloc((void**)&b_, size_RGB);
    cudaMalloc((void**)&rgb_, size_RGB);

    // Copy R, G and B to device memory
    cudaMemcpy(r_, R, size_RGB, cudaMemcpyHostToDevice);
    cudaMemcpy(g_, G, size_RGB, cudaMemcpyHostToDevice);
    cudaMemcpy(b_, B, size_RGB, cudaMemcpyHostToDevice);
    cudaMemcpy(rgb_, RGB, size_RGB, cudaMemcpyHostToDevice);

    // Two-dimensional block (collection of threads)
    dim3 dimBlock(width_RGB, width_RGB);

    // Launch the kernel
    sumaMatrices<<<1, dimBlock>>>(r_, g_, b_, rgb_);

    // Read RGB back from the device
    cudaMemcpy(RGB, rgb_, size_RGB, cudaMemcpyDeviceToHost);

    // Print the matrix to be convolved
    cout << "Matrix to convolve: " << endl << endl;
    imprimir_matriz(RGB);

    // ------------------------------------------------------------------------
    // 2 - Create the FILTER matrix and compute its 180-degree flipped version
    // ------------------------------------------------------------------------
    int Filtro[num_filas_filtro][num_columnas_filtro] = { {1,2,3},{4,5,6},{7,8,9} }; // e.g. { {0,1,0},{1,1,1},{0,1,0} };
    int Filtro_volt[num_filas_filtro][num_columnas_filtro] = {};

    int (*filtro_)[width_filtro];
    int (*filtro_volt_)[width_filtro];
    const int size_filtro = num_filas_filtro * num_columnas_filtro * sizeof(int);

    // Allocate device memory for the filter matrices
    cudaMalloc((void**)&filtro_, size_filtro);
    cudaMalloc((void**)&filtro_volt_, size_filtro);

    // Copy the filter to device memory
    cudaMemcpy(filtro_, Filtro, size_filtro, cudaMemcpyHostToDevice);
    cudaMemcpy(filtro_volt_, Filtro_volt, size_filtro, cudaMemcpyHostToDevice);

    // Two-dimensional block: 9 threads, one per filter element
    dim3 dimBlock_filtro(width_filtro, width_filtro);

    // Print the original filter
    cout << "Initial filter matrix: " << endl << endl;
    imprimir_matriz_filtro(Filtro);

    // Launch the kernel
    matrizVolteada<<<1, dimBlock_filtro>>>(filtro_, filtro_volt_);

    // Read Filtro_volt back from the device
    cudaMemcpy(Filtro_volt, filtro_volt_, size_filtro, cudaMemcpyDeviceToHost);

    // Print the flipped filter
    cout << "Flipped filter matrix: " << endl << endl;
    imprimir_matriz_filtro(Filtro_volt);

    // --------------------------------------------------------------------------------
    // 3 - Perform the convolution after creating the result matrix
    // --------------------------------------------------------------------------------
    int Resultado[num_filas_RGB][num_columnas_RGB] = {};
    int (*resultado_)[width_RGB];

    // Allocate device memory for the result matrix
    cudaMalloc((void**)&resultado_, size_RGB);

    // Copy Resultado, Filtro_volt and RGB to device memory
    cudaMemcpy(resultado_, Resultado, size_RGB, cudaMemcpyHostToDevice);
    cudaMemcpy(filtro_volt_, Filtro_volt, size_filtro, cudaMemcpyHostToDevice);
    cudaMemcpy(rgb_, RGB, size_RGB, cudaMemcpyHostToDevice);

    // Launch the kernel
    convolucion<<<1, dimBlock>>>(rgb_, filtro_volt_, resultado_);

    // Read Resultado back from the device
    cudaMemcpy(Resultado, resultado_, size_RGB, cudaMemcpyDeviceToHost);

    // Print the convolved matrix
    cout << "Convolved matrix: " << endl << endl;
    imprimir_matriz(Resultado);

    // -----------------------------------------------------------------
    // 4 - Free device memory
    // -----------------------------------------------------------------
    cudaFree(r_);
    cudaFree(g_);
    cudaFree(b_);
    cudaFree(rgb_);
    cudaFree(filtro_);
    cudaFree(filtro_volt_);
    cudaFree(resultado_);

    return 0;
}

// Fill a matrix with random values
void crearMatriz(int matriz[num_filas_RGB][num_columnas_RGB])
{
    int nums_random = 255;  // values fall in [1, 255]
    for (int i = 0; i < num_filas_RGB; i++) {
        for (int j = 0; j < num_columnas_RGB; j++) {
            matriz[i][j] = (rand() % nums_random) + 1;
        }
    }
}

// Print an RGB-sized matrix
void imprimir_matriz(int a[num_filas_RGB][num_columnas_RGB])
{
    for (int i = 0; i < num_filas_RGB; i++) {
        for (int j = 0; j < num_columnas_RGB; j++) {
            if (j == 0) {
                cout << "{";
            }
            cout << a[i][j];
            if (j == (num_columnas_RGB - 1)) {
                cout << "}\n";
            } else {
                cout << ", ";
            }
        }
    }
    cout << endl << endl;
}

// Print a 3x3 matrix
void imprimir_matriz_filtro(int a[num_filas_filtro][num_columnas_filtro])
{
    for (int i = 0; i < num_filas_filtro; i++) {
        for (int j = 0; j < num_columnas_filtro; j++) {
            if (j == 0) {
                cout << "{";
            }
            cout << a[i][j];
            if (j == (num_columnas_filtro - 1)) {
                cout << "}\n";
            } else {
                cout << ", ";
            }
        }
    }
    cout << endl << endl;
}
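// --------------------------------------------------------------------------------
// Optional host-side check (not part of the original program): a minimal CPU
// reference of the same 3x3 convolution, assuming the same border handling
// (out-of-range neighbours are skipped). The helper name convolucion_cpu and
// the suggested usage are illustrative only.
// --------------------------------------------------------------------------------
void convolucion_cpu(const int a[num_filas_RGB][num_columnas_RGB],
                     const int b[num_filas_filtro][num_columnas_filtro],
                     int c[num_filas_RGB][num_columnas_RGB])
{
    for (int fila = 0; fila < num_filas_RGB; fila++) {
        for (int columna = 0; columna < num_columnas_RGB; columna++) {
            int suma = 0;
            for (int i = -1; i <= 1; i++) {
                for (int j = -1; j <= 1; j++) {
                    int f   = fila + i;
                    int col = columna + j;
                    // Skip neighbours outside the matrix, as the kernel does
                    if (f >= 0 && f < num_filas_RGB && col >= 0 && col < num_columnas_RGB) {
                        suma += a[f][col] * b[i + 1][j + 1];
                    }
                }
            }
            c[fila][columna] = suma;
        }
    }
}
// Example use: after copying Resultado back from the device, call
// convolucion_cpu(RGB, Filtro_volt, Esperado) and compare Esperado with
// Resultado element by element.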
97a9636ece20fcfe065e74a3da49e2d31aa38b2e.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "dot.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *val = NULL;
            hipMalloc(&val, XSIZE*YSIZE);
            int *row_ind = NULL;
            hipMalloc(&row_ind, XSIZE*YSIZE);
            int *col_ind = NULL;
            hipMalloc(&col_ind, XSIZE*YSIZE);
            int nnz = 1;
            float *ret = NULL;
            hipMalloc(&ret, XSIZE*YSIZE);
            float *w = NULL;
            hipMalloc(&w, XSIZE*YSIZE);
            // Round the problem size up to a multiple of the block dimensions
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(dot, dim3(gridBlock), dim3(threadBlock), 0, 0, val, row_ind, col_ind, nnz, ret, w);
            hipDeviceSynchronize();
            // Warm-up launches
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(dot, dim3(gridBlock), dim3(threadBlock), 0, 0, val, row_ind, col_ind, nnz, ret, w);
            }
            // Timed launches (no synchronization before reading the clock, so this
            // measures launch/queueing time rather than kernel execution time)
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(dot, dim3(gridBlock), dim3(threadBlock), 0, 0, val, row_ind, col_ind, nnz, ret, w);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
97a9636ece20fcfe065e74a3da49e2d31aa38b2e.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "dot.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *val = NULL;
            cudaMalloc(&val, XSIZE*YSIZE);
            int *row_ind = NULL;
            cudaMalloc(&row_ind, XSIZE*YSIZE);
            int *col_ind = NULL;
            cudaMalloc(&col_ind, XSIZE*YSIZE);
            int nnz = 1;
            float *ret = NULL;
            cudaMalloc(&ret, XSIZE*YSIZE);
            float *w = NULL;
            cudaMalloc(&w, XSIZE*YSIZE);
            // Round the problem size up to a multiple of the block dimensions
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            dot<<<gridBlock, threadBlock>>>(val, row_ind, col_ind, nnz, ret, w);
            cudaDeviceSynchronize();
            // Warm-up launches
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                dot<<<gridBlock, threadBlock>>>(val, row_ind, col_ind, nnz, ret, w);
            }
            // Timed launches (no synchronization before reading the clock, so this
            // measures launch/queueing time rather than kernel execution time)
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                dot<<<gridBlock, threadBlock>>>(val, row_ind, col_ind, nnz, ret, w);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
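// ------------------------------------------------------------------------------
// Note: "dot.cu" itself is not reproduced in this dump. Based only on the call
// dot<<<gridBlock, threadBlock>>>(val, row_ind, col_ind, nnz, ret, w), a kernel
// consistent with that signature could look like the sketch below (a COO-style
// sparse accumulation of val[i] * w[col_ind[i]] into ret[row_ind[i]]). This is
// an assumption for illustration, not the actual contents of dot.cu; the name
// dot_sketch is hypothetical.
// ------------------------------------------------------------------------------
__global__ void dot_sketch(const float *val, const int *row_ind, const int *col_ind,
                           int nnz, float *ret, const float *w)
{
    // One thread per non-zero entry; extra threads fall through the bounds check.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < nnz) {
        // Accumulate this non-zero's contribution into its row of the result.
        atomicAdd(&ret[row_ind[i]], val[i] * w[col_ind[i]]);
    }
}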